diff --git a/.gitignore b/.gitignore index 41ca801d0..d3ffa03e0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /target Cargo.lock *.pcap +sim_logs/ \ No newline at end of file diff --git a/build.rs b/build.rs index 908186137..fe6881d99 100644 --- a/build.rs +++ b/build.rs @@ -11,6 +11,7 @@ static CONFIGS: &[(&str, usize)] = &[ ("IFACE_MAX_SIXLOWPAN_ADDRESS_CONTEXT_COUNT", 4), ("IFACE_NEIGHBOR_CACHE_COUNT", 4), ("IFACE_MAX_ROUTE_COUNT", 2), + ("IFACE_MAX_MULTICAST_DUPLICATION_COUNT", 16), ("FRAGMENTATION_BUFFER_SIZE", 1500), ("ASSEMBLER_MAX_SEGMENT_COUNT", 4), ("REASSEMBLY_BUFFER_SIZE", 1500), @@ -22,6 +23,7 @@ static CONFIGS: &[(&str, usize)] = &[ ("RPL_RELATIONS_BUFFER_COUNT", 16), ("RPL_PARENTS_BUFFER_COUNT", 8), ("RPL_MAX_OPTIONS", 2), + ("RPL_MAX_NEXT_HOP_PER_DESTINATION", 4), // END AUTOGENERATED CONFIG FEATURES ]; diff --git a/examples/benchmark.rs b/examples/benchmark.rs index ad2c6e142..c122c3aff 100644 --- a/examples/benchmark.rs +++ b/examples/benchmark.rs @@ -12,6 +12,7 @@ use std::thread; use smoltcp::iface::{Config, Interface, SocketSet}; use smoltcp::phy::{wait as phy_wait, Device, Medium}; use smoltcp::socket::tcp; +use smoltcp::storage::PacketMetadata; use smoltcp::time::{Duration, Instant}; use smoltcp::wire::{EthernetAddress, IpAddress, IpCidr}; @@ -97,7 +98,13 @@ fn main() { }; config.random_seed = rand::random(); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new(IpAddress::v4(192, 168, 69, 1), 24)) diff --git a/examples/client.rs b/examples/client.rs index 17b21ff06..7f085ff1a 100644 --- a/examples/client.rs +++ b/examples/client.rs @@ -1,6 +1,7 @@ mod utils; use log::debug; +use smoltcp::storage::PacketMetadata; use std::os::unix::io::AsRawFd; use std::str::{self, FromStr}; @@ -38,7 +39,13 @@ fn main() { }; config.random_seed = rand::random(); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new(IpAddress::v4(192, 168, 69, 1), 24)) diff --git a/examples/dhcp_client.rs b/examples/dhcp_client.rs index 348e9676c..46711846d 100644 --- a/examples/dhcp_client.rs +++ b/examples/dhcp_client.rs @@ -2,6 +2,7 @@ mod utils; use log::*; +use smoltcp::storage::PacketMetadata; use std::os::unix::io::AsRawFd; use smoltcp::iface::{Config, Interface, SocketSet}; @@ -36,7 +37,13 @@ fn main() { Medium::Ieee802154 => todo!(), }; config.random_seed = rand::random(); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); // Create sockets let mut dhcp_socket = dhcpv4::Socket::new(); diff --git a/examples/dns.rs b/examples/dns.rs index 41789fa32..4a003d04f 100644 --- a/examples/dns.rs +++ b/examples/dns.rs @@ -4,6 +4,7 @@ use smoltcp::iface::{Config, Interface, SocketSet}; use smoltcp::phy::Device; use smoltcp::phy::{wait as phy_wait, Medium}; use smoltcp::socket::dns::{self, GetQueryResultError}; +use smoltcp::storage::PacketMetadata; use smoltcp::time::Instant; use smoltcp::wire::{DnsQueryType, EthernetAddress, IpAddress, IpCidr, Ipv4Address, Ipv6Address}; use std::os::unix::io::AsRawFd; @@ -33,7 +34,13 @@ fn 
main() { }; config.random_seed = rand::random(); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new(IpAddress::v4(192, 168, 69, 1), 24)) diff --git a/examples/httpclient.rs b/examples/httpclient.rs index c55eed7b9..f8ca2f9e8 100644 --- a/examples/httpclient.rs +++ b/examples/httpclient.rs @@ -1,6 +1,7 @@ mod utils; use log::debug; +use smoltcp::storage::PacketMetadata; use std::os::unix::io::AsRawFd; use std::str::{self, FromStr}; use url::Url; @@ -38,7 +39,13 @@ fn main() { }; config.random_seed = rand::random(); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new(IpAddress::v4(192, 168, 69, 1), 24)) diff --git a/examples/loopback.rs b/examples/loopback.rs index 74fcffb34..393783da4 100644 --- a/examples/loopback.rs +++ b/examples/loopback.rs @@ -12,6 +12,7 @@ use log::{debug, error, info}; use smoltcp::iface::{Config, Interface, SocketSet}; use smoltcp::phy::{Device, Loopback, Medium}; use smoltcp::socket::tcp; +use smoltcp::storage::PacketMetadata; use smoltcp::time::{Duration, Instant}; use smoltcp::wire::{EthernetAddress, IpAddress, IpCidr}; @@ -91,7 +92,17 @@ fn main() { Medium::Ieee802154 => todo!(), }; - let mut iface = Interface::new(config, &mut device, Instant::now()); + // Setup multicast queues + let mut metadata = [PacketMetadata::EMPTY; 16]; + let mut multicast_packets = [0; 2048]; + + let mut iface = Interface::new( + config, + &mut device, + &mut metadata[..], + &mut multicast_packets[..], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new(IpAddress::v4(127, 0, 0, 1), 8)) diff --git a/examples/multicast.rs b/examples/multicast.rs index ea89a2e93..4bc6816c2 100644 --- a/examples/multicast.rs +++ b/examples/multicast.rs @@ -5,6 +5,7 @@ use std::os::unix::io::AsRawFd; use smoltcp::iface::{Config, Interface, SocketSet}; use smoltcp::phy::{wait as phy_wait, Device, Medium}; use smoltcp::socket::{raw, udp}; +use smoltcp::storage::PacketMetadata; use smoltcp::time::Instant; use smoltcp::wire::{ EthernetAddress, IgmpPacket, IgmpRepr, IpAddress, IpCidr, IpProtocol, IpVersion, Ipv4Address, @@ -37,7 +38,13 @@ fn main() { }; config.random_seed = rand::random(); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new(IpAddress::v4(192, 168, 69, 1), 24)) diff --git a/examples/ping.rs b/examples/ping.rs index 29c6bcd4c..a8dbd3ad3 100644 --- a/examples/ping.rs +++ b/examples/ping.rs @@ -2,6 +2,7 @@ mod utils; use byteorder::{ByteOrder, NetworkEndian}; use smoltcp::iface::{Interface, SocketSet}; +use smoltcp::storage::PacketMetadata; use std::cmp; use std::collections::HashMap; use std::os::unix::io::AsRawFd; @@ -114,7 +115,13 @@ fn main() { }; config.random_seed = rand::random(); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs 
.push(IpCidr::new(IpAddress::v4(192, 168, 69, 1), 24)) diff --git a/examples/server.rs b/examples/server.rs index 33d95c5d5..8754ee1ee 100644 --- a/examples/server.rs +++ b/examples/server.rs @@ -1,6 +1,7 @@ mod utils; use log::debug; +use smoltcp::storage::PacketMetadata; use std::fmt::Write; use std::os::unix::io::AsRawFd; @@ -34,7 +35,13 @@ fn main() { config.random_seed = rand::random(); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new(IpAddress::v4(192, 168, 69, 1), 24)) diff --git a/examples/sixlowpan.rs b/examples/sixlowpan.rs index b7f74a340..44b7b8f39 100644 --- a/examples/sixlowpan.rs +++ b/examples/sixlowpan.rs @@ -43,6 +43,7 @@ mod utils; use log::debug; +use smoltcp::storage::PacketMetadata; use std::os::unix::io::AsRawFd; use std::str; @@ -83,7 +84,13 @@ fn main() { config.rpl_config = None; } - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new( diff --git a/examples/sixlowpan_benchmark.rs b/examples/sixlowpan_benchmark.rs index 4e61491fe..cba50c25c 100644 --- a/examples/sixlowpan_benchmark.rs +++ b/examples/sixlowpan_benchmark.rs @@ -49,6 +49,7 @@ use std::str; use smoltcp::iface::{Config, Interface, SocketSet}; use smoltcp::phy::{wait as phy_wait, Device, Medium, RawSocket}; use smoltcp::socket::tcp; +use smoltcp::storage::PacketMetadata; use smoltcp::wire::{EthernetAddress, Ieee802154Address, Ieee802154Pan, IpAddress, IpCidr}; //For benchmark @@ -159,7 +160,13 @@ fn main() { config.random_seed = rand::random(); config.pan_id = Some(Ieee802154Pan(0xbeef)); - let mut iface = Interface::new(config, &mut device, Instant::now()); + let mut iface = Interface::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::now(), + ); iface.update_ip_addrs(|ip_addrs| { ip_addrs .push(IpCidr::new( diff --git a/src/iface/interface/ethernet.rs b/src/iface/interface/ethernet.rs index 4d29faa11..50d14b1c9 100644 --- a/src/iface/interface/ethernet.rs +++ b/src/iface/interface/ethernet.rs @@ -7,6 +7,7 @@ impl InterfaceInner { meta: crate::phy::PacketMeta, frame: &'frame [u8], fragments: &'frame mut FragmentsBuffer, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, ) -> Option> { let eth_frame = check!(EthernetFrame::new_checked(frame)); @@ -31,8 +32,14 @@ impl InterfaceInner { #[cfg(feature = "proto-ipv6")] EthernetProtocol::Ipv6 => { let ipv6_packet = check!(Ipv6Packet::new_checked(eth_frame.payload())); - self.process_ipv6(sockets, meta, &ipv6_packet) - .map(EthernetPacket::Ip) + self.process_ipv6( + sockets, + meta, + &ipv6_packet, + Some(ð_frame.src_addr().into()), + multicast_queue, + ) + .map(EthernetPacket::Ip) } // Drop all other traffic. 
_ => None, diff --git a/src/iface/interface/ieee802154.rs b/src/iface/interface/ieee802154.rs index 8d7a01ba1..a616ed5a5 100644 --- a/src/iface/interface/ieee802154.rs +++ b/src/iface/interface/ieee802154.rs @@ -15,6 +15,7 @@ impl InterfaceInner { meta: PacketMeta, sixlowpan_payload: &'payload [u8], _fragments: &'output mut FragmentsBuffer, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, ) -> Option> { let ieee802154_frame = check!(Ieee802154Frame::new_checked(sixlowpan_payload)); @@ -41,9 +42,14 @@ impl InterfaceInner { self.current_frame = Some(ieee802154_repr); match ieee802154_frame.payload() { - Some(payload) => { - self.process_sixlowpan(sockets, meta, &ieee802154_repr, payload, _fragments) - } + Some(payload) => self.process_sixlowpan( + sockets, + meta, + &ieee802154_repr, + payload, + _fragments, + multicast_queue, + ), None => None, } } diff --git a/src/iface/interface/igmp.rs b/src/iface/interface/igmp.rs index 7d339b2a5..137ce5f41 100644 --- a/src/iface/interface/igmp.rs +++ b/src/iface/interface/igmp.rs @@ -1,126 +1,6 @@ use super::*; -/// Error type for `join_multicast_group`, `leave_multicast_group`. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -pub enum MulticastError { - /// The hardware device transmit buffer is full. Try again later. - Exhausted, - /// The table of joined multicast groups is already full. - GroupTableFull, - /// IPv6 multicast is not yet supported. - Ipv6NotSupported, -} - -impl core::fmt::Display for MulticastError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - match self { - MulticastError::Exhausted => write!(f, "Exhausted"), - MulticastError::GroupTableFull => write!(f, "GroupTableFull"), - MulticastError::Ipv6NotSupported => write!(f, "Ipv6NotSupported"), - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for MulticastError {} - -impl Interface { - /// Add an address to a list of subscribed multicast IP addresses. - /// - /// Returns `Ok(announce_sent)` if the address was added successfully, where `announce_sent` - /// indicates whether an initial immediate announcement has been sent. - pub fn join_multicast_group>( - &mut self, - device: &mut D, - addr: T, - timestamp: Instant, - ) -> Result - where - D: Device + ?Sized, - { - self.inner.now = timestamp; - - match addr.into() { - IpAddress::Ipv4(addr) => { - let is_not_new = self - .inner - .ipv4_multicast_groups - .insert(addr, ()) - .map_err(|_| MulticastError::GroupTableFull)? - .is_some(); - if is_not_new { - Ok(false) - } else if let Some(pkt) = self.inner.igmp_report_packet(IgmpVersion::Version2, addr) - { - // Send initial membership report - let tx_token = device - .transmit(timestamp) - .ok_or(MulticastError::Exhausted)?; - - // NOTE(unwrap): packet destination is multicast, which is always routable and doesn't require neighbor discovery. - self.inner - .dispatch_ip(tx_token, PacketMeta::default(), pkt, &mut self.fragmenter) - .unwrap(); - - Ok(true) - } else { - Ok(false) - } - } - // Multicast is not yet implemented for other address families - #[allow(unreachable_patterns)] - _ => Err(MulticastError::Ipv6NotSupported), - } - } - - /// Remove an address from the subscribed multicast IP addresses. - /// - /// Returns `Ok(leave_sent)` if the address was removed successfully, where `leave_sent` - /// indicates whether an immediate leave packet has been sent. 
- pub fn leave_multicast_group>( - &mut self, - device: &mut D, - addr: T, - timestamp: Instant, - ) -> Result - where - D: Device + ?Sized, - { - self.inner.now = timestamp; - - match addr.into() { - IpAddress::Ipv4(addr) => { - let was_not_present = self.inner.ipv4_multicast_groups.remove(&addr).is_none(); - if was_not_present { - Ok(false) - } else if let Some(pkt) = self.inner.igmp_leave_packet(addr) { - // Send group leave packet - let tx_token = device - .transmit(timestamp) - .ok_or(MulticastError::Exhausted)?; - - // NOTE(unwrap): packet destination is multicast, which is always routable and doesn't require neighbor discovery. - self.inner - .dispatch_ip(tx_token, PacketMeta::default(), pkt, &mut self.fragmenter) - .unwrap(); - - Ok(true) - } else { - Ok(false) - } - } - // Multicast is not yet implemented for other address families - #[allow(unreachable_patterns)] - _ => Err(MulticastError::Ipv6NotSupported), - } - } - - /// Check whether the interface listens to given destination multicast IP address. - pub fn has_multicast_group>(&self, addr: T) -> bool { - self.inner.has_multicast_group(addr) - } - +impl Interface<'_> { /// Depending on `igmp_report_state` and the therein contained /// timeouts, send IGMP membership reports. pub(crate) fn igmp_egress(&mut self, device: &mut D) -> bool @@ -138,7 +18,14 @@ impl Interface { if let Some(tx_token) = device.transmit(self.inner.now) { // NOTE(unwrap): packet destination is multicast, which is always routable and doesn't require neighbor discovery. self.inner - .dispatch_ip(tx_token, PacketMeta::default(), pkt, &mut self.fragmenter) + .dispatch_ip( + tx_token, + PacketMeta::default(), + pkt, + None, + &mut self.fragmenter, + &mut self.multicast_queue, + ) .unwrap(); } else { return false; @@ -172,7 +59,9 @@ impl Interface { tx_token, PacketMeta::default(), pkt, + None, &mut self.fragmenter, + &mut self.multicast_queue, ) .unwrap(); } else { diff --git a/src/iface/interface/ipv4.rs b/src/iface/interface/ipv4.rs index 3a5a864ee..0892be17f 100644 --- a/src/iface/interface/ipv4.rs +++ b/src/iface/interface/ipv4.rs @@ -1,6 +1,6 @@ use super::*; -impl Interface { +impl Interface<'_> { /// Process fragments that still need to be sent for IPv4 packets. /// /// This function returns a boolean value indicating whether any packets were diff --git a/src/iface/interface/ipv6.rs b/src/iface/interface/ipv6.rs index 9238b6abf..b2a057d33 100644 --- a/src/iface/interface/ipv6.rs +++ b/src/iface/interface/ipv6.rs @@ -166,7 +166,9 @@ impl InterfaceInner { IpCidr::Ipv6(cidr) if cidr.address() != Ipv6Address::LOOPBACK => { // Take the lower order 24 bits of the IPv6 address and // append those bits to FF02:0:0:0:0:1:FF00::/104. - addr.as_bytes()[14..] == cidr.address().as_bytes()[14..] + addr.as_bytes()[..14] + == Ipv6Address::new(0xFF02, 0, 0, 0, 0, 1, 0xFF00, 0).as_bytes()[..14] + && addr.as_bytes()[14..] == cidr.address().as_bytes()[14..] 
} _ => false, } @@ -187,6 +189,8 @@ impl InterfaceInner { sockets: &mut SocketSet, meta: PacketMeta, ipv6_packet: &Ipv6Packet<&'frame [u8]>, + previous_hop: Option<&HardwareAddress>, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, ) -> Option> { let mut ipv6_repr = check!(Ipv6Repr::parse(ipv6_packet)); @@ -207,6 +211,7 @@ impl InterfaceInner { (None, ipv6_repr.next_header, ipv6_packet.payload()) }; + // Forward if not for us if !self.has_ip_addr(ipv6_repr.dst_addr) && !self.has_multicast_group(ipv6_repr.dst_addr) && !ipv6_repr.dst_addr.is_loopback() @@ -227,6 +232,50 @@ impl InterfaceInner { } } + // Disallow list of forwardable multicast packets + let should_forward_multicast = match ipv6_repr.dst_addr.into() { + #[cfg(feature = "proto-ipv6")] + IpAddress::Ipv6(Ipv6Address::LINK_LOCAL_ALL_NODES) => false, + #[cfg(feature = "proto-rpl")] + IpAddress::Ipv6(Ipv6Address::LINK_LOCAL_ALL_RPL_NODES) => false, + _ => true, + }; + // if for us and multicast, process further and schedule forwarding + if should_forward_multicast && ipv6_repr.dst_addr.is_multicast() { + // Construct forwarding packet if possible + let forwarding_packet = self.forward(ipv6_repr, hbh, None, ip_payload); + // Lookup hardware addresses to which we would like to forward the multicast packet + let haddrs = + self.lookup_hardware_addr_multicast(&ipv6_repr.dst_addr.into(), previous_hop); + + // Schedule forwarding and process further if possible + match (&forwarding_packet, haddrs) { + #[cfg(feature = "proto-ipv6")] + (Some(Packet::Ipv6(forwarding_packet)), Ok(haddrs)) => { + if !haddrs.is_empty() { + let _ = self + .schedule_multicast_packet( + meta, + forwarding_packet, + haddrs, + multicast_queue, + ) + .inspect_err(|err| { + net_trace!( + "Could not schedule multicast packets with reason {:?}", + err + ); + }); + } + } + #[cfg(feature = "proto-ipv4")] + (Some(Packet::Ipv4(_)), Ok(_haddrs)) => { + unimplemented!() + } + _ => {} + } + } + #[cfg(feature = "socket-raw")] let handled_by_raw_socket = self.raw_socket_filter(sockets, &ipv6_repr.into(), ip_payload); #[cfg(not(feature = "socket-raw"))] diff --git a/src/iface/interface/mod.rs b/src/iface/interface/mod.rs index 926e6715c..31d668833 100644 --- a/src/iface/interface/mod.rs +++ b/src/iface/interface/mod.rs @@ -27,13 +27,14 @@ mod tcp; #[cfg(any(feature = "socket-udp", feature = "socket-dns"))] mod udp; -#[cfg(feature = "proto-igmp")] -pub use igmp::MulticastError; +mod multicast; +use super::multicast::MulticastMetadata; use super::packet::*; use core::result::Result; use heapless::{LinearMap, Vec}; +use managed::ManagedSlice; #[cfg(feature = "_proto-fragmentation")] use super::fragmentation::FragKey; @@ -47,7 +48,7 @@ use super::socket_set::SocketSet; #[cfg(feature = "proto-rpl")] use super::RplConfig; use crate::config::{ - IFACE_MAX_ADDR_COUNT, IFACE_MAX_MULTICAST_GROUP_COUNT, + IFACE_MAX_ADDR_COUNT, IFACE_MAX_MULTICAST_DUPLICATION_COUNT, IFACE_MAX_MULTICAST_GROUP_COUNT, IFACE_MAX_SIXLOWPAN_ADDRESS_CONTEXT_COUNT, }; use crate::iface::Routes; @@ -55,6 +56,7 @@ use crate::phy::PacketMeta; use crate::phy::{ChecksumCapabilities, Device, DeviceCapabilities, Medium, RxToken, TxToken}; use crate::rand::Rand; use crate::socket::*; +use crate::storage::{PacketBuffer, PacketMetadata}; use crate::time::{Duration, Instant}; use crate::wire::*; @@ -81,10 +83,11 @@ use check; /// The network interface logically owns a number of other data structures; to avoid /// a dependency on heap allocation, it instead owns a `BorrowMut<[T]>`, which can be /// a `&mut [T]`, or 
`Vec` if a heap is available. -pub struct Interface { +pub struct Interface<'a> { pub(crate) inner: InterfaceInner, fragments: FragmentsBuffer, fragmenter: Fragmenter, + multicast_queue: PacketBuffer<'a, MulticastMetadata>, } /// The device independent part of an Ethernet network interface. @@ -127,6 +130,8 @@ pub struct InterfaceInner { #[cfg(feature = "proto-rpl")] rpl: super::Rpl, + #[cfg(feature = "rpl-mop-3")] + rpl_targets_multicast: heapless::Vec, } /// Configuration structure used for creating a network interface. @@ -169,15 +174,23 @@ impl Config { } } -impl Interface { +impl<'a> Interface<'a> { /// Create a network interface using the previously provided configuration. /// /// # Panics /// This function panics if the [`Config::hardware_address`] does not match /// the medium of the device. - pub fn new(config: Config, device: &mut D, now: Instant) -> Self + pub fn new( + config: Config, + device: &mut D, + metadata_storage: MS, + payload_storage: PS, + now: Instant, + ) -> Self where D: Device + ?Sized, + MS: Into>>, + PS: Into>, { let caps = device.capabilities(); assert_eq!( @@ -231,6 +244,8 @@ impl Interface { reassembly_timeout: Duration::from_secs(60), }, fragmenter: Fragmenter::new(), + multicast_queue: PacketBuffer::new(metadata_storage, payload_storage), + inner: InterfaceInner { now, caps, @@ -260,7 +275,9 @@ impl Interface { current_frame: None, #[cfg(feature = "proto-rpl")] - rpl: super::Rpl::new(config.rpl_config.unwrap(), now), + rpl: super::Rpl::new(config.rpl_config.unwrap_or_default(), now), + #[cfg(feature = "rpl-mop-3")] + rpl_targets_multicast: Default::default(), }, } } @@ -422,6 +439,183 @@ impl Interface { self.fragments.reassembly_timeout = timeout; } + /// Add an address to a list of subscribed multicast IP addresses. + /// + /// Returns `Ok(announce_sent)` if the address was added successfully, where `announce_sent` + /// indicates whether an initial immediate announcement has been sent. + pub fn join_multicast_group>( + &mut self, + device: &mut D, + addr: T, + timestamp: Instant, + ) -> Result + where + D: Device + ?Sized, + { + self.inner.now = timestamp; + + match addr.into() { + #[cfg(all(feature = "proto-ipv4", feature = "proto-igmp"))] + IpAddress::Ipv4(addr) => { + let is_not_new = self + .inner + .ipv4_multicast_groups + .insert(addr, ()) + .map_err(|_| MulticastError::GroupTableFull)? + .is_some(); + if is_not_new { + Ok(false) + } else if let Some(pkt) = self.inner.igmp_report_packet(IgmpVersion::Version2, addr) + { + // Send initial membership report + let tx_token = device + .transmit(timestamp) + .ok_or(MulticastError::Exhausted)?; + + // NOTE(unwrap): packet destination is multicast, which is always routable and doesn't require neighbor discovery. 
+ self.inner + .dispatch_ip( + tx_token, + PacketMeta::default(), + pkt, + None, + &mut self.fragmenter, + &mut self.multicast_queue, + ) + .unwrap(); + + Ok(true) + } else { + Ok(false) + } + } + #[cfg(all(feature = "proto-ipv6", feature = "rpl-mop-3"))] + IpAddress::Ipv6(addr) => { + // Check if the multicast address is present in the current multicast targets + if !self.inner.rpl_targets_multicast.contains(&addr) { + // Try to add the multicast target, otherwise abort + self.inner + .rpl_targets_multicast + .push(addr) + .map_err(|_err| MulticastError::GroupTableFull)?; + + // Schedule a new DAO for transmission if part of a dodag + match &mut self.inner.rpl.dodag { + Some(dodag) => { + if let Some(parent) = &dodag.parent { + dodag + .schedule_dao( + self.inner.rpl.mode_of_operation, + &[], + &[addr], + *parent, + self.inner.now, + false, + ) + .map_err(|_err| MulticastError::Exhausted)?; + } + + Ok(true) + } + None => Ok(false), + } + } else { + Ok(false) + } + } + // Multicast is not implemented/enabled for other address families + #[allow(unreachable_patterns)] + _ => Err(MulticastError::Unaddressable), + } + } + + /// Remove an address from the subscribed multicast IP addresses. + /// + /// Returns `Ok(leave_sent)` if the address was removed successfully, where `leave_sent` + /// indicates whether an immediate leave packet has been sent. + pub fn leave_multicast_group>( + &mut self, + device: &mut D, + addr: T, + timestamp: Instant, + ) -> Result + where + D: Device + ?Sized, + { + self.inner.now = timestamp; + + match addr.into() { + #[cfg(all(feature = "proto-ipv4", feature = "proto-igmp"))] + IpAddress::Ipv4(addr) => { + let was_not_present = self.inner.ipv4_multicast_groups.remove(&addr).is_none(); + if was_not_present { + Ok(false) + } else if let Some(pkt) = self.inner.igmp_leave_packet(addr) { + // Send group leave packet + let tx_token = device + .transmit(timestamp) + .ok_or(MulticastError::Exhausted)?; + + // NOTE(unwrap): packet destination is multicast, which is always routable and doesn't require neighbor discovery. + self.inner + .dispatch_ip( + tx_token, + PacketMeta::default(), + pkt, + None, + &mut self.fragmenter, + &mut self.multicast_queue, + ) + .unwrap(); + + Ok(true) + } else { + Ok(false) + } + } + #[cfg(all(feature = "proto-ipv6", feature = "rpl-mop-3"))] + IpAddress::Ipv6(addr) => { + if self.inner.rpl_targets_multicast.contains(&addr) { + // Remove the multicast target from the list of subscribed groups + self.inner + .rpl_targets_multicast + .retain(|multicast_group| multicast_group != &addr); + + // Schedule a new DAO for transmission if part of a dodag + match &mut self.inner.rpl.dodag { + Some(dodag) => { + if let Some(parent) = &dodag.parent { + dodag + .schedule_dao( + self.inner.rpl.mode_of_operation, + &[], + &[addr], + *parent, + self.inner.now, + true, + ) + .map_err(|_err| MulticastError::Exhausted)?; + } + + Ok(true) + } + None => Ok(false), + } + } else { + Ok(false) + } + } + // Multicast is not implemented/enabled for other address families + #[allow(unreachable_patterns)] + _ => Err(MulticastError::Unaddressable), + } + } + + /// Check whether the interface listens to the given destination multicast IP address. + pub fn has_multicast_group>(&self, addr: T) -> bool { + self.inner.has_multicast_group(addr) + } + /// Transmit packets queued in the given sockets, and receive packets queued
/// @@ -442,6 +636,9 @@ impl Interface { #[cfg(feature = "_proto-fragmentation")] self.fragments.assembler.remove_expired(timestamp); + // Poll multicast queue and dispatch if possible + self.poll_multicast(device); + match self.inner.caps.medium { #[cfg(feature = "medium-ieee802154")] Medium::Ieee802154 => @@ -502,6 +699,8 @@ impl Interface { return Some(Instant::from_millis(0)); } + let poll_at_multicast = self.poll_at_multicast(); + #[cfg(feature = "proto-rpl")] let poll_at_rpl = self.poll_at_rpl(); @@ -520,6 +719,7 @@ impl Interface { #[cfg(feature = "proto-rpl")] let poll_at = poll_at.chain(core::iter::once(poll_at_rpl)); + let poll_at = poll_at.chain(poll_at_multicast); poll_at.min() } @@ -561,25 +761,35 @@ impl Interface { rx_meta, frame, &mut self.fragments, + &mut self.multicast_queue, ) { - if let Err(err) = - self.inner.dispatch(tx_token, packet, &mut self.fragmenter) - { + if let Err(err) = self.inner.dispatch( + tx_token, + packet, + &mut self.fragmenter, + &mut self.multicast_queue, + ) { net_debug!("Failed to send response: {:?}", err); } } } #[cfg(feature = "medium-ip")] Medium::Ip => { - if let Some(packet) = - self.inner - .process_ip(sockets, rx_meta, frame, &mut self.fragments) - { + if let Some(packet) = self.inner.process_ip( + sockets, + rx_meta, + frame, + None, + &mut self.fragments, + &mut self.multicast_queue, + ) { if let Err(err) = self.inner.dispatch_ip( tx_token, PacketMeta::default(), packet, + None, &mut self.fragmenter, + &mut self.multicast_queue, ) { net_debug!("Failed to send response: {:?}", err); } @@ -592,12 +802,22 @@ impl Interface { rx_meta, frame, &mut self.fragments, + &mut self.multicast_queue, ) { + let frame = Ieee802154Frame::new_checked(&*frame).ok(); + let src_addr = frame + .as_ref() + .and_then(|frame| Ieee802154Repr::parse(frame).ok()) + .and_then(|repr| repr.src_addr) + .map(HardwareAddress::Ieee802154); + if let Err(err) = self.inner.dispatch_ip( tx_token, PacketMeta::default(), packet, + src_addr.as_ref(), &mut self.fragmenter, + &mut self.multicast_queue, ) { net_debug!("Failed to send response: {:?}", err); } @@ -640,7 +860,14 @@ impl Interface { })?; inner - .dispatch_ip(t, meta, response, &mut self.fragmenter) + .dispatch_ip( + t, + meta, + response, + None, + &mut self.fragmenter, + &mut self.multicast_queue, + ) .map_err(EgressError::Dispatch)?; emitted_any = true; @@ -809,7 +1036,9 @@ impl InterfaceInner { #[cfg(feature = "proto-rpl")] IpAddress::Ipv6(Ipv6Address::LINK_LOCAL_ALL_RPL_NODES) => true, #[cfg(feature = "proto-ipv6")] - IpAddress::Ipv6(addr) => self.has_solicited_node(addr), + IpAddress::Ipv6(addr) if self.has_solicited_node(addr) => true, + #[cfg(all(feature = "proto-ipv6", feature = "rpl-mop-3"))] + IpAddress::Ipv6(addr) if self.rpl_targets_multicast.contains(&addr) => true, #[allow(unreachable_patterns)] _ => false, } @@ -821,7 +1050,9 @@ impl InterfaceInner { sockets: &mut SocketSet, meta: PacketMeta, ip_payload: &'frame [u8], + previous_hop: Option<&HardwareAddress>, frag: &'frame mut FragmentsBuffer, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, ) -> Option> { match IpVersion::of_packet(ip_payload) { #[cfg(feature = "proto-ipv4")] @@ -833,7 +1064,7 @@ impl InterfaceInner { #[cfg(feature = "proto-ipv6")] Ok(IpVersion::Ipv6) => { let ipv6_packet = check!(Ipv6Packet::new_checked(ip_payload)); - self.process_ipv6(sockets, meta, &ipv6_packet) + self.process_ipv6(sockets, meta, &ipv6_packet, previous_hop, multicast_queue) } // Drop all other traffic. 
_ => None, @@ -879,6 +1110,7 @@ impl InterfaceInner { tx_token: Tx, packet: EthernetPacket, frag: &mut Fragmenter, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, ) -> Result<(), DispatchError> where Tx: TxToken, @@ -901,9 +1133,14 @@ impl InterfaceInner { arp_repr.emit(&mut packet); }) } - EthernetPacket::Ip(packet) => { - self.dispatch_ip(tx_token, PacketMeta::default(), packet, frag) - } + EthernetPacket::Ip(packet) => self.dispatch_ip( + tx_token, + PacketMeta::default(), + packet, + None, + frag, + multicast_queue, + ), } } @@ -924,6 +1161,21 @@ impl InterfaceInner { } fn has_neighbor(&self, addr: &IpAddress) -> bool { + if addr.is_multicast() { + #[cfg(feature = "proto-rpl")] + { + if let Some(dodag) = &self.rpl.dodag { + return dodag + .relations + .iter() + .any(|rel| &IpAddress::Ipv6(rel.destination()) == addr) + || dodag.parent.is_some(); + } + } + // FIXME: Do something useful here + return true; + } + match self.route(addr, self.now) { Some(_routed_addr) => match self.caps.medium { #[cfg(feature = "medium-ethernet")] @@ -937,66 +1189,145 @@ impl InterfaceInner { } } + /// Look up the hardware address when the destination is broadcast + fn lookup_hardware_addr_broadcast( + &mut self, + dst_addr: &IpAddress, + ) -> Result, DispatchError> + { + debug_assert!(dst_addr.is_broadcast()); + let hardware_addr = match self.caps.medium { + #[cfg(feature = "medium-ethernet")] + Medium::Ethernet => HardwareAddress::Ethernet(EthernetAddress::BROADCAST), + #[cfg(feature = "medium-ieee802154")] + Medium::Ieee802154 => HardwareAddress::Ieee802154(Ieee802154Address::BROADCAST), + #[cfg(feature = "medium-ip")] + Medium::Ip => unreachable!(), + }; + + Ok(heapless::Vec::from_iter(core::iter::once(hardware_addr))) + } + + /// Look up the hardware address when the destination is multicast + fn lookup_hardware_addr_multicast( + &mut self, + dst_addr: &IpAddress, + previous_hop: Option<&HardwareAddress>, + ) -> Result, DispatchError> + { + debug_assert!(dst_addr.is_multicast()); + + let b = dst_addr.as_bytes(); + let hardware_addresses = match *dst_addr { + #[cfg(feature = "proto-ipv4")] + IpAddress::Ipv4(_addr) => match self.caps.medium { + #[cfg(feature = "medium-ethernet")] + Medium::Ethernet => { + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ethernet( + EthernetAddress::from_bytes(&[0x01, 0x00, 0x5e, b[1] & 0x7F, b[2], b[3]]), + ))) + } + #[cfg(feature = "medium-ieee802154")] + Medium::Ieee802154 => unreachable!(), + #[cfg(feature = "medium-ip")] + Medium::Ip => unreachable!(), + }, + #[cfg(feature = "proto-ipv6")] + IpAddress::Ipv6(addr) => match self.caps.medium { + #[cfg(feature = "medium-ethernet")] + Medium::Ethernet => { + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ethernet( + EthernetAddress::from_bytes(&[0x33, 0x33, b[12], b[13], b[14], b[15]]), + ))) + } + #[cfg(feature = "medium-ieee802154")] + Medium::Ieee802154 => { + match addr { + // Handle well known multicast groups + Ipv6Address::LINK_LOCAL_ALL_RPL_NODES => { + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ieee802154( + Ieee802154Address::BROADCAST, + ))) + } + Ipv6Address::LINK_LOCAL_ALL_NODES | Ipv6Address::LINK_LOCAL_ALL_ROUTERS => { + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ieee802154( + Ieee802154Address::BROADCAST, + ))) + } + // Handle the joined multicast groups + _ => { + #[cfg(feature = "rpl-mop-3")] + // If in a DODAG, filter out the previous hop and compile a list of the remaining candidates + if let Some(dodag) = &self.rpl.dodag { + let parent =
dodag.parent.iter().copied(); + let next_hops = dodag.relations.find_next_hop(addr); + let downwards = next_hops + .iter() + .flat_map(|hops| hops.iter()) + .map(|hop| hop.ip); + let hardware_addrs = parent + .chain(downwards) + .flat_map(|hop| { + match self.neighbor_cache.lookup(&hop.into(), self.now) { + NeighborAnswer::Found(haddr) => Some(haddr), + NeighborAnswer::NotFound => None, + NeighborAnswer::RateLimited => None, + } + }) + .filter(|haddr| Some(haddr) != previous_hop); + + heapless::Vec::from_iter(hardware_addrs) + } else { + // Not sure if this is correct + heapless::Vec::from_iter(core::iter::once( + HardwareAddress::Ieee802154(Ieee802154Address::BROADCAST), + )) + } + #[cfg(not(feature = "rpl-mop-3"))] + { + heapless::Vec::from_iter(core::iter::once( + HardwareAddress::Ieee802154(Ieee802154Address::BROADCAST), + )) + } + } + } + } + #[cfg(feature = "medium-ip")] + Medium::Ip => unreachable!(), + }, + }; + + Ok(hardware_addresses) + } + #[cfg(any(feature = "medium-ethernet", feature = "medium-ieee802154"))] fn lookup_hardware_addr( &mut self, tx_token: Tx, + previous_hop: Option<&HardwareAddress>, src_addr: &IpAddress, dst_addr: &IpAddress, + // previous_hop: Option, #[allow(unused)] fragmenter: &mut Fragmenter, - ) -> Result<(HardwareAddress, Tx), DispatchError> + ) -> Result< + ( + heapless::Vec, + Tx, + ), + DispatchError, + > where Tx: TxToken, { if self.is_broadcast(dst_addr) { - let hardware_addr = match self.caps.medium { - #[cfg(feature = "medium-ethernet")] - Medium::Ethernet => HardwareAddress::Ethernet(EthernetAddress::BROADCAST), - #[cfg(feature = "medium-ieee802154")] - Medium::Ieee802154 => HardwareAddress::Ieee802154(Ieee802154Address::BROADCAST), - #[cfg(feature = "medium-ip")] - Medium::Ip => unreachable!(), - }; - - return Ok((hardware_addr, tx_token)); + return Ok((self.lookup_hardware_addr_broadcast(dst_addr)?, tx_token)); } if dst_addr.is_multicast() { - let b = dst_addr.as_bytes(); - let hardware_addr = match *dst_addr { - #[cfg(feature = "proto-ipv4")] - IpAddress::Ipv4(_addr) => match self.caps.medium { - #[cfg(feature = "medium-ethernet")] - Medium::Ethernet => HardwareAddress::Ethernet(EthernetAddress::from_bytes(&[ - 0x01, - 0x00, - 0x5e, - b[1] & 0x7F, - b[2], - b[3], - ])), - #[cfg(feature = "medium-ieee802154")] - Medium::Ieee802154 => unreachable!(), - #[cfg(feature = "medium-ip")] - Medium::Ip => unreachable!(), - }, - #[cfg(feature = "proto-ipv6")] - IpAddress::Ipv6(_addr) => match self.caps.medium { - #[cfg(feature = "medium-ethernet")] - Medium::Ethernet => HardwareAddress::Ethernet(EthernetAddress::from_bytes(&[ - 0x33, 0x33, b[12], b[13], b[14], b[15], - ])), - #[cfg(feature = "medium-ieee802154")] - Medium::Ieee802154 => { - // Not sure if this is correct - HardwareAddress::Ieee802154(Ieee802154Address::BROADCAST) - } - #[cfg(feature = "medium-ip")] - Medium::Ip => unreachable!(), - }, - }; - - return Ok((hardware_addr, tx_token)); + return Ok(( + self.lookup_hardware_addr_multicast(dst_addr, previous_hop)?, + tx_token, + )); } let dst_addr = self @@ -1007,12 +1338,17 @@ impl InterfaceInner { let dst_addr = if let IpAddress::Ipv6(dst_addr) = dst_addr { #[cfg(any(feature = "rpl-mop-1", feature = "rpl-mop-2", feature = "rpl-mop3"))] if let Some(dodag) = &self.rpl.dodag { - if let Some(next_hop) = dodag.relations.find_next_hop(dst_addr) { - if next_hop == self.ipv6_addr().unwrap() { + if let Some(next_hop) = dodag + .relations + .find_next_hop(dst_addr) + .and_then(|hop| hop.first()) + // In unicast it is not possible to have multiple 
next_hops per destination + { + if next_hop.ip == self.ipv6_addr().unwrap() { dst_addr.into() } else { - net_trace!("next hop {}", next_hop); - next_hop.into() + net_trace!("next hops {:?}", next_hop); + next_hop.ip.into() } } else if let Some(parent) = dodag.parent { parent.into() @@ -1030,7 +1366,12 @@ impl InterfaceInner { }; match self.neighbor_cache.lookup(&dst_addr, self.now) { - NeighborAnswer::Found(hardware_addr) => return Ok((hardware_addr, tx_token)), + NeighborAnswer::Found(hardware_addr) => { + return Ok(( + heapless::Vec::from_iter(core::iter::once(hardware_addr)), + tx_token, + )) + } NeighborAnswer::RateLimited => { net_debug!("neighbor {} pending", dst_addr); return Err(DispatchError::NeighborPending); @@ -1072,7 +1413,10 @@ impl InterfaceInner { if let NeighborAnswer::Found(hardware_addr) = self.neighbor_cache.lookup(&parent.into(), self.now) { - return Ok((hardware_addr, tx_token)); + return Ok(( + heapless::Vec::from_iter(core::iter::once(hardware_addr)), + tx_token, + )); } } } @@ -1125,6 +1469,31 @@ impl InterfaceInner { self.neighbor_cache.flush() } + /// Convenience method for scheduling a multicast packet for later transmission + fn schedule_multicast_packet( + &self, + meta: PacketMeta, + packet: &PacketV6<'_>, + ll_addrs: heapless::Vec, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, + ) -> Result<(), DispatchError> { + let buffer = multicast_queue + .enqueue( + packet.payload().buffer_len(), + MulticastMetadata::new(meta, packet, ll_addrs), + ) + .map_err(|_err| DispatchError::Exhausted)?; + packet + .payload() + .emit(&(*packet.header()).into(), buffer, &self.caps); + + Ok(()) + } + + /// Transmit an IP packet or schedule it into multiple transmissions when + /// fragmentation is needed or retransmissions with multicast + /// + /// If the hardware address is already known, use this one, otherwise do a lookup fn dispatch_ip( &mut self, // NOTE(unused_mut): tx_token isn't always mutated, depending on @@ -1132,6 +1501,92 @@ impl InterfaceInner { #[allow(unused_mut)] mut tx_token: Tx, meta: PacketMeta, packet: Packet, + previous_hop: Option<&HardwareAddress>, + frag: &mut Fragmenter, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, + ) -> Result<(), DispatchError> { + let (hardware_addr, tx_token) = self.handle_hardware_addr_lookup( + &packet, + meta, + previous_hop, + frag, + multicast_queue, + tx_token, + )?; + + self.transmit_ip(tx_token, meta, packet, hardware_addr, frag) + } + + fn handle_hardware_addr_lookup( + &mut self, + packet: &Packet, + meta: PacketMeta, + previous_hop: Option<&HardwareAddress>, + frag: &mut Fragmenter, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, + tx_token: Tx, + ) -> Result<(HardwareAddress, Tx), DispatchError> + where + Tx: TxToken, + { + let (mut addr, tx_token) = match self.caps.medium { + medium + if matches_cfg!([feature = "medium-ethernet"] medium, Medium::Ethernet) + || matches_cfg!( + [feature = "medium-ieee802154"] + medium, + Medium::Ieee802154 + ) => + { + self.lookup_hardware_addr( + tx_token, + previous_hop, + &packet.ip_repr().src_addr(), + &packet.ip_repr().dst_addr(), + frag, + )? 
+ } + #[cfg(feature = "medium-ethernet")] + _ => ( + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ethernet( + EthernetAddress([0; 6]), + ))), + tx_token, + ), + #[cfg(all(not(feature = "medium-ethernet"), feature = "medium-ieee802154"))] + _ => ( + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ieee802154( + Ieee802154Address::BROADCAST, + ))), + tx_token, + ), + }; + let first_addr = addr.pop().ok_or(DispatchError::NoRoute)?; + + if !addr.is_empty() { + match packet { + #[cfg(feature = "proto-ipv4")] + Packet::Ipv4(_) => unimplemented!(), + #[cfg(feature = "proto-ipv6")] + Packet::Ipv6(packet) => { + if !addr.is_empty() { + self.schedule_multicast_packet(meta, packet, addr, multicast_queue)?; + } + } + } + } + + Ok((first_addr, tx_token)) + } + + fn transmit_ip( + &mut self, + // NOTE(unused_mut): tx_token isn't always mutated, depending on + // the feature set that is used. + #[allow(unused_mut)] mut tx_token: Tx, + meta: PacketMeta, + packet: Packet, + hardware_addr: HardwareAddress, frag: &mut Fragmenter, ) -> Result<(), DispatchError> { let mut ip_repr = packet.ip_repr(); @@ -1141,21 +1596,21 @@ impl InterfaceInner { #[cfg(feature = "medium-ieee802154")] if matches!(self.caps.medium, Medium::Ieee802154) { - let (addr, tx_token) = self.lookup_hardware_addr( - tx_token, - &ip_repr.src_addr(), - &ip_repr.dst_addr(), - frag, - )?; - let addr = addr.ieee802154_or_panic(); - + // Schedule the remaining multicast transmissions let packet = match packet { Packet::Ipv6(packet) => packet, #[allow(unreachable_patterns)] _ => unreachable!(), }; - self.dispatch_ieee802154(addr, tx_token, meta, packet, frag); + self.dispatch_ieee802154( + hardware_addr.ieee802154_or_panic(), + tx_token, + meta, + packet, + frag, + ); + return Ok(()); } @@ -1177,18 +1632,8 @@ impl InterfaceInner { // If the medium is Ethernet, then we need to retrieve the destination hardware address. #[cfg(feature = "medium-ethernet")] - let (dst_hardware_addr, mut tx_token) = match self.caps.medium { - Medium::Ethernet => { - match self.lookup_hardware_addr( - tx_token, - &ip_repr.src_addr(), - &ip_repr.dst_addr(), - frag, - )? { - (HardwareAddress::Ethernet(addr), tx_token) => (addr, tx_token), - (_, _) => unreachable!(), - } - } + let (hardware_addr, mut tx_token) = match self.caps.medium { + Medium::Ethernet => (hardware_addr.ethernet_or_panic(), tx_token), _ => (EthernetAddress([0; 6]), tx_token), }; @@ -1199,7 +1644,7 @@ impl InterfaceInner { let src_addr = self.hardware_addr.ethernet_or_panic(); frame.set_src_addr(src_addr); - frame.set_dst_addr(dst_hardware_addr); + frame.set_dst_addr(hardware_addr); match repr.version() { #[cfg(feature = "proto-ipv4")] @@ -1216,7 +1661,7 @@ impl InterfaceInner { repr.emit(&mut tx_buffer, &self.caps.checksum); let payload = &mut tx_buffer[repr.header_len()..]; - packet.emit_payload(repr, payload, &caps) + packet.emit_payload(payload, &caps) }; let total_ip_len = ip_repr.buffer_len(); @@ -1246,7 +1691,7 @@ impl InterfaceInner { #[cfg(feature = "medium-ethernet")] { - frag.ipv4.dst_hardware_addr = dst_hardware_addr; + frag.ipv4.dst_hardware_addr = hardware_addr; } // Save the total packet len (without the Ethernet header, but with the first @@ -1342,4 +1787,34 @@ enum DispatchError { /// the neighbor for it yet. Discovery has been initiated, dispatch /// should be retried later. 
NeighborPending, + /// When we cannot immediately dispatch a packet and need to wait for the + /// underlying physical layer to process its current tasks, a packet may + /// need to be stored somewhere. If this storage buffer is full, we cannot + /// schedule it for later transmission. + Exhausted, +} + +/// Error type for `join_multicast_group`, `leave_multicast_group`. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum MulticastError { + /// The hardware device transmit buffer is full. Try again later. + Exhausted, + /// The table of joined multicast groups is already full. + GroupTableFull, + /// The address type is unsupported. + Unaddressable, +} + +impl core::fmt::Display for MulticastError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + match self { + MulticastError::Exhausted => write!(f, "Exhausted"), + MulticastError::GroupTableFull => write!(f, "GroupTableFull"), + MulticastError::Unaddressable => write!(f, "Unaddressable"), + } + } } + +#[cfg(feature = "std")] +impl std::error::Error for MulticastError {} diff --git a/src/iface/interface/multicast.rs b/src/iface/interface/multicast.rs new file mode 100644 index 000000000..dbf3e1d34 --- /dev/null +++ b/src/iface/interface/multicast.rs @@ -0,0 +1,80 @@ +use crate::{phy::Device, time::Instant}; + +use super::{Interface, IpPayload, Packet}; + +impl Interface<'_> { + /// Poll the multicast queue and dispatch the next multicast packet if available + pub(super) fn poll_multicast(&mut self, device: &mut D) -> bool + where + D: Device + ?Sized, + { + // Dequeue empty multicast packets + self.flush_multicast_queue(); + + // If we did not find any still active multicast packets, we can stop here + let Ok((meta, payload)) = self.multicast_queue.peek_mut() else { + return true; + }; + // If this panics, something went horribly wrong while checking for a valid multicast packet + let next_ll_addr = meta.pop_next_ll_addr().unwrap(); + + // Rehydrate the multicast packet from the queue + let Ok(packet) = IpPayload::parse_unchecked( + payload, + meta.payload_type(), + meta.header(), + &self.inner.checksum_caps(), + ) + .inspect_err(|_err| net_trace!("Parsing of queued packet has failed, dropping")) else { + return false; + }; + + // Try to acquire a tx_token + let Some(tx_token) = device.transmit(self.inner.now) else { + return false; // Device is busy, retry later + }; + + let metadata = meta.meta(); + let header = *meta.header(); + let _ = self + .inner + .transmit_ip( + tx_token, + metadata, + Packet::new_ipv6(header, packet), + next_ll_addr, + &mut self.fragmenter, + ) + .inspect_err(|err| { + net_trace!( + "Failed to transmit scheduled multicast transmission with reason {:?}", + err + ) + }); + + true + } + + /// Request to poll again as soon as possible if there are still packets to be transmitted in the queue + pub(super) fn poll_at_multicast(&mut self) -> Option { + if !self.multicast_queue.is_empty() { + Some(self.inner.now) + } else { + None + } + } + + /// Remove empty multicast packets from the multicast queue + fn flush_multicast_queue(&mut self) { + // We may get an error if the queue is empty, but then flushing was successful + let _ = self.multicast_queue.dequeue_with( + |meta, _packet| { + if meta.finished() { + Ok(()) + } else { + Err(123) + } + }, + ); + } +} diff --git a/src/iface/interface/rpl.rs b/src/iface/interface/rpl.rs index be5de51c6..f250cc64c 100644 --- a/src/iface/interface/rpl.rs +++ b/src/iface/interface/rpl.rs @@ -10,7 +10,7 @@ use
crate::wire::{Ipv6HopByHopRepr, Ipv6OptionRepr, RplDao, RplDaoAck}; use crate::iface::rpl::*; use heapless::Vec; -impl Interface { +impl Interface<'_> { pub(super) fn poll_rpl(&mut self, device: &mut D) -> bool where D: Device + ?Sized, @@ -19,7 +19,9 @@ impl Interface { ctx: &mut InterfaceInner, device: &mut D, packet: Packet, + previous_hop: Option<&HardwareAddress>, fragmenter: &mut Fragmenter, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, ) -> bool where D: Device + ?Sized, @@ -28,7 +30,14 @@ impl Interface { return false; }; - match ctx.dispatch_ip(tx_token, PacketMeta::default(), packet, fragmenter) { + match ctx.dispatch_ip( + tx_token, + PacketMeta::default(), + packet, + previous_hop, + fragmenter, + multicast_queue, + ) { Ok(()) => true, Err(e) => { net_debug!("failed to send packet: {:?}", e); @@ -40,6 +49,7 @@ impl Interface { let Interface { inner: ctx, fragmenter, + multicast_queue, .. } = self; @@ -67,7 +77,9 @@ impl Interface { ctx, device, Packet::new_ipv6(ipv6_repr, IpPayload::Icmpv6(icmp_rpl)), + None, fragmenter, + multicast_queue, ); } @@ -85,9 +97,25 @@ impl Interface { // If we did not hear from our parent for some time, // remove our parent. Ideally, we should check if we could find another parent. if parent.last_heard < ctx.now - dodag.dio_timer.max_expiration() * 2 { - dodag.remove_parent( + // dodag.remove_parent( + // ctx.rpl.mode_of_operation, + // our_addr, + // &ctx.rpl.of, + // ctx.now, + // &mut ctx.rand, + // ); + dodag.remove_parent(); + dodag.find_new_parent( ctx.rpl.mode_of_operation, - our_addr, + &[our_addr], // FIXME: what about multiple unicast targets + { + #[cfg(feature = "rpl-mop-3")] + { + &ctx.rpl_targets_multicast + } + #[cfg(not(feature = "rpl-mop-3"))] + &[] + }, &ctx.rpl.of, ctx.now, &mut ctx.rand, @@ -114,13 +142,31 @@ impl Interface { ctx, device, Packet::new_ipv6(ipv6_repr, IpPayload::Icmpv6(icmp)), + None, fragmenter, + multicast_queue, ); } #[cfg(any(feature = "rpl-mop-1", feature = "rpl-mop-2", feature = "rpl-mop-3"))] if dodag.dao_expiration <= ctx.now { - dodag.schedule_dao(ctx.rpl.mode_of_operation, our_addr, parent_address, ctx.now); + let _ = dodag + .schedule_dao( + ctx.rpl.mode_of_operation, + &[our_addr], + { + #[cfg(feature = "rpl-mop-3")] + { + &ctx.rpl_targets_multicast[..] + } + #[cfg(not(feature = "rpl-mop-3"))] + &[] + }, + parent_address, + ctx.now, + false, + ) + .inspect_err(|err| net_trace!("Could not transmit DAO with reason: {}", err)); } } @@ -168,7 +214,14 @@ impl Interface { { p.header_mut().dst_addr = new_dst_addr; p.add_routing(source_route); - return transmit(ctx, device, Packet::Ipv6(p), fragmenter); + return transmit( + ctx, + device, + Packet::Ipv6(p), + None, + fragmenter, + multicast_queue, + ); } }; @@ -183,7 +236,14 @@ impl Interface { })) .unwrap(); p.add_hop_by_hop(Ipv6HopByHopRepr { options }); - return transmit(ctx, device, Packet::Ipv6(p), fragmenter); + return transmit( + ctx, + device, + Packet::Ipv6(p), + None, + fragmenter, + multicast_queue, + ); } // Transmit any DAO that are queued. 
@@ -200,7 +260,20 @@ impl Interface { dodag.daos.iter_mut().for_each(|dao| { if !dao.needs_sending { let Some(next_tx) = dao.next_tx else { - dao.next_tx = Some(ctx.now + dodag.dio_timer.min_expiration()); + let next_tx = dodag.dio_timer.min_expiration(); + // Add a random noise offset between 0 ms and 96 ms (in steps of 32 ms) + let noise = Duration::from_millis((ctx.rand.rand_u16() % 0x4) as u64) * 32; + // We always want the multicast DAO to come later than + // similarly scheduled unicast DAOs, so we introduce here + // a small offset such that the unicast DAO always + // arrives first if scheduled at the same time. + let multicast_bias = if dao.has_multicast_target() { + Duration::from_millis(128) + } else { + Duration::from_micros(0) + }; + + dao.next_tx = Some(ctx.now + next_tx + multicast_bias + noise); return; }; @@ -244,7 +317,14 @@ impl Interface { p.add_hop_by_hop(hbh); net_trace!("transmitting DAO"); - return transmit(ctx, device, Packet::Ipv6(p), fragmenter); + return transmit( + ctx, + device, + Packet::Ipv6(p), + None, + fragmenter, + multicast_queue, + ); } } @@ -271,7 +351,9 @@ impl Interface { ctx, device, Packet::new_ipv6(ipv6_repr, IpPayload::Icmpv6(icmp)), + None, fragmenter, + multicast_queue, ); } @@ -361,9 +443,7 @@ impl InterfaceInner { dis: RplDis<'payload>, ) -> Option> { // We cannot handle a DIS when we are not part of any DODAG. - let Some(dodag) = &mut self.rpl.dodag else { - return None; - }; + let dodag = self.rpl.dodag.as_mut()?; if let Some(frame) = self.current_frame.as_ref() { self.neighbor_cache.fill_with_expiration( @@ -635,9 +715,25 @@ impl InterfaceInner { dodag.parent_set.clear(); // We do NOT send a No-path DAO. - let _ = dodag.remove_parent( + // let _ = dodag.remove_parent( + // self.rpl.mode_of_operation, + // // our_addr, + // &self.rpl.of, + // self.now, + // &mut self.rand, + // ); + let _ = dodag.remove_parent(); + dodag.find_new_parent( self.rpl.mode_of_operation, - our_addr, + &[our_addr], // FIXME + { + #[cfg(feature = "rpl-mop-3")] + { + &self.rpl_targets_multicast[..] + } + #[cfg(not(feature = "rpl-mop-3"))] + &[][..] + }, &self.rpl.of, self.now, &mut self.rand, @@ -669,9 +765,25 @@ impl InterfaceInner { net_trace!("parent leaving, removing parent"); // Don't need to send a no-path DOA when parent is leaving. - let _ = dodag.remove_parent( + // let _ = dodag.remove_parent( + // self.rpl.mode_of_operation, + // // our_addr, + // &self.rpl.of, + // self.now, + // &mut self.rand, + // ); + let _ = dodag.remove_parent(); + dodag.find_new_parent( self.rpl.mode_of_operation, - our_addr, + &[our_addr], // FIXME + { + #[cfg(feature = "rpl-mop-3")] + { + &self.rpl_targets_multicast[..] + } + #[cfg(not(feature = "rpl-mop-3"))] + &[] + }, &self.rpl.of, self.now, &mut self.rand, @@ -756,7 +868,15 @@ impl InterfaceInner { // Select and schedule DAO to new parent. dodag.find_new_parent( self.rpl.mode_of_operation, - our_addr, + &[our_addr], + { + #[cfg(feature = "rpl-mop-3")] + { + &self.rpl_targets_multicast[..] + } + #[cfg(not(feature = "rpl-mop-3"))] + &[] + }, &self.rpl.of, self.now, &mut self.rand, @@ -826,7 +946,9 @@ impl InterfaceInner { // DAO.
for target in &targets { net_trace!("remove {} relation (NO-PATH)", target); - dodag.relations.remove_relation(*target); + dodag + .relations + .remove_hop_from_relation(*target, ip_repr.src_addr); } } else { let next_hop = match self.rpl.mode_of_operation { @@ -844,14 +966,22 @@ impl InterfaceInner { for target in &targets { net_trace!("adding {} => {} relation", target, next_hop); - dodag.relations.add_relation( - *target, - next_hop, - self.now, - crate::time::Duration::from_secs( - transit.path_lifetime as u64 * dodag.lifetime_unit as u64, - ), - ); + let _ = dodag + .relations + .add_relation( + *target, + &[next_hop], + self.now, + crate::time::Duration::from_secs( + transit.path_lifetime as u64 * dodag.lifetime_unit as u64, + ), + ) + .inspect_err(|err| { + net_trace!( + "Could not add a relation to the dodag with reason {}", + err + ) + }); } targets.clear(); @@ -960,7 +1090,12 @@ impl InterfaceInner { // is moving to a new Version number. However, the standard does not define when a new // Version number should be used. Therefore, we immediately drop the packet when a Rank // error is detected, or when the bit was already set. - let rank = self.rpl.dodag.as_ref().unwrap().rank; + let rank = self + .rpl + .dodag + .as_ref() + .map(|dodag| dodag.rank) + .unwrap_or(Rank::new(u16::MAX, 1)); if hbh.rank_error || (hbh.down && rank <= sender_rank) || (!hbh.down && rank >= sender_rank) { net_trace!("RPL HBH: inconsistency detected, resetting trickle timer, dropping packet"); @@ -1090,18 +1225,19 @@ pub(crate) fn create_source_routing_header( loop { let next_hop = dodag.relations.find_next_hop(next); - if let Some(next_hop) = next_hop { + if let Some(next_hop) = next_hop.and_then(|hop| hop.first()) { + // We only support unicast in SRH net_trace!(" via {}", next_hop); - if next_hop == our_addr { + if next_hop.ip == our_addr { break; } - if route.push(next_hop).is_err() { + if route.push(next_hop.ip).is_err() { net_trace!("could not add hop to route buffer"); return None; } - next = next_hop; + next = next_hop.ip; } else { net_trace!("no route found, last next hop is {}", next); return None; diff --git a/src/iface/interface/sixlowpan.rs b/src/iface/interface/sixlowpan.rs index 09cd2f228..6a75610d1 100644 --- a/src/iface/interface/sixlowpan.rs +++ b/src/iface/interface/sixlowpan.rs @@ -5,7 +5,7 @@ use crate::wire::Result; // TODO: lower. Should be (6lowpan mtu) - (min 6lowpan header size) + (max ipv6 header size) pub(crate) const MAX_DECOMPRESSED_LEN: usize = 1500; -impl Interface { +impl Interface<'_> { /// Process fragments that still need to be sent for 6LoWPAN packets. 
/// /// This function returns a boolean value indicating whether any packets were @@ -65,6 +65,7 @@ impl InterfaceInner { ieee802154_repr: &Ieee802154Repr, payload: &'payload [u8], f: &'output mut FragmentsBuffer, + multicast_queue: &mut PacketBuffer<'_, MulticastMetadata>, ) -> Option> { let payload = match check!(SixlowpanPacket::dispatch(payload)) { #[cfg(not(feature = "proto-sixlowpan-fragmentation"))] @@ -100,7 +101,13 @@ impl InterfaceInner { }; let packet = check!(Ipv6Packet::new_checked(payload)); - self.process_ipv6(sockets, meta, &packet) + self.process_ipv6( + sockets, + meta, + &packet, + ieee802154_repr.src_addr.map(|addr| addr.into()).as_ref(), + multicast_queue, + ) } #[cfg(feature = "proto-sixlowpan-fragmentation")] @@ -771,7 +778,10 @@ impl<'p> PacketSixlowpan<'p> { ); if let Some(checksum) = checksum { - udp_packet.set_checksum(checksum); + // FIXME: The extra if is probably the result of the existence of a bug in reading the checksum from a packet. This happened while forwarding a UDP packet through multicast where the forwarded checksum suddenly got 0. + if checksum != 0 { + udp_packet.set_checksum(checksum); + } } } #[cfg(feature = "proto-rpl")] diff --git a/src/iface/interface/tests/ipv4.rs b/src/iface/interface/tests/ipv4.rs index 316a9d58a..eb465851d 100644 --- a/src/iface/interface/tests/ipv4.rs +++ b/src/iface/interface/tests/ipv4.rs @@ -41,6 +41,7 @@ fn test_any_ip_accept_arp(#[case] medium: Medium) { PacketMeta::default(), ETHERNET_FRAME_ARP(buffer.as_mut()), &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ) .is_none()); @@ -54,6 +55,7 @@ fn test_any_ip_accept_arp(#[case] medium: Medium) { PacketMeta::default(), ETHERNET_FRAME_ARP(buffer.as_mut()), &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ) .is_some()); } @@ -438,7 +440,8 @@ fn test_handle_valid_arp_request(#[case] medium: Medium) { &mut sockets, PacketMeta::default(), frame.into_inner(), - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), Some(EthernetPacket::Arp(ArpRepr::EthernetIpv4 { operation: ArpOperation::Reply, @@ -453,11 +456,15 @@ fn test_handle_valid_arp_request(#[case] medium: Medium) { assert_eq!( iface.inner.lookup_hardware_addr( MockTxToken, + None, &IpAddress::Ipv4(local_ip_addr), &IpAddress::Ipv4(remote_ip_addr), &mut iface.fragmenter, ), - Ok((HardwareAddress::Ethernet(remote_hw_addr), MockTxToken)) + Ok(( + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ethernet(remote_hw_addr))), + MockTxToken + )) ); } @@ -493,7 +500,8 @@ fn test_handle_other_arp_request(#[case] medium: Medium) { &mut sockets, PacketMeta::default(), frame.into_inner(), - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), None ); @@ -502,6 +510,7 @@ fn test_handle_other_arp_request(#[case] medium: Medium) { assert_eq!( iface.inner.lookup_hardware_addr( MockTxToken, + None, &IpAddress::Ipv4(Ipv4Address([0x7f, 0x00, 0x00, 0x01])), &IpAddress::Ipv4(remote_ip_addr), &mut iface.fragmenter, @@ -546,7 +555,8 @@ fn test_arp_flush_after_update_ip(#[case] medium: Medium) { &mut sockets, PacketMeta::default(), frame.into_inner(), - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), Some(EthernetPacket::Arp(ArpRepr::EthernetIpv4 { operation: ArpOperation::Reply, @@ -561,11 +571,15 @@ fn test_arp_flush_after_update_ip(#[case] medium: Medium) { assert_eq!( iface.inner.lookup_hardware_addr( MockTxToken, + None, &IpAddress::Ipv4(local_ip_addr), 
&IpAddress::Ipv4(remote_ip_addr), &mut iface.fragmenter, ), - Ok((HardwareAddress::Ethernet(remote_hw_addr), MockTxToken)) + Ok(( + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ethernet(remote_hw_addr))), + MockTxToken + )) ); // Update IP addrs to trigger ARP cache flush diff --git a/src/iface/interface/tests/ipv6.rs b/src/iface/interface/tests/ipv6.rs index 519595b58..4a37c2bcb 100644 --- a/src/iface/interface/tests/ipv6.rs +++ b/src/iface/interface/tests/ipv6.rs @@ -51,7 +51,9 @@ fn multicast_source_address(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -99,7 +101,9 @@ fn hop_by_hop_skip_with_icmp(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -134,7 +138,9 @@ fn hop_by_hop_discard_with_icmp(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -188,7 +194,9 @@ fn hop_by_hop_discard_param_problem(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -245,7 +253,9 @@ fn hop_by_hop_discard_with_multicast(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -304,7 +314,9 @@ fn imcp_empty_echo_request(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -364,7 +376,9 @@ fn icmp_echo_request(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -411,7 +425,9 @@ fn icmp_echo_reply_as_input(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -554,7 +570,9 @@ fn ndsic_neighbor_advertisement_ethernet(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -610,7 +628,9 @@ fn ndsic_neighbor_advertisement_ethernet_multicast_addr(#[case] medium: Medium) iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -662,7 +682,9 @@ fn 
ndsic_neighbor_advertisement_ieee802154(#[case] medium: Medium) { iface.inner.process_ipv6( &mut sockets, PacketMeta::default(), - &Ipv6Packet::new_checked(&data[..]).unwrap() + &Ipv6Packet::new_checked(&data[..]).unwrap(), + None, + &mut PacketBuffer::new(vec![], vec![]), ), response ); @@ -735,7 +757,8 @@ fn test_handle_valid_ndisc_request(#[case] medium: Medium) { &mut sockets, PacketMeta::default(), frame.into_inner(), - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), Some(EthernetPacket::Ip(Packet::new_ipv6( ipv6_expected, @@ -747,11 +770,15 @@ fn test_handle_valid_ndisc_request(#[case] medium: Medium) { assert_eq!( iface.inner.lookup_hardware_addr( MockTxToken, + None, &IpAddress::Ipv6(local_ip_addr), &IpAddress::Ipv6(remote_ip_addr), &mut iface.fragmenter, ), - Ok((HardwareAddress::Ethernet(remote_hw_addr), MockTxToken)) + Ok(( + heapless::Vec::from_iter(core::iter::once(HardwareAddress::Ethernet(remote_hw_addr))), + MockTxToken + )) ); } diff --git a/src/iface/interface/tests/mod.rs b/src/iface/interface/tests/mod.rs index ffe07f277..7a177a595 100644 --- a/src/iface/interface/tests/mod.rs +++ b/src/iface/interface/tests/mod.rs @@ -65,7 +65,15 @@ impl TxToken for MockTxToken { fn test_new_panic() { let mut device = Loopback::new(Medium::Ethernet); let config = Config::new(HardwareAddress::Ip); - Interface::new(config, &mut device, Instant::ZERO); + let mut meta = []; + let mut payload = []; + Interface::new( + config, + &mut device, + &mut meta[..], + &mut payload[..], + Instant::ZERO, + ); } #[rstest] diff --git a/src/iface/interface/tests/rpl.rs b/src/iface/interface/tests/rpl.rs index e3c2f3be3..7540472ae 100644 --- a/src/iface/interface/tests/rpl.rs +++ b/src/iface/interface/tests/rpl.rs @@ -11,6 +11,8 @@ use crate::iface::RplModeOfOperation; #[cfg(feature = "rpl-mop-1")] #[case::mop2(RplModeOfOperation::StoringMode)] #[cfg(feature = "rpl-mop-2")] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast)] +#[cfg(feature = "rpl-mop-3")] fn unicast_dis(#[case] mop: RplModeOfOperation) { use crate::iface::rpl::{Dodag, Rank, RplInstanceId}; @@ -105,6 +107,8 @@ fn unicast_dis(#[case] mop: RplModeOfOperation) { #[cfg(feature = "rpl-mop-1")] #[case::mop2(RplModeOfOperation::StoringMode)] #[cfg(feature = "rpl-mop-2")] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast)] +#[cfg(feature = "rpl-mop-3")] fn dio_without_configuration(#[case] mop: RplModeOfOperation) { use crate::iface::rpl::{Rank, RplInstanceId}; @@ -160,6 +164,8 @@ fn dio_without_configuration(#[case] mop: RplModeOfOperation) { #[cfg(feature = "rpl-mop-1")] #[case::mop2(RplModeOfOperation::StoringMode)] #[cfg(feature = "rpl-mop-2")] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast)] +#[cfg(feature = "rpl-mop-3")] fn dio_with_increased_version_number(#[case] mop: RplModeOfOperation) { use crate::iface::rpl::{Dodag, ObjectiveFunction0, Parent, ParentSet, Rank, RplInstanceId}; @@ -368,3 +374,178 @@ fn dio_with_increased_version_number(#[case] mop: RplModeOfOperation) { // know they have to leave the network assert_eq!(response, expected,); } + +#[rstest] +fn packet_forwarding_with_multicast() { + use crate::iface::rpl::{Dodag, ObjectiveFunction0, Parent, ParentSet, Rank}; + + const MULTICAST_GROUP: Ipv6Address = Ipv6Address::new(0xff02, 0, 0, 0, 0, 0, 0, 3); + const MULTICAST_HOP: Ipv6Address = Ipv6Address::new(0xfd00, 0, 0, 0, 0, 0, 0, 2); + const MULTICAST_HOP_LL: HardwareAddress = + HardwareAddress::Ieee802154(Ieee802154Address::Extended([0, 0, 0, 0, 0, 0, 0, 
2])); + + let (mut iface, _, _) = setup(Medium::Ieee802154); + + let ll_addr = Ieee802154Address::Extended([0, 0, 0, 0, 0, 0, 0, 1]); + let addr = ll_addr.as_link_local_address().unwrap(); + + let now = Instant::now(); + let mut set = ParentSet::default(); + let _ = set.add(Parent::new( + addr, + Rank::ROOT, + Default::default(), + RplSequenceCounter::from(240), + Default::default(), + now, + )); + + // Setting a dodag configuration with parent + iface.inner.rpl.mode_of_operation = RplModeOfOperation::StoringModeWithMulticast; + iface.inner.rpl.of = ObjectiveFunction0::default(); + iface.inner.rpl.is_root = false; + iface.inner.rpl.dodag = Some(Dodag { + instance_id: RplInstanceId::Local(30), + id: Default::default(), + version_number: Default::default(), + preference: 0, + rank: Rank::new(1024, 16), + dio_timer: Default::default(), + dao_expiration: Instant::now(), + dao_seq_number: Default::default(), + dao_acks: Default::default(), + daos: Default::default(), + parent: Some(addr), + without_parent: Default::default(), + authentication_enabled: Default::default(), + path_control_size: Default::default(), + dtsn: Default::default(), + dtsn_incremented_at: Instant::now(), + default_lifetime: Default::default(), + lifetime_unit: Default::default(), + grounded: false, + parent_set: set, + relations: Default::default(), + }); + iface + .inner + .neighbor_cache + .fill(addr.into(), ll_addr.into(), Instant::from_secs(10 * 60)); + iface.inner.neighbor_cache.fill( + MULTICAST_HOP.into(), + MULTICAST_HOP_LL, + Instant::from_secs(10 * 60), + ); + + let _response = iface.inner.process_rpl_dao( + Ipv6Repr { + src_addr: MULTICAST_HOP, + dst_addr: Ipv6Address::new(0xfd00, 0, 0, 0, 0, 0, 0, 1), + next_header: IpProtocol::Icmpv6, + payload_len: 0, // does not matter + hop_limit: 0xff, // does not matter + }, + RplDao { + rpl_instance_id: RplInstanceId::Local(30), + expect_ack: false, + sequence: RplSequenceCounter::new(42), + dodag_id: Default::default(), + options: heapless::Vec::from_iter([ + RplOptionRepr::RplTarget(RplTarget { + prefix_length: 64, + prefix: heapless::Vec::from_slice(MULTICAST_GROUP.as_bytes()).unwrap(), + }), + RplOptionRepr::TransitInformation(RplTransitInformation { + external: false, + path_control: 0, + path_sequence: 0, + path_lifetime: 0xff, + parent_address: Some(Ipv6Address::new(0xfd00, 0, 0, 0, 0, 0, 0, 1)), + }), + ]), + }, + ); + let _response = iface.inner.process_rpl_dao( + Ipv6Repr { + src_addr: MULTICAST_HOP, + dst_addr: Ipv6Address::new(0xfd00, 0, 0, 0, 0, 0, 0, 1), + next_header: IpProtocol::Icmpv6, + payload_len: 0, // does not matter + hop_limit: 0xff, // does not matter + }, + RplDao { + rpl_instance_id: RplInstanceId::Local(30), + expect_ack: false, + sequence: RplSequenceCounter::new(42), + dodag_id: Default::default(), + options: heapless::Vec::from_iter([ + RplOptionRepr::RplTarget(RplTarget { + prefix_length: 64, + prefix: heapless::Vec::from_slice( + Ipv6Address::new(0xfd00, 0, 0, 0, 0, 0, 0, 123).as_bytes(), // Just some other random child + ) + .unwrap(), + }), + RplOptionRepr::TransitInformation(RplTransitInformation { + external: false, + path_control: 0, + path_sequence: 0, + path_lifetime: 0xff, + parent_address: Some(Ipv6Address::new(0xfd00, 0, 0, 0, 0, 0, 0, 1)), + }), + ]), + }, + ); + + let dodag = iface.inner.rpl.dodag.as_ref().unwrap(); + assert!( + dodag + .relations + .iter() + .any(|rel| rel.is_multicast() + && rel.next_hop().iter().any(|hop| hop.ip == MULTICAST_HOP)), + "There should now be a relation with a multicast address added" + ); + + 
// Lookup haddrs if originating from this node + let haddrs = iface + .inner + .lookup_hardware_addr_multicast(&MULTICAST_GROUP.into(), None) + .unwrap(); + let expected_haddrs: heapless::Vec<_, { IFACE_MAX_MULTICAST_DUPLICATION_COUNT }> = + heapless::Vec::from_slice(&[ll_addr.into(), MULTICAST_HOP_LL]).unwrap(); + assert_eq!( + haddrs, expected_haddrs, + "If originating from this mote, the multicast packet should be forwarded up and down" + ); + + // Lookup haddrs if originating from the parent + let haddrs = iface + .inner + .lookup_hardware_addr_multicast(&MULTICAST_GROUP.into(), Some(&ll_addr.into())) + .unwrap(); + let expected_haddrs: heapless::Vec<_, { IFACE_MAX_MULTICAST_DUPLICATION_COUNT }> = + heapless::Vec::from_slice(&[MULTICAST_HOP_LL]).unwrap(); + assert_eq!( + haddrs, expected_haddrs, + "If originating from the parent, the multicast packet should only forward the packet down" + ); + + // Lookup haddrs if originating from one of the children + let haddrs = iface + .inner + .lookup_hardware_addr_multicast(&MULTICAST_GROUP.into(), Some(&MULTICAST_HOP_LL)) + .unwrap(); + let expected_haddrs: heapless::Vec<_, { IFACE_MAX_MULTICAST_DUPLICATION_COUNT }> = + heapless::Vec::from_slice(&[ll_addr.into()]).unwrap(); + assert_eq!(haddrs, expected_haddrs, "If originating from one of the children, the multicast packet should be forwarded up and to the other interested children"); + + // Lookup haddrs of all local rpl motes, coming from this mote + let haddrs = iface + .inner + .lookup_hardware_addr_multicast(&Ipv6Address::LINK_LOCAL_ALL_RPL_NODES.into(), None) + .unwrap(); + let expected_haddrs: heapless::Vec<_, { IFACE_MAX_MULTICAST_DUPLICATION_COUNT }> = + heapless::Vec::from_slice(&[Ieee802154Address::BROADCAST.into()]).unwrap(); + assert_eq!(haddrs, expected_haddrs); +} diff --git a/src/iface/interface/tests/sixlowpan.rs b/src/iface/interface/tests/sixlowpan.rs index 4dd74d462..dbfd3b925 100644 --- a/src/iface/interface/tests/sixlowpan.rs +++ b/src/iface/interface/tests/sixlowpan.rs @@ -18,7 +18,8 @@ fn ieee802154_wrong_pan_id(#[case] medium: Medium) { &mut sockets, PacketMeta::default(), &data[..], - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), response, ); @@ -73,7 +74,8 @@ fn icmp_echo_request(#[case] medium: Medium) { &mut sockets, PacketMeta::default(), &data[..], - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), response, ); @@ -176,7 +178,8 @@ fn test_echo_request_sixlowpan_128_bytes() { PacketMeta::default(), &ieee802154_repr, &request_first_part_packet.into_inner()[..], - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), None ); @@ -204,6 +207,7 @@ fn test_echo_request_sixlowpan_128_bytes() { &ieee802154_repr, &request_second_part, &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ) .unwrap() { @@ -346,7 +350,8 @@ fn test_sixlowpan_udp_with_fragmentation() { PacketMeta::default(), &ieee802154_repr, udp_first_part, - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), None ); @@ -366,7 +371,8 @@ fn test_sixlowpan_udp_with_fragmentation() { PacketMeta::default(), &ieee802154_repr, udp_second_part, - &mut iface.fragments + &mut iface.fragments, + &mut PacketBuffer::new(vec![], vec![]), ), None ); diff --git a/src/iface/mod.rs b/src/iface/mod.rs index 85cd6c918..5fb0170fb 100644 --- a/src/iface/mod.rs +++ b/src/iface/mod.rs @@ -6,6 +6,7 @@ provides lookup and caching of hardware addresses, and handles 
management packet mod fragmentation; mod interface; +mod multicast; #[cfg(any(feature = "medium-ethernet", feature = "medium-ieee802154"))] mod neighbor; mod route; @@ -31,3 +32,5 @@ pub use self::rpl::{ #[cfg(feature = "proto-rpl")] use self::rpl::Rpl; + +pub use multicast::MulticastMetadata; diff --git a/src/iface/multicast.rs b/src/iface/multicast.rs new file mode 100644 index 000000000..f211f5dcf --- /dev/null +++ b/src/iface/multicast.rs @@ -0,0 +1,50 @@ +use crate::{ + config::IFACE_MAX_MULTICAST_DUPLICATION_COUNT, + phy::PacketMeta, + wire::{HardwareAddress, Ipv6Repr}, +}; + +use super::packet::{IpPayloadType, PacketV6}; + +#[derive(Debug, Clone)] +pub struct MulticastMetadata { + ll_send_to: heapless::Vec, + packet_metadata: PacketMeta, + header: Ipv6Repr, + ip_payload_type: IpPayloadType, +} + +impl MulticastMetadata { + pub(crate) fn new( + packet_metadata: PacketMeta, + packet: &PacketV6<'_>, + ll_send_to: heapless::Vec, + ) -> Self { + Self { + packet_metadata, + ll_send_to, + header: *packet.header(), + ip_payload_type: packet.payload().payload_type(), + } + } + + pub fn finished(&self) -> bool { + self.ll_send_to.is_empty() + } + + pub fn pop_next_ll_addr(&mut self) -> Option { + self.ll_send_to.pop() + } + + pub fn header(&self) -> &Ipv6Repr { + &self.header + } + + pub fn meta(&self) -> PacketMeta { + self.packet_metadata + } + + pub(crate) fn payload_type(&self) -> IpPayloadType { + self.ip_payload_type.clone() + } +} diff --git a/src/iface/packet.rs b/src/iface/packet.rs index 79d124501..cb0afed08 100644 --- a/src/iface/packet.rs +++ b/src/iface/packet.rs @@ -1,4 +1,4 @@ -use crate::phy::DeviceCapabilities; +use crate::phy::{ChecksumCapabilities, DeviceCapabilities}; use crate::wire::*; #[allow(clippy::large_enum_variant)] @@ -71,82 +71,8 @@ impl<'p> Packet<'p> { } } - pub(crate) fn emit_payload( - &self, - _ip_repr: &IpRepr, - payload: &mut [u8], - caps: &DeviceCapabilities, - ) { - match self.payload() { - #[cfg(feature = "proto-ipv4")] - IpPayload::Icmpv4(icmpv4_repr) => { - icmpv4_repr.emit(&mut Icmpv4Packet::new_unchecked(payload), &caps.checksum) - } - #[cfg(feature = "proto-igmp")] - IpPayload::Igmp(igmp_repr) => igmp_repr.emit(&mut IgmpPacket::new_unchecked(payload)), - #[cfg(feature = "proto-ipv6")] - IpPayload::Icmpv6(icmpv6_repr) => { - let ipv6_repr = match _ip_repr { - #[cfg(feature = "proto-ipv4")] - IpRepr::Ipv4(_) => unreachable!(), - IpRepr::Ipv6(repr) => repr, - }; - - icmpv6_repr.emit( - &ipv6_repr.src_addr, - &ipv6_repr.dst_addr, - &mut Icmpv6Packet::new_unchecked(payload), - &caps.checksum, - ) - } - #[cfg(any(feature = "socket-raw", feature = "proto-rpl"))] - IpPayload::Raw(raw_packet) => payload.copy_from_slice(raw_packet), - #[cfg(any(feature = "socket-udp", feature = "socket-dns"))] - IpPayload::Udp(udp_repr, inner_payload) => udp_repr.emit( - &mut UdpPacket::new_unchecked(payload), - &_ip_repr.src_addr(), - &_ip_repr.dst_addr(), - inner_payload.len(), - |buf| buf.copy_from_slice(inner_payload), - &caps.checksum, - ), - #[cfg(feature = "socket-tcp")] - IpPayload::Tcp(mut tcp_repr) => { - // This is a terrible hack to make TCP performance more acceptable on systems - // where the TCP buffers are significantly larger than network buffers, - // e.g. a 64 kB TCP receive buffer (and so, when empty, a 64k window) - // together with four 1500 B Ethernet receive buffers. If left untreated, - // this would result in our peer pushing our window and sever packet loss. - // - // I'm really not happy about this "solution" but I don't know what else to do. 
- if let Some(max_burst_size) = caps.max_burst_size { - let mut max_segment_size = caps.max_transmission_unit; - max_segment_size -= _ip_repr.header_len(); - max_segment_size -= tcp_repr.header_len(); - - let max_window_size = max_burst_size * max_segment_size; - if tcp_repr.window_len as usize > max_window_size { - tcp_repr.window_len = max_window_size as u16; - } - } - - tcp_repr.emit( - &mut TcpPacket::new_unchecked(payload), - &_ip_repr.src_addr(), - &_ip_repr.dst_addr(), - &caps.checksum, - ); - } - #[cfg(feature = "socket-dhcpv4")] - IpPayload::Dhcpv4(udp_repr, dhcp_repr) => udp_repr.emit( - &mut UdpPacket::new_unchecked(payload), - &_ip_repr.src_addr(), - &_ip_repr.dst_addr(), - dhcp_repr.buffer_len(), - |buf| dhcp_repr.emit(&mut DhcpPacket::new_unchecked(buf)).unwrap(), - &caps.checksum, - ), - } + pub(crate) fn emit_payload(&self, payload: &mut [u8], caps: &DeviceCapabilities) { + self.payload().emit(&self.ip_repr(), payload, caps); } } @@ -251,6 +177,24 @@ impl<'p> PacketV6<'p> { } } +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum IpPayloadType { + #[cfg(feature = "proto-ipv4")] + Icmpv4, + #[cfg(feature = "proto-igmp")] + Igmp, + #[cfg(feature = "proto-ipv6")] + Icmpv6, + #[cfg(any(feature = "socket-raw", feature = "proto-rpl"))] + Raw, + #[cfg(any(feature = "socket-udp", feature = "socket-dns"))] + Udp, + #[cfg(feature = "socket-tcp")] + Tcp, + #[cfg(feature = "socket-dhcpv4")] + Dhcpv4, +} + #[derive(Debug, PartialEq)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] pub(crate) enum IpPayload<'p> { @@ -270,6 +214,239 @@ pub(crate) enum IpPayload<'p> { Dhcpv4(UdpRepr, DhcpRepr<'p>), } +impl<'a> IpPayload<'a> { + pub fn buffer_len(&self) -> usize { + match self { + #[cfg(feature = "proto-ipv4")] + Self::Icmpv4(repr) => repr.buffer_len(), + #[cfg(feature = "proto-igmp")] + Self::Igmp(repr) => repr.buffer_len(), + #[cfg(feature = "proto-ipv6")] + Self::Icmpv6(repr) => repr.buffer_len(), + #[cfg(any(feature = "socket-raw", feature = "proto-rpl"))] + Self::Raw(repr) => repr.len(), + #[cfg(any(feature = "socket-udp", feature = "socket-dns"))] + Self::Udp(repr, data) => repr.header_len() + data.len(), + #[cfg(feature = "socket-tcp")] + Self::Tcp(repr) => repr.buffer_len(), + #[cfg(feature = "socket-dhcpv4")] + Self::Dhcpv4(repr, data) => repr.header_len() + data.buffer_len(), + } + } + + pub fn parse_unchecked( + buffer: &'a [u8], + payload_type: IpPayloadType, + header: &Ipv6Repr, + checksum_caps: &ChecksumCapabilities, + ) -> crate::wire::Result { + match payload_type { + #[cfg(feature = "proto-ipv4")] + IpPayloadType::Icmpv4 => Ok(Self::Icmpv4( + Icmpv4Repr::parse(&Icmpv4Packet::new_unchecked(buffer), checksum_caps) + .map_err(|_| crate::wire::Error)?, + )), + #[cfg(feature = "proto-igmp")] + IpPayloadType::Igmp => Ok(Self::Igmp(IgmpRepr::parse(&IgmpPacket::new_unchecked( + buffer, + ))?)), + #[cfg(feature = "proto-ipv6")] + IpPayloadType::Icmpv6 => Ok(Self::Icmpv6(Icmpv6Repr::parse( + &header.src_addr, + &header.dst_addr, + &Icmpv6Packet::new_unchecked(buffer), + checksum_caps, + )?)), + #[cfg(any(feature = "socket-raw", feature = "proto-rpl"))] + IpPayloadType::Raw => Ok(Self::Raw(buffer)), + #[cfg(any(feature = "socket-udp", feature = "socket-dns"))] + IpPayloadType::Udp => { + let packet = &UdpPacket::new_unchecked(buffer); + let repr = UdpRepr::parse( + packet, + &header.src_addr.into(), + &header.dst_addr.into(), + checksum_caps, + )?; + + Ok(Self::Udp(repr, packet.payload())) + } + #[cfg(feature = "socket-tcp")] + IpPayloadType::Tcp => Ok(Self::Tcp(TcpRepr::parse( 
+ &TcpPacket::new_unchecked(buffer), + &header.src_addr.into(), + &header.dst_addr.into(), + checksum_caps, + )?)), + #[cfg(feature = "socket-dhcpv4")] + IpPayloadType::Dhcpv4 => { + // FIXME: actually use the DHCP repr + let packet = &UdpPacket::new_unchecked(buffer); + let repr = UdpRepr::parse( + packet, + &header.src_addr.into(), + &header.dst_addr.into(), + checksum_caps, + )?; + + Ok(Self::Udp(repr, packet.payload())) + } + } + } + + pub fn parse( + buffer: &'a [u8], + payload_type: IpPayloadType, + header: &Ipv6Repr, + checksum_caps: &ChecksumCapabilities, + ) -> crate::wire::Result { + match payload_type { + #[cfg(feature = "proto-ipv4")] + IpPayloadType::Icmpv4 => Ok(Self::Icmpv4( + Icmpv4Repr::parse(&Icmpv4Packet::new_checked(buffer)?, checksum_caps) + .map_err(|_| crate::wire::Error)?, + )), + #[cfg(feature = "proto-igmp")] + IpPayloadType::Igmp => Ok(Self::Igmp(IgmpRepr::parse(&IgmpPacket::new_checked( + buffer, + )?)?)), + #[cfg(feature = "proto-ipv6")] + IpPayloadType::Icmpv6 => Ok(Self::Icmpv6(Icmpv6Repr::parse( + &header.src_addr, + &header.dst_addr, + &Icmpv6Packet::new_checked(buffer)?, + checksum_caps, + )?)), + #[cfg(any(feature = "socket-raw", feature = "proto-rpl"))] + IpPayloadType::Raw => Ok(Self::Raw(buffer)), + #[cfg(any(feature = "socket-udp", feature = "socket-dns"))] + IpPayloadType::Udp => { + let packet = &UdpPacket::new_checked(buffer)?; + let repr = UdpRepr::parse( + packet, + &header.src_addr.into(), + &header.dst_addr.into(), + checksum_caps, + )?; + + Ok(Self::Udp(repr, packet.payload())) + } + #[cfg(feature = "socket-tcp")] + IpPayloadType::Tcp => Ok(Self::Tcp(TcpRepr::parse( + &TcpPacket::new_checked(buffer)?, + &header.src_addr.into(), + &header.dst_addr.into(), + checksum_caps, + )?)), + #[cfg(feature = "socket-dhcpv4")] + IpPayloadType::Dhcpv4 => { + // FIXME and actually use the DHCP representation + let packet = &UdpPacket::new_checked(buffer)?; + let repr = UdpRepr::parse( + packet, + &header.src_addr.into(), + &header.dst_addr.into(), + checksum_caps, + )?; + + Ok(Self::Udp(repr, packet.payload())) + } + } + } + + pub fn payload_type(&self) -> IpPayloadType { + match self { + #[cfg(feature = "proto-ipv4")] + Self::Icmpv4(_) => IpPayloadType::Icmpv4, + #[cfg(feature = "proto-igmp")] + Self::Igmp(_) => IpPayloadType::Igmp, + #[cfg(feature = "proto-ipv6")] + Self::Icmpv6(_) => IpPayloadType::Icmpv6, + #[cfg(any(feature = "socket-raw", feature = "proto-rpl"))] + Self::Raw(_) => IpPayloadType::Raw, + #[cfg(any(feature = "socket-udp", feature = "socket-dns"))] + Self::Udp(_, _) => IpPayloadType::Udp, + #[cfg(feature = "socket-tcp")] + Self::Tcp(_) => IpPayloadType::Tcp, + #[cfg(feature = "socket-dhcpv4")] + Self::Dhcpv4(_, _) => IpPayloadType::Dhcpv4, + } + } + + pub(crate) fn emit(&self, header: &IpRepr, payload: &mut [u8], caps: &DeviceCapabilities) { + match self { + #[cfg(feature = "proto-ipv4")] + IpPayload::Icmpv4(icmpv4_repr) => { + icmpv4_repr.emit(&mut Icmpv4Packet::new_unchecked(payload), &caps.checksum) + } + #[cfg(feature = "proto-igmp")] + IpPayload::Igmp(igmp_repr) => igmp_repr.emit(&mut IgmpPacket::new_unchecked(payload)), + #[cfg(feature = "proto-ipv6")] + IpPayload::Icmpv6(icmpv6_repr) => { + let ipv6_repr = match header { + #[cfg(feature = "proto-ipv4")] + IpRepr::Ipv4(_) => unreachable!(), + IpRepr::Ipv6(repr) => repr, + }; + + icmpv6_repr.emit( + &ipv6_repr.src_addr, + &ipv6_repr.dst_addr, + &mut Icmpv6Packet::new_unchecked(payload), + &caps.checksum, + ) + } + #[cfg(any(feature = "socket-raw", feature = "proto-rpl"))] + 
IpPayload::Raw(raw_packet) => payload.copy_from_slice(raw_packet), + #[cfg(any(feature = "socket-udp", feature = "socket-dns"))] + IpPayload::Udp(udp_repr, inner_payload) => udp_repr.emit( + &mut UdpPacket::new_unchecked(payload), + &header.src_addr(), + &header.dst_addr(), + inner_payload.len(), + |buf| buf.copy_from_slice(inner_payload), + &caps.checksum, + ), + #[cfg(feature = "socket-tcp")] + IpPayload::Tcp(mut tcp_repr) => { + // This is a terrible hack to make TCP performance more acceptable on systems + // where the TCP buffers are significantly larger than network buffers, + // e.g. a 64 kB TCP receive buffer (and so, when empty, a 64k window) + // together with four 1500 B Ethernet receive buffers. If left untreated, + // this would result in our peer pushing our window and sever packet loss. + // + // I'm really not happy about this "solution" but I don't know what else to do. + if let Some(max_burst_size) = caps.max_burst_size { + let mut max_segment_size = caps.max_transmission_unit; + max_segment_size -= header.header_len(); + max_segment_size -= tcp_repr.header_len(); + + let max_window_size = max_burst_size * max_segment_size; + if tcp_repr.window_len as usize > max_window_size { + tcp_repr.window_len = max_window_size as u16; + } + } + + tcp_repr.emit( + &mut TcpPacket::new_unchecked(payload), + &header.src_addr(), + &header.dst_addr(), + &caps.checksum, + ); + } + #[cfg(feature = "socket-dhcpv4")] + IpPayload::Dhcpv4(udp_repr, dhcp_repr) => udp_repr.emit( + &mut UdpPacket::new_unchecked(payload), + &header.src_addr(), + &header.dst_addr(), + dhcp_repr.buffer_len(), + |buf| dhcp_repr.emit(&mut DhcpPacket::new_unchecked(buf)).unwrap(), + &caps.checksum, + ), + } + } +} + #[cfg(any(feature = "proto-ipv4", feature = "proto-ipv6"))] pub(crate) fn icmp_reply_payload_len(len: usize, mtu: usize, header_len: usize) -> usize { // Send back as much of the original payload as will fit within diff --git a/src/iface/rpl/mod.rs b/src/iface/rpl/mod.rs index ed6312fc7..4f1fe28fd 100644 --- a/src/iface/rpl/mod.rs +++ b/src/iface/rpl/mod.rs @@ -7,6 +7,7 @@ mod rank; mod relations; mod trickle; +use crate::config::RPL_MAX_OPTIONS; use crate::rand::Rand; use crate::time::{Duration, Instant}; use crate::wire::{ @@ -33,6 +34,21 @@ pub enum ModeOfOperation { StoringModeWithMulticast, } +#[cfg(feature = "std")] +impl core::fmt::Display for ModeOfOperation { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + ModeOfOperation::NoDownwardRoutesMaintained => write!(f, "mop0"), + #[cfg(feature = "rpl-mop-1")] + ModeOfOperation::NonStoringMode => write!(f, "mop1"), + #[cfg(feature = "rpl-mop-2")] + ModeOfOperation::StoringMode => write!(f, "mop1"), + #[cfg(feature = "rpl-mop-3")] + ModeOfOperation::StoringModeWithMulticast => write!(f, "mop3"), + } + } +} + impl From for ModeOfOperation { fn from(value: crate::wire::rpl::ModeOfOperation) -> Self { use crate::wire::rpl::ModeOfOperation as WireMop; @@ -66,12 +82,22 @@ impl From for crate::wire::rpl::ModeOfOperation { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct Config { pub mode_of_operation: ModeOfOperation, pub root: Option, } +impl Default for Config { + fn default() -> Self { + // TODO: Make some kind of leaf mode + Self { + mode_of_operation: ModeOfOperation::NoDownwardRoutesMaintained, + root: None, + } + } +} + impl Config { /// Create a new RPL configuration. 
pub fn new(mode_of_operation: ModeOfOperation) -> Self { @@ -92,7 +118,7 @@ impl Config { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct RootConfig { pub instance_id: RplInstanceId, pub dodag_id: Ipv6Address, @@ -177,7 +203,7 @@ pub(crate) struct Dao { pub next_tx: Option, pub sent_count: u8, pub to: Ipv6Address, - pub child: Ipv6Address, + pub targets: heapless::Vec, pub parent: Option, pub sequence: RplSequenceCounter, pub is_no_path: bool, @@ -192,7 +218,7 @@ impl Dao { #[allow(clippy::too_many_arguments)] pub(crate) fn new( to: Ipv6Address, - child: Ipv6Address, + targets: &[Ipv6Address; RPL_MAX_OPTIONS - 1], parent: Option, sequence: RplSequenceCounter, lifetime: u8, @@ -205,7 +231,7 @@ impl Dao { next_tx: None, sent_count: 0, to, - child, + targets: heapless::Vec::from_slice(targets).unwrap(), // Length check in types parent, sequence, lifetime, @@ -218,7 +244,7 @@ impl Dao { pub(crate) fn no_path( to: Ipv6Address, - child: Ipv6Address, + targets: heapless::Vec, sequence: RplSequenceCounter, instance_id: RplInstanceId, dodag_id: Option, @@ -229,7 +255,7 @@ impl Dao { next_tx: None, sent_count: 0, to, - child, + targets, parent: None, sequence, lifetime: 0, @@ -242,12 +268,14 @@ impl Dao { pub(crate) fn as_rpl_dao_repr<'dao>(&mut self) -> RplRepr<'dao> { let mut options = heapless::Vec::new(); - options - .push(RplOptionRepr::RplTarget(RplTarget { - prefix_length: 64, // TODO: get the prefix length from the address. - prefix: heapless::Vec::from_slice(self.child.as_bytes()).unwrap(), - })) - .unwrap(); + for target in &self.targets { + options + .push(RplOptionRepr::RplTarget(RplTarget { + prefix_length: 64, // TODO: get the prefix length from the address. + prefix: heapless::Vec::from_slice(target.as_bytes()).unwrap(), + })) + .unwrap(); + } options .push(RplOptionRepr::TransitInformation(RplTransitInformation { external: false, @@ -266,6 +294,10 @@ impl Dao { options, }) } + + pub(crate) fn has_multicast_target(&mut self) -> bool { + self.targets.iter().any(|target| target.is_multicast()) + } } impl Rpl { @@ -412,20 +444,22 @@ impl Dodag { } /// ## Panics /// This function will panic if the DODAG does not have a parent selected. 
- pub(crate) fn remove_parent( + // pub(crate) fn remove_parent( + pub(crate) fn remove_parent( &mut self, - mop: ModeOfOperation, - our_addr: Ipv6Address, - of: &OF, - now: Instant, - rand: &mut Rand, + // mop: ModeOfOperation, + // our_addr: Ipv6Address, + // of: &OF, + // now: Instant, + // rand: &mut Rand, ) -> Ipv6Address { let old_parent = self.parent.unwrap(); self.parent = None; self.parent_set.remove(&old_parent); - self.find_new_parent(mop, our_addr, of, now, rand); + // FIXME: Probably not a good idea to have a recursive loop in function calls + // self.find_new_parent(mop, our_addr, of, now, rand); old_parent } @@ -436,32 +470,54 @@ impl Dodag { pub(crate) fn remove_parent_with_no_path( &mut self, mop: ModeOfOperation, - our_addr: Ipv6Address, - child: Ipv6Address, + // our_addr: Ipv6Address, + targets: &[Ipv6Address], + targets_multicast: &[Ipv6Address], of: &OF, now: Instant, rand: &mut Rand, ) { - let old_parent = self.remove_parent(mop, our_addr, of, now, rand); - - #[cfg(feature = "rpl-mop-2")] - self.daos - .push(Dao::no_path( - old_parent, - child, - self.dao_seq_number, - self.instance_id, - Some(self.id), - self.rank, - )) - .unwrap(); - self.dao_seq_number.increment(); + // let old_parent = self.remove_parent(mop, our_addr, of, now, rand); + let old_parent = self.remove_parent(); + + #[cfg(any(feature = "rpl-mop-2", feature = "rpl-mop-3"))] + { + for targets in targets.chunks(RPL_MAX_OPTIONS - 1) { + self.daos + .push(Dao::no_path( + old_parent, + heapless::Vec::from_slice(targets).unwrap(), + self.dao_seq_number, + self.instance_id, + Some(self.id), + self.rank, + )) + .unwrap(); + self.dao_seq_number.increment(); + } + + #[cfg(feature = "rpl-mop-3")] + for targets in targets_multicast.chunks(RPL_MAX_OPTIONS - 1) { + self.daos + .push(Dao::no_path( + old_parent, + heapless::Vec::from_slice(targets).unwrap(), + self.dao_seq_number, + self.instance_id, + Some(self.id), + self.rank, + )) + .unwrap(); + self.dao_seq_number.increment(); + } + } } pub(crate) fn find_new_parent( &mut self, mop: ModeOfOperation, - child: Ipv6Address, + targets: &[Ipv6Address], + targets_multicast: &[Ipv6Address], of: &OF, now: Instant, rand: &mut Rand, @@ -474,21 +530,40 @@ impl Dodag { if let Some(parent) = of.preferred_parent(&self.parent_set) { // Send a NO-PATH DAO in MOP 2 when we already had a parent. 
- #[cfg(feature = "rpl-mop-2")] + #[cfg(any(feature = "rpl-mop-2", feature = "rpl-mop-3"))] if let Some(old_parent) = old_parent { - if matches!(mop, ModeOfOperation::StoringMode) && old_parent != parent { - net_trace!("scheduling NO-PATH DAO for {} to {}", child, old_parent); - match self.daos.push(Dao::no_path( - old_parent, - child, - self.dao_seq_number, - self.instance_id, - Some(self.id), - self.rank, - )) { - Ok(_) => self.dao_seq_number.increment(), - Err(_) => net_trace!("could not schedule DAO"), + let is_mop2 = { + #[cfg(feature = "rpl-mop-2")] + { + matches!(mop, ModeOfOperation::StoringMode) + } + #[cfg(not(feature = "rpl-mop-2"))] + false + }; + let is_mop3 = { + #[cfg(feature = "rpl-mop-3")] + { + matches!(mop, ModeOfOperation::StoringModeWithMulticast) } + #[cfg(not(feature = "rpl-mop-3"))] + false + }; + if (is_mop2 || is_mop3) && old_parent != parent { + net_trace!( + "scheduling NO-PATH DAO for {:?} and {:?} to {}", + targets, + targets_multicast, + old_parent + ); + self.remove_parent_with_no_path( + mop, + // our_addr, + targets, + targets_multicast, + of, + now, + rand, + ) } } @@ -502,7 +577,7 @@ impl Dodag { #[cfg(any(feature = "rpl-mop-1", feature = "rpl-mop-2", feature = "rpl-mop-3"))] if !matches!(mop, ModeOfOperation::NoDownwardRoutesMaintained) { - self.schedule_dao(mop, child, parent, now); + self.schedule_dao(mop, targets, targets_multicast, parent, now, false); } } } else { @@ -515,50 +590,119 @@ impl Dodag { pub(crate) fn schedule_dao( &mut self, mop: ModeOfOperation, - child: Ipv6Address, + targets: &[Ipv6Address], + targets_multicast: &[Ipv6Address], parent: Ipv6Address, now: Instant, - ) { + is_no_path: bool, + ) -> Result<(), DodagTransmissionError> { + use heapless::LinearMap; + #[cfg(feature = "rpl-mop-1")] if matches!(mop, ModeOfOperation::NonStoringMode) { - net_trace!("scheduling DAO: {} is parent of {}", parent, child); - self.daos - .push(Dao::new( - self.id, - child, - Some(parent), - self.dao_seq_number, - self.default_lifetime, - self.instance_id, - Some(self.id), - self.rank, - )) - .unwrap(); - self.dao_seq_number.increment(); + net_trace!("scheduling DAO: {} is parent of {:?}", parent, targets); + for targets in targets.chunks(RPL_MAX_OPTIONS - 1) { + self.daos + .push(if is_no_path { + Dao::no_path( + self.id, + targets.try_into().unwrap(), // Checks in the types + self.dao_seq_number, + self.instance_id, + Some(self.id), + self.rank, + ) + } else { + Dao::new( + self.id, + targets.try_into().unwrap(), // Checks in the types + Some(parent), + self.dao_seq_number, + self.default_lifetime, + self.instance_id, + Some(self.id), + self.rank, + ) + }) + .map_err(|_err| DodagTransmissionError::DaoExhausted); + self.dao_seq_number.increment(); + } } - #[cfg(feature = "rpl-mop-2")] - if matches!(mop, ModeOfOperation::StoringMode) { - net_trace!("scheduling DAO: {} is parent of {}", parent, child); - self.daos - .push(Dao::new( - parent, - child, - None, - self.dao_seq_number, - self.default_lifetime, - self.instance_id, - Some(self.id), - self.rank, - )) - .unwrap(); - self.dao_seq_number.increment(); + #[cfg(all(feature = "rpl-mop-2", feature = "rpl-mop-3"))] + if matches!( + mop, + ModeOfOperation::StoringMode | ModeOfOperation::StoringModeWithMulticast + ) { + net_trace!("scheduling DAO: {} is parent of {:?}", parent, targets); + for targets in targets.chunks(RPL_MAX_OPTIONS - 1) { + self.daos + .push(if is_no_path { + Dao::no_path( + parent, + targets.try_into().unwrap(), // Checks in the types + self.dao_seq_number, + self.instance_id, + 
Some(self.id), + self.rank, + ) + } else { + Dao::new( + parent, + targets.try_into().unwrap(), // Checks in the types + None, + self.dao_seq_number, + self.default_lifetime, + self.instance_id, + Some(self.id), + self.rank, + ) + }) + .unwrap(); + + self.dao_seq_number.increment(); + } + + // If we are in MOP3, we also send a DOA with our subscribed multicast addresses. + #[cfg(feature = "rpl-mop-3")] + { + net_trace!("scheduling multicast DAO"); + for targets in targets_multicast.chunks(RPL_MAX_OPTIONS - 1) { + self.daos + .push(if is_no_path { + Dao::no_path( + parent, + targets.try_into().unwrap(), // Checks in the types + self.dao_seq_number, + self.instance_id, + Some(self.id), + self.rank, + ) + } else { + Dao::new( + parent, + targets.try_into().unwrap(), // Checks in the types + None, + self.dao_seq_number, + self.default_lifetime, + self.instance_id, + Some(self.id), + self.rank, + ) + }) + .unwrap(); + + self.dao_seq_number.increment(); + } + } } let exp = (self.lifetime_unit as u64 * self.default_lifetime as u64) .checked_sub(2 * 60) .unwrap_or(2 * 60); self.dao_expiration = now + Duration::from_secs(exp); + + Ok(()) } /// ## Panics @@ -579,3 +723,25 @@ impl Dodag { }) } } + +#[derive(Debug, Clone)] +pub enum DodagTransmissionError { + DaoExhausted, +} + +impl core::fmt::Display for DodagTransmissionError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::DaoExhausted => write!(f, "DAO buffer is exhausted"), + } + } +} + +#[cfg(feature = "defmt")] +impl defmt::Format for DodagTransmissionError { + fn format(&self, f: defmt::Formatter<'_>) { + match self { + Self::DaoExhausted => defmt::write!(f, "DAO buffer is exhausted"), + } + } +} diff --git a/src/iface/rpl/relations.rs b/src/iface/rpl/relations.rs index b4efbd184..f636830f9 100644 --- a/src/iface/rpl/relations.rs +++ b/src/iface/rpl/relations.rs @@ -1,38 +1,296 @@ use crate::time::{Duration, Instant}; use crate::wire::Ipv6Address; -use crate::config::RPL_RELATIONS_BUFFER_COUNT; +use crate::config::{RPL_MAX_NEXT_HOP_PER_DESTINATION, RPL_RELATIONS_BUFFER_COUNT}; #[derive(Debug)] -pub struct Relation { +pub enum RelationError { + NextHopExhausted, + ToFewNextHops, + RelationTypeNotSupported, +} + +#[cfg(feature = "std")] +impl std::error::Error for RelationError {} + +impl core::fmt::Display for RelationError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + RelationError::NextHopExhausted => write!(f, "Next hop exhausted"), + RelationError::ToFewNextHops => write!(f, "Expected at least 1 next hop"), + RelationError::RelationTypeNotSupported => { + write!(f, "The type of destination is not supported as a relation") + } + } + } +} + +#[cfg(feature = "defmt")] +impl defmt::Format for RelationError { + fn format(&self, f: defmt::Formatter<'_>) { + match self { + RelationError::NextHopExhausted => defmt::write!(f, "Next hop exhausted"), + RelationError::ToFewNextHops => defmt::write!(f, "Expected at least 1 next hop"), + RelationError::RelationTypeNotSupported => { + defmt::write!(f, "The type of destination is not supported as a relation") + } + } + } +} + +#[derive(Debug)] +pub struct UnicastRelation { destination: Ipv6Address, - next_hop: Ipv6Address, - added: Instant, - lifetime: Duration, + next_hop: [RelationHop; 1], } -impl core::fmt::Display for Relation { +impl core::fmt::Display for UnicastRelation { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!( f, - "{} via {} (expires at {})", + "{} via [{}] 
(expires at {})", self.destination, - self.next_hop, - self.added + self.lifetime + self.next_hop[0], + self.next_hop[0].added + self.next_hop[0].lifetime ) } } #[cfg(feature = "defmt")] -impl defmt::Format for Relation { - fn format(&self, fmt: defmt::Formatter) { +impl defmt::Format for UnicastRelation { + fn format(&self, f: defmt::Formatter) { defmt::write!( - fmt, - "{} via {} (expires at {})", + f, + "{} via [{}] (expires at {})", self.destination, - self.next_hop, - self.added + self.lifetime - ); + self.next_hop[0], + self.next_hop[0].added + self.next_hop[0].lifetime + ) + } +} + +#[cfg(feature = "rpl-mop-3")] +#[derive(Debug)] +pub struct MulticastRelation { + destination: Ipv6Address, + next_hops: heapless::Vec, +} + +#[derive(Debug, PartialEq, Clone)] +pub struct RelationHop { + pub ip: Ipv6Address, + pub added: Instant, + pub lifetime: Duration, +} + +impl RelationHop { + pub fn expires_at(&self) -> Instant { + self.added + self.lifetime + } + + pub fn has_expired(&self, now: Instant) -> bool { + self.expires_at() <= now + } +} + +impl core::fmt::Display for RelationHop { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{} (expires at {})", self.ip, self.added + self.lifetime) + } +} + +#[cfg(feature = "defmt")] +impl defmt::Format for RelationHop { + fn format(&self, f: defmt::Formatter) { + defmt::write!(f, "{} (expires at {})", self.ip, self.added + self.lifetime) + } +} + +#[cfg(feature = "rpl-mop-3")] +impl MulticastRelation { + /// Insert a next hop for this relation. If the next hop already exists, if + /// will return Ok(true) otherwise Ok(false) + fn insert_next_hop( + &mut self, + ip: Ipv6Address, + added: Instant, + lifetime: Duration, + ) -> Result { + if let Some(next_hop) = self.next_hops.iter_mut().find(|hop| hop.ip == ip) { + next_hop.added = added; + next_hop.lifetime = lifetime; + + Ok(true) + } else { + self.next_hops + .push(RelationHop { + ip, + added, + lifetime, + }) + .map_err(|_err| RelationError::NextHopExhausted)?; + Ok(false) + } + } + + /// Removes the next_hop from this relation + pub fn remove_next_hop(&mut self, ip: Ipv6Address) { + self.next_hops.retain(|next_hop| next_hop.ip == ip); + } +} + +#[cfg(feature = "rpl-mop-3")] +impl core::fmt::Display for MulticastRelation { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{} via [", self.destination)?; + + for hop in &self.next_hops { + write!(f, "{},", hop)?; + } + + write!(f, "]")?; + + Ok(()) + } +} + +#[cfg(all(feature = "defmt", feature = "rpl-mop-3"))] +impl defmt::Format for MulticastRelation { + fn format(&self, f: defmt::Formatter) { + defmt::write!(f, "{} via [", self.destination); + + for hop in &self.next_hops { + defmt::write!(f, "{},", hop); + } + + defmt::write!(f, "]"); + } +} + +#[derive(Debug)] +pub enum Relation { + Unicast(UnicastRelation), + #[cfg(feature = "rpl-mop-3")] + Multicast(MulticastRelation), +} + +#[cfg(feature = "defmt")] +impl defmt::Format for Relation { + fn format(&self, fmt: defmt::Formatter) { + match self { + Self::Unicast(rel) => rel.format(fmt), + #[cfg(feature = "rpl-mop-3")] + Self::Multicast(rel) => rel.format(fmt), + } + } +} + +impl Relation { + pub fn new( + destination: Ipv6Address, + next_hops: &[Ipv6Address], + now: Instant, + lifetime: Duration, + ) -> Result { + if destination.is_multicast() { + #[cfg(feature = "rpl-mop-3")] + { + Ok(Self::Multicast(MulticastRelation { + destination, + next_hops: heapless::Vec::from_iter(next_hops.iter().map(|hop| RelationHop { + ip: *hop, 
+ added: now, + lifetime, + })), + })) + } + #[cfg(not(feature = "rpl-mop-3"))] + Err(RelationError::RelationTypeNotSupported) + } else { + if next_hops.len() > 1 { + return Err(RelationError::NextHopExhausted); + } + Ok(Self::Unicast(UnicastRelation { + destination, + next_hop: [RelationHop { + ip: next_hops[0], + added: now, + lifetime, + }], + })) + } + } + + pub fn destination(&self) -> Ipv6Address { + match self { + Self::Unicast(rel) => rel.destination, + #[cfg(feature = "rpl-mop-3")] + Self::Multicast(rel) => rel.destination, + } + } + + /// Insert a next hop for the given relation. If this is a unicast relation, + /// the previous will be overwritten and if it is a multicast relation it + /// will add an extra hop if the hop does not already exist. If there already + /// exists a hop in the multicast relation, the lifetime related metadata + /// will be updated. + pub fn insert_next_hop( + &mut self, + ip: Ipv6Address, + added: Instant, + lifetime: Duration, + ) -> Result { + match self { + Self::Unicast(rel) => { + let next_hop = &mut rel.next_hop[0]; + next_hop.ip = ip; + next_hop.added = added; + next_hop.lifetime = lifetime; + Ok(true) + } + #[cfg(feature = "rpl-mop-3")] + Self::Multicast(rel) => rel.insert_next_hop(ip, added, lifetime), + } + } + + pub fn next_hop(&self) -> &[RelationHop] { + match self { + Self::Unicast(rel) => &rel.next_hop, + #[cfg(feature = "rpl-mop-3")] + Self::Multicast(rel) => &rel.next_hops, + } + } + + /// A relation has expired if all its possible hops have expired + pub fn has_expired(&self, now: Instant) -> bool { + match self { + Self::Unicast(rel) => rel.next_hop.iter().all(|hop| hop.has_expired(now)), + #[cfg(feature = "rpl-mop-3")] + Self::Multicast(rel) => rel.next_hops.iter().all(|hop| hop.has_expired(now)), + } + } + + pub fn is_multicast(&self) -> bool { + #[cfg(feature = "rpl-mop-3")] + { + matches!(self, Self::Multicast(_)) + } + #[cfg(not(feature = "rpl-mop-3"))] + false + } + + pub fn is_unicast(&self) -> bool { + matches!(self, Self::Unicast(_)) + } +} + +impl core::fmt::Display for Relation { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::Unicast(rel) => rel.fmt(f), + #[cfg(feature = "rpl-mop-3")] + Self::Multicast(rel) => rel.fmt(f), + } } } @@ -47,43 +305,67 @@ impl Relations { pub fn add_relation( &mut self, destination: Ipv6Address, - next_hop: Ipv6Address, + next_hops: &[Ipv6Address], now: Instant, lifetime: Duration, - ) { + ) -> Result<(), RelationError> { if let Some(r) = self .relations .iter_mut() - .find(|r| r.destination == destination) + .find(|r| r.destination() == destination) { - net_trace!("Updating old relation information"); - r.next_hop = next_hop; - r.added = now; - r.lifetime = lifetime; - } else { - let relation = Relation { + net_trace!( + "Updating old relation information for destination: {} with hops: {:?}", destination, - next_hop, - added: now, - lifetime, - }; + next_hops + ); + for next_hop in next_hops { + r.insert_next_hop(*next_hop, now, lifetime)?; + } + } else { + let relation = Relation::new(destination, next_hops, now, lifetime)?; if let Err(e) = self.relations.push(relation) { net_trace!("unable to add relation, buffer is full"); } } + + Ok(()) } /// Remove all relation entries for a specific destination. 
- pub fn remove_relation(&mut self, destination: Ipv6Address) { - self.relations.retain(|r| r.destination != destination) + pub fn remove_hop_from_relation(&mut self, destination: Ipv6Address, hop: Ipv6Address) { + self.relations.retain_mut(|r| match r { + Relation::Unicast(r) => !(r.destination == destination && r.next_hop[0].ip == hop), + Relation::Multicast(r) => { + // Do nothing if the destination is not correct + if r.destination != destination { + return true; + } + + let pos = r.next_hops.iter().position(|h| h.ip == hop); + + // Remove if position exists + let Some(pos) = pos else { + return true; // Does not exist, so do nothing + }; + r.next_hops.swap_remove(pos); + + // Remove relation if still it has no more hops + !r.next_hops.is_empty() + } + }) } /// Return the next hop for a specific IPv6 address, if there is one. - pub fn find_next_hop(&self, destination: Ipv6Address) -> Option { + pub fn find_next_hop(&self, destination: Ipv6Address) -> Option<&[RelationHop]> { self.relations.iter().find_map(|r| { - if r.destination == destination { - Some(r.next_hop) + if r.destination() == destination { + match r { + Relation::Unicast(r) => Some(&r.next_hop[..]), + #[cfg(feature = "rpl-mop-3")] + Relation::Multicast(r) => Some(&r.next_hops), + } } else { None } @@ -95,12 +377,30 @@ impl Relations { /// Returns `true` when a relation was actually removed. pub fn flush(&mut self, now: Instant) -> bool { let len = self.relations.len(); - for r in &self.relations { - if r.added + r.lifetime <= now { - net_trace!("removing {} relation (expired)", r.destination); + self.relations.retain_mut(|r| { + // First flush all relations if it is a multicast relation + let has_expired = match r { + Relation::Unicast(rel) => rel.next_hop[0].has_expired(now), + #[cfg(feature = "rpl-mop-3")] + Relation::Multicast(rel) => { + rel.next_hops.retain(|hop| { + if hop.has_expired(now) { + net_trace!("Removing {} hop (expired)", hop); + false + } else { + true + } + }); + rel.next_hops.is_empty() + } + }; + + if has_expired { + net_trace!("Removing {} (destination)", r.destination()); } - } - self.relations.retain(|r| r.added + r.lifetime > now); + + !has_expired + }); self.relations.len() != len } @@ -131,7 +431,7 @@ mod tests { let mut relations = Relations::default(); relations.add_relation( addrs[0], - addrs[1], + &[addrs[1]], Instant::now(), Duration::from_secs(60 * 30), ); @@ -146,7 +446,7 @@ mod tests { // The size of the buffer should still be RPL_RELATIONS_BUFFER_COUNT. 
let mut relations = Relations::default(); for a in addrs { - relations.add_relation(a, a, Instant::now(), Duration::from_secs(60 * 30)); + relations.add_relation(a, &[a], Instant::now(), Duration::from_secs(60 * 30)); } assert_eq!(relations.relations.len(), RPL_RELATIONS_BUFFER_COUNT); @@ -159,7 +459,7 @@ mod tests { let mut relations = Relations::default(); relations.add_relation( addrs[0], - addrs[1], + &[addrs[1]], Instant::now(), Duration::from_secs(60 * 30), ); @@ -167,13 +467,16 @@ mod tests { relations.add_relation( addrs[0], - addrs[2], + &[addrs[2]], Instant::now(), Duration::from_secs(60 * 30), ); assert_eq!(relations.relations.len(), 1); - assert_eq!(relations.find_next_hop(addrs[0]), Some(addrs[2])); + assert_eq!( + relations.find_next_hop(addrs[0]).map(|hop| hop[0].ip), + Some(addrs[2]) + ); } #[test] @@ -183,24 +486,30 @@ mod tests { let mut relations = Relations::default(); relations.add_relation( addrs[0], - addrs[1], + &[addrs[1]], Instant::now(), Duration::from_secs(60 * 30), ); assert_eq!(relations.relations.len(), 1); - assert_eq!(relations.find_next_hop(addrs[0]), Some(addrs[1])); + assert_eq!( + relations.find_next_hop(addrs[0]).map(|hop| hop[0].ip), + Some(addrs[1]) + ); relations.add_relation( addrs[0], - addrs[2], + &[addrs[2]], Instant::now(), Duration::from_secs(60 * 30), ); assert_eq!(relations.relations.len(), 1); - assert_eq!(relations.find_next_hop(addrs[0]), Some(addrs[2])); + assert_eq!( + relations.find_next_hop(addrs[0]).map(|hop| hop[0].ip), + Some(addrs[2]) + ); // Find the next hop of a destination not in the buffer. - assert_eq!(relations.find_next_hop(addrs[1]), None); + assert_eq!(relations.find_next_hop(addrs[1]).map(|hop| hop[0].ip), None); } #[test] @@ -210,13 +519,13 @@ mod tests { let mut relations = Relations::default(); relations.add_relation( addrs[0], - addrs[1], + &[addrs[1]], Instant::now(), Duration::from_secs(60 * 30), ); assert_eq!(relations.relations.len(), 1); - relations.remove_relation(addrs[0]); + relations.remove_hop_from_relation(addrs[0], addrs[1]); assert!(relations.relations.is_empty()); } @@ -227,7 +536,7 @@ mod tests { let mut relations = Relations::default(); relations.add_relation( addrs[0], - addrs[1], + &[addrs[1]], Instant::now() - Duration::from_secs(60 * 30 + 1), Duration::from_secs(60 * 30), ); diff --git a/src/iface/rpl/trickle.rs b/src/iface/rpl/trickle.rs index ea37ba492..37e97a755 100644 --- a/src/iface/rpl/trickle.rs +++ b/src/iface/rpl/trickle.rs @@ -14,7 +14,7 @@ use crate::{ time::{Duration, Instant}, }; -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct TrickleTimer { pub(crate) i_min: u32, diff --git a/src/lib.rs b/src/lib.rs index c758bd6d6..e81643c6a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -142,11 +142,13 @@ pub mod config { pub const IFACE_MAX_ROUTE_COUNT: usize = 4; pub const IFACE_MAX_SIXLOWPAN_ADDRESS_CONTEXT_COUNT: usize = 4; pub const IFACE_NEIGHBOR_CACHE_COUNT: usize = 3; + pub const IFACE_MAX_MULTICAST_DUPLICATION_COUNT: usize = 16; pub const REASSEMBLY_BUFFER_COUNT: usize = 4; pub const REASSEMBLY_BUFFER_SIZE: usize = 1500; pub const RPL_RELATIONS_BUFFER_COUNT: usize = 16; pub const RPL_PARENTS_BUFFER_COUNT: usize = 8; pub const RPL_MAX_OPTIONS: usize = 2; + pub const RPL_MAX_NEXT_HOP_PER_DESTINATION: usize = 4; pub const IPV6_HBH_MAX_OPTIONS: usize = 2; } diff --git a/src/macros.rs b/src/macros.rs index e899d24ec..e31fd6708 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -167,3 +167,46 @@ macro_rules! 
set { NetworkEndian::write_u32(&mut $buffer.as_mut()[$field], $value); }}; } + +macro_rules! matches_cfg { + ([$($sel:tt)*] $expression:expr, $pattern:pat $(if $guard:expr)? $(,)?) => { + { + #[cfg($($sel)*)] + { + matches!($expression, $pattern $(if $guard)?) + } + #[cfg(not($($sel)*))] + { + false + } + } + }; +} + +macro_rules! cfg_match { + (($expression:expr) { + $($(cfg[$($sel:tt)*])? ($pattern:pat) => $arm_expr:block)* + }) => { + match $expression { + $( + $(#[cfg($($sel)*)])? + $pattern => $arm_expr, + )* + } + }; +} + +macro_rules! cfg_or { + ([$($sel:tt)*] $expression:expr, $or_other:expr) => { + { + #[cfg($($sel)*)] + { + $expression + } + #[cfg(not($($sel)*))] + { + $or_other + } + } + }; +} diff --git a/src/storage/packet_buffer.rs b/src/storage/packet_buffer.rs index 28119fa10..de255a271 100644 --- a/src/storage/packet_buffer.rs +++ b/src/storage/packet_buffer.rs @@ -229,6 +229,19 @@ impl<'a, H> PacketBuffer<'a, H> { } } + pub fn peek_mut(&mut self) -> Result<(&mut H, &mut [u8]), Empty> { + self.dequeue_padding(); + + if let Some(metadata) = self.metadata_ring.get_allocated_mut(0, 1).first_mut() { + Ok(( + metadata.header.as_mut().unwrap(), + self.payload_ring.get_allocated_mut(0, metadata.size), + )) + } else { + Err(Empty) + } + } + /// Return the maximum number packets that can be stored. pub fn packet_capacity(&self) -> usize { self.metadata_ring.capacity() diff --git a/src/storage/ring_buffer.rs b/src/storage/ring_buffer.rs index 7d461b68c..8951058e8 100644 --- a/src/storage/ring_buffer.rs +++ b/src/storage/ring_buffer.rs @@ -369,6 +369,29 @@ impl<'a, T: 'a> RingBuffer<'a, T> { &self.storage[start_at..start_at + size] } + /// Return the largest contiguous slice of allocated buffer elements starting + /// at the given offset past the first allocated element, and up to the given size. + #[must_use] + pub fn get_allocated_mut(&mut self, offset: usize, mut size: usize) -> &mut [T] { + let start_at = self.get_idx(offset); + // We can't read past the end of the allocated data. + if offset > self.length { + return &mut []; + } + // We can't read more than we have allocated. + let clamped_length = self.length - offset; + if size > clamped_length { + size = clamped_length + } + // We can't contiguously dequeue past the end of the storage. + let until_end = self.capacity() - start_at; + if size > until_end { + size = until_end + } + + &mut self.storage[start_at..start_at + size] + } + /// Read as many elements from allocated buffer elements into the given slice /// starting at the given offset past the first allocated element, and return /// the amount read. 
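The relations.rs changes above replace the single next_hop field with a per-destination list of hops, so a multicast target can fan out to several children while a unicast target keeps exactly one hop. Below is a minimal sketch of how the reworked API composes, written in the style of the existing unit tests in src/iface/rpl/relations.rs; it assumes it sits in that file's tests module (so Relations, RelationHop, Ipv6Address, Instant and Duration are already in scope) with the rpl-mop-3 feature enabled, and the test name and addresses are illustrative only, not part of this patch.

#[cfg(feature = "rpl-mop-3")]
#[test]
fn multicast_relation_keeps_several_next_hops() {
    // Hypothetical addresses: one multicast group and two downstream children.
    let group = Ipv6Address::new(0xff02, 0, 0, 0, 0, 0, 0, 3);
    let child_a = Ipv6Address::new(0xfd00, 0, 0, 0, 0, 0, 0, 1);
    let child_b = Ipv6Address::new(0xfd00, 0, 0, 0, 0, 0, 0, 2);

    let mut relations = Relations::default();

    // add_relation() now takes a slice of next hops and returns a Result,
    // since a multicast destination stores a bounded list of hops
    // (RPL_MAX_NEXT_HOP_PER_DESTINATION).
    relations
        .add_relation(group, &[child_a], Instant::now(), Duration::from_secs(60 * 30))
        .unwrap();

    // Adding the same destination again merges the new hop into the existing
    // relation (refreshing its lifetime) instead of overwriting it.
    relations
        .add_relation(group, &[child_b], Instant::now(), Duration::from_secs(60 * 30))
        .unwrap();

    // find_next_hop() now returns a slice of RelationHop rather than a single
    // address; both children are candidates for duplication.
    let hops = relations.find_next_hop(group).unwrap();
    assert_eq!(hops.len(), 2);
    assert!(hops.iter().any(|hop| hop.ip == child_a));
    assert!(hops.iter().any(|hop| hop.ip == child_b));

    // A NO-PATH DAO from child_a only drops that hop; the relation survives
    // as long as at least one next hop remains.
    relations.remove_hop_from_relation(group, child_a);
    assert_eq!(relations.find_next_hop(group).map(|hops| hops.len()), Some(1));
}

Unicast destinations keep the previous single-hop behaviour behind the same API: passing more than one hop for a non-multicast destination is rejected, and a second add_relation call simply replaces the stored hop and refreshes its lifetime, as the updated tests in the relations.rs hunk show.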
diff --git a/src/tests.rs b/src/tests.rs index 377acff1b..1e3ade402 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -1,7 +1,7 @@ use crate::iface::*; use crate::wire::*; -pub(crate) fn setup<'a>(medium: Medium) -> (Interface, SocketSet<'a>, TestingDevice) { +pub(crate) fn setup<'a>(medium: Medium) -> (Interface<'static>, SocketSet<'a>, TestingDevice) { let mut device = TestingDevice::new(medium); let config = Config::new(match medium { @@ -43,7 +43,7 @@ pub(crate) fn setup<'a>(medium: Medium) -> (Interface, SocketSet<'a>, TestingDev ..config }; - let mut iface = Interface::new(config, &mut device, Instant::ZERO); + let mut iface = Interface::new(config, &mut device, &mut [][..], &mut [][..], Instant::ZERO); #[cfg(feature = "proto-ipv4")] { diff --git a/tests/rpl.rs b/tests/rpl.rs index 465b717d1..cbda14ed7 100644 --- a/tests/rpl.rs +++ b/tests/rpl.rs @@ -21,6 +21,7 @@ fn init() { #[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained)] #[case::mop1(RplModeOfOperation::NonStoringMode)] #[case::mop2(RplModeOfOperation::StoringMode)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast)] fn root_node_only(#[case] mop: RplModeOfOperation) { let mut sim = sim::NetworkSim::new(); sim.create_node(RplConfig::new(mop).add_root_config(RplRootConfig::new( @@ -28,7 +29,8 @@ fn root_node_only(#[case] mop: RplModeOfOperation) { Ipv6Address::default(), ))); - sim.run(Duration::from_millis(500), ONE_HOUR); + sim.init(); + sim.run(Duration::from_millis(500), ONE_HOUR, None); assert!(!sim.msgs().is_empty()); @@ -49,11 +51,13 @@ fn root_node_only(#[case] mop: RplModeOfOperation) { #[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained)] #[case::mop1(RplModeOfOperation::NonStoringMode)] #[case::mop2(RplModeOfOperation::StoringMode)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast)] fn normal_node_without_dodag(#[case] mop: RplModeOfOperation) { let mut sim = sim::NetworkSim::new(); sim.create_node(RplConfig::new(mop)); - sim.run(Duration::from_millis(500), ONE_HOUR); + sim.init(); + sim.run(Duration::from_millis(500), ONE_HOUR, None); assert!(!sim.msgs().is_empty()); @@ -72,15 +76,54 @@ fn normal_node_without_dodag(#[case] mop: RplModeOfOperation) { /// For MOP1, MOP2 and MOP3, DAOs and DAO-ACKs should be transmitted. /// We run the simulation for 15 minutes. During this period, around 7 DIOs should be transmitted /// by each node (root and normal node). In MOP1, MOP2 and MOP3, the normal node should transmit 1 -/// DAO and the root 1 DAO-ACK. By default, DAOs require an ACK in smoltcp. +/// DAO and the root 1 DAO-ACK. By default, DAOs require an ACK in smoltcp, unless one of the nodes +/// has joined a multicast group. 
Then there should be an extra DAO for the multicast group to +/// which the node is subscribed. #[rstest] -#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained)] -#[case::mop1(RplModeOfOperation::NonStoringMode)] -#[case::mop2(RplModeOfOperation::StoringMode)] -fn root_and_normal_node(#[case] mop: RplModeOfOperation) { +#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained, None)] +#[case::mop1(RplModeOfOperation::NonStoringMode, None)] +#[case::mop2(RplModeOfOperation::StoringMode, None)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast, None)] +#[case::mop3_multicast(RplModeOfOperation::StoringModeWithMulticast, Some(Ipv6Address::from_parts(&[0xff02, 0, 0, 0, 0, 0, 0, 3])))] +fn root_and_normal_node( + #[case] mop: RplModeOfOperation, + #[case] multicast_group: Option<Ipv6Address>, +) { + init(); + let mut sim = sim::topology(sim::NetworkSim::new(), mop, 1, 1); + if let Some(multicast_group) = multicast_group { + let last_child = sim.nodes_mut().last_mut().unwrap(); + last_child + .interface + .join_multicast_group(&mut last_child.device, multicast_group, Instant::ZERO) + .expect("last_child should be able to join the multicast group"); + } - sim.run(Duration::from_millis(500), Duration::from_secs(60 * 15)); + // let mut pcap_file = None; + let mut pcap_file = Some( + sim::PcapFile::new(std::path::Path::new(&format!( + "sim_logs/root_and_normal_node-{}-{}.pcap", + match mop { + RplModeOfOperation::NoDownwardRoutesMaintained => "mop0", + RplModeOfOperation::NonStoringMode => "mop1", + RplModeOfOperation::StoringMode => "mop2", + RplModeOfOperation::StoringModeWithMulticast => "mop3", + }, + if multicast_group.is_some() { + "with-multicast" + } else { + "no-multicast" + } + ))) + .unwrap(), + ); + sim.init(); + sim.run( + Duration::from_millis(500), + Duration::from_secs(60 * 15), + pcap_file.as_mut(), + ); assert!(!sim.msgs().is_empty()); @@ -95,8 +138,8 @@ fn root_and_normal_node(#[case] mop: RplModeOfOperation) { let dao_count = sim.msgs().iter().filter(|m| m.is_dao()).count(); let dao_ack_count = sim.msgs().iter().filter(|m| m.is_dao_ack()).count(); - assert_eq!(dao_count, 1); - assert_eq!(dao_ack_count, 1); + assert_eq!(dao_count, if multicast_group.is_some() { 2 } else { 1 }); + assert_eq!(dao_ack_count, dao_count); } _ => (), } @@ -118,13 +161,33 @@ fn root_and_normal_node(#[case] mop: RplModeOfOperation) { } #[rstest] -#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained)] -#[case::mop1(RplModeOfOperation::NonStoringMode)] -#[case::mop2(RplModeOfOperation::StoringMode)] -fn root_and_normal_node_moved_out_of_range(#[case] mop: RplModeOfOperation) { +#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained, None)] +#[case::mop1(RplModeOfOperation::NonStoringMode, None)] +#[case::mop2(RplModeOfOperation::StoringMode, None)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast, None)] +#[case::mop3_multicast(RplModeOfOperation::StoringModeWithMulticast, Some(Ipv6Address::from_parts(&[0xff02, 0, 0, 0, 0, 0, 0, 3])))] +fn root_and_normal_node_moved_out_of_range( + #[case] mop: RplModeOfOperation, + #[case] multicast_group: Option<Ipv6Address>, +) { let mut sim = sim::topology(sim::NetworkSim::new(), mop, 1, 1); + if let Some(multicast_group) = multicast_group { + let last_child = sim.nodes_mut().last_mut().unwrap(); + last_child + .interface + .join_multicast_group(&mut last_child.device, multicast_group, Instant::ZERO) + .expect("last_child should be able to join the multicast group"); + } - sim.run(Duration::from_millis(100), ONE_HOUR); + // Setup pcap file for multicast + 
let mut pcap_file = if multicast_group.is_some() { + use std::path::Path; + Some(sim::PcapFile::new(Path::new(&format!("sim_logs/multicast-{mop}.pcap"))).unwrap()) + } else { + None + }; + sim.init(); + sim.run(Duration::from_millis(100), ONE_HOUR, pcap_file.as_mut()); assert!(!sim.msgs().is_empty()); @@ -149,7 +212,7 @@ fn root_and_normal_node_moved_out_of_range(#[case] mop: RplModeOfOperation) { // Move the node far from the root node. sim.nodes_mut()[1].set_position(sim::Position((1000., 0.))); - sim.run(Duration::from_millis(400), ONE_HOUR); + sim.run(Duration::from_millis(400), ONE_HOUR, pcap_file.as_mut()); match mop { RplModeOfOperation::NonStoringMode | RplModeOfOperation::StoringMode => { @@ -197,7 +260,7 @@ fn root_and_normal_node_moved_out_of_range(#[case] mop: RplModeOfOperation) { // Move the node back in range of the root node. sim.nodes_mut()[1].set_position(sim::Position((100., 0.))); - sim.run(Duration::from_millis(100), ONE_HOUR); + sim.run(Duration::from_millis(100), ONE_HOUR, pcap_file.as_mut()); // NOTE: in rare cases, I don't know why, 2 DIS messages are transmitted instead of just 1. let dis_count = sim.msgs().iter().filter(|m| m.is_dis()).count(); @@ -222,18 +285,48 @@ fn root_and_normal_node_moved_out_of_range(#[case] mop: RplModeOfOperation) { } #[rstest] -#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained)] +#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained, None)] //#[case::mop1(RplModeOfOperation::NonStoringMode)] -#[case::mop2(RplModeOfOperation::StoringMode)] -fn message_forwarding_to_root(#[case] mop: RplModeOfOperation) { +#[case::mop2(RplModeOfOperation::StoringMode, None)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast, None)] +#[case::mop3_multicast(RplModeOfOperation::StoringModeWithMulticast, Some(Ipv6Address::from_parts(&[0xff02, 0, 0, 0, 0, 0, 0, 3])))] +fn message_forwarding_to_root( + #[case] mop: RplModeOfOperation, + #[case] multicast_group: Option<Ipv6Address>, +) { let mut sim = sim::topology(sim::NetworkSim::new(), mop, 1, 2); + if let Some(multicast_group) = multicast_group { + let last_child = sim.nodes_mut().last_mut().unwrap(); + last_child + .interface + .join_multicast_group(&mut last_child.device, multicast_group, Instant::ZERO) + .expect("last_child should be able to join the multicast group"); + } let dst_addr = sim.nodes()[0].ip_address; sim::udp_receiver_node(&mut sim.nodes_mut()[0], 1234); sim::udp_sender_node(&mut sim.nodes_mut()[2], 1234, dst_addr); sim.init(); - sim.run(Duration::from_millis(500), ONE_HOUR); + // let mut pcap_file = None; + let mut pcap_file = Some( + sim::PcapFile::new(std::path::Path::new(&format!( + "sim_logs/message-forwarding-to-root-{}-{}.pcap", + match mop { + RplModeOfOperation::NoDownwardRoutesMaintained => "mop0", + RplModeOfOperation::NonStoringMode => "mop1", + RplModeOfOperation::StoringMode => "mop2", + RplModeOfOperation::StoringModeWithMulticast => "mop3", + }, + if multicast_group.is_some() { + "with-multicast" + } else { + "no-multicast" + } + ))) + .unwrap(), + ); + sim.run(Duration::from_millis(500), ONE_HOUR, pcap_file.as_mut()); assert!(!sim.msgs().is_empty()); @@ -268,18 +361,53 @@ fn message_forwarding_to_root(#[case] mop: RplModeOfOperation) { } #[rstest] -#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained)] +#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained, None)] //#[case::mop1(RplModeOfOperation::NonStoringMode)] -#[case::mop2(RplModeOfOperation::StoringMode)] -fn message_forwarding_up_and_down(#[case] mop: RplModeOfOperation) { 
+#[case::mop2(RplModeOfOperation::StoringMode, None)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast, None)] +#[case::mop3_multicast(RplModeOfOperation::StoringModeWithMulticast, Some(Ipv6Address::from_parts(&[0xff02, 0, 0, 0, 0, 0, 0, 3])))] +fn message_forwarding_up_and_down( + #[case] mop: RplModeOfOperation, + #[case] multicast_group: Option<Ipv6Address>, +) { + init(); + let mut sim = sim::topology(sim::NetworkSim::new(), mop, 2, 2); + if let Some(multicast_group) = multicast_group { + let last_child = &mut sim.nodes_mut()[4]; + last_child + .interface + .join_multicast_group(&mut last_child.device, multicast_group, Instant::ZERO) + .expect("last_child should be able to join the multicast group"); + } let dst_addr = sim.nodes()[3].ip_address; sim::udp_receiver_node(&mut sim.nodes_mut()[3], 1234); sim::udp_sender_node(&mut sim.nodes_mut()[4], 1234, dst_addr); sim.init(); - sim.run(Duration::from_millis(500), Duration::from_secs(60 * 15)); + let mut pcap_file = Some( + sim::PcapFile::new(std::path::Path::new(&format!( + "sim_logs/message_forwarding_up_and_down-{}-{}.pcap", + match mop { + RplModeOfOperation::NoDownwardRoutesMaintained => "mop0", + RplModeOfOperation::NonStoringMode => "mop1", + RplModeOfOperation::StoringMode => "mop2", + RplModeOfOperation::StoringModeWithMulticast => "mop3", + }, + if multicast_group.is_some() { + "with-multicast" + } else { + "no-multicast" + } + ))) + .unwrap(), + ); + sim.run( + Duration::from_millis(500), + Duration::from_secs(60 * 15), + pcap_file.as_mut(), + ); assert!(!sim.msgs().is_empty()); @@ -343,10 +471,18 @@ fn message_forwarding_up_and_down(#[case] mop: RplModeOfOperation) { assert!(dao_ack_packets_with_routing == 4,); assert!(dao_ack_packets_without_routing == 2,); } - RplModeOfOperation::StoringMode | RplModeOfOperation::StoringModeWithMulticast => { + RplModeOfOperation::StoringMode => { assert!(dao_ack_packets_with_routing == 0,); assert!(dao_ack_packets_without_routing == 6,); } + RplModeOfOperation::StoringModeWithMulticast if multicast_group.is_none() => { + assert_eq!(dao_ack_packets_with_routing, 0,); + assert_eq!(dao_ack_packets_without_routing, 6,); + } + RplModeOfOperation::StoringModeWithMulticast if multicast_group.is_some() => { + assert_eq!(dao_ack_packets_with_routing, 0,); + assert_eq!(dao_ack_packets_without_routing, 6 + 2,); // 1x joining multicast generates 2 DAOs + } _ => { assert!(dao_ack_packets_with_routing == 0,); assert!(dao_ack_packets_without_routing == 0,); @@ -355,14 +491,157 @@ fn message_forwarding_up_and_down(#[case] mop: RplModeOfOperation) { } #[rstest] -#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained)] +#[case::one(&[4])] +#[case::two(&[4, 2])] +#[case::three(&[4, 2, 3])] +fn forward_multicast_up_and_down(#[case] multicast_receivers: &[usize]) { + init(); + + const MULTICAST_GROUP: Ipv6Address = Ipv6Address::new(0xff02, 0, 0, 0, 0, 0, 0, 3); + let mut sim = sim::topology( + sim::NetworkSim::new(), + RplModeOfOperation::StoringModeWithMulticast, + 2, + 2, + ); + // Subscribe to multicast group + for receiver in multicast_receivers { + let node = &mut sim.nodes_mut()[*receiver]; + node.interface + .join_multicast_group(&mut node.device, MULTICAST_GROUP, Instant::ZERO) + .expect("node should be able to join the multicast group"); + + sim::udp_receiver_node(node, 1234); + } + + // Setup UDP sender + sim::udp_sender_node(&mut sim.nodes_mut()[4], 1234, MULTICAST_GROUP); + + let mut pcap_file = Some( + sim::PcapFile::new(std::path::Path::new(&format!( + 
"sim_logs/forward_multicast_up_and_down{}.pcap", + multicast_receivers + .iter() + .map(|id| id.to_string()) + .fold(String::new(), |a, b| a + "-" + &b), + ))) + .unwrap(), + ); + + sim.init(); + sim.run( + Duration::from_millis(500), + Duration::from_secs(60 * 5), + pcap_file.as_mut(), + ); + + assert!(!sim.msgs().is_empty()); +} + +#[rstest] +#[case::root_one(&[4], 0)] +#[case::child_one(&[4], 4)] +#[case::root_two(&[4, 2], 0)] +#[case::child_two(&[4, 2], 4)] +#[case::root_three(&[4, 2, 3], 0)] +#[case::child_three(&[4, 2, 3], 4)] +fn forward_multicast_staged_initialization( + #[case] multicast_receivers: &[usize], + #[case] multicast_sender: usize, +) { + init(); + + const MULTICAST_GROUP: Ipv6Address = Ipv6Address::new(0xff02, 0, 0, 0, 0, 0, 0, 3); + let mut sim = sim::topology( + sim::NetworkSim::new(), + RplModeOfOperation::StoringModeWithMulticast, + 2, + 2, + ); + // Subscribe to multicast group + for receiver in multicast_receivers { + let node = &mut sim.nodes_mut()[*receiver]; + node.interface + .join_multicast_group(&mut node.device, MULTICAST_GROUP, Instant::ZERO) + .expect("node should be able to join the multicast group"); + + sim::udp_receiver_node(node, 1234); + } + + // Setup UDP sender + sim::udp_sender_node( + &mut sim.nodes_mut()[multicast_sender], + 1234, + MULTICAST_GROUP, + ); + + let mut pcap_file = Some( + sim::PcapFile::new(std::path::Path::new(&format!( + "sim_logs/forward_multicast_staged_init_r{}-s{}.pcap", + multicast_receivers + .iter() + .map(|id| id.to_string()) + .fold(String::new(), |a, b| a + "-" + &b), + multicast_sender, + ))) + .unwrap(), + ); + + let nodes_len = sim.nodes().len(); + for node in 0..nodes_len { + let node = &mut sim.nodes_mut()[node]; + node.init(); + + // Run for a while + sim.run( + Duration::from_millis(500), + Duration::from_secs(60 * 5), + pcap_file.as_mut(), + ); + sim.clear_msgs(); + } + + // At the end run with the entire network up + sim.init(); + sim.run( + Duration::from_millis(500), + Duration::from_secs(60 * 5), + pcap_file.as_mut(), + ); + + assert!(!sim.msgs().is_empty()); +} + +#[rstest] +#[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained, None)] //#[case::mop1(RplModeOfOperation::NonStoringMode)] -#[case::mop2(RplModeOfOperation::StoringMode)] -fn normal_node_change_parent(#[case] mop: RplModeOfOperation) { +#[case::mop2(RplModeOfOperation::StoringMode, None)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast, None)] +#[case::mop3_multicast( + RplModeOfOperation::StoringModeWithMulticast, + Some(Ipv6Address::new(0xff02, 0, 0, 0, 0, 0, 0, 3)) +)] +fn normal_node_change_parent( + #[case] mop: RplModeOfOperation, + #[case] multicast_group: Option, +) { init(); let mut sim = sim::topology(sim::NetworkSim::new(), mop, 1, 3); - sim.run(Duration::from_millis(500), Duration::from_secs(60 * 5)); + if let Some(multicast_group) = multicast_group { + let last_child = sim.nodes_mut().last_mut().unwrap(); + last_child + .interface + .join_multicast_group(&mut last_child.device, multicast_group, Instant::ZERO) + .expect("last_child should be able to join the multicast group"); + } + + sim.init(); + sim.run( + Duration::from_millis(500), + Duration::from_secs(60 * 5), + None, + ); assert!(!sim.msgs().is_empty()); @@ -370,7 +649,7 @@ fn normal_node_change_parent(#[case] mop: RplModeOfOperation) { sim.nodes_mut()[3].set_position(sim::Position((150., -50.))); sim.clear_msgs(); - sim.run(Duration::from_millis(500), ONE_HOUR); + sim.run(Duration::from_millis(500), ONE_HOUR, None); // Counter for sent NO-PATH DAOs let mut 
no_path_dao_count = 0; @@ -433,9 +712,11 @@ fn normal_node_change_parent(#[case] mop: RplModeOfOperation) { #[case::mop0(RplModeOfOperation::NoDownwardRoutesMaintained)] //#[case::mop1(RplModeOfOperation::NonStoringMode)] #[case::mop2(RplModeOfOperation::StoringMode)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast)] fn parent_leaves_network_no_other_parent(#[case] mop: RplModeOfOperation) { let mut sim = sim::topology(sim::NetworkSim::new(), mop, 4, 2); - sim.run(Duration::from_millis(500), ONE_HOUR); + sim.init(); + sim.run(Duration::from_millis(500), ONE_HOUR, None); // Parent leaves network, child node does not have an alternative parent. // The child node should send INFINITE_RANK DIO and after that only send DIS messages @@ -444,7 +725,7 @@ fn parent_leaves_network_no_other_parent(#[case] mop: RplModeOfOperation) { sim.clear_msgs(); - sim.run(Duration::from_millis(500), ONE_HOUR); + sim.run(Duration::from_millis(500), ONE_HOUR, None); let no_parent_node_msgs: Vec<_> = sim.msgs().iter().filter(|m| m.from.0 == 5).collect(); @@ -467,6 +748,7 @@ fn parent_leaves_network_no_other_parent(#[case] mop: RplModeOfOperation) { // In MOP 2 the DTSN is incremented when a parent does not hear anymore from one of its children. #[rstest] #[case::mop2(RplModeOfOperation::StoringMode)] +#[case::mop3(RplModeOfOperation::StoringModeWithMulticast)] fn dtsn_incremented_when_child_leaves_network(#[case] mop: RplModeOfOperation) { use std::collections::HashMap; @@ -474,14 +756,15 @@ fn dtsn_incremented_when_child_leaves_network(#[case] mop: RplModeOfOperation) { sim.nodes_mut()[4].set_position(sim::Position((200., 100.))); sim.nodes_mut()[5].set_position(sim::Position((-100., 0.))); - sim.run(Duration::from_millis(500), ONE_HOUR); + sim.init(); + sim.run(Duration::from_millis(500), ONE_HOUR, None); // One node is moved out of the range of its parent. sim.nodes_mut()[4].set_position(sim::Position((500., 500.))); sim.clear_msgs(); - sim.run(Duration::from_millis(500), ONE_HOUR); + sim.run(Duration::from_millis(500), ONE_HOUR, None); // Keep track of when was the first DIO with increased DTSN sent let mut dio_at = Instant::ZERO; diff --git a/tests/sim/mod.rs b/tests/sim/mod.rs index cb4516d20..ad5a82bf6 100644 --- a/tests/sim/mod.rs +++ b/tests/sim/mod.rs @@ -1,3 +1,5 @@ +use std::fs::File; + use smoltcp::iface::*; use smoltcp::phy::{PcapLinkType, PcapSink}; use smoltcp::time::*; @@ -181,20 +183,18 @@ impl NetworkSim { .find(|node| node.ieee_address == destination) } - /// Initialize the simulation. + /// Initialize the simulation. This is a shortcut to initialize all the nodes individually. + /// Nodes need to be initialized to be enabled, otherwise they will not show up in the simulation. pub fn init(&mut self) { for node in &mut self.nodes { - if let Some(init) = &node.init { - let handles = init(&mut node.sockets); - node.socket_handles = handles; - } + node.init(); } } /// Run the simluation for a specific duration with a specified step. /// *NOTE*: the simulation uses the step as a maximum step. If a smoltcp interface needs to be /// polled more often, then the simulation will do so. 
- pub fn run(&mut self, step: Duration, duration: Duration) { + pub fn run(&mut self, step: Duration, duration: Duration, pcap_file: Option<&mut PcapFile>) { let start = self.now; while self.now < start + duration { let (new_step, _, _) = self.on_tick(self.now, step); @@ -207,6 +207,10 @@ impl NetworkSim { self.now += step; } } + + if let Some(file) = pcap_file { + file.append_messages(self) + } } /// Run the simulation. @@ -305,17 +309,32 @@ impl NetworkSim { (step, broadcast_msgs, unicast_msgs) } +} - /// Save the messages to a specified path in the PCAP format. - #[allow(unused)] - pub fn save_pcap(&self, path: &std::path::Path) -> std::io::Result<()> { - let mut pcap_file = std::fs::File::create(path).unwrap(); - PcapSink::global_header(&mut pcap_file, PcapLinkType::Ieee802154WithoutFcs); +#[allow(unused)] +/// Helper for writing messages from the simulator to a PCAP file +pub struct PcapFile { + file: File, +} - for msg in &self.messages { - PcapSink::packet(&mut pcap_file, msg.at, &msg.data); +#[allow(unused)] +impl PcapFile { + pub fn new(path: &std::path::Path) -> std::io::Result<Self> { + let parent = path.parent(); + if let Some(parent) = parent { + if !parent.exists() { + std::fs::create_dir_all(parent)?; + } } + let mut file = std::fs::File::create(path)?; + PcapSink::global_header(&mut file, PcapLinkType::Ieee802154WithoutFcs); - Ok(()) + Ok(Self { file }) + } + + pub fn append_messages(&mut self, sim: &NetworkSim) { + for msg in &sim.messages { + PcapSink::packet(&mut self.file, msg.at, &msg.data); + } } } diff --git a/tests/sim/node.rs index 58a635e78..682d81b92 100644 --- a/tests/sim/node.rs +++ b/tests/sim/node.rs @@ -1,9 +1,11 @@ use super::Message; use super::Position; use smoltcp::iface::*; +use smoltcp::storage::PacketMetadata; use smoltcp::time::*; use smoltcp::wire::*; use std::collections::VecDeque; +use std::fmt::Display; type InitFn = Box<dyn Fn(&mut SocketSet<'static>) -> Vec<SocketHandle> + Send + Sync + 'static>; @@ -28,7 +30,7 @@ pub struct Node { pub pan_id: Ieee802154Pan, pub device: NodeDevice, pub last_transmitted: Instant, - pub interface: Interface, + pub interface: Interface<'static>, pub sockets: SocketSet<'static>, pub socket_handles: Vec<SocketHandle>, pub init: Option<InitFn>, @@ -36,6 +38,13 @@ pub struct Node { pub next_poll: Option<Instant>, } +impl Display for Node { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Node[{}] with {}", self.id, self.device)?; + Ok(()) + } +} + impl Node { /// Create a new node. pub fn new(id: usize, mut rpl: RplConfig) -> Self { @@ -56,7 +65,13 @@ impl Node { config.rpl_config = Some(rpl); config.random_seed = Instant::now().total_micros() as u64; - let mut interface = Interface::new(config, &mut device, Instant::ZERO); + let mut interface = Interface::<'static>::new( + config, + &mut device, + vec![PacketMetadata::EMPTY; 16], + vec![0; 2048], + Instant::ZERO, + ); interface.update_ip_addrs(|addresses| { addresses .push(IpCidr::Ipv6(Ipv6Cidr::new(ipv6_address, 10))) @@ -64,10 +79,10 @@ impl Node { }); Self { - id: id as usize, + id, range: 101., position: Position::from((0., 0.)), - enabled: true, + enabled: false, is_sending: false, parent_changed: false, previous_parent: None, @@ -86,6 +101,15 @@ impl Node { } } + /// Initializes the node + pub fn init(&mut self) { + self.enabled = true; + if let Some(init) = &self.init { + let handles = init(&mut self.sockets); + self.socket_handles = handles; + } + } + /// Set the position of the node. 
pub fn set_position(&mut self, position: Position) { self.position = position; @@ -123,6 +147,20 @@ pub struct NodeDevice { pub tx_queue: VecDeque<Message>, } +impl Display for NodeDevice { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "NodeDevice[{}] at ({}, {})", + self.id, + self.position.x(), + self.position.y() + )?; + + Ok(()) + } +} + impl NodeDevice { pub fn new(id: usize, position: Position) -> Self { Self {