diff --git a/Cargo.lock b/Cargo.lock index f8d90ac81..ae2f34387 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -455,6 +455,7 @@ dependencies = [ name = "coap" version = "0.1.0" dependencies = [ + "arrayvec", "coap-handler", "coap-handler-implementations", "coap-message", @@ -464,6 +465,7 @@ dependencies = [ "coap-numbers", "coap-request", "coap-request-implementations", + "coap-scroll-ring-server", "embassy-executor", "embassy-futures", "embassy-net", @@ -480,6 +482,7 @@ dependencies = [ "minicbor 0.23.0", "riot-rs", "riot-rs-boards", + "scroll-ring", "smoltcp", "static-alloc", ] @@ -592,6 +595,20 @@ dependencies = [ "coap-request", ] +[[package]] +name = "coap-scroll-ring-server" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54221724caa364079527fcd8be89bc401f25921f0927e6574c934a8ecb9778a0" +dependencies = [ + "coap-handler", + "coap-handler-implementations", + "coap-message", + "coap-message-utils", + "coap-numbers", + "scroll-ring", +] + [[package]] name = "codespan-reporting" version = "0.11.1" @@ -731,6 +748,12 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + [[package]] name = "crunchy" version = "0.2.2" @@ -3179,6 +3202,15 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +[[package]] +name = "ringbuf" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79abed428d1fd2a128201cec72c5f6938e2da607c6f3745f769fabea399d950a" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "ringbuffer" version = "0.1.0" @@ -3491,6 +3523,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "scroll-ring" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eda0dff67036ba3b032aa68eb47ceb9b53b6e848445f53d10bf93e033b17df2" +dependencies = [ + "ringbuf", + "try-lock", +] + [[package]] name = "sec1" version = "0.7.3" @@ -3937,6 +3979,12 @@ dependencies = [ "winnow 0.6.6", ] +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + [[package]] name = "trybuild" version = "1.0.91" diff --git a/examples/coap/Cargo.toml b/examples/coap/Cargo.toml index 993a0db67..6608fe642 100644 --- a/examples/coap/Cargo.toml +++ b/examples/coap/Cargo.toml @@ -44,6 +44,9 @@ liboscore-msgbackend = { git = "https://gitlab.com/oscore/liboscore/", features ], rev = "e7a4ecd037cbb9c7f085047fec5896f4bdc68d50" } coap-message-implementations = "0.1.1" static-alloc = "0.2.5" +arrayvec = { version = "0.7.4", default-features = false } +coap-scroll-ring-server = "0.2.0" +scroll-ring = "0.1.1" [features] default = ["proto-ipv4"] # shame diff --git a/examples/coap/src/main.rs b/examples/coap/src/main.rs index 5656e86f4..8aebc64f0 100644 --- a/examples/coap/src/main.rs +++ b/examples/coap/src/main.rs @@ -9,6 +9,8 @@ use embassy_net::udp::{PacketMetadata, UdpSocket}; // Moving work from 
https://github.com/embassy-rs/embassy/pull/2519 in here for the time being mod udp_nal;
+// Might warrant a standalone crate at some point
+mod oluru; mod seccontext;
@@ -63,11 +65,22 @@ async fn run<S>(mut sock: S) where S: embedded_nal_async::UnconnectedUdp, {
- use coap_handler_implementations::HandlerBuilder;
+ use coap_handler_implementations::{HandlerBuilder, ReportingHandlerBuilder}; let log = None;
-
- let mut secpool: seccontext::SecContextPool = Default::default();
+ let buffer = scroll_ring::Buffer::<512>::default();
+ // FIXME: Why doesn't scroll_ring provide that?
+ struct Stdout<'a>(&'a scroll_ring::Buffer::<512>);
+ impl<'a> core::fmt::Write for Stdout<'a> {
+ fn write_str(&mut self, s: &str) -> Result<(), core::fmt::Error> {
+ self.0.write(s.as_bytes());
+ Ok(())
+ }
+ }
+ let mut stdout = Stdout(&buffer);
+ use core::fmt::Write;
+ writeln!(stdout, "We have our own stdout now.");
+ writeln!(stdout, "With rings and atomics."); use hexlit::hex; const R: &[u8] = &hex!("72cc4761dbd4c78f758931aa589d348d1ef874a7e303ede2f140dcf3e6aa4aac");
@@ -76,9 +89,11 @@ where R, );
- let mut handler = coap_message_demos::full_application_tree(log);
+ let mut handler = coap_message_demos::full_application_tree(log)
+ .at(&["stdout"], coap_scroll_ring_server::BufferHandler::new(&buffer))
+ .with_wkc();
- let mut handler = seccontext::OscoreEdhocHandler::new(own_identity, handler);
+ let mut handler = seccontext::OscoreEdhocHandler::new(own_identity, handler, stdout); println!("Server is ready.");
diff --git a/examples/coap/src/oluru.rs b/examples/coap/src/oluru.rs new file mode 100644
index 000000000..50b94dd7b
--- /dev/null
+++ b/examples/coap/src/oluru.rs
@@ -0,0 +1,257 @@
+//! An owned heapless container with capacity N that maintains order both through properties of its
+//! entries and by time of access -- a mix between a priority queue and an LRU cache.
+//!
+//! This was inspired by the [`uluru`](https://github.com/servo/uluru) crate. Adapting that crate
+//! to support priority levels proved to be impractical -- and many properties have changed since.
+//!
+//! See the [`OrderedPool`] documentation for details.
+//!
+//! # Terminology
+//!
+//! The ordering imposed on the cache entries is determined by a priority (see [`PriorityLevel`]).
+//! Throughout the documentation, "high" and "low" indicate priorities, whereas "small" and "large"
+//! indicate numeric values (including of priorities, where "high" corresponds to "small" and "low"
+//! to "large").
+#![deny(unsafe_code)]
+
+use arrayvec::ArrayVec;
+
+/// Required trait on [OrderedPool] entries that allows ordering.
+///
+/// Priority levels follow the conventions common with schedulers: 0 is the highest priority, and such entries
+/// will only get evicted if the cache is full with other entries of the same priority. Larger
+/// numeric values indicate increasingly lower priority.
+pub trait PriorityLevel {
+ /// Calculate the priority of the instance
+ ///
+ /// An instance's priority level may change while being mutated; [OrderedPool] will account for
+ /// that.
+ ///
+ /// The level should not change due to global effects (or internal mutability, if shared access
+ /// is later implemented). If it does, the ordering of an OrderedPool containing it may become
+ /// arbitrary, even after the element whose level changed has been removed.
+ fn level(&self) -> usize;
+}
+
+/// An owned heapless container with capacity N that maintains order both through properties of its
+/// entries and by time of access.
+///
+/// Operations that are supposed to be fast are:
+///
+/// * Finding an element by iteration
+/// * Moving that element to the front of its relevant level
+///
+/// There is no way to remove an item; instead, the `&mut T` of [`Self::lookup`] can be replaced with a
+/// low-priority placeholder value. (In fact, future iterations may require that such a value
+/// exists and is Default).
+///
+/// # Usage
+///
+/// ```
+/// use crate::oluru::{OrderedPool, PriorityLevel};
+///
+/// #[derive(Debug)]
+/// struct MyValue {
+/// id: u32,
+/// name: Option<&'static str>,
+/// }
+///
+/// impl PriorityLevel for MyValue {
+/// fn level(&self) -> usize {
+/// if self.name.is_some() {
+/// 0
+/// } else {
+/// 1
+/// }
+/// }
+/// }
+///
+/// // A cache with a capacity of three.
+/// type MyCache = OrderedPool<MyValue, 3, 2>;
+///
+/// // Create an empty cache, then insert some items.
+/// let mut cache = MyCache::new();
+/// cache.insert(MyValue { id: 1, name: Some("Mercury") });
+/// cache.insert(MyValue { id: 2, name: Some("Venus") });
+/// cache.insert(MyValue { id: 3, name: None });
+///
+/// let item = cache.lookup(|x| x.id == 1, |x| format!("Found {}", x.name.unwrap_or("unnamed object")));
+/// assert_eq!(item.unwrap().as_str(), "Found Mercury");
+///
+/// // If the cache is full, inserting a new item evicts one item.
+/// //
+/// // While Venus (ID 2) was used least recently, it has higher priority than the no-name planet
+/// // with ID 3, so that gets evicted first instead.
+/// let returned = cache.insert(MyValue { id: 4, name: Some("Mars") });
+/// assert!(returned.expect("Pool was full").is_some_and(|item| item.id == 3))
+/// ```
+///
+/// # Implementation
+///
+/// Right now, this is implemented as a separate entry vector and an index vector, where the latter
+/// is often rotated internally. Future changes may change this to only be a single list, using a
+/// doubly linked list, and keeping head indices of each level (which is why the number of levels
+/// `L` is part of the type).
+///
+/// The value list following the style of a Vec means that popping elements from anywhere but the
+/// tail is costly, which it ideally should not be; a slab allocator style would improve that.
+///
+/// ## Terminology
+///
+/// A "position" is a key to `.sorted`. An "index" is a key to `.entries` (and thus a value of
+/// `.sorted`).
+///
+/// ## Invariants
+///
+/// Before and after any public function, these hold:
+///
+/// * `.sorted` has the same length as `.entries`
+/// * `.sorted` is a permutation of `.entries`' (and thus its) index space. Therefore, each of its
+/// values is unique, and is an index into `.entries`.
+/// * If `T::level` is constant, `self.sorted.iter().map(|i| self.entries[i].level())` is sorted.
+#[derive(Debug)]
+pub struct OrderedPool<T: PriorityLevel, const N: usize, const L: usize> {
+ /// Elements without regard for ordering
+ pub entries: ArrayVec<T, N>,
+ /// A sorted list of indices into entries: high priority first, ties broken by recentness
+ pub sorted: ArrayVec<u16, N>,
+}
+
+impl<T: PriorityLevel, const N: usize, const L: usize> OrderedPool<T, N, L> {
+ /// Create an empty cache.
+ pub const fn new() -> Self {
+ assert!(N < u16::MAX as usize, "Capacity overflow");
+ // Clipping levels to u16 because they may be stored if the implementation changes.
+ assert!(L < u16::MAX as usize, "Level overflow");
+ OrderedPool {
+ entries: ArrayVec::new_const(),
+ sorted: ArrayVec::new_const(),
+ }
+ }
+
+ /// Iterate over items from highest priority to lowest, most recently used first.
+ ///
+ /// If the function `f_test` (which receives a shared reference to the entry) returns true,
+ /// `f_use` is called with a *mutable* reference to the item. That item is regarded as "used"
+ /// and thus shifted to the front of its level, and `f_use`'s return value is passed on.
+ ///
+ /// This differs from `uluru` in two ways:
+ /// * There is no `find()` method: As the level may change through mutation, we can not hand
+ /// out a `&mut T` unless we can be sure to process any level changes when it is returned. (A
+ /// linear type drop guard wrapper may afford that, but is not available in Rust at the time
+ /// of writing)
+ /// * The callback is split into a test part and a use part, which ensures that elements that are
+ /// not looked up do not get mutated; only the element that is actually used is.
+ pub fn lookup<R, Ftest, Fuse>(&mut self, mut f_test: Ftest, f_use: Fuse) -> Option<R>
+ where
+ Ftest: FnMut(&T) -> bool,
+ Fuse: FnOnce(&mut T) -> R,
+ {
+ for (position, &index) in self.sorted.iter().enumerate() {
+ if f_test(&self.entries[usize::from(index)]) {
+ let r = f_use(&mut self.entries[usize::from(index)]);
+ self.touch(position);
+ return Some(r);
+ }
+ }
+ None
+ }
+
+ /// Insert an element.
+ ///
+ /// If the new element's priority is lower than the lowest in the queue, it is returned as an
+ /// Err. Otherwise, the element is inserted, and any dropped lower priority element is
+ /// returned in the Ok value.
+ pub fn insert(&mut self, new: T) -> Result<Option<T>, T> {
+ let new_index = self.entries.len();
+ if new_index < N {
+ self.entries.push(new);
+ self.sorted.push(new_index.try_into().expect("Range is checked at construction time"));
+ self.touch(new_index);
+ Ok(None)
+ } else {
+ let last_slot = &mut self.entries[usize::from(*self.sorted.last().expect("Full array is not empty"))];
+ let last_level = last_slot.level();
+ let new_level = new.level();
+ debug_assert!(new_level < L, "Level exceeds limit L={L} in type");
+ if new_level <= last_level {
+ let last = core::mem::replace(last_slot, new);
+ self.touch(N - 1);
+ Ok(Some(last))
+ } else {
+ Err(new)
+ }
+ }
+ }
+
+ // FIXME: It is not fully clear when we would use insert and force_insert -- we'll need some
+ // kind of force_insert to get out of situations where all authenticated connections are really
+ // timing out. But at the same time, if we're being bombarded with bad requests, we should
+ // retain the ability to randomly keep some incoming connection for longer, so that a message 3
+ // that comes through can establish the connection. In the end, we may need some more flexible
+ // policy than just levels and LRU.
+ //
+ // Unless we want to keep track of connections somewhere in parallel, timeouts may also involve
+ // some function called on all present items to mark them as "not used in a long time",
+ // downgrading their priority.
+
+ /// Insert an element without regard for its level
+ ///
+ /// The element is inserted unconditionally, and the lowest-priority element is returned by
+ /// value.
+ pub fn force_insert(&mut self, new: T) -> Option<T> {
+ let new_index = self.entries.len();
+ if new_index < N {
+ self.entries.push(new);
+ self.sorted.push(new_index.try_into().expect("Range is checked at construction time"));
+ self.touch(new_index);
+ None
+ } else {
+ let last_slot = &mut self.entries[usize::from(*self.sorted.last().expect("Full array is not empty"))];
+ let last = core::mem::replace(last_slot, new);
+ self.touch(N - 1);
+ Some(last)
+ }
+ }
+
+ fn touch(&mut self, position: usize) {
+ let level = self.entries[usize::from(self.sorted[position])].level();
+ debug_assert!(level < L, "Level exceeds limit L={L} in type");
+ let mut new_position = position;
+ // Common case: level stayed the same, but we move to front; also applicable when numeric
+ // level decreased
+ while new_position
+ .checked_sub(1)
+ .is_some_and(|n| self.entries[usize::from(self.sorted[n])].level() >= level)
+ {
+ new_position -= 1;
+ }
+ if new_position != position {
+ // Push our entry out right and in left in the front
+ self.sorted[new_position..=position].rotate_right(1);
+ } else {
+ // Level may instead have increased
+ while new_position < self.sorted.len() - 1
+ && self.entries[usize::from(self.sorted[new_position + 1])].level() < level
+ {
+ new_position += 1;
+ }
+ // Push our entry out left and in right in the rear
+ if new_position != position {
+ self.sorted[position..=new_position].rotate_left(1);
+ }
+ }
+ }
+
+ /// Iterate over all items in no particular order
+ pub fn iter(&self) -> impl Iterator<Item = &T> {
+ self.entries.iter()
+ }
+}
+
+impl<T: PriorityLevel, const N: usize, const L: usize> core::default::Default for OrderedPool<T, N, L> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
diff --git a/examples/coap/src/seccontext.rs b/examples/coap/src/seccontext.rs
index 23216988e..e716e9532 100644
--- a/examples/coap/src/seccontext.rs
+++ b/examples/coap/src/seccontext.rs
@@ -3,7 +3,7 @@ use coap_message::{ MutableWritableMessage, ReadableMessage, }; use coap_message_utils::{Error as CoAPError, OptionsExt as _};
-use core::borrow::Borrow;
+use core::fmt::Write; extern crate alloc; use static_alloc::Bump;
@@ -19,34 +19,7 @@ const MAX_CONTEXTS: usize = 4; type LakersCrypto = lakers_crypto_rustcrypto::Crypto; /// A pool of security contexts sharable by several users inside a thread.
-///
-/// Access through the inner RefCell always happens with panicking errors, because all accessors
-/// (in this module, given it's a private member) promise to not call code would cause double
-/// locking. (And it's !Sync, so accessors will not be preempted).
-#[derive(Default)]
-pub struct SecContextPool(core::cell::RefCell<[SecContextState; MAX_CONTEXTS]>);
-
-impl SecContextPool {
- /// Place a non-empty state in a slot and return its index, or return a 5.03 Service
- /// Unavailable error
- fn place_in_empty_slot(&self, new: SecContextState) -> Result<usize, CoAPError> {
- for (index, place) in self.0.borrow_mut().iter_mut().enumerate() {
- if matches!(place, SecContextState::Empty) {
- *place = new;
- return Ok(index);
- }
- }
- for (index, place) in self.0.borrow_mut().iter_mut().enumerate() {
- // As a possible improvement, define a "keep value" in 0..n, and find the slot withthe
- // minimum keep value.
- if place.is_gc_eligible() {
- *place = new;
- return Ok(index);
- }
- }
- Err(CoAPError::service_unavailable())
- }
-}
+pub type SecContextPool = crate::oluru::OrderedPool<SecContextState, MAX_CONTEXTS, LEVEL_COUNT>; /// An own identifier for a security context ///
@@ -121,7 +94,12 @@ enum SecContextState { // that once the states are ready and we see which ones are the big ones.
Possible outcomes are // to just do it, to store the message in the handler's RequestData, or to have one or a few // slots in parallel to this in the SecContextPool.
- EdhocResponderProcessedM1(lakers::EdhocResponderProcessedM1<'static, LakersCrypto>),
+ EdhocResponderProcessedM1 {
+ responder: lakers::EdhocResponderProcessedM1<'static, LakersCrypto>,
+ // May be removed if lakers keeps access to those around if they are set at this point at
+ // all
+ c_r: COwn,
+ },
+ // EdhocResponderSentM2 { responder: lakers::EdhocResponderWaitM3,
@@ -132,30 +110,52 @@ enum SecContextState { c_r: COwn, }, Oscore(liboscore::PrimitiveContext), }
-impl SecContextState {
- fn corresponding_cown(&self) -> Option<COwn> {
+impl core::fmt::Display for SecContextState {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+ use SecContextState::*; match self {
- SecContextState::Empty => None,
- SecContextState::EdhocResponderProcessedM1(_) => None, // yet
- SecContextState::EdhocResponderSentM2 { c_r, .. } => Some(*c_r),
- SecContextState::Oscore(ctx) => COwn::from_kid(ctx.recipient_id()),
+ Empty => f.write_str("empty"),
+ EdhocResponderProcessedM1 { c_r, .. } => write!(f, "ProcessedM1, C_R = {:?}", c_r),
+ EdhocResponderSentM2 { c_r, .. } => write!(f, "SentM2, C_R = {:?}", c_r),
+ Oscore(ctx) => write!(f, "OSCORE, C_R = {:?}", COwn::from_kid(ctx.recipient_id()).unwrap()), } }
+}
+
+const LEVEL_ADMIN: usize = 0;
+const LEVEL_AUTHENTICATED: usize = 1;
+const LEVEL_ONGOING: usize = 2;
+const LEVEL_EMPTY: usize = 3;
+const LEVEL_COUNT: usize = 4;
- fn is_gc_eligible(&self) -> bool {
+impl crate::oluru::PriorityLevel for SecContextState {
+ fn level(&self) -> usize { match self {
- SecContextState::Empty => true, // but won't come to it
- SecContextState::EdhocResponderProcessedM1(_) => {
+ SecContextState::Empty => LEVEL_EMPTY,
+ SecContextState::EdhocResponderProcessedM1 { .. } => { // If this is ever tested, means we're outbound message limited, so let's try to // get one through rather than pointlessly sending errors
- false
+ LEVEL_ONGOING } SecContextState::EdhocResponderSentM2 { .. } => { // So far, the peer didn't prove they have anything other than entropy (maybe not // even that)
- true
+ LEVEL_ONGOING }
- SecContextState::Oscore(_) => false,
+ SecContextState::Oscore(_) => LEVEL_AUTHENTICATED,
+ }
+ }
+}
+
+impl SecContextState {
+ fn corresponding_cown(&self) -> Option<COwn> {
+ match self {
+ SecContextState::Empty => None,
+ // We're keeping a c_r in there assigned early so that we can find the context when
+ // building the response; nothing in the responder is tied to c_r yet.
+ SecContextState::EdhocResponderProcessedM1 { c_r, .. } => Some(*c_r),
+ SecContextState::EdhocResponderSentM2 { c_r, .. } => Some(*c_r),
+ SecContextState::Oscore(ctx) => COwn::from_kid(ctx.recipient_id()), } } }
@@ -165,7 +165,10 @@ impl SecContextState { /// While the EDHOC part could be implemented as a handler that is to be added into the tree, the /// OSCORE part needs to wrap the inner handler anyway, and EDHOC and OSCORE are intertwined rather /// strongly in processing the EDHOC option.
-pub struct OscoreEdhocHandler<'a, H: coap_handler::Handler> {
+pub struct OscoreEdhocHandler<'a, H: coap_handler::Handler, L: Write> {
+ // It'd be tempting to have sharing among multiple handlers for multiple CoAP stacks, but
+ // locks for such sharing could still be acquired in a factory (at which point it may make
+ // sense to make this a &mut).
pool: SecContextPool, // FIXME: That 'static is going to bite us -- but EdhocResponderProcessedM1 holds a reference // to it -- see SecContextState::EdhocResponderProcessedM1
@@ -176,25 +179,28 @@ pub struct OscoreEdhocHandler<'a, H: coap_handler::Handler> { // or a single item that has two AsMut accessors for separate encrypted and // unencrypted tree. inner: H,
+
+ log: L, }
-impl<'a, H: coap_handler::Handler> OscoreEdhocHandler<'a, H> {
- pub fn new(own_identity: (&'a lakers::CredentialRPK, &'static [u8]), inner: H) -> Self {
+impl<'a, H: coap_handler::Handler, L: Write> OscoreEdhocHandler<'a, H, L> {
+ pub fn new(own_identity: (&'a lakers::CredentialRPK, &'static [u8]), inner: H, log: L) -> Self { Self { pool: Default::default(), own_identity, inner,
+ log, } } } pub enum EdhocResponse<I> { // Taking a small state here: We already have a slot in the pool, storing the big data there
- OkSend2(usize),
+ OkSend2(COwn), // Could have a state Message3Processed -- but do we really want to implement that? (like, just // use the EDHOC option) OscoreRequest {
- slot: usize,
+ kid: COwn, correlation: liboscore::raw::oscore_requestid_t, extracted: I, },
@@ -252,7 +258,7 @@ impl RenderableOnMinimal for OrI } }
-impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler<'a, H> {
+impl<'a, H: coap_handler::Handler, L: Write> coap_handler::Handler for OscoreEdhocHandler<'a, H, L> { type RequestData = OrInner<EdhocResponse<Result<H::RequestData, H::ExtractRequestError>>, H::RequestData>;
@@ -361,26 +367,56 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< return Err(Own(CoAPError::bad_request())); }
- Ok(Own(EdhocResponse::OkSend2(self.pool.place_in_empty_slot(
- SecContextState::EdhocResponderProcessedM1(responder),
- )?)))
+ // Let's pick one now already: this allows us to use the identifier in our
+ // request data.
+ let c_r = COwn::not_in_iter(
+ self.pool
+ .iter()
+ .filter_map(|entry| entry.corresponding_cown()),
+ );
+
+ writeln!(self.log, "Entries in pool:");
+ for (i, e) in self.pool.entries.iter().enumerate() {
+ writeln!(self.log, "{i}. {e}");
+ }
+ write!(self.log, "Sequence: ");
+ for index in self.pool.sorted.iter() {
+ write!(self.log, "{index},");
+ }
+ writeln!(self.log, "");
+ let evicted =
+ self.pool
+ .force_insert(SecContextState::EdhocResponderProcessedM1 {
+ c_r,
+ responder,
+ });
+ if let Some(evicted) = evicted {
+ writeln!(self.log, "To insert new EDHOC, evicted {}", evicted);
+ } else {
+ writeln!(self.log, "To insert new EDHOC, evicted none");
+ }
+
+ Ok(Own(EdhocResponse::OkSend2(c_r))) } else { // for the time being we'll only take the EDHOC option Err(Own(CoAPError::bad_request())) } } Edhoc { kid } | Oscore { kid } => {
- use crate::println; let payload = request.payload(); // This whole loop-and-tree could become a single take_responder_wait3 method?
- let cown = COwn::from_kid(&[kid]);
- let mut pool_lock = self.pool.0.borrow_mut();
- let (slot, matched) = pool_lock
- .iter_mut()
- .enumerate()
- .filter(|(slot, c)| c.corresponding_cown() == cown)
- .next()
+ let kid = COwn::from_kid(&[kid]).unwrap();
+ // If we don't make progress, we're dropping it altogether. Unless we use the
+ // responder we might legally continue (because we didn't send data to EDHOC), but
+ // once we've received something that (as we now know) looks like a message 3 and
+ // isn't processable, it's unlikely that another one would come up and be.
+ let mut taken = self + .pool + .lookup( + |c| c.corresponding_cown() == Some(kid), + |matched| core::mem::replace(matched, Default::default()), + ) // following RFC8613 Section 8.2 item 2.2 // FIXME unauthorized (unreleased in coap-message-utils) .ok_or_else(CoAPError::bad_request)?; @@ -396,13 +432,8 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< .map_err(|_| Own(CoAPError::bad_request()))?; let cutoff = decoder.position(); - // If we don't make progress, we're dropping it altogether. Unless we use the - // responder we might legally continue (because we didn't send data to EDHOC), but - // once we've received something that (as we now know) looks like a message 3 and - // isn't processable, it's unlikely that another one would come up and be. - let mut taken = core::mem::replace(matched, Default::default()); - if let SecContextState::EdhocResponderSentM2 { responder, c_r } = taken { + debug_assert_eq!(c_r, kid, "State was looked up by KID"); let msg_3 = lakers::EdhocMessageBuffer::new_from_slice(&payload[..cutoff]) .map_err(|e| Own(too_small(e)))?; @@ -416,9 +447,9 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< // FIXME: Right now this can only do credential-by-value if id_cred_i.reference_only() { - println!("Got reference only, need to upgrade"); + writeln!(self.log, "Got reference only, need to upgrade"); } else { - println!("Got full credential, need to evaluate") + writeln!(self.log, "Got full credential, need to evaluate"); } use hexlit::hex; @@ -435,11 +466,11 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< let oscore_salt = responder.edhoc_exporter(1u8, &[], 8); // label is 1 let oscore_secret = &oscore_secret[..16]; let oscore_salt = &oscore_salt[..8]; - println!("OSCORE secret: {:?}...", &oscore_secret[..5]); - println!("OSCORE salt: {:?}", &oscore_salt); + writeln!(self.log, "OSCORE secret: {:?}...", &oscore_secret[..5]); + writeln!(self.log, "OSCORE salt: {:?}", &oscore_salt); let sender_id = 0x08; // FIXME: lakers can't export that? - let recipient_id = kid; + let recipient_id = kid.0; // FIXME probe cipher suite let hkdf = liboscore::HkdfAlg::from_number(5).unwrap(); @@ -461,13 +492,12 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< let context = liboscore::PrimitiveContext::new_from_fresh_material(immutables); - *matched = SecContextState::Oscore(context); + taken = SecContextState::Oscore(context); } else { // Return the state. Best bet is that it was already advanced to an OSCORE // state, and the peer sent message 3 with multiple concurrent in-flight // messages. We're ignoring the EDHOC value and continue with OSCORE // processing. - *matched = taken; } cutoff @@ -475,7 +505,7 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< 0 }; - let SecContextState::Oscore(oscore_context) = matched else { + let SecContextState::Oscore(mut oscore_context) = taken else { // FIXME: How'd we even get there? return Err(Own(CoAPError::bad_request())); }; @@ -513,19 +543,28 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< .set_payload(&payload[front_trim_payload..]) .unwrap(); - let Ok((correlation, extracted)) = liboscore::unprotect_request( + let decrypted = liboscore::unprotect_request( allocated_message, oscore_option, - oscore_context, + &mut oscore_context, |request| self.inner.extract_request_data(request), - ) else { - // FIXME is that the righ tcode? 
- println!("Decryption failure"); + ); + + // With any luck, this never moves out. + // + // Storing it even on decryption failure to avoid DoS from the first message (but + // FIXME, should we increment an error count and lower priority?) + let evicted = self.pool.force_insert(SecContextState::Oscore(oscore_context)); + debug_assert!(matches!(evicted, Some(SecContextState::Empty) | None), "A Default (Empty) was placed when an item was taken, which should have the lowest priority"); + + let Ok((correlation, extracted)) = decrypted else { + // FIXME is that the right code? + writeln!(self.log, "Decryption failure"); return Err(Own(CoAPError::unauthorized())); }; Ok(Own(EdhocResponse::OscoreRequest { - slot, + kid, correlation, extracted, })) @@ -546,40 +585,59 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< use OrInner::{Inner, Own}; Ok(match req { - Own(EdhocResponse::OkSend2(slot)) => { + Own(EdhocResponse::OkSend2(c_r)) => { // FIXME: Why does the From not do the map_err? response.set_code( M::Code::new(coap_numbers::code::CHANGED).map_err(|x| Own(x.into()))?, ); - let pool = &mut self.pool.0.borrow_mut(); - let SecContextState::EdhocResponderProcessedM1(responder) = - core::mem::replace(&mut pool[slot], SecContextState::Empty) - else { + let message_2 = self.pool.lookup( + |c| c.corresponding_cown() == Some(c_r), + |matched| { + // temporary default will not live long (and may be only constructed if + // prepare_message_2 fails) + let taken = core::mem::replace(matched, Default::default()); + let SecContextState::EdhocResponderProcessedM1 { + c_r: matched_c_r, + responder: taken, + } = taken + else { + todo!(); + }; + debug_assert_eq!( + matched_c_r, c_r, + "The first lookup function ensured this property" + ); + let (responder, message_2) = taken + // We're sending our ID by reference: we have a CCS and don't expect anyone to + // run EDHOC with us who can not verify who we are (and from the CCS there is + // no better way). Also, conveniently, this covers our privacy well. + // (Sending ByValue would still work) + .prepare_message_2( + lakers::CredentialTransfer::ByReference, + Some(c_r.0), + &None, + ) + // FIXME error handling + .unwrap(); + *matched = SecContextState::EdhocResponderSentM2 { responder, c_r }; + message_2 + }, + ); + + let Some(message_2) = message_2 else { // FIXME render late error (it'd help if CoAPError also offered a type that unions it // with an arbitrary other error). As it is, depending on the CoAP stack, there may be // DoS if a peer can send many requests before the server starts rendering responses. panic!("State vanished before respone was built."); }; - // We have a lock, let's pick one now - let c_r = - COwn::not_in_iter(pool.iter().filter_map(|entry| entry.corresponding_cown())); - - let (responder, message_2) = responder - // We're sending our ID by reference: we have a CCS and don't expect anyone to - // run EDHOC with us who can not verify who we are (and from the CCS there is - // no better way). Also, conveniently, this covers our privacy well. 
- // (Sending ByValue would still work) - .prepare_message_2(lakers::CredentialTransfer::ByReference, Some(c_r.0), &None) - .unwrap(); - pool[slot] = SecContextState::EdhocResponderSentM2 { responder, c_r }; response .set_payload(message_2.as_slice()) .map_err(|x| Own(x.into()))?; } Own(EdhocResponse::OscoreRequest { - slot, + kid, mut correlation, extracted, }) => { @@ -587,86 +645,86 @@ impl<'a, H: coap_handler::Handler> coap_handler::Handler for OscoreEdhocHandler< M::Code::new(coap_numbers::code::CHANGED).map_err(|x| Own(x.into()))?, ); - let pool = &mut self.pool.0.borrow_mut(); - let SecContextState::Oscore(ref mut oscore_context) = &mut pool[slot] else { - // FIXME render late error (it'd help if CoAPError also offered a type that unions it - // with an arbitrary other error). As it is, depending on the CoAP stack, there may be - // DoS if a peer can send many requests before the server starts rendering responses. - panic!("State vanished before respone was built."); - }; - - // Almost-but-not: This'd require 'static on Message which we can't have b/c the - // type may be shortlived for good reason. - /* - let response: &mut dyn core::any::Any = response; - let response: &mut coap_message_implementations::inmemory_write::Message = response.downcast_mut() - .expect("libOSCORE currently only works with CoAP stacks whose response messages are inmemory_write"); - */ - // FIXME! - let response: &mut M = response; - let response: &mut coap_message_implementations::inmemory_write::Message = - unsafe { core::mem::transmute(response) }; - - response.set_code(coap_numbers::code::CHANGED); - - use crate::println; - - if liboscore::protect_response( - response, - // SECURITY BIG FIXME: How do we make sure that our correlation is really for - // what we find in the pool and not for what wound up there by the time we send - // the response? (Can't happen with the current stack, but conceptually there - // should be a tie; carry the OSCORE context in an owned way?). - oscore_context, - &mut correlation, - |response| match extracted { - Ok(extracted) => match self.inner.build_response(response, extracted) { - Ok(()) => { - println!("All fine"); - }, - // One attempt to render rendering errors - // FIXME rewind message - Err(e) => match e.render(response) { - Ok(()) => { - println!("Rendering error to successful extraction shown"); - }, - Err(_) => { - println!("Rendering error to successful extraction failed"); - // FIXME rewind message - response.set_code(coap_numbers::code::INTERNAL_SERVER_ERROR); - } - }, - }, - Err(inner_request_error) => { - match inner_request_error.render(response) { - Ok(()) => { - println!("Extraction failed, inner error rendered successfully"); - }, - Err(e) => { - // Two attempts to render extraction errors + self.pool + .lookup(|c| c.corresponding_cown() == Some(kid), |matched| { + let SecContextState::Oscore(ref mut oscore_context) = matched else { + // FIXME render late error (it'd help if CoAPError also offered a type that unions it + // with an arbitrary other error). As it is, depending on the CoAP stack, there may be + // DoS if a peer can send many requests before the server starts rendering responses. + panic!("State vanished before respone was built."); + }; + + // Almost-but-not: This'd require 'static on Message which we can't have b/c the + // type may be shortlived for good reason. 
+ /* + let response: &mut dyn core::any::Any = response; + let response: &mut coap_message_implementations::inmemory_write::Message = response.downcast_mut() + .expect("libOSCORE currently only works with CoAP stacks whose response messages are inmemory_write"); + */ + // FIXME! + let response: &mut M = response; + let response: &mut coap_message_implementations::inmemory_write::Message = + unsafe { core::mem::transmute(response) }; + + response.set_code(coap_numbers::code::CHANGED); + + if liboscore::protect_response( + response, + // SECURITY BIG FIXME: How do we make sure that our correlation is really for + // what we find in the pool and not for what wound up there by the time we send + // the response? (Can't happen with the current stack, but conceptually there + // should be a tie; carry the OSCORE context in an owned way?). + oscore_context, + &mut correlation, + |response| match extracted { + Ok(extracted) => match self.inner.build_response(response, extracted) { + Ok(()) => { + // All fine, response was built + }, + // One attempt to render rendering errors // FIXME rewind message - match e.render(response) { + Err(e) => match e.render(response) { Ok(()) => { - println!("Extraction failed, inner error rendered through fallback"); + writeln!(self.log, "Rendering error to successful extraction shown"); }, Err(_) => { - println!("Extraction failed, inner error rendering failed"); + writeln!(self.log, "Rendering error to successful extraction failed"); + // FIXME rewind message + response.set_code(coap_numbers::code::INTERNAL_SERVER_ERROR); + } + }, + }, + Err(inner_request_error) => { + match inner_request_error.render(response) { + Ok(()) => { + writeln!(self.log, "Extraction failed, inner error rendered successfully"); + }, + Err(e) => { + // Two attempts to render extraction errors // FIXME rewind message - response.set_code( - coap_numbers::code::INTERNAL_SERVER_ERROR, - ); + match e.render(response) { + Ok(()) => { + writeln!(self.log, "Extraction failed, inner error rendered through fallback"); + }, + Err(_) => { + writeln!(self.log, "Extraction failed, inner error rendering failed"); + // FIXME rewind message + response.set_code( + coap_numbers::code::INTERNAL_SERVER_ERROR, + ); + } + } } } } - } + }, + ) + .is_err() + { + writeln!(self.log, "Oups, responding with weird state"); + // todo!("Thanks to the protect API we've lost access to our response"); } - }, - ) - .is_err() - { - println!("Oups, responding with weird state"); - // todo!("Thanks to the protect API we've lost access to our response"); - } + }); } Inner(i) => self.inner.build_response(response, i).map_err(Inner)?, })