diff --git a/logging/Cargo.toml b/logging/Cargo.toml index cc8050c7..a5712960 100644 --- a/logging/Cargo.toml +++ b/logging/Cargo.toml @@ -12,6 +12,7 @@ readme = "README.md" [dependencies] critical-section = "1" bbqueue = "0.5" +static_cell = "2.1.0" [dependencies.defmt] optional = true diff --git a/logging/src/defmt.rs b/logging/src/defmt.rs index c4e6e7f0..de4435b1 100644 --- a/logging/src/defmt.rs +++ b/logging/src/defmt.rs @@ -17,88 +17,119 @@ use imxrt_hal::{dma::channel::Channel, lpuart::Lpuart}; mod frontend { use super::Producer; use core::{ + cell::{Cell, RefCell}, mem::MaybeUninit, sync::atomic::{AtomicBool, Ordering}, }; use critical_section::RestoreState; - /// The defmt global_logger is a static singleton. This differs - /// from the log singleton, which is dynamically set at runtime. - /// - /// Since the singleton is static, it could be invoked before - /// PRODUCER has been initialized. This INIT flag guards against - /// that case. - static INIT: AtomicBool = AtomicBool::new(false); - /// The producer holds encoded defmt frames. - /// - /// Assume initialized when INIT is true. - static mut PRODUCER: MaybeUninit = MaybeUninit::uninit(); - - /// The restore state for the critical section. - static mut RESTORE_STATE: RestoreState = RestoreState::invalid(); - - pub fn init(producer: Producer) { - // Safety: module inspection shows that this function is only called once, - // so we're not changing the pointed-to value while another accessor is - // looking at this memory. - unsafe { PRODUCER.write(producer) }; - INIT.store(true, Ordering::Release); + pub(super) struct Frontend { + init: AtomicBool, + producer: RefCell>, + restore_state: Cell, + encoder: RefCell, } - /// Try to acquire the logger, skipping everything if the logger isn't initialized. 
- pub fn acquire(func: impl FnOnce(&mut Producer, &mut defmt::Encoder) -> R) -> Option { - INIT.load(Ordering::Acquire).then(|| { - // Safety: we're relying on the defmt::Logger user to meet some of these - // critical section guarantees. We're also relying on the firmware developer - // to either - // - // - build separate binaries for multi-core systems - // - select a proper critical_section implementation for those multi-core systems - // that needed to share the logging queue. - unsafe { RESTORE_STATE = critical_section::acquire() }; - + // Safety: The frontend uses internal critical sections to prevent + // concurrent access. The frontend uses an initialization flag to + // track uninitialized memory. + unsafe impl Sync for Frontend {} + + impl Frontend { + const fn new() -> Self { + Self { + init: AtomicBool::new(false), + producer: RefCell::new(MaybeUninit::uninit()), + restore_state: Cell::new(RestoreState::invalid()), + encoder: RefCell::new(defmt::Encoder::new()), + } + } + + pub(super) fn init(&self, producer: Producer) { + // Safety: module inspection shows that this function is only called once, + // so we're not changing the pointed-to value while another accessor is + // looking at this memory. + *self.producer.borrow_mut() = MaybeUninit::new(producer); + self.init.store(true, Ordering::Release); + } + + /// # Safety + /// + /// Caller must check that init is true, and that we're within a critical + /// section. + unsafe fn with_producer_encoder( + &self, + func: impl FnOnce(&mut Producer, &mut defmt::Encoder) -> R, + ) -> R { // Safety: - // - the memory is initialized because INIT is true. + // - the memory is initialized because init is true. // - we have exclusive access due to acquire. - func(unsafe { PRODUCER.assume_init_mut() }, encoder()) - }) - } - - /// Try to release the logger, skipping everything if the loger isn't initialized. 
- pub fn release(func: impl FnOnce(&mut Producer, &mut defmt::Encoder) -> R) -> Option { - INIT.load(Ordering::Acquire).then(|| { - // Safety: - // - the memory is initialized. - // - the defmt user can only call this after calling - // acquire, so this is the only mutable access. - let result = func(unsafe { PRODUCER.assume_init_mut() }, encoder()); - - // Safety: we're relying on the defmt::Logger user to meet some critical - // section guarantees. See the related safety comment on acquire. - unsafe { critical_section::release(RESTORE_STATE) }; - result - }) + unsafe { + let mut producer = self.producer.borrow_mut(); + let producer = producer.assume_init_mut(); + + let mut encoder = self.encoder.borrow_mut(); + func(producer, &mut encoder) + } + } + + /// Try to acquire the logger, skipping everything if the logger isn't initialized. + pub fn acquire( + &self, + func: impl FnOnce(&mut Producer, &mut defmt::Encoder) -> R, + ) -> Option { + self.init.load(Ordering::Acquire).then(|| { + // Safety: we're relying on the defmt::Logger user to meet some of these + // critical section guarantees. We're also relying on the firmware developer + // to either + // + // - build separate binaries for multi-core systems + // - select a proper critical_section implementation for those multi-core systems + // that needed to share the logging queue. + unsafe { self.restore_state.set(critical_section::acquire()) }; + + // Safety: Acquired critical section, and init is true. + unsafe { self.with_producer_encoder(func) } + }) + } + + /// Try to release the logger, skipping everything if the logger isn't initialized. + pub fn release( + &self, + func: impl FnOnce(&mut Producer, &mut defmt::Encoder) -> R, + ) -> Option { + self.init.load(Ordering::Acquire).then(|| { + // Safety: + // - init is true. + // - if we're releasing the critical section, we must + // already have acquired the critical section. 
+ let result = unsafe { self.with_producer_encoder(func) }; + + // Safety: we're relying on the defmt::Logger user to meet some critical + // section guarantees. See the related safety comment on acquire. + unsafe { + critical_section::release(self.restore_state.replace(RestoreState::invalid())) + }; + + result + }) + } + + pub fn write( + &self, + func: impl FnOnce(&mut Producer, &mut defmt::Encoder) -> R, + ) -> Option { + self.init.load(Ordering::Acquire).then(|| { + // Safety: + // - the memory is initialized because INIT is true. + // - the defmt user can only call this after calling + // acquire, so there's only one mutable access. + unsafe { self.with_producer_encoder(func) } + }) + } } - pub fn write(func: impl FnOnce(&mut Producer, &mut defmt::Encoder) -> R) -> Option { - INIT.load(Ordering::Acquire).then(|| { - // Safety: - // - the memory is initialized because INIT is true. - // - the defmt user can only call this after calling - // acquire, so there's only one mutable access. - func(unsafe { PRODUCER.assume_init_mut() }, encoder()) - }) - } - - /// Access the defmt log frame encoder. - fn encoder() -> &'static mut defmt::Encoder { - static mut ENCODER: defmt::Encoder = defmt::Encoder::new(); - // Safety: memory is statically initialized. Inspection of this - // module shows that this is always accessed within the acquire- - // release critical section, so there's only one access to this - // mutable data. - unsafe { &mut *core::ptr::addr_of_mut!(ENCODER) } - } + pub(super) static FRONTEND: Frontend = Frontend::new(); } #[defmt::global_logger] @@ -108,7 +139,7 @@ struct Logger; // defmt::Logger safety contract. unsafe impl defmt::Logger for Logger { fn acquire() { - frontend::acquire(|producer, encoder| { + frontend::FRONTEND.acquire(|producer, encoder| { encoder.start_frame(|buffer| { let _ = crate::try_write_producer(buffer, producer); }); @@ -123,7 +154,7 @@ unsafe impl defmt::Logger for Logger { // make this assumption safe. 
unsafe fn release() { - frontend::release(|producer, encoder| { + frontend::FRONTEND.release(|producer, encoder| { encoder.end_frame(|buffer| { let _ = crate::try_write_producer(buffer, producer); }); @@ -131,7 +162,7 @@ unsafe impl defmt::Logger for Logger { } unsafe fn write(bytes: &[u8]) { - frontend::write(|producer, encoder| { + frontend::FRONTEND.write(|producer, encoder| { encoder.write(bytes, |buffer| { let _ = crate::try_write_producer(buffer, producer); }); @@ -158,10 +189,9 @@ pub fn usb_with_config( }; critical_section::with(|_| { - frontend::init(producer); - // Safety: BUFFER.try_split() guarantees that this is only called once. - unsafe { crate::usbd::init(peripherals, interrupts, consumer, backend_config) }; - Ok(crate::Poller::new(crate::usbd::VTABLE)) + frontend::FRONTEND.init(producer); + let backend = crate::usbd::init(peripherals, interrupts, consumer, backend_config); + Ok(crate::Poller::new(backend)) }) } @@ -196,10 +226,8 @@ pub fn lpuart( }; critical_section::with(|_| { - frontend::init(producer); - // Safety: "called once" requirement met by the buffer split, above. - // Only the first successful split flows into this call. - unsafe { crate::lpuart::init(lpuart, dma_channel, consumer, interrupts) }; - Ok(crate::Poller::new(crate::lpuart::VTABLE)) + frontend::FRONTEND.init(producer); + let backend = crate::lpuart::init(lpuart, dma_channel, consumer, interrupts); + Ok(crate::Poller::new(backend)) }) } diff --git a/logging/src/lib.rs b/logging/src/lib.rs index 06f3a3f7..91aa1328 100644 --- a/logging/src/lib.rs +++ b/logging/src/lib.rs @@ -357,8 +357,7 @@ type Consumer = bbqueue::Consumer<'static, { crate::BUFFER_SIZE }>; /// Since it manages static, mutable state, there can only be one instance /// of a `Poller` in any program. pub struct Poller { - _marker: core::marker::PhantomData<*mut ()>, - vtable: PollerVTable, + inner: Inner, } // Safety: it's OK to move this across execution contexts. 
@@ -367,10 +366,9 @@ pub struct Poller { unsafe impl Send for Poller {} impl Poller { - fn new(vtable: PollerVTable) -> Self { + fn new>(backend: B) -> Self { Poller { - _marker: core::marker::PhantomData, - vtable, + inner: backend.into(), } } @@ -382,15 +380,34 @@ impl Poller { /// in each transfer. #[inline] pub fn poll(&mut self) { - // Safety: poll() implementations can only be called from one execution - // context, to completion. Taking a &mut receiver ensures that this call - // isn't happening concurrently with itself. - unsafe { (self.vtable.poll)() }; + self.inner.poll(); } } -struct PollerVTable { - poll: unsafe fn(), +enum Inner { + Lpuart(&'static mut lpuart::Backend), + Usbd(&'static mut usbd::Backend), +} + +impl From<&'static mut lpuart::Backend> for Inner { + fn from(backend: &'static mut lpuart::Backend) -> Self { + Inner::Lpuart(backend) + } +} + +impl From<&'static mut usbd::Backend> for Inner { + fn from(backend: &'static mut usbd::Backend) -> Self { + Inner::Usbd(backend) + } +} + +impl Inner { + fn poll(&mut self) { + match self { + Self::Lpuart(backend) => backend.poll(), + Self::Usbd(backend) => backend.poll(), + } + } } #[cfg(test)] diff --git a/logging/src/log.rs b/logging/src/log.rs index c069ffae..21ca9dfb 100644 --- a/logging/src/log.rs +++ b/logging/src/log.rs @@ -96,15 +96,12 @@ pub fn usbd_with_config( Err(_) => return Err(crate::AlreadySetError::new(peripherals)), }; - // Safety: both can only be called once. We use try_split - // (above) to meet that requirement. If that method is called - // more than once, subsequent calls are an error. 
- critical_section::with(|_| unsafe { + critical_section::with(|_| { if frontend::init(producer, frontend_config).is_err() { return Err(crate::AlreadySetError::new(peripherals)); } - crate::usbd::init(peripherals, interrupts, consumer, backend_config); - Ok(Poller::new(crate::usbd::VTABLE)) + let backend = crate::usbd::init(peripherals, interrupts, consumer, backend_config); + Ok(Poller::new(backend)) }) } @@ -140,14 +137,12 @@ pub fn lpuart_with_config( Err(_) => return Err(crate::AlreadySetError::new((lpuart, dma_channel))), }; - // Safety: all of this can only happen once. We use try_split - // to meet that requirement. - critical_section::with(|_| unsafe { + critical_section::with(|_| { if frontend::init(producer, frontend_config).is_err() { return Err(crate::AlreadySetError::new((lpuart, dma_channel))); } - crate::lpuart::init(lpuart, dma_channel, consumer, interrupts); - Ok(Poller::new(crate::lpuart::VTABLE)) + let backend = crate::lpuart::init(lpuart, dma_channel, consumer, interrupts); + Ok(Poller::new(backend)) }) } diff --git a/logging/src/log/frontend.rs b/logging/src/log/frontend.rs index 28fd95a4..02e71aca 100644 --- a/logging/src/log/frontend.rs +++ b/logging/src/log/frontend.rs @@ -4,12 +4,13 @@ //! Asynchronous peripherals can then read the queue to //! transport data. -use core::{cell::RefCell, mem::MaybeUninit}; +use core::cell::RefCell; use super::Filters; use bbqueue as bbq; use critical_section::Mutex; +use static_cell::StaticCell; struct Logger<'a, const N: usize> { producer: Mutex>>, @@ -55,24 +56,18 @@ impl core::fmt::Write for Writer<'_, '_, N> { /// Initialize the logging frontend. /// -/// # Safety +/// # Panics /// -/// Caller must ensure that this function is only called once. -pub(crate) unsafe fn init( +/// Panics if called more than once. 
+pub(crate) fn init( producer: bbq::Producer<'static, { crate::BUFFER_SIZE }>, config: &super::LoggingConfig, ) -> Result<(), crate::AlreadySetError<()>> { - static mut LOGGER: MaybeUninit> = MaybeUninit::uninit(); - // Safety: write to static mut. Assumed that this only happens once. - // We should be preventing multiple callers with the critical section, - // so the "only happens once" is to ensure that we're not changing the - // static while the logger is active. - let logger = unsafe { - LOGGER.write(Logger { - producer: Mutex::new(RefCell::new(producer)), - filters: super::Filters(config.filters), - }) - }; + static LOGGER: StaticCell> = StaticCell::new(); + let logger = LOGGER.init(Logger { + producer: Mutex::new(RefCell::new(producer)), + filters: super::Filters(config.filters), + }); ::log::set_logger(logger) .map(|_| ::log::set_max_level(config.max_level)) .map_err(|_| crate::AlreadySetError::new(())) diff --git a/logging/src/lpuart.rs b/logging/src/lpuart.rs index 43e37810..11aa5340 100644 --- a/logging/src/lpuart.rs +++ b/logging/src/lpuart.rs @@ -6,136 +6,140 @@ //! with a borrowed producer, then finish the initialization once everything //! is OK from the first phase. -use core::mem::MaybeUninit; - use imxrt_hal::{ dma::{channel, peripheral::Destination}, lpuart::{Direction, Lpuart}, }; +use static_cell::StaticCell; -static mut CONSUMER: MaybeUninit = MaybeUninit::uninit(); -static mut CHANNEL: MaybeUninit = MaybeUninit::uninit(); - -pub(crate) const VTABLE: crate::PollerVTable = crate::PollerVTable { poll }; - -/// Drive the logging behavior. -/// -/// # Safety -/// -/// This may only be called from one execution context. It can only be called -/// after `CONSUMER` and `CHANNEL` are initialized. -/// -/// By exposing this function through a [`Poller`](crate::Poller), we make both -/// of these guarantees. The `Poller` indirectly "owns" the static mut memory, -/// and the crate design ensures that there's only one `Poller` object in existence. 
-unsafe fn poll() { - // Safety: caller ensures that these are initializd, and that the function - // isn't reentrant. - let (consumer, channel) = unsafe { (CONSUMER.assume_init_mut(), CHANNEL.assume_init_mut()) }; - - // Could be high if the user enabled DMA interrupts. - while channel.is_interrupt() { - channel.clear_interrupt(); - } - - assert!(!channel.is_error(), "{:?}", channel.error_status()); +pub(crate) struct Backend { + consumer: crate::Consumer, + channel: channel::Channel, +} - // Don't schedule another transfer while one is enabled. - // DMA channel configuration will automatically disable - // when the transfer completes (set_disable_on_completion). - if channel.is_enabled() { - return; - } +impl Backend { + /// Drive the logging behavior. + /// + /// This may only be called from one execution context. Taking `&mut self` + /// ensures that this call isn't happening concurrently with itself, and + /// the fields are initialized when the `Backend` is constructed. + /// + /// By exposing this method through a [`Poller`](crate::Poller), we make + /// these guarantees. The `Poller` indirectly owns this backend, and the + /// crate design ensures that there's only one `Poller` object in + /// existence. + pub(crate) fn poll(&mut self) { + // Could be high if the user enabled DMA interrupts. + while self.channel.is_interrupt() { + self.channel.clear_interrupt(); + } - let complete = { - let mut complete = false; - while channel.is_complete() { - channel.clear_complete(); - complete = true; + assert!( + !self.channel.is_error(), + "{:?}", + self.channel.error_status() + ); + + // Don't schedule another transfer while one is enabled. + // DMA channel configuration will automatically disable + // when the transfer completes (set_disable_on_completion). + if self.channel.is_enabled() { + return; + } - complete - }; + let complete = { + let mut complete = false; + while self.channel.is_complete() { + self.channel.clear_complete(); + complete = true; + } + complete }; - // If we're at this point, there's no active transfer. So if - // there's data available, we should try to schedule that transfer. 
- // - // The DMA controller references a slice of the buffer that we haven't - // yet released. If the last transfer completed, it's time for us to - // release that buffer for the producer. - // - // The goal is to only call read once, and handle cases where - // transfers complete, and / or there's new data. It helps to decompose - // into two branches, then studying the paths through these two branches. - // - // if complete { /* Call read, and release grant based on last transfer size */} - // if let Ok(grant) = consumer.read() { - // /* Schedule next transfer with grant contents. */ - // } - if let Ok(grant) = consumer.read() { - // completed holds whatever we previously transferred. - // new either holds - // - // 1. the data accumulated since the start of the last transfer. - // 2. all of the data accumulated since the last call to poll. - let (completed, new) = if complete { - let transferred: usize = channel.beginning_transfer_iterations().into(); - let buf = grant.buf(); - (&buf[..transferred], &buf[transferred..]) - } else { - (&[][..], grant.buf()) + let complete = { + let mut complete = false; + while self.channel.is_complete() { + self.channel.clear_complete(); + complete = true; + } + complete }; - if !new.is_empty() { - // Safety: the buffer is static and will always be valid while the transfer - // is active. - unsafe { channel::set_source_linear_buffer(channel, new) }; - // Safety: the iterations are based on the number of elements in the collection, - // so we're not indexing out of bounds. - unsafe { channel.set_transfer_iterations(new.len().min(u16::MAX as usize) as u16) }; - // Safety: transfer is correctly set up here, and in the init method. - unsafe { channel.enable() }; - } - - if !completed.is_empty() { - let completed = completed.len(); - grant.release(completed); + // If we're at this point, there's no active transfer. So if + // there's data available, we should try to schedule that transfer. 
+ // + // The DMA controller references a slice of the buffer that we haven't + // yet released. If the last transfer completed, it's time for us to + // release that buffer for the producer. + // + // The goal is to only call read once, and handle cases where + // transfers complete, and / or there's new data. It helps to decompose + // into two branches, then studying the paths through these two branches. + // + // if complete { /* Call read, and release grant based on last transfer size */} + // if let Ok(grant) = consumer.read() { + // /* Schedule next transfer with grant contents. */ + // } + if let Ok(grant) = self.consumer.read() { + // completed holds whatever we previously transferred. + // new either holds + // + // 1. the data accumulated since the start of the last transfer. + // 2. all of the data accumulated since the last call to poll. + let (completed, new) = if complete { + let transferred: usize = self.channel.beginning_transfer_iterations().into(); + let buf = grant.buf(); + (&buf[..transferred], &buf[transferred..]) + } else { + (&[][..], grant.buf()) + }; + + if !new.is_empty() { + // Safety: the buffer is static and will always be valid while the transfer + // is active. + unsafe { channel::set_source_linear_buffer(&mut self.channel, new) }; + // Safety: the iterations are based on the number of elements in the collection, + // so we're not indexing out of bounds. + unsafe { + self.channel + .set_transfer_iterations(new.len().min(u16::MAX as usize) as u16) + }; + // Safety: transfer is correctly set up here, and in the init method. + unsafe { self.channel.enable() }; + } + + if !completed.is_empty() { + let completed = completed.len(); + grant.release(completed); + } } } } /// Initialize the LPUART logger. /// -/// # Safety +/// # Panics /// -/// This call must only be called once. This call must happen before `poll` is invoked. -pub(crate) unsafe fn init( +/// Panics if called more than once. 
+pub(crate) fn init( mut lpuart: Lpuart, - channel: channel::Channel, + mut channel: channel::Channel, consumer: crate::Consumer, interrupts: crate::Interrupts, -) { +) -> &'static mut Backend { channel.disable(); channel.clear_complete(); channel.clear_error(); - // Safety: mutable static access. Caller only calls this once, and poll() isn't - // accessible by this point. There's no race on the consumer. - unsafe { CONSUMER.write(consumer) }; - // Safety: mutable static access. See above. - let channel = unsafe { CHANNEL.write(channel) }; + static BACKEND: StaticCell = StaticCell::new(); + BACKEND.init_with(move || { + channel.set_disable_on_completion(true); + channel.set_interrupt_on_completion(interrupts == crate::Interrupts::Enabled); - channel.set_disable_on_completion(true); - channel.set_interrupt_on_completion(interrupts == crate::Interrupts::Enabled); + channel + .set_channel_configuration(channel::Configuration::enable(lpuart.destination_signal())); + // Safety: element size is appropriate for the buffer type. + unsafe { channel.set_minor_loop_bytes(core::mem::size_of::() as u32) }; - channel.set_channel_configuration(channel::Configuration::enable(lpuart.destination_signal())); - // Safety: element size is appropriate for the buffer type. - unsafe { channel.set_minor_loop_bytes(core::mem::size_of::() as u32) }; + // Safety: hardware address is valid. + unsafe { channel::set_destination_hardware(&mut channel, lpuart.destination_address()) }; - // Safety: hardware address is valid. - unsafe { channel::set_destination_hardware(channel, lpuart.destination_address()) }; + lpuart.disable(|lpuart| { + lpuart.disable_fifo(Direction::Tx); + }); + lpuart.enable_destination(); // Note: this call is never undone. - lpuart.disable(|lpuart| { - lpuart.disable_fifo(Direction::Tx); - }); - lpuart.enable_destination(); // Note: this call is never undone. 
+ Backend { channel, consumer } + }) } diff --git a/logging/src/usbd.rs b/logging/src/usbd.rs index 0ecc5ed0..40a633d9 100644 --- a/logging/src/usbd.rs +++ b/logging/src/usbd.rs @@ -1,6 +1,6 @@ //! USB serial (CDC) backend using imxrt-usbd. -use core::mem::MaybeUninit; +use static_cell::StaticCell; use usb_device::device::UsbDeviceState; const VID_PID: usb_device::device::UsbVidPid = usb_device::device::UsbVidPid(0x5824, 0x27dd); @@ -20,11 +20,6 @@ type BusAllocator = usb_device::bus::UsbBusAllocator; type Class<'a> = usbd_serial::CdcAcmClass<'a, Bus>; type Device<'a> = usb_device::device::UsbDevice<'a, Bus>; -static mut BUS: MaybeUninit = MaybeUninit::uninit(); -static mut CLASS: MaybeUninit> = MaybeUninit::uninit(); -static mut DEVICE: MaybeUninit> = MaybeUninit::uninit(); -static mut CONSUMER: MaybeUninit = MaybeUninit::uninit(); - /// High-speed bulk endpoint limit. const MAX_PACKET_SIZE: usize = crate::config::USB_BULK_MPS; /// Size for control transfers on endpoint 0. @@ -32,168 +27,137 @@ const EP0_CONTROL_PACKET_SIZE: usize = 64; /// The USB GPT timer we use to (infrequently) check for data. const GPT_INSTANCE: imxrt_usbd::gpt::Instance = imxrt_usbd::gpt::Instance::Gpt0; -pub(crate) const VTABLE: crate::PollerVTable = crate::PollerVTable { poll }; - -/// Drive the logging behavior. -/// -/// # Safety -/// -/// This may only be called from one execution context. It can only be called -/// after all `static mut`s are initialized. -/// -/// By exposing this function through a [`Poller`](crate::Poller), we make both -/// of these guarantees. The `Poller` indirectly "owns" the static mut memory, -/// and the crate design ensures that there's only one `Poller` object in existence. -unsafe fn poll() { - static mut CONFIGURED: bool = false; - #[inline(always)] - fn is_configured() -> bool { - // Safety: poll() isn't reentrant. Only poll() can - // read this state. 
- unsafe { CONFIGURED } - } - #[inline(always)] - fn set_configured(configured: bool) { - // Safety: poll() isn't reentrant. Only poll() can - // modify this state.s - unsafe { CONFIGURED = configured }; - } - - // Safety: caller ensures that these are initializd, and that the function - // isn't reentrant. - let (device, class) = unsafe { (DEVICE.assume_init_mut(), CLASS.assume_init_mut()) }; +pub(crate) struct Backend { + class: Class<'static>, + device: Device<'static>, + consumer: crate::Consumer, + configured: bool, +} - // Is there a CDC class event, like a completed transfer? If so, check - // the consumer immediately, even if a timer hasn't expired. - // - // Checking the consumer on class traffic lets the driver burst out data. - // Suppose the user wants to use the USB GPT timer, and they configure a very - // long interval. That interval expires, and we see tons of data in the consumer. - // We should write that out as fast as possible, even if the timer hasn't elapsed. - // That's the behavior provided by the class_event flag. - let class_event = device.poll(&mut [class]); - let timer_event = device.bus().gpt_mut(GPT_INSTANCE, |gpt| { - let mut elapsed = false; - while gpt.is_elapsed() { - gpt.clear_elapsed(); - elapsed = true; - } - // Simulate a timer event if the timer is not running. +impl Backend { + pub(crate) fn poll(&mut self) { + // Is there a CDC class event, like a completed transfer? If so, check + // the consumer immediately, even if a timer hasn't expired. // - // If the timer is not running, its because the user disabled interrupts, - // and they're using their own timer / polling loop. There might not always - // be a class traffic (transfer complete) event when the user polls, so - // signaling true allows the poll to check the consumer for new data and - // send it. - // - // If the timer is running, checking the consumer depends on the elapsed - // timer. 
- elapsed || !gpt.is_running() - }); - let check_consumer = class_event || timer_event; + // Checking the consumer on class traffic lets the driver burst out data. + // Suppose the user wants to use the USB GPT timer, and they configure a very + // long interval. That interval expires, and we see tons of data in the consumer. + // We should write that out as fast as possible, even if the timer hasn't elapsed. + // That's the behavior provided by the class_event flag. + let class_event = self.device.poll(&mut [&mut self.class]); + let timer_event = self.device.bus().gpt_mut(GPT_INSTANCE, |gpt| { + let mut elapsed = false; + while gpt.is_elapsed() { + gpt.clear_elapsed(); + elapsed = true; + } + // Simulate a timer event if the timer is not running. + // + // If the timer is not running, its because the user disabled interrupts, + // and they're using their own timer / polling loop. There might not always + // be a class traffic (transfer complete) event when the user polls, so + // signaling true allows the poll to check the consumer for new data and + // send it. + // + // If the timer is running, checking the consumer depends on the elapsed + // timer. + elapsed || !gpt.is_running() + }); + let check_consumer = class_event || timer_event; - if device.state() != UsbDeviceState::Configured { - if is_configured() { - // Turn off the timer, but only if we were previously configured. - device.bus().gpt_mut(GPT_INSTANCE, |gpt| gpt.stop()); + if self.device.state() != UsbDeviceState::Configured { + if self.configured { + // Turn off the timer, but only if we were previously configured. + self.device.bus().gpt_mut(GPT_INSTANCE, |gpt| gpt.stop()); + } + self.configured = false; + // We can't use the class if we're not configured, + // so bail out here. + return; } - set_configured(false); - // We can't use the class if we're not configured, - // so bail out here. - return; - } - // We're now configured. Are we newly configured? 
- if !is_configured() { - // Must call this when we transition into configured. - device.bus().configure(); - device.bus().gpt_mut(GPT_INSTANCE, |gpt| { - // There's no need for a timer if interrupts are disabled. - // If the user disabled USB interrupts and decided to poll this - // from another timer, this USB timer could unnecessarily block - // that timer from checking the consumer queue. - if gpt.is_interrupt_enabled() { - gpt.run() - } - }); - set_configured(true); - } + // We're now configured. Are we newly configured? + if !self.configured { + // Must call this when we transition into configured. + self.device.bus().configure(); + self.device.bus().gpt_mut(GPT_INSTANCE, |gpt| { + // There's no need for a timer if interrupts are disabled. + // If the user disabled USB interrupts and decided to poll this + // from another timer, this USB timer could unnecessarily block + // that timer from checking the consumer queue. + if gpt.is_interrupt_enabled() { + gpt.run() + } + }); + self.configured = true; + } - // If the host sends us data, pretend to read it. - // This prevents us from continuously NAKing the host, - // which the host might not appreciate. - class.read_packet(&mut []).ok(); + // If the host sends us data, pretend to read it. + // This prevents us from continuously NAKing the host, + // which the host might not appreciate. + self.class.read_packet(&mut []).ok(); - // There's no need to wait if we were are newly configured. - if check_consumer { - // Safety: consumer is initialized if poll() is running. Caller ensures - // that poll() is not reentrant. - let consumer = unsafe { CONSUMER.assume_init_mut() }; - if let Ok(grant) = consumer.read() { - let buf = grant.buf(); - // Don't try to write more than we can fit in a single packet! - // See the usbd-serial documentation for this caveat. We didn't - // statically allocate enough space for anything larger. 
- if let Ok(written) = class.write_packet(&buf[..MAX_PACKET_SIZE.min(buf.len())]) { - grant.release(written); - // Log data is in the intermediate buffer, so it's OK to release the grant. - // - // If the I/O fails here, we'll try again on the next poll. There's no guarantee - // we'll see a improvement though... - } - } // else, no data, or some error. Let those logs accumulate! + // There's no need to wait if we are newly configured. + if check_consumer { + if let Ok(grant) = self.consumer.read() { + let buf = grant.buf(); + // Don't try to write more than we can fit in a single packet! + // See the usbd-serial documentation for this caveat. We didn't + // statically allocate enough space for anything larger. + if let Ok(written) = self + .class + .write_packet(&buf[..MAX_PACKET_SIZE.min(buf.len())]) + { + grant.release(written); + // Log data is in the intermediate buffer, so it's OK to release the grant. + // + // If the I/O fails here, we'll try again on the next poll. There's no guarantee + // we'll see an improvement though... + } + } // else, no data, or some error. Let those logs accumulate! + } } } /// Initialize the USB logger. /// -/// # Safety +/// # Panics /// -/// This can only be called once. -pub(crate) unsafe fn init( +/// Panics if called more than once. +pub(crate) fn init( peripherals: P, interrupts: crate::Interrupts, consumer: super::Consumer, config: &UsbdConfig, -) { - // Safety: mutable static write. This only occurs once, and poll() - // cannot run by the time we're writing this memory. - unsafe { CONSUMER.write(consumer) }; - - let bus: &mut usb_device::class_prelude::UsbBusAllocator = { - // Safety: we ensure that the bus, class, and all other related USB objects - // are accessed in poll(). poll() is not reentrant, so there's no racing - // occuring across executing contexts. 
- let bus = unsafe { - imxrt_usbd::BusAdapter::without_critical_sections( - peripherals, - &ENDPOINT_MEMORY, - &ENDPOINT_STATE, - crate::config::USB_SPEED, - ) - }; - bus.set_interrupts(interrupts == crate::Interrupts::Enabled); - bus.gpt_mut(GPT_INSTANCE, |gpt| { - gpt.stop(); - gpt.clear_elapsed(); - gpt.set_interrupt_enabled(interrupts == crate::Interrupts::Enabled); - gpt.set_mode(imxrt_usbd::gpt::Mode::Repeat); - gpt.set_load(config.poll_interval_us); - gpt.reset(); +) -> &'static mut Backend { + static BACKEND: StaticCell = StaticCell::new(); + BACKEND.init_with(|| { + static BUS: StaticCell = StaticCell::new(); + let bus = BUS.init_with(|| { + // Safety: we ensure that the bus, class, and all other related USB objects + // are accessed in poll(). poll() is not reentrant, so there's no racing + // occurring across executing contexts. + let bus = unsafe { + imxrt_usbd::BusAdapter::without_critical_sections( + peripherals, + &ENDPOINT_MEMORY, + &ENDPOINT_STATE, + crate::config::USB_SPEED, + ) + }; + bus.set_interrupts(interrupts == crate::Interrupts::Enabled); + bus.gpt_mut(GPT_INSTANCE, |gpt| { + gpt.stop(); + gpt.clear_elapsed(); + gpt.set_interrupt_enabled(interrupts == crate::Interrupts::Enabled); + gpt.set_mode(imxrt_usbd::gpt::Mode::Repeat); + gpt.set_load(config.poll_interval_us); + gpt.reset(); + }); + usb_device::bus::UsbBusAllocator::new(bus) }); - let bus = usb_device::bus::UsbBusAllocator::new(bus); - // Safety: mutable static write. This only occurs once, and poll() - // cannot run by the time we're writing this memory. - unsafe { BUS.write(bus) } - }; - - { let class = usbd_serial::CdcAcmClass::new(bus, MAX_PACKET_SIZE as u16); - // Safety: mutable static write. See the above discussion for BUS. - unsafe { CLASS.write(class) }; - } - { let device = usb_device::device::UsbDeviceBuilder::new(bus, VID_PID) .product(PRODUCT) .device_class(usbd_serial::USB_CLASS_CDC) @@ -211,9 +175,13 @@ pub(crate) unsafe fn init( } } - // Safety: mutable static write. 
See the above discussion for BUS. - unsafe { DEVICE.write(device) }; - } + Backend { + class, + device, + consumer, + configured: false, + } + }) } /// USB device configuration builder.