diff --git a/src/dma/channel.rs b/src/dma/channel.rs index 4e3bc2ab..c65cdaa8 100644 --- a/src/dma/channel.rs +++ b/src/dma/channel.rs @@ -124,24 +124,6 @@ impl<'d> Channel<'d> { let xfercount = (mem_len / xferwidth) - 1; let channel = self.info.ch_num; - // Configure the channel descriptor - // NOTE: the DMA controller expects the memory buffer end address but peripheral address is actual - // SAFETY: unsafe due to use of a mutable static (DESCRIPTORS.list) - unsafe { - DESCRIPTORS.list[channel].reserved = 0; - if dir == Direction::MemoryToPeripheral { - DESCRIPTORS.list[channel].dst_data_end_addr = dstbase as u32; - } else { - DESCRIPTORS.list[channel].dst_data_end_addr = dstbase as u32 + (xfercount * xferwidth) as u32; - } - if dir == Direction::PeripheralToMemory { - DESCRIPTORS.list[channel].src_data_end_addr = srcbase as u32; - } else { - DESCRIPTORS.list[channel].src_data_end_addr = srcbase as u32 + (xfercount * xferwidth) as u32; - } - DESCRIPTORS.list[channel].nxt_desc_link_addr = 0; - } - // Configure for transfer type, no hardware triggering (we'll trigger via software), high priority // SAFETY: unsafe due to .bits usage self.info.regs.channel(channel).cfg().write(|w| unsafe { @@ -164,8 +146,19 @@ impl<'d> Channel<'d> { // SAFETY: unsafe due to .bits usage self.info.regs.channel(channel).xfercfg().write(|w| unsafe { w.cfgvalid().set_bit(); - w.clrtrig().set_bit(); - w.reload().clear_bit(); + + if options.is_continuous { + w.reload().enabled(); + w.clrtrig().clear_bit(); + } else { + w.reload().disabled(); + w.clrtrig().set_bit(); + } + if options.is_sw_trig { + w.swtrig().set_bit(); + } else { + w.swtrig().clear_bit(); + } w.setinta().set_bit(); w.width().bits(options.width.into()); if dir == Direction::PeripheralToMemory { @@ -180,6 +173,35 @@ impl<'d> Channel<'d> { } w.xfercount().bits(xfercount as u16) }); + + // Configure the channel descriptor + // NOTE: the DMA controller expects the memory buffer end address but peripheral address is actual + // 
SAFETY: unsafe due to use of a mutable static (DESCRIPTORS.list) + unsafe { + if options.is_continuous { + let xfer_cfg = self.info.regs.channel(channel).xfercfg().read(); + DESCRIPTORS.list[channel].reserved_reloadcfg = xfer_cfg.bits(); + } else { + DESCRIPTORS.list[channel].reserved_reloadcfg = 0; + } + + if dir == Direction::MemoryToPeripheral { + DESCRIPTORS.list[channel].dst_data_end_addr = dstbase as u32; + } else { + DESCRIPTORS.list[channel].dst_data_end_addr = dstbase as u32 + (xfercount * xferwidth) as u32; + } + + if dir == Direction::PeripheralToMemory { + DESCRIPTORS.list[channel].src_data_end_addr = srcbase as u32; + } else { + DESCRIPTORS.list[channel].src_data_end_addr = srcbase as u32 + (xfercount * xferwidth) as u32; + } + if options.is_continuous { + DESCRIPTORS.list[channel].nxt_desc_link_addr = &DESCRIPTORS.list[channel] as *const _ as u32; + } else { + DESCRIPTORS.list[channel].nxt_desc_link_addr = 0; + } + } } /// Enable the DMA channel (only after configuring) @@ -192,6 +214,40 @@ impl<'d> Channel<'d> { .modify(|_, w| unsafe { w.ena().bits(1 << channel) }); } + #[cfg(feature = "defmt")] + /// Log the DMA channel transfer configuration + pub fn log_channel(&self) { + // Log DMA Channel Config Register + let channel = self.info.ch_num; + let dma_channel_cfg = self.info.regs.channel(channel).cfg().read(); + defmt::info!( + "DMA Channel {} Config Register: 0x{=u32:x}", + channel, + dma_channel_cfg.bits() + ); + // Log Interrupt Enable Set Register + let intenset0 = self.info.regs.intenset0().read(); + defmt::info!("Interrupt Enable Set Register: 0x{=u32:x}", intenset0.bits()); + + // Log DMA Xfer Config Register + let xfer_cfg = self.info.regs.channel(channel).xfercfg().read(); + defmt::info!( + "DMA Channel {} Xfer Config Register: 0x{=u32:x}", + channel, + xfer_cfg.bits() + ); + // Log DMA Enable Set Register after enabling + let enableset0 = self.info.regs.enableset0().read(); + defmt::info!("DMA Enable Set Register:0x{=u32:x}", 
enableset0.bits()); + // Log DMA Xfer Config Register after triggering + let xfer_cfg_after = self.info.regs.channel(channel).xfercfg().read(); + defmt::info!( + "DMA Channel {} Xfer Config Register (After Trigger): 0x{=u32:x}", + channel, + xfer_cfg_after.bits() + ); + } + /// Disable the DMA channel pub fn disable_channel(&self) { let channel = self.info.ch_num; diff --git a/src/dma/mod.rs b/src/dma/mod.rs index 56536d10..1327be5b 100644 --- a/src/dma/mod.rs +++ b/src/dma/mod.rs @@ -26,7 +26,7 @@ const DMA_CHANNEL_COUNT: usize = 33; #[derive(Copy, Clone, Debug)] #[repr(C)] struct ChannelDescriptor { - reserved: u32, + reserved_reloadcfg: u32, src_data_end_addr: u32, dst_data_end_addr: u32, nxt_desc_link_addr: u32, @@ -42,7 +42,7 @@ struct DescriptorBlock { /// DMA channel descriptor list static mut DESCRIPTORS: DescriptorBlock = DescriptorBlock { list: [ChannelDescriptor { - reserved: 0, + reserved_reloadcfg: 0, src_data_end_addr: 0, dst_data_end_addr: 0, nxt_desc_link_addr: 0, diff --git a/src/dma/transfer.rs b/src/dma/transfer.rs index 7ea132b7..33a76d81 100644 --- a/src/dma/transfer.rs +++ b/src/dma/transfer.rs @@ -16,6 +16,12 @@ pub struct TransferOptions { /// Transfer priority level pub priority: Priority, + + /// Transfer is intended to be done continuously, such as for a circular buffer + pub is_continuous: bool, + + /// Transfer is software triggered + pub is_sw_trig: bool, } impl Default for TransferOptions { @@ -23,6 +29,8 @@ impl Default for TransferOptions { Self { width: Width::Bit8, priority: Priority::Priority0, + is_continuous: false, + is_sw_trig: false, } } } diff --git a/src/uart.rs b/src/uart.rs index 5ccdc94a..3128b004 100644 --- a/src/uart.rs +++ b/src/uart.rs @@ -49,6 +49,7 @@ pub struct UartTx<'a, M: Mode> { /// Uart RX driver. 
pub struct UartRx<'a, M: Mode> { info: Info, + _buffer_config: Option, _rx_dma: Option>, _phantom: PhantomData<(&'a (), M)>, } @@ -218,10 +219,22 @@ impl<'a> UartTx<'a, Blocking> { } } +struct BufferConfig { + #[cfg(feature = "time")] + buffer: &'static mut [u8], + #[cfg(feature = "time")] + write_index: usize, + #[cfg(feature = "time")] + read_index: usize, + #[cfg(feature = "time")] + polling_rate: u64, +} + impl<'a, M: Mode> UartRx<'a, M> { - fn new_inner(_rx_dma: Option>) -> Self { + fn new_inner(_rx_dma: Option>, _buffer_config: Option) -> Self { Self { info: T::info(), + _buffer_config, _rx_dma, _phantom: PhantomData, } @@ -242,7 +255,7 @@ impl<'a> UartRx<'a, Blocking> { let mut _rx = rx.map_into(); Uart::::init::(None, Some(_rx.reborrow()), None, None, config)?; - Ok(Self::new_inner::(None)) + Ok(Self::new_inner::(None, None)) } } @@ -500,7 +513,7 @@ impl<'a> Uart<'a, Blocking> { Ok(Self { info: T::info(), tx: UartTx::new_inner::(None), - rx: UartRx::new_inner::(None), + rx: UartRx::new_inner::(None, None), }) } @@ -687,11 +700,82 @@ impl<'a> UartRx<'a, Async> { let rx_dma = dma::Dma::reserve_channel(rx_dma); - Ok(Self::new_inner::(Some(rx_dma))) + Ok(Self::new_inner::(Some(rx_dma), None)) + } + + /// create a new DMA enabled UART which can only receive data, but uses a buffer to avoid FIFO overflow + /// Note: requires time-driver due to hardware constraint requiring a polled interface (no UART Idle bus indicator) + /// alternative approaches are possible, this was done as it maintains most similarity between Buffered and + /// Unbuffered read interfaces + #[cfg(feature = "time")] + pub fn new_async_with_buffer( + _inner: impl Peripheral

+ 'a, + rx: impl Peripheral

> + 'a, + _irq: impl interrupt::typelevel::Binding> + 'a, + rx_dma: impl Peripheral

> + 'a, + config: Config, + buffer: &'static mut [u8], + polling_rate_us: u64, + ) -> Result { + assert!(buffer.len() <= 1024); + + into_ref!(_inner); + into_ref!(rx); + rx.as_rx(); + + let mut _rx = rx.map_into(); + Uart::::init::(None, Some(_rx.reborrow()), None, None, config)?; + + T::Interrupt::unpend(); + unsafe { T::Interrupt::enable() }; + + let rx_dma = dma::Dma::reserve_channel(rx_dma); + T::info().regs.fifocfg().modify(|_, w| w.dmarx().enabled()); + // immediately configure and enable channel for circular buffered reception + rx_dma.configure_channel( + dma::transfer::Direction::PeripheralToMemory, + T::info().regs.fiford().as_ptr() as *const u8 as *const u32, + buffer as *mut [u8] as *mut u32, + buffer.len(), + dma::transfer::TransferOptions { + width: dma::transfer::Width::Bit8, + priority: dma::transfer::Priority::Priority0, + is_continuous: true, + is_sw_trig: true, + }, + ); + rx_dma.enable_channel(); + rx_dma.trigger_channel(); + + Ok(Self::new_inner::( + Some(rx_dma), + Some(BufferConfig { + buffer, + write_index: 0, + read_index: 0, + polling_rate: polling_rate_us, + }), + )) } /// Read from UART RX asynchronously. 
pub async fn read(&mut self, buf: &mut [u8]) -> Result<()> { + #[cfg(feature = "time")] + { + if self._buffer_config.is_some() { + self.read_buffered(buf).await + } else { + self.read_unbuffered(buf).await + } + } + + #[cfg(not(feature = "time"))] + { + self.read_unbuffered(buf).await + } + } + + async fn read_unbuffered(&mut self, buf: &mut [u8]) -> Result<()> { let regs = self.info.regs; for chunk in buf.chunks_mut(1024) { @@ -758,6 +842,164 @@ impl<'a> UartRx<'a, Async> { Ok(()) } + + #[cfg(feature = "time")] + + async fn read_buffered(&mut self, buf: &mut [u8]) -> Result<()> { + // unwrap safe here as only entry path to API requires rx_dma instance + let rx_dma = self._rx_dma.as_ref().unwrap(); + let buffer_config = self._buffer_config.as_mut().unwrap(); + + // do not need to break into dma lengthed chunks as we already have a reserved circular buffer + let mut bytes_read = 0; + + // As the Rx Idle interrupt is not present for this processor, we must poll to see if new data is available + while bytes_read < buf.len() { + if rx_dma.is_active() { + let mut remaining_bytes = rx_dma.get_xfer_count() as usize; + remaining_bytes = remaining_bytes + 1; + + if remaining_bytes > buffer_config.buffer.len() { + return Err(Error::InvalidArgument); + } + + // determine current write index where transfer will continue to + buffer_config.write_index = (buffer_config.buffer.len() - remaining_bytes) % buffer_config.buffer.len(); + } + + // if we have fully formed new data in the buffer: + if buffer_config.write_index != buffer_config.read_index { + if buffer_config.write_index > buffer_config.read_index { + // calculate the read_len + let in_len = buffer_config.write_index - buffer_config.read_index; + let remaining_read_len = buf.len() - bytes_read; + let read_len = remaining_read_len.min(in_len); + + // mark start and stop pointers based on read_len + let in_start = buffer_config.read_index; + let in_stop = buffer_config.read_index + read_len; + let out_start = bytes_read; + 
let out_stop = bytes_read + read_len; + + // copy slices + let incoming_slice = &buffer_config.buffer[in_start..in_stop]; + let outgoing_slice = &mut buf[out_start..out_stop]; + outgoing_slice.copy_from_slice(incoming_slice); + + // save off last actual read index in case of longer transfer than expected + buffer_config.read_index = (buffer_config.read_index + read_len) % buffer_config.buffer.len(); + + // finally increment total bytes read + bytes_read += read_len; + } else { + // handle roll over + // do the first part (dangling portion of buffer) + // calculate the read_len + let in_len = buffer_config.buffer.len() - buffer_config.read_index; + let remaining_read_len = buf.len() - bytes_read; + let read_len = remaining_read_len.min(in_len); + + // mark start and stop pointers based on read_len + let in_start = buffer_config.read_index; + let in_stop = buffer_config.read_index + read_len; + let out_start = bytes_read; + let out_stop = bytes_read + read_len; + + // copy slices + let incoming_slice = &buffer_config.buffer[in_start..in_stop]; + let outgoing_slice = &mut buf[out_start..out_stop]; + outgoing_slice.copy_from_slice(incoming_slice); + + // save off last actual read index in case of longer transfer than expected + buffer_config.read_index = (buffer_config.read_index + read_len) % buffer_config.buffer.len(); + + // finally increment total bytes read + bytes_read += read_len; + // if we have more bytes to read, do the second part + // only need to copy second part if there's data remaining at the beginning of the buffer and we + // still have more bytes requested from read() awaiter + if bytes_read < buf.len() && buffer_config.write_index > 0 { + // do the second part (beginning of buffer up to write index) + // buffer_config.read_index should be 0 now + // calculate the read_len + let in_len = buffer_config.write_index - buffer_config.read_index; + let remaining_read_len = buf.len() - bytes_read; + let read_len = remaining_read_len.min(in_len); + + // mark 
start and stop pointers based on read_len + let in_start = buffer_config.read_index; + let in_stop = buffer_config.read_index + read_len; + let out_start = bytes_read; + let out_stop = bytes_read + read_len; + + // copy slices + let incoming_slice = &buffer_config.buffer[in_start..in_stop]; + let outgoing_slice = &mut buf[out_start..out_stop]; + outgoing_slice.copy_from_slice(incoming_slice); + + // save off last actual read index in case of longer transfer than expected + buffer_config.read_index = (buffer_config.read_index + read_len) % buffer_config.buffer.len(); + + // finally increment total bytes read + bytes_read += read_len; + } + } + } else { + // sleep until next polling epoch, or if we detect a UART transfer error event + let res = select( + // use embassy_time to enable polling the bus for more data + embassy_time::Timer::after_micros(buffer_config.polling_rate), + // detect bus errors + poll_fn(|cx| { + UART_WAKERS[self.info.index].register(cx.waker()); + + self.info.regs.intenset().write(|w| { + w.framerren() + .set_bit() + .parityerren() + .set_bit() + .rxnoiseen() + .set_bit() + .aberren() + .set_bit() + }); + + let stat = self.info.regs.stat().read(); + + self.info.regs.stat().write(|w| { + w.framerrint() + .clear_bit_by_one() + .parityerrint() + .clear_bit_by_one() + .rxnoiseint() + .clear_bit_by_one() + .aberr() + .clear_bit_by_one() + }); + + if stat.framerrint().bit_is_set() { + Poll::Ready(Err(Error::Framing)) + } else if stat.parityerrint().bit_is_set() { + Poll::Ready(Err(Error::Parity)) + } else if stat.rxnoiseint().bit_is_set() { + Poll::Ready(Err(Error::Noise)) + } else if stat.aberr().bit_is_set() { + Poll::Ready(Err(Error::Fail)) + } else { + Poll::Pending + } + }), + ) + .await; + + match res { + Either::First(()) | Either::Second(Ok(())) => (), + Either::Second(e) => return e, + } + } + } + Ok(()) + } } impl<'a> Uart<'a, Async> { @@ -789,7 +1031,66 @@ impl<'a> Uart<'a, Async> { Ok(Self { info: T::info(), tx: 
UartTx::new_inner::(Some(tx_dma)), - rx: UartRx::new_inner::(Some(rx_dma)), + rx: UartRx::new_inner::(Some(rx_dma), None), + }) + } + /// Create a new DMA enabled UART with Rx buffering enabled + #[cfg(feature = "time")] + pub fn new_async_with_buffer( + _inner: impl Peripheral

+ 'a, + tx: impl Peripheral

> + 'a, + rx: impl Peripheral

> + 'a, + _irq: impl interrupt::typelevel::Binding> + 'a, + tx_dma: impl Peripheral

> + 'a, + rx_dma: impl Peripheral

> + 'a, + config: Config, + buffer: &'static mut [u8], + polling_rate_us: u64, + ) -> Result { + assert!(buffer.len() <= 1024); + into_ref!(_inner); + into_ref!(tx); + into_ref!(rx); + + tx.as_tx(); + rx.as_rx(); + + let mut tx = tx.map_into(); + let mut rx = rx.map_into(); + + let tx_dma = dma::Dma::reserve_channel(tx_dma); + let rx_dma = dma::Dma::reserve_channel(rx_dma); + + Self::init::(Some(tx.reborrow()), Some(rx.reborrow()), None, None, config)?; + T::info().regs.fifocfg().modify(|_, w| w.dmarx().enabled()); + // immediately configure and enable channel for circular buffered reception + rx_dma.configure_channel( + dma::transfer::Direction::PeripheralToMemory, + T::info().regs.fiford().as_ptr() as *const u8 as *const u32, + buffer as *mut [u8] as *mut u32, + buffer.len(), + dma::transfer::TransferOptions { + width: dma::transfer::Width::Bit8, + priority: dma::transfer::Priority::Priority0, + is_continuous: true, + is_sw_trig: true, + }, + ); + rx_dma.enable_channel(); + rx_dma.trigger_channel(); + + Ok(Self { + info: T::info(), + tx: UartTx::new_inner::(Some(tx_dma)), + rx: UartRx::new_inner::( + Some(rx_dma), + Some(BufferConfig { + buffer, + write_index: 0, + read_index: 0, + polling_rate: polling_rate_us, + }), + ), }) } @@ -835,7 +1136,7 @@ impl<'a> Uart<'a, Async> { Ok(Self { info: T::info(), tx: UartTx::new_inner::(Some(tx_dma)), - rx: UartRx::new_inner::(Some(rx_dma)), + rx: UartRx::new_inner::(Some(rx_dma), None), }) }