From 3c3b43fb00355a5db64a34416dc2f19042a3fc5a Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: GPDAM linked-list + ringbuffer support --- embassy-stm32/src/dma/gpdma.rs | 339 ----------------- embassy-stm32/src/dma/gpdma/linked_list.rs | 236 ++++++++++++ embassy-stm32/src/dma/gpdma/mod.rs | 572 +++++++++++++++++++++++++++++ embassy-stm32/src/dma/gpdma/ringbuffer.rs | 283 ++++++++++++++ embassy-stm32/src/dma/ringbuffer/mod.rs | 2 - embassy-stm32/src/sai/mod.rs | 9 - 6 files changed, 1091 insertions(+), 350 deletions(-) delete mode 100644 embassy-stm32/src/dma/gpdma.rs create mode 100644 embassy-stm32/src/dma/gpdma/linked_list.rs create mode 100644 embassy-stm32/src/dma/gpdma/mod.rs create mode 100644 embassy-stm32/src/dma/gpdma/ringbuffer.rs diff --git a/embassy-stm32/src/dma/gpdma.rs b/embassy-stm32/src/dma/gpdma.rs deleted file mode 100644 index 151e4ab9f..000000000 --- a/embassy-stm32/src/dma/gpdma.rs +++ /dev/null @@ -1,339 +0,0 @@ -#![macro_use] - -use core::future::Future; -use core::pin::Pin; -use core::sync::atomic::{fence, Ordering}; -use core::task::{Context, Poll}; - -use embassy_hal_internal::Peri; -use embassy_sync::waitqueue::AtomicWaker; - -use super::word::{Word, WordSize}; -use super::{AnyChannel, Channel, Dir, Request, STATE}; -use crate::interrupt::typelevel::Interrupt; -use crate::interrupt::Priority; -use crate::pac; -use crate::pac::gpdma::vals; - -pub(crate) struct ChannelInfo { - pub(crate) dma: pac::gpdma::Gpdma, - pub(crate) num: usize, - #[cfg(feature = "_dual-core")] - pub(crate) irq: pac::Interrupt, -} - -/// GPDMA transfer options. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -#[non_exhaustive] -pub struct TransferOptions {} - -impl Default for TransferOptions { - fn default() -> Self { - Self {} - } -} - -impl From for vals::Dw { - fn from(raw: WordSize) -> Self { - match raw { - WordSize::OneByte => Self::BYTE, - WordSize::TwoBytes => Self::HALF_WORD, - WordSize::FourBytes => Self::WORD, - } - } -} - -pub(crate) struct ChannelState { - waker: AtomicWaker, -} - -impl ChannelState { - pub(crate) const NEW: Self = Self { - waker: AtomicWaker::new(), - }; -} - -/// safety: must be called only once -pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) { - foreach_interrupt! { - ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => { - crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority); - #[cfg(not(feature = "_dual-core"))] - crate::interrupt::typelevel::$irq::enable(); - }; - } - crate::_generated::init_gpdma(); -} - -impl AnyChannel { - /// Safety: Must be called with a matching set of parameters for a valid dma channel - pub(crate) unsafe fn on_irq(&self) { - let info = self.info(); - #[cfg(feature = "_dual-core")] - { - use embassy_hal_internal::interrupt::InterruptExt as _; - info.irq.enable(); - } - - let state = &STATE[self.id as usize]; - - let ch = info.dma.ch(info.num); - let sr = ch.sr().read(); - - if sr.dtef() { - panic!( - "DMA: data transfer error on DMA@{:08x} channel {}", - info.dma.as_ptr() as u32, - info.num - ); - } - if sr.usef() { - panic!( - "DMA: user settings error on DMA@{:08x} channel {}", - info.dma.as_ptr() as u32, - info.num - ); - } - - if sr.suspf() || sr.tcf() { - // disable all xxIEs to prevent the irq from firing again. - ch.cr().write(|_| {}); - - // Wake the future. It'll look at tcf and see it's set. - state.waker.wake(); - } - } -} - -/// DMA transfer. 
-#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Transfer<'a> { - channel: Peri<'a, AnyChannel>, -} - -impl<'a> Transfer<'a> { - /// Create a new read DMA transfer (peripheral to memory). - pub unsafe fn new_read( - channel: Peri<'a, impl Channel>, - request: Request, - peri_addr: *mut W, - buf: &'a mut [W], - options: TransferOptions, - ) -> Self { - Self::new_read_raw(channel, request, peri_addr, buf, options) - } - - /// Create a new read DMA transfer (peripheral to memory), using raw pointers. - pub unsafe fn new_read_raw( - channel: Peri<'a, impl Channel>, - request: Request, - peri_addr: *mut PW, - buf: *mut [MW], - options: TransferOptions, - ) -> Self { - Self::new_inner( - channel.into(), - request, - Dir::PeripheralToMemory, - peri_addr as *const u32, - buf as *mut MW as *mut u32, - buf.len(), - true, - PW::size(), - MW::size(), - options, - ) - } - - /// Create a new write DMA transfer (memory to peripheral). - pub unsafe fn new_write( - channel: Peri<'a, impl Channel>, - request: Request, - buf: &'a [MW], - peri_addr: *mut PW, - options: TransferOptions, - ) -> Self { - Self::new_write_raw(channel, request, buf, peri_addr, options) - } - - /// Create a new write DMA transfer (memory to peripheral), using raw pointers. - pub unsafe fn new_write_raw( - channel: Peri<'a, impl Channel>, - request: Request, - buf: *const [MW], - peri_addr: *mut PW, - options: TransferOptions, - ) -> Self { - Self::new_inner( - channel.into(), - request, - Dir::MemoryToPeripheral, - peri_addr as *const u32, - buf as *const MW as *mut u32, - buf.len(), - true, - MW::size(), - PW::size(), - options, - ) - } - - /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly. 
- pub unsafe fn new_write_repeated( - channel: Peri<'a, impl Channel>, - request: Request, - repeated: &'a MW, - count: usize, - peri_addr: *mut PW, - options: TransferOptions, - ) -> Self { - Self::new_inner( - channel.into(), - request, - Dir::MemoryToPeripheral, - peri_addr as *const u32, - repeated as *const MW as *mut u32, - count, - false, - MW::size(), - PW::size(), - options, - ) - } - - unsafe fn new_inner( - channel: Peri<'a, AnyChannel>, - request: Request, - dir: Dir, - peri_addr: *const u32, - mem_addr: *mut u32, - mem_len: usize, - incr_mem: bool, - data_size: WordSize, - dst_size: WordSize, - _options: TransferOptions, - ) -> Self { - // BNDT is specified as bytes, not as number of transfers. - let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { - panic!("DMA transfers may not be larger than 65535 bytes."); - }; - - let info = channel.info(); - let ch = info.dma.ch(info.num); - - // "Preceding reads and writes cannot be moved past subsequent writes." - fence(Ordering::SeqCst); - - let this = Self { channel }; - - ch.cr().write(|w| w.set_reset(true)); - ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs - ch.llr().write(|_| {}); // no linked list - ch.tr1().write(|w| { - w.set_sdw(data_size.into()); - w.set_ddw(dst_size.into()); - w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); - w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); - }); - ch.tr2().write(|w| { - w.set_dreq(match dir { - Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL, - Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL, - }); - w.set_reqsel(request); - }); - ch.tr3().write(|_| {}); // no address offsets. 
- ch.br1().write(|w| w.set_bndt(bndt)); - - match dir { - Dir::MemoryToPeripheral => { - ch.sar().write_value(mem_addr as _); - ch.dar().write_value(peri_addr as _); - } - Dir::PeripheralToMemory => { - ch.sar().write_value(peri_addr as _); - ch.dar().write_value(mem_addr as _); - } - } - - ch.cr().write(|w| { - // Enable interrupts - w.set_tcie(true); - w.set_useie(true); - w.set_dteie(true); - w.set_suspie(true); - - // Start it - w.set_en(true); - }); - - this - } - - /// Request the transfer to stop. - /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - let info = self.channel.info(); - let ch = info.dma.ch(info.num); - - ch.cr().modify(|w| w.set_susp(true)) - } - - /// Return whether this transfer is still running. - /// - /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). - pub fn is_running(&mut self) -> bool { - let info = self.channel.info(); - let ch = info.dma.ch(info.num); - - let sr = ch.sr().read(); - !sr.tcf() && !sr.suspf() - } - - /// Gets the total remaining transfers for the channel - /// Note: this will be zero for transfers that completed without cancellation. - pub fn get_remaining_transfers(&self) -> u16 { - let info = self.channel.info(); - let ch = info.dma.ch(info.num); - - ch.br1().read().bndt() - } - - /// Blocking wait until the transfer finishes. - pub fn blocking_wait(mut self) { - while self.is_running() {} - - // "Subsequent reads and writes cannot be moved ahead of preceding reads." - fence(Ordering::SeqCst); - - core::mem::forget(self); - } -} - -impl<'a> Drop for Transfer<'a> { - fn drop(&mut self) { - self.request_stop(); - while self.is_running() {} - - // "Subsequent reads and writes cannot be moved ahead of preceding reads." 
- fence(Ordering::SeqCst); - } -} - -impl<'a> Unpin for Transfer<'a> {} -impl<'a> Future for Transfer<'a> { - type Output = (); - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let state = &STATE[self.channel.id as usize]; - state.waker.register(cx.waker()); - - if self.is_running() { - Poll::Pending - } else { - Poll::Ready(()) - } - } -} diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs new file mode 100644 index 000000000..b24b2e7eb --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -0,0 +1,236 @@ +//! Implementation of the GPDMA linked list and linked list items. +#![macro_use] + +use stm32_metapac::gpdma::{regs, vals::Dreq}; + +use super::TransferOptions; +use crate::dma::{ + word::{Word, WordSize}, + Dir, Request, +}; +use core::{ + ptr, + sync::atomic::{AtomicUsize, Ordering}, +}; + +/// The mode in which to run the linked list. +#[derive(Debug)] +pub enum RunMode { + /// List items are not linked together. + Unlinked, + /// The list is linked sequentially and only run once. + Once, + /// The list is linked sequentially, and the end of the list is linked to the beginning. + Repeat, +} + +/// A linked-list item for linear GPDMA transfers. +/// +/// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities. +#[derive(Debug, Copy, Clone, Default)] +#[repr(C)] +pub struct LinearItem { + /// Transfer register 1. + pub tr1: u32, + /// Transfer register 2. + pub tr2: u32, + /// Block register 2. + pub br1: u32, + /// Source address register. + pub sar: u32, + /// Destination address register. + pub dar: u32, + /// Linked-list address register. + pub llr: u32, +} + +impl LinearItem { + /// Create a new read DMA transfer (peripheral to memory). 
+ pub unsafe fn new_read<'d, W: Word>( + request: Request, + peri_addr: *mut W, + buf: &'d mut [W], + options: TransferOptions, + ) -> Self { + Self::new_inner( + request, + Dir::PeripheralToMemory, + peri_addr as *const u32, + buf as *mut [W] as *mut W as *mut u32, + buf.len(), + true, + W::size(), + W::size(), + options, + ) + } + + /// Create a new write DMA transfer (memory to peripheral). + pub unsafe fn new_write<'d, MW: Word, PW: Word>( + request: Request, + buf: &'d [MW], + peri_addr: *mut PW, + options: TransferOptions, + ) -> Self { + Self::new_inner( + request, + Dir::MemoryToPeripheral, + peri_addr as *const u32, + buf as *const [MW] as *const MW as *mut u32, + buf.len(), + true, + MW::size(), + PW::size(), + options, + ) + } + + unsafe fn new_inner( + request: Request, + dir: Dir, + peri_addr: *const u32, + mem_addr: *mut u32, + mem_len: usize, + incr_mem: bool, + data_size: WordSize, + dst_size: WordSize, + _options: TransferOptions, + ) -> Self { + // BNDT is specified as bytes, not as number of transfers. + let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { + panic!("DMA transfers may not be larger than 65535 bytes."); + }; + + let mut br1 = regs::ChBr1(0); + br1.set_bndt(bndt); + + let mut tr1 = regs::ChTr1(0); + tr1.set_sdw(data_size.into()); + tr1.set_ddw(dst_size.into()); + tr1.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); + tr1.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); + + let mut tr2 = regs::ChTr2(0); + tr2.set_dreq(match dir { + Dir::MemoryToPeripheral => Dreq::DESTINATION_PERIPHERAL, + Dir::PeripheralToMemory => Dreq::SOURCE_PERIPHERAL, + }); + tr2.set_reqsel(request); + + let (sar, dar) = match dir { + Dir::MemoryToPeripheral => (mem_addr as _, peri_addr as _), + Dir::PeripheralToMemory => (peri_addr as _, mem_addr as _), + }; + + let llr = regs::ChLlr(0); + + Self { + tr1: tr1.0, + tr2: tr2.0, + br1: br1.0, + sar, + dar, + llr: llr.0, + } + } + + /// Link to the next linear item at the given address. 
+ /// + /// Enables channel update bits. + fn link_to(&mut self, next: u16) { + let mut llr = regs::ChLlr(0); + + llr.set_ut1(true); + llr.set_ut2(true); + llr.set_ub1(true); + llr.set_usa(true); + llr.set_uda(true); + llr.set_ull(true); + llr.set_la(next); + + self.llr = llr.0; + } + + /// Unlink the next linear item. + /// + /// Disables channel update bits. + fn unlink(&mut self) { + self.llr = regs::ChLlr(0).0; + } +} + +pub struct Table { + current_index: AtomicUsize, + items: [LinearItem; ITEM_COUNT], +} + +impl Table { + /// Create a new table. + pub fn new(items: [LinearItem; ITEM_COUNT], run_mode: RunMode) -> Self { + assert!(!items.is_empty()); + + let mut this = Self { + current_index: AtomicUsize::new(0), + items, + }; + + if matches!(run_mode, RunMode::Once | RunMode::Repeat) { + this.link_sequential(); + } + + if matches!(run_mode, RunMode::Repeat) { + this.link_repeat(); + } + + this + } + + pub fn len(&self) -> usize { + self.items.len() + } + + /// Items are linked together sequentially. + pub fn link_sequential(&mut self) { + if self.items.len() > 1 { + for index in 0..(self.items.len() - 1) { + let next = ptr::addr_of!(self.items[index + 1]) as u16; + self.items[index].link_to(next); + } + } + } + + /// Last item links to first item. + pub fn link_repeat(&mut self) { + let first_item = ptr::addr_of!(self.items[0]); + let first_address = first_item as u16; + self.items.last_mut().unwrap().link_to(first_address); + } + + /// The index of the next item. + pub fn next_index(&self) -> usize { + let mut next_index = self.current_index.load(Ordering::Relaxed) + 1; + if next_index >= self.len() { + next_index = 0; + } + + next_index + } + + /// Unlink the next item. + pub fn unlink_next(&mut self) { + let next_index = self.next_index(); + self.items[next_index].unlink(); + } + + /// Linked list base address (upper 16 address bits).
+ pub fn base_address(&self) -> u16 { + ((ptr::addr_of!(self.items) as u32) >> 16) as _ + } + + /// Linked list offset address (lower 16 address bits) at the selected index. + pub fn offset_address(&self, index: usize) -> u16 { + assert!(self.items.len() > index); + + (ptr::addr_of!(self.items[index]) as u32) as _ + } +} diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs new file mode 100644 index 000000000..07acd2cf0 --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -0,0 +1,572 @@ +#![macro_use] + +use core::future::Future; +use core::pin::Pin; +use core::sync::atomic::{fence, AtomicUsize, Ordering}; +use core::task::{Context, Poll}; + +use embassy_hal_internal::Peri; +use embassy_sync::waitqueue::AtomicWaker; +use linked_list::Table; +use stm32_metapac::gpdma::regs; + +use super::word::{Word, WordSize}; +use super::{AnyChannel, Channel, Dir, Request, STATE}; +use crate::interrupt::typelevel::Interrupt; +use crate::pac; +use crate::pac::gpdma::vals; + +mod linked_list; +mod ringbuffer; + +pub(crate) struct ChannelInfo { + pub(crate) dma: pac::gpdma::Gpdma, + pub(crate) num: usize, + #[cfg(feature = "_dual-core")] + pub(crate) irq: pac::Interrupt, +} + +/// DMA request priority +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Priority { + /// Low Priority + Low, + /// Medium Priority + Medium, + /// High Priority + High, + /// Very High Priority + VeryHigh, +} + +impl From for pac::gpdma::vals::Prio { + fn from(value: Priority) -> Self { + match value { + Priority::Low => pac::gpdma::vals::Prio::LOW_WITH_LOWH_WEIGHT, + Priority::Medium => pac::gpdma::vals::Prio::LOW_WITH_MID_WEIGHT, + Priority::High => pac::gpdma::vals::Prio::LOW_WITH_HIGH_WEIGHT, + Priority::VeryHigh => pac::gpdma::vals::Prio::HIGH, + } + } +} + +/// GPDMA transfer options. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +#[non_exhaustive] +pub struct TransferOptions { + priority: Priority, + half_transfer_ir: bool, + complete_transfer_ir: bool, +} + +impl Default for TransferOptions { + fn default() -> Self { + Self { + priority: Priority::VeryHigh, + half_transfer_ir: false, + complete_transfer_ir: true, + } + } +} + +impl From for vals::Dw { + fn from(raw: WordSize) -> Self { + match raw { + WordSize::OneByte => Self::BYTE, + WordSize::TwoBytes => Self::HALF_WORD, + WordSize::FourBytes => Self::WORD, + } + } +} + +pub(crate) struct ChannelState { + waker: AtomicWaker, + complete_count: AtomicUsize, +} + +impl ChannelState { + pub(crate) const NEW: Self = Self { + waker: AtomicWaker::new(), + complete_count: AtomicUsize::new(0), + }; +} + +/// safety: must be called only once +pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) { + foreach_interrupt! { + ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => { + crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority); + #[cfg(not(feature = "_dual-core"))] + crate::interrupt::typelevel::$irq::enable(); + }; + } + crate::_generated::init_gpdma(); +} + +impl AnyChannel { + /// Safety: Must be called with a matching set of parameters for a valid dma channel + pub(crate) unsafe fn on_irq(&self) { + let info = self.info(); + #[cfg(feature = "_dual-core")] + { + use embassy_hal_internal::interrupt::InterruptExt as _; + info.irq.enable(); + } + + let state = &STATE[self.id as usize]; + + let ch = info.dma.ch(info.num); + let sr = ch.sr().read(); + + if sr.dtef() { + panic!( + "DMA: data transfer error on DMA@{:08x} channel {}", + info.dma.as_ptr() as u32, + info.num + ); + } + if sr.usef() { + panic!( + "DMA: user settings error on DMA@{:08x} channel {}", + info.dma.as_ptr() as u32, + info.num + ); + } + if sr.ulef() { + panic!( + "DMA: link transfer error on DMA@{:08x} 
channel {}", + info.dma.as_ptr() as u32, + info.num + ); + } + + if sr.tcf() { + state.complete_count.fetch_add(1, Ordering::Release); + } + + if sr.suspf() || sr.tcf() { + // disable all xxIEs to prevent the irq from firing again. + ch.cr().write(|_| {}); + + // Wake the future. It'll look at tcf and see it's set. + state.waker.wake(); + } + } + + fn get_remaining_transfers(&self) -> u16 { + let info = self.info(); + let ch = info.dma.ch(info.num); + + ch.br1().read().bndt() + } + + unsafe fn configure( + &self, + request: Request, + dir: Dir, + peri_addr: *const u32, + mem_addr: *mut u32, + mem_len: usize, + incr_mem: bool, + data_size: WordSize, + dst_size: WordSize, + options: TransferOptions, + ) { + // BNDT is specified as bytes, not as number of transfers. + let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { + panic!("DMA transfers may not be larger than 65535 bytes."); + }; + + let info = self.info(); + let ch = info.dma.ch(info.num); + + // "Preceding reads and writes cannot be moved past subsequent writes." + fence(Ordering::SeqCst); + + ch.cr().write(|w| w.set_reset(true)); + ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs + ch.llr().write(|_| {}); // no linked list + ch.tr1().write(|w| { + w.set_sdw(data_size.into()); + w.set_ddw(dst_size.into()); + w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); + w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); + }); + ch.tr2().write(|w| { + w.set_dreq(match dir { + Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL, + Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL, + }); + w.set_reqsel(request); + }); + ch.tr3().write(|_| {}); // no address offsets. 
+ ch.br1().write(|w| w.set_bndt(bndt)); + + match dir { + Dir::MemoryToPeripheral => { + ch.sar().write_value(mem_addr as _); + ch.dar().write_value(peri_addr as _); + } + Dir::PeripheralToMemory => { + ch.sar().write_value(peri_addr as _); + ch.dar().write_value(mem_addr as _); + } + } + + ch.cr().write(|w| { + w.set_prio(options.priority.into()); + w.set_htie(options.half_transfer_ir); + w.set_tcie(options.complete_transfer_ir); + w.set_useie(true); + w.set_dteie(true); + w.set_suspie(true); + }); + } + + unsafe fn configure_linked_list( + &self, + table: &Table, + options: TransferOptions, + ) { + let info = self.info(); + let ch = info.dma.ch(info.num); + + // "Preceding reads and writes cannot be moved past subsequent writes." + fence(Ordering::SeqCst); + + ch.cr().write(|w| w.set_reset(true)); + ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs + + ch.lbar().write(|reg| reg.set_lba(table.base_address())); + + // Enable all linked-list field updates. + let mut llr = regs::ChLlr(0); + llr.set_ut1(true); + llr.set_ut2(true); + llr.set_ub1(true); + llr.set_usa(true); + llr.set_uda(true); + llr.set_ull(true); + + llr.set_la(table.offset_address(0)); + + ch.llr().write_value(llr); + + ch.tr3().write(|_| {}); // no address offsets.
+ + ch.cr().write(|w| { + w.set_prio(options.priority.into()); + w.set_htie(options.half_transfer_ir); + w.set_tcie(options.complete_transfer_ir); + w.set_useie(true); + w.set_uleie(true); + w.set_dteie(true); + w.set_suspie(true); + }); + } + + fn start(&self) { + let info = self.info(); + let ch = info.dma.ch(info.num); + + ch.cr().modify(|w| w.set_en(true)); + } + + fn request_stop(&self) { + let info = self.info(); + let ch = info.dma.ch(info.num); + + ch.cr().modify(|w| w.set_susp(true)) + } + + fn is_running(&self) -> bool { + let info = self.info(); + let ch = info.dma.ch(info.num); + + let sr = ch.sr().read(); + !sr.tcf() && !sr.suspf() + } + + fn poll_stop(&self) -> Poll<()> { + use core::sync::atomic::compiler_fence; + compiler_fence(Ordering::SeqCst); + + if !self.is_running() { + Poll::Ready(()) + } else { + Poll::Pending + } + } +} + +/// Linked-list DMA transfer. +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> { + channel: PeripheralRef<'a, AnyChannel>, + table: Table, +} + +impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { + /// Create a new linked-list transfer. + pub unsafe fn new_linked_list( + channel: impl Peripheral

+ 'a, + table: Table, + options: TransferOptions, + ) -> Self { + into_ref!(channel); + + Self::new_inner_linked_list(channel.map_into(), table, options) + } + + unsafe fn new_inner_linked_list( + channel: PeripheralRef<'a, AnyChannel>, + table: Table, + options: TransferOptions, + ) -> Self { + channel.configure_linked_list(&table, options); + channel.start(); + + Self { channel, table } + } + + /// Request the transfer to stop. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Return whether this transfer is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). + pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } + + /// Gets the total remaining transfers for the channel + /// Note: this will be zero for transfers that completed without cancellation. + pub fn get_remaining_transfers(&self) -> u16 { + self.channel.get_remaining_transfers() + } + + /// Blocking wait until the transfer finishes. + pub fn blocking_wait(mut self) { + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + + core::mem::forget(self); + } +} + +impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." 
+ fence(Ordering::SeqCst); + } +} + +impl<'a, const ITEM_COUNT: usize> Unpin for LinkedListTransfer<'a, ITEM_COUNT> {} +impl<'a, const ITEM_COUNT: usize> Future for LinkedListTransfer<'a, ITEM_COUNT> { + type Output = (); + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let state = &STATE[self.channel.id as usize]; + state.waker.register(cx.waker()); + + if self.is_running() { + Poll::Pending + } else { + Poll::Ready(()) + } + } +} + +/// DMA transfer. +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct Transfer<'a> { + channel: Peri<'a, AnyChannel>, +} + +impl<'a> Transfer<'a> { + /// Create a new read DMA transfer (peripheral to memory). + pub unsafe fn new_read( + channel: Peri<'a, impl Channel>, + request: Request, + peri_addr: *mut W, + buf: &'a mut [W], + options: TransferOptions, + ) -> Self { + Self::new_read_raw(channel, request, peri_addr, buf, options) + } + + /// Create a new read DMA transfer (peripheral to memory), using raw pointers. + pub unsafe fn new_read_raw( + channel: Peri<'a, impl Channel>, + request: Request, + peri_addr: *mut PW, + buf: *mut [MW], + options: TransferOptions, + ) -> Self { + Self::new_inner( + channel.into(), + request, + Dir::PeripheralToMemory, + peri_addr as *const u32, + buf as *mut MW as *mut u32, + buf.len(), + true, + PW::size(), + MW::size(), + options, + ) + } + + /// Create a new write DMA transfer (memory to peripheral). + pub unsafe fn new_write( + channel: Peri<'a, impl Channel>, + request: Request, + buf: &'a [MW], + peri_addr: *mut PW, + options: TransferOptions, + ) -> Self { + Self::new_write_raw(channel, request, buf, peri_addr, options) + } + + /// Create a new write DMA transfer (memory to peripheral), using raw pointers. 
+ pub unsafe fn new_write_raw( + channel: Peri<'a, impl Channel>, + request: Request, + buf: *const [MW], + peri_addr: *mut PW, + options: TransferOptions, + ) -> Self { + Self::new_inner( + channel.into(), + request, + Dir::MemoryToPeripheral, + peri_addr as *const u32, + buf as *const MW as *mut u32, + buf.len(), + true, + MW::size(), + PW::size(), + options, + ) + } + + /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly. + pub unsafe fn new_write_repeated( + channel: Peri<'a, impl Channel>, + request: Request, + repeated: &'a MW, + count: usize, + peri_addr: *mut PW, + options: TransferOptions, + ) -> Self { + Self::new_inner( + channel.into(), + request, + Dir::MemoryToPeripheral, + peri_addr as *const u32, + repeated as *const MW as *mut u32, + count, + false, + MW::size(), + PW::size(), + options, + ) + } + + unsafe fn new_inner( + channel: Peri<'a, AnyChannel>, + request: Request, + dir: Dir, + peri_addr: *const u32, + mem_addr: *mut u32, + mem_len: usize, + incr_mem: bool, + data_size: WordSize, + peripheral_size: WordSize, + options: TransferOptions, + ) -> Self { + assert!(mem_len > 0 && mem_len <= 0xFFFF); + + channel.configure( + request, + dir, + peri_addr, + mem_addr, + mem_len, + incr_mem, + data_size, + peripheral_size, + options, + ); + channel.start(); + + Self { channel } + } + + /// Request the transfer to stop. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Return whether this transfer is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop).
+ pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } + + /// Gets the total remaining transfers for the channel + /// Note: this will be zero for transfers that completed without cancellation. + pub fn get_remaining_transfers(&self) -> u16 { + self.channel.get_remaining_transfers() + } + + /// Blocking wait until the transfer finishes. + pub fn blocking_wait(mut self) { + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + + core::mem::forget(self); + } +} + +impl<'a> Drop for Transfer<'a> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} + +impl<'a> Unpin for Transfer<'a> {} +impl<'a> Future for Transfer<'a> { + type Output = (); + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let state = &STATE[self.channel.id as usize]; + state.waker.register(cx.waker()); + + if self.is_running() { + Poll::Pending + } else { + Poll::Ready(()) + } + } +} diff --git a/embassy-stm32/src/dma/gpdma/ringbuffer.rs b/embassy-stm32/src/dma/gpdma/ringbuffer.rs new file mode 100644 index 000000000..c327e811e --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/ringbuffer.rs @@ -0,0 +1,283 @@ +//! GPDMA ring buffer implementation. +//! +//! FIXME: add request_pause functionality? 
+use core::{ + sync::atomic::{fence, Ordering}, + task::Waker, +}; + +use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; + +use crate::dma::{ + gpdma::linked_list::{LinearItem, RunMode, Table}, + ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}, + word::Word, + Channel, Dir, Request, +}; + +use super::{AnyChannel, TransferOptions, STATE}; + +struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>); + +impl<'a> DmaCtrl for DmaCtrlImpl<'a> { + fn get_remaining_transfers(&self) -> usize { + self.0.get_remaining_transfers() as _ + } + + fn reset_complete_count(&mut self) -> usize { + let state = &STATE[self.0.id as usize]; + + return state.complete_count.swap(0, Ordering::AcqRel); + } + + fn set_waker(&mut self, waker: &Waker) { + STATE[self.0.id as usize].waker.register(waker); + } +} + +/// Ringbuffer for receiving data using GPDMA linked-list mode. +pub struct ReadableRingBuffer<'a, W: Word> { + channel: PeripheralRef<'a, AnyChannel>, + ringbuf: ReadableDmaRingBuffer<'a, W>, + table: Table<2>, +} + +impl<'a, W: Word> ReadableRingBuffer<'a, W> { + /// Create a new ring buffer. + pub unsafe fn new( + channel: impl Peripheral

+ 'a, + request: Request, + peri_addr: *mut W, + buffer: &'a mut [W], + mut options: TransferOptions, + ) -> Self { + into_ref!(channel); + let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + + let half_len = buffer.len() / 2; + assert_eq!(half_len * 2, buffer.len()); + + options.half_transfer_ir = false; + options.complete_transfer_ir = true; + + let items = [ + LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options), + LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options), + ]; + + let table = Table::new(items, RunMode::Once); + + let this = Self { + channel, + ringbuf: ReadableDmaRingBuffer::new(buffer), + table, + }; + + this.channel.configure_linked_list(&this.table, options); + + this + } + + /// Start the ring buffer operation. + /// + /// You must call this after creating it for it to work. + pub fn start(&mut self) { + self.channel.start(); + } + + /// Clear all data in the ring buffer. + pub fn clear(&mut self) { + self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); + } + + /// Read elements from the ring buffer + /// Return a tuple of the length read and the length remaining in the buffer + /// If not all of the elements were read, then there will be some elements in the buffer remaining + /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read + /// Error is returned if the portion to be read was overwritten by the DMA controller. + pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> { + self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf) + } + + /// Read an exact number of elements from the ringbuffer. + /// + /// Returns the remaining number of elements available for immediate reading. + /// Error is returned if the portion to be read was overwritten by the DMA controller. 
+ /// + /// Async/Wake Behavior: + /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, + /// and when it wraps around. This means that when called with a buffer of length 'M', when this + /// ring buffer was created with a buffer of size 'N': + /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. + /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. + pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result { + self.ringbuf + .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await + } + + /// The current length of the ringbuffer + pub fn len(&mut self) -> Result { + Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) + } + + /// The capacity of the ringbuffer + pub const fn capacity(&self) -> usize { + self.ringbuf.cap() + } + + /// Set a waker to be woken when at least one byte is received. + pub fn set_waker(&mut self, waker: &Waker) { + DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); + } + + /// Request the DMA to stop. + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Return whether DMA is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). 
+ pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } +} + +impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} + +/// Ringbuffer for writing data using DMA circular mode. +pub struct WritableRingBuffer<'a, W: Word> { + channel: PeripheralRef<'a, AnyChannel>, + ringbuf: WritableDmaRingBuffer<'a, W>, +} + +impl<'a, W: Word> WritableRingBuffer<'a, W> { + /// Create a new ring buffer. + pub unsafe fn new( + channel: impl Peripheral

+ 'a, + _request: Request, + peri_addr: *mut W, + buffer: &'a mut [W], + mut options: TransferOptions, + ) -> Self { + into_ref!(channel); + let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + + let len = buffer.len(); + let dir = Dir::MemoryToPeripheral; + let data_size = W::size(); + let buffer_ptr = buffer.as_mut_ptr(); + + options.half_transfer_ir = true; + options.complete_transfer_ir = true; + + channel.configure( + _request, + dir, + peri_addr as *mut u32, + buffer_ptr as *mut u32, + len, + true, + data_size, + data_size, + options, + ); + + Self { + channel, + ringbuf: WritableDmaRingBuffer::new(buffer), + } + } + + /// Start the ring buffer operation. + /// + /// You must call this after creating it for it to work. + pub fn start(&mut self) { + self.channel.start(); + } + + /// Clear all data in the ring buffer. + pub fn clear(&mut self) { + self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); + } + + /// Write elements directly to the raw buffer. + /// This can be used to fill the buffer before starting the DMA transfer. + pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { + self.ringbuf.write_immediate(buf) + } + + /// Write elements from the ring buffer + /// Return a tuple of the length written and the length remaining in the buffer + pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { + self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf) + } + + /// Write an exact number of elements to the ringbuffer. + pub async fn write_exact(&mut self, buffer: &[W]) -> Result { + self.ringbuf + .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await + } + + /// Wait for any ring buffer write error. 
+ pub async fn wait_write_error(&mut self) -> Result { + self.ringbuf + .wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow())) + .await + } + + /// The current length of the ringbuffer + pub fn len(&mut self) -> Result { + Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) + } + + /// The capacity of the ringbuffer + pub const fn capacity(&self) -> usize { + self.ringbuf.cap() + } + + /// Set a waker to be woken when at least one byte is received. + pub fn set_waker(&mut self, waker: &Waker) { + DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); + } + + /// Request the DMA to stop. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Return whether DMA is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). + pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } +} + +impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs index 44ea497fe..e462c71d4 100644 --- a/embassy-stm32/src/dma/ringbuffer/mod.rs +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs @@ -1,5 +1,3 @@ -#![cfg_attr(gpdma, allow(unused))] - use core::future::poll_fn; use core::task::{Poll, Waker}; diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs index 4965f8b04..88cc225dd 100644 --- a/embassy-stm32/src/sai/mod.rs +++ b/embassy-stm32/src/sai/mod.rs @@ -1,13 +1,11 @@ //! 
Serial Audio Interface (SAI) #![macro_use] -#![cfg_attr(gpdma, allow(unused))] use core::marker::PhantomData; use embassy_hal_internal::PeripheralType; pub use crate::dma::word; -#[cfg(not(gpdma))] use crate::dma::{ringbuffer, Channel, ReadableRingBuffer, Request, TransferOptions, WritableRingBuffer}; use crate::gpio::{AfType, AnyPin, OutputType, Pull, SealedPin as _, Speed}; use crate::pac::sai::{vals, Sai as Regs}; @@ -26,7 +24,6 @@ pub enum Error { Overrun, } -#[cfg(not(gpdma))] impl From for Error { fn from(#[allow(unused)] err: ringbuffer::Error) -> Self { #[cfg(feature = "defmt")] @@ -652,7 +649,6 @@ impl Config { } } -#[cfg(not(gpdma))] enum RingBuffer<'d, W: word::Word> { Writable(WritableRingBuffer<'d, W>), Readable(ReadableRingBuffer<'d, W>), @@ -679,7 +675,6 @@ fn get_af_types(mode: Mode, tx_rx: TxRx) -> (AfType, AfType) { ) } -#[cfg(not(gpdma))] fn get_ring_buffer<'d, T: Instance, W: word::Word>( dma: Peri<'d, impl Channel>, dma_buf: &'d mut [W], @@ -750,14 +745,10 @@ pub struct Sai<'d, T: Instance, W: word::Word> { fs: Option>, sck: Option>, mclk: Option>, - #[cfg(gpdma)] - ring_buffer: PhantomData, - #[cfg(not(gpdma))] ring_buffer: RingBuffer<'d, W>, sub_block: WhichSubBlock, } -#[cfg(not(gpdma))] impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> { /// Create a new SAI driver in asynchronous mode with MCLK. 
/// -- cgit From cf5b1ea9f593d1d80b718b88330f041b59d071f1 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: gpdma support (wip) --- embassy-stm32/src/cryp/mod.rs | 3 - embassy-stm32/src/dma/gpdma/linked_list.rs | 77 ++--- embassy-stm32/src/dma/gpdma/mod.rs | 91 ++++-- embassy-stm32/src/dma/gpdma/ringbuffer.rs | 283 ------------------ embassy-stm32/src/dma/gpdma/ringbuffered.rs | 433 ++++++++++++++++++++++++++++ embassy-stm32/src/dma/mod.rs | 2 + embassy-stm32/src/dma/ringbuffer/mod.rs | 20 ++ embassy-stm32/src/spdifrx/mod.rs | 4 - embassy-stm32/src/usart/mod.rs | 2 - 9 files changed, 561 insertions(+), 354 deletions(-) delete mode 100644 embassy-stm32/src/dma/gpdma/ringbuffer.rs create mode 100644 embassy-stm32/src/dma/gpdma/ringbuffered.rs diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 35d9f8cce..0173b2b5d 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -1814,7 +1814,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> { // Configure DMA to transfer input to crypto core. let dst_ptr: *mut u32 = T::regs().din().as_ptr(); let options = TransferOptions { - #[cfg(not(gpdma))] priority: crate::dma::Priority::High, ..Default::default() }; @@ -1834,7 +1833,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> { // Configure DMA to transfer input to crypto core. let dst_ptr: *mut u32 = T::regs().din().as_ptr(); let options = TransferOptions { - #[cfg(not(gpdma))] priority: crate::dma::Priority::High, ..Default::default() }; @@ -1853,7 +1851,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> { // Configure DMA to get output from crypto core. 
let src_ptr = T::regs().dout().as_ptr(); let options = TransferOptions { - #[cfg(not(gpdma))] priority: crate::dma::Priority::VeryHigh, ..Default::default() }; diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index b24b2e7eb..7de9a1441 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -8,10 +8,6 @@ use crate::dma::{ word::{Word, WordSize}, Dir, Request, }; -use core::{ - ptr, - sync::atomic::{AtomicUsize, Ordering}, -}; /// The mode in which to run the linked list. #[derive(Debug)] @@ -28,6 +24,7 @@ pub enum RunMode { /// /// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities. #[derive(Debug, Copy, Clone, Default)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] #[repr(C)] pub struct LinearItem { /// Transfer register 1. @@ -146,7 +143,9 @@ impl LinearItem { llr.set_usa(true); llr.set_uda(true); llr.set_ull(true); - llr.set_la(next); + + // Lower two bits are ignored: 32 bit aligned. + llr.set_la(next >> 2); self.llr = llr.0; } @@ -159,78 +158,82 @@ impl LinearItem { } } +/// A table of linked list items. +#[repr(C)] pub struct Table { - current_index: AtomicUsize, - items: [LinearItem; ITEM_COUNT], + /// The items. + pub items: [LinearItem; ITEM_COUNT], } impl Table { /// Create a new table. - pub fn new(items: [LinearItem; ITEM_COUNT], run_mode: RunMode) -> Self { + pub fn new(items: [LinearItem; ITEM_COUNT]) -> Self { assert!(!items.is_empty()); - let mut this = Self { - current_index: AtomicUsize::new(0), - items, - }; + Self { items } + } + pub fn link(&mut self, run_mode: RunMode) { if matches!(run_mode, RunMode::Once | RunMode::Repeat) { - this.link_sequential(); + self.link_sequential(); } if matches!(run_mode, RunMode::Repeat) { - this.link_repeat(); + self.link_repeat(); } - - this } + /// The number of linked list items.s pub fn len(&self) -> usize { self.items.len() } - /// Items are linked together sequentially. 
+ /// Link items of given indices together: first -> second. + pub fn link_indices(&mut self, first: usize, second: usize) { + assert!(first < self.len()); + assert!(second < self.len()); + + let second_item = self.offset_address(second); + self.items[first].link_to(second_item); + } + + /// Link items sequentially. pub fn link_sequential(&mut self) { - if self.items.len() > 1 { + if self.len() > 1 { for index in 0..(self.items.len() - 1) { - let next = ptr::addr_of!(self.items[index + 1]) as u16; + let next = self.offset_address(index + 1); self.items[index].link_to(next); } } } - /// Last item links to first item. + /// Link last to first item. pub fn link_repeat(&mut self) { - let first_item = self.items.first().unwrap(); - let first_address = ptr::addr_of!(first_item) as u16; + let first_address = self.offset_address(0); self.items.last_mut().unwrap().link_to(first_address); } - /// The index of the next item. - pub fn next_index(&self) -> usize { - let mut next_index = self.current_index.load(Ordering::Relaxed) + 1; - if next_index >= self.len() { - next_index = 0; + /// Unlink all items. + pub fn unlink(&mut self) { + for item in self.items.iter_mut() { + item.unlink(); } - - next_index - } - - /// Unlink the next item. - pub fn unlink_next(&mut self) { - let next_index = self.next_index(); - self.items[next_index].unlink(); } /// Linked list base address (upper 16 address bits). pub fn base_address(&self) -> u16 { - ((ptr::addr_of!(self.items) as u32) >> 16) as _ + ((&raw const self.items as u32) >> 16) as _ } /// Linked list offset address (lower 16 address bits) at the selected index. pub fn offset_address(&self, index: usize) -> u16 { assert!(self.items.len() > index); - (ptr::addr_of!(self.items[index]) as u32) as _ + let address = &raw const self.items[index] as _; + + // Ensure 32 bit address alignment. 
+ assert_eq!(address & 0b11, 0); + + address } } diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 07acd2cf0..f65048d1f 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -8,7 +8,6 @@ use core::task::{Context, Poll}; use embassy_hal_internal::Peri; use embassy_sync::waitqueue::AtomicWaker; use linked_list::Table; -use stm32_metapac::gpdma::regs; use super::word::{Word, WordSize}; use super::{AnyChannel, Channel, Dir, Request, STATE}; @@ -16,8 +15,8 @@ use crate::interrupt::typelevel::Interrupt; use crate::pac; use crate::pac::gpdma::vals; -mod linked_list; -mod ringbuffer; +pub mod linked_list; +pub mod ringbuffered; pub(crate) struct ChannelInfo { pub(crate) dma: pac::gpdma::Gpdma, @@ -56,9 +55,12 @@ impl From for pac::gpdma::vals::Prio { #[cfg_attr(feature = "defmt", derive(defmt::Format))] #[non_exhaustive] pub struct TransferOptions { - priority: Priority, - half_transfer_ir: bool, - complete_transfer_ir: bool, + /// Request priority level. + pub priority: Priority, + /// Enable half transfer interrupt. + pub half_transfer_ir: bool, + /// Enable transfer complete interrupt. + pub complete_transfer_ir: bool, } impl Default for TransferOptions { @@ -81,6 +83,17 @@ impl From for vals::Dw { } } +impl From for WordSize { + fn from(raw: vals::Dw) -> Self { + match raw { + vals::Dw::BYTE => Self::OneByte, + vals::Dw::HALF_WORD => Self::TwoBytes, + vals::Dw::WORD => Self::FourBytes, + _ => panic!("Invalid word size"), + } + } +} + pub(crate) struct ChannelState { waker: AtomicWaker, complete_count: AtomicUsize, @@ -94,7 +107,7 @@ impl ChannelState { } /// safety: must be called only once -pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) { +pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: crate::interrupt::Priority) { foreach_interrupt! 
{ ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => { crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority); @@ -142,24 +155,30 @@ impl AnyChannel { ); } + if sr.htf() { + ch.fcr().write(|w| w.set_htf(true)); + } + if sr.tcf() { + ch.fcr().write(|w| w.set_tcf(true)); state.complete_count.fetch_add(1, Ordering::Release); } - if sr.suspf() || sr.tcf() { + if sr.suspf() { // disable all xxIEs to prevent the irq from firing again. ch.cr().write(|_| {}); // Wake the future. It'll look at tcf and see it's set. - state.waker.wake(); } + state.waker.wake(); } fn get_remaining_transfers(&self) -> u16 { let info = self.info(); let ch = info.dma.ch(info.num); + let word_size: WordSize = ch.tr1().read().ddw().into(); - ch.br1().read().bndt() + ch.br1().read().bndt() / word_size.bytes() as u16 } unsafe fn configure( @@ -238,21 +257,23 @@ impl AnyChannel { ch.cr().write(|w| w.set_reset(true)); ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs - ch.lbar().write(|reg| reg.set_lba(table.base_address())); - // Enable all linked-list field updates. - let mut llr = regs::ChLlr(0); - llr.set_ut1(true); - llr.set_ut2(true); - llr.set_ub1(true); - llr.set_usa(true); - llr.set_uda(true); - llr.set_ull(true); - - llr.set_la(table.offset_address(0)); + // Empty LLI0. + ch.br1().write(|w| w.set_bndt(0)); - ch.llr().write(|_| llr.0); + // Enable all linked-list field updates. + ch.llr().write(|w| { + w.set_ut1(true); + w.set_ut2(true); + w.set_ub1(true); + w.set_usa(true); + w.set_uda(true); + w.set_ull(true); + + // Lower two bits are ignored: 32 bit aligned. + w.set_la(table.offset_address(0) >> 2); + }); ch.tr3().write(|_| {}); // no address offsets. 
@@ -281,12 +302,23 @@ impl AnyChannel { ch.cr().modify(|w| w.set_susp(true)) } + fn request_pause(&self) { + let info = self.info(); + let ch = info.dma.ch(info.num); + + // Disable the channel without overwriting the existing configuration + ch.cr().modify(|w| { + w.set_en(false); + }); + } + fn is_running(&self) -> bool { let info = self.info(); let ch = info.dma.ch(info.num); let sr = ch.sr().read(); - !sr.tcf() && !sr.suspf() + + !sr.tcf() && !sr.suspf() && !sr.idlef() } fn poll_stop(&self) -> Poll<()> { @@ -305,7 +337,6 @@ impl AnyChannel { #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> { channel: PeripheralRef<'a, AnyChannel>, - table: Table, } impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { @@ -328,7 +359,7 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { channel.configure_linked_list(&table, options); channel.start(); - Self { channel, table } + Self { channel } } /// Request the transfer to stop. @@ -515,12 +546,22 @@ impl<'a> Transfer<'a> { } /// Request the transfer to stop. + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. pub fn request_stop(&mut self) { self.channel.request_stop() } + /// Request the transfer to pause, keeping the existing configuration for this channel. + /// To restart the transfer, call [`start`](Self::start) again. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_pause(&mut self) { + self.channel.request_pause() + } + /// Return whether this transfer is still running. 
/// /// If this returns `false`, it can be because either the transfer finished, or diff --git a/embassy-stm32/src/dma/gpdma/ringbuffer.rs b/embassy-stm32/src/dma/gpdma/ringbuffer.rs deleted file mode 100644 index c327e811e..000000000 --- a/embassy-stm32/src/dma/gpdma/ringbuffer.rs +++ /dev/null @@ -1,283 +0,0 @@ -//! GPDMA ring buffer implementation. -//! -//! FIXME: add request_pause functionality? -use core::{ - sync::atomic::{fence, Ordering}, - task::Waker, -}; - -use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; - -use crate::dma::{ - gpdma::linked_list::{LinearItem, RunMode, Table}, - ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}, - word::Word, - Channel, Dir, Request, -}; - -use super::{AnyChannel, TransferOptions, STATE}; - -struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>); - -impl<'a> DmaCtrl for DmaCtrlImpl<'a> { - fn get_remaining_transfers(&self) -> usize { - self.0.get_remaining_transfers() as _ - } - - fn reset_complete_count(&mut self) -> usize { - let state = &STATE[self.0.id as usize]; - - return state.complete_count.swap(0, Ordering::AcqRel); - } - - fn set_waker(&mut self, waker: &Waker) { - STATE[self.0.id as usize].waker.register(waker); - } -} - -/// Ringbuffer for receiving data using GPDMA linked-list mode. -pub struct ReadableRingBuffer<'a, W: Word> { - channel: PeripheralRef<'a, AnyChannel>, - ringbuf: ReadableDmaRingBuffer<'a, W>, - table: Table<2>, -} - -impl<'a, W: Word> ReadableRingBuffer<'a, W> { - /// Create a new ring buffer. - pub unsafe fn new( - channel: impl Peripheral

+ 'a, - request: Request, - peri_addr: *mut W, - buffer: &'a mut [W], - mut options: TransferOptions, - ) -> Self { - into_ref!(channel); - let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); - - let half_len = buffer.len() / 2; - assert_eq!(half_len * 2, buffer.len()); - - options.half_transfer_ir = false; - options.complete_transfer_ir = true; - - let items = [ - LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options), - LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options), - ]; - - let table = Table::new(items, RunMode::Once); - - let this = Self { - channel, - ringbuf: ReadableDmaRingBuffer::new(buffer), - table, - }; - - this.channel.configure_linked_list(&this.table, options); - - this - } - - /// Start the ring buffer operation. - /// - /// You must call this after creating it for it to work. - pub fn start(&mut self) { - self.channel.start(); - } - - /// Clear all data in the ring buffer. - pub fn clear(&mut self) { - self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); - } - - /// Read elements from the ring buffer - /// Return a tuple of the length read and the length remaining in the buffer - /// If not all of the elements were read, then there will be some elements in the buffer remaining - /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read - /// Error is returned if the portion to be read was overwritten by the DMA controller. - pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> { - self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf) - } - - /// Read an exact number of elements from the ringbuffer. - /// - /// Returns the remaining number of elements available for immediate reading. - /// Error is returned if the portion to be read was overwritten by the DMA controller. 
- /// - /// Async/Wake Behavior: - /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, - /// and when it wraps around. This means that when called with a buffer of length 'M', when this - /// ring buffer was created with a buffer of size 'N': - /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. - /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. - pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result { - self.ringbuf - .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) - .await - } - - /// The current length of the ringbuffer - pub fn len(&mut self) -> Result { - Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) - } - - /// The capacity of the ringbuffer - pub const fn capacity(&self) -> usize { - self.ringbuf.cap() - } - - /// Set a waker to be woken when at least one byte is received. - pub fn set_waker(&mut self, waker: &Waker) { - DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); - } - - /// Request the DMA to stop. - /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. - /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - self.channel.request_stop() - } - - /// Return whether DMA is still running. - /// - /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). 
- pub fn is_running(&mut self) -> bool { - self.channel.is_running() - } -} - -impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { - fn drop(&mut self) { - self.request_stop(); - while self.is_running() {} - - // "Subsequent reads and writes cannot be moved ahead of preceding reads." - fence(Ordering::SeqCst); - } -} - -/// Ringbuffer for writing data using DMA circular mode. -pub struct WritableRingBuffer<'a, W: Word> { - channel: PeripheralRef<'a, AnyChannel>, - ringbuf: WritableDmaRingBuffer<'a, W>, -} - -impl<'a, W: Word> WritableRingBuffer<'a, W> { - /// Create a new ring buffer. - pub unsafe fn new( - channel: impl Peripheral

+ 'a, - _request: Request, - peri_addr: *mut W, - buffer: &'a mut [W], - mut options: TransferOptions, - ) -> Self { - into_ref!(channel); - let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); - - let len = buffer.len(); - let dir = Dir::MemoryToPeripheral; - let data_size = W::size(); - let buffer_ptr = buffer.as_mut_ptr(); - - options.half_transfer_ir = true; - options.complete_transfer_ir = true; - - channel.configure( - _request, - dir, - peri_addr as *mut u32, - buffer_ptr as *mut u32, - len, - true, - data_size, - data_size, - options, - ); - - Self { - channel, - ringbuf: WritableDmaRingBuffer::new(buffer), - } - } - - /// Start the ring buffer operation. - /// - /// You must call this after creating it for it to work. - pub fn start(&mut self) { - self.channel.start(); - } - - /// Clear all data in the ring buffer. - pub fn clear(&mut self) { - self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); - } - - /// Write elements directly to the raw buffer. - /// This can be used to fill the buffer before starting the DMA transfer. - pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { - self.ringbuf.write_immediate(buf) - } - - /// Write elements from the ring buffer - /// Return a tuple of the length written and the length remaining in the buffer - pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { - self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf) - } - - /// Write an exact number of elements to the ringbuffer. - pub async fn write_exact(&mut self, buffer: &[W]) -> Result { - self.ringbuf - .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) - .await - } - - /// Wait for any ring buffer write error. 
- pub async fn wait_write_error(&mut self) -> Result { - self.ringbuf - .wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow())) - .await - } - - /// The current length of the ringbuffer - pub fn len(&mut self) -> Result { - Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) - } - - /// The capacity of the ringbuffer - pub const fn capacity(&self) -> usize { - self.ringbuf.cap() - } - - /// Set a waker to be woken when at least one byte is received. - pub fn set_waker(&mut self, waker: &Waker) { - DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); - } - - /// Request the DMA to stop. - /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - self.channel.request_stop() - } - - /// Return whether DMA is still running. - /// - /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). - pub fn is_running(&mut self) -> bool { - self.channel.is_running() - } -} - -impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { - fn drop(&mut self) { - self.request_stop(); - while self.is_running() {} - - // "Subsequent reads and writes cannot be moved ahead of preceding reads." - fence(Ordering::SeqCst); - } -} diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs new file mode 100644 index 000000000..fd0a98e23 --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -0,0 +1,433 @@ +//! GPDMA ring buffer implementation. +//! +//! FIXME: add request_pause functionality? 
+use core::{ + future::poll_fn, + sync::atomic::{fence, Ordering}, + task::Waker, +}; + +use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; + +use crate::dma::{ + gpdma::linked_list::{LinearItem, RunMode, Table}, + ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}, + word::Word, + Channel, Request, +}; + +use super::{AnyChannel, TransferOptions, STATE}; + +struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>); + +impl<'a> DmaCtrl for DmaCtrlImpl<'a> { + fn get_remaining_transfers(&self) -> usize { + self.0.get_remaining_transfers() as _ + } + + fn reset_complete_count(&mut self) -> usize { + let state = &STATE[self.0.id as usize]; + + state.complete_count.swap(0, Ordering::AcqRel) + } + + fn set_waker(&mut self, waker: &Waker) { + STATE[self.0.id as usize].waker.register(waker); + } +} + +/// The current buffer half (e.g. for DMA or the user application). +#[derive(Debug, PartialEq, PartialOrd)] +enum BufferHalf { + First, + Second, +} + +impl BufferHalf { + fn toggle(&mut self) { + *self = match *self { + Self::First => Self::Second, + Self::Second => Self::First, + }; + } +} + +/// Ringbuffer for receiving data using GPDMA linked-list mode. +pub struct ReadableRingBuffer<'a, W: Word> { + channel: PeripheralRef<'a, AnyChannel>, + ringbuf: ReadableDmaRingBuffer<'a, W>, + table: Table<1>, + user_buffer_half: BufferHalf, +} + +impl<'a, W: Word> ReadableRingBuffer<'a, W> { + /// Create a new ring buffer. + pub unsafe fn new( + channel: impl Peripheral

+ 'a, + request: Request, + peri_addr: *mut W, + buffer: &'a mut [W], + mut options: TransferOptions, + ) -> Self { + into_ref!(channel); + let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + + let half_len = buffer.len() / 2; + assert_eq!(half_len * 2, buffer.len()); + + options.half_transfer_ir = true; + options.complete_transfer_ir = true; + + // let items = [ + // LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options), + // LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options), + // ]; + let items = [LinearItem::new_read(request, peri_addr, buffer, options)]; + + let table = Table::new(items); + + let this = Self { + channel, + ringbuf: ReadableDmaRingBuffer::new(buffer), + table, + user_buffer_half: BufferHalf::First, + }; + + this.channel.configure_linked_list(&this.table, options); + + this + } + + /// Start the ring buffer operation. + /// + /// You must call this after creating it for it to work. + pub fn start(&mut self) { + self.channel.start(); + } + + /// Clear all data in the ring buffer. + pub fn clear(&mut self) { + self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); + } + + /// Read elements from the ring buffer + /// Return a tuple of the length read and the length remaining in the buffer + /// If not all of the elements were read, then there will be some elements in the buffer remaining + /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read + /// Error is returned if the portion to be read was overwritten by the DMA controller. + pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> { + self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf) + } + + /// Read an exact number of elements from the ringbuffer. + /// + /// Returns the remaining number of elements available for immediate reading. + /// Error is returned if the portion to be read was overwritten by the DMA controller. 
+ /// + /// Async/Wake Behavior: + /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, + /// and when it wraps around. This means that when called with a buffer of length 'M', when this + /// ring buffer was created with a buffer of size 'N': + /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. + /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. + pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result { + self.ringbuf + .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await + } + + /// The current length of the ringbuffer + pub fn len(&mut self) -> Result { + Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) + } + + /// The capacity of the ringbuffer + pub const fn capacity(&self) -> usize { + self.ringbuf.cap() + } + + /// Set a waker to be woken when at least one byte is received. + pub fn set_waker(&mut self, waker: &Waker) { + DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); + } + + /// Request the DMA to stop. + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Request the transfer to pause, keeping the existing configuration for this channel. + /// To restart the transfer, call [`start`](Self::start) again. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_pause(&mut self) { + self.channel.request_pause() + } + + /// Return whether DMA is still running. 
+ /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). + pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } + + /// Stop the DMA transfer and await until the buffer is full. + /// + /// This disables the DMA transfer's circular mode so that the transfer + /// stops when the buffer is full. + /// + /// This is designed to be used with streaming input data such as the + /// I2S/SAI or ADC. + /// + /// When using the UART, you probably want `request_stop()`. + pub async fn stop(&mut self) { + // wait until cr.susp reads as true + poll_fn(|cx| { + self.set_waker(cx.waker()); + self.channel.poll_stop() + }) + .await + } +} + +impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} + +/// Ringbuffer for writing data using DMA circular mode. +pub struct WritableRingBuffer<'a, W: Word> { + channel: PeripheralRef<'a, AnyChannel>, + ringbuf: WritableDmaRingBuffer<'a, W>, + table: Table<1>, + user_buffer_half: BufferHalf, +} + +impl<'a, W: Word> WritableRingBuffer<'a, W> { + /// Create a new ring buffer. + pub unsafe fn new( + channel: impl Peripheral

+ 'a, + request: Request, + peri_addr: *mut W, + buffer: &'a mut [W], + mut options: TransferOptions, + ) -> Self { + into_ref!(channel); + let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + + let half_len = buffer.len() / 2; + assert_eq!(half_len * 2, buffer.len()); + + options.half_transfer_ir = true; + options.complete_transfer_ir = true; + + // let items = [ + // LinearItem::new_write(request, &mut buffer[..half_len], peri_addr, options), + // LinearItem::new_write(request, &mut buffer[half_len..], peri_addr, options), + // ]; + let items = [LinearItem::new_write(request, buffer, peri_addr, options)]; + let table = Table::new(items); + + let this = Self { + channel, + ringbuf: WritableDmaRingBuffer::new(buffer), + table, + user_buffer_half: BufferHalf::First, + }; + + this + } + + fn dma_buffer_half(&self) -> BufferHalf { + if self.ringbuf.read_index(0) < self.ringbuf.cap() / 2 { + BufferHalf::First + } else { + BufferHalf::Second + } + } + + fn link_next_buffer(&mut self) { + self.table.unlink(); + + match self.user_buffer_half { + BufferHalf::First => self.table.link_indices(0, 1), + BufferHalf::Second => self.table.link_indices(1, 0), + } + + self.user_buffer_half.toggle(); + } + + /// Start the ring buffer operation. + /// + /// You must call this after creating it for it to work. + pub fn start(&mut self) { + unsafe { + self.channel.configure_linked_list( + &self.table, + TransferOptions { + half_transfer_ir: true, + complete_transfer_ir: true, + ..Default::default() + }, + ) + }; + self.table.link(RunMode::Repeat); + self.channel.start(); + } + + /// Clear all data in the ring buffer. + pub fn clear(&mut self) { + self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); + } + + /// Write elements directly to the raw buffer. + /// This can be used to fill the buffer before starting the DMA transfer. 
+ pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { + self.ringbuf.write_immediate(buf) + } + + /// Write elements from the ring buffer + /// Return a tuple of the length written and the length remaining in the buffer + pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { + self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf) + } + + /// Write an exact number of elements to the ringbuffer. + pub async fn write_exact(&mut self, buffer: &[W]) -> Result { + return self + .ringbuf + .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await; + + let mut remaining = buffer.len(); + + let mut remaining_cap = 0; + let cap = self.ringbuf.cap(); + + while remaining > 0 { + let dma_buffer_half = self.dma_buffer_half(); + if dma_buffer_half == self.user_buffer_half { + self.link_next_buffer(); + } + + let write_index = self.ringbuf.write_index(0); + let len = match dma_buffer_half { + BufferHalf::First => { + // if write_index < cap / 2 { + // error!("write index: {}", write_index); + // panic!() + // } + info!("Write second"); + + // Fill up second buffer half when DMA reads the first. + cap - write_index + } + BufferHalf::Second => { + // if write_index >= cap / 2 { + // error!("write index: {}", write_index); + // panic!() + // } + info!("Write first"); + + // Fill up first buffer half when DMA reads the second. + cap / 2 - write_index + } + } + .min(remaining); + + remaining_cap = self + .ringbuf + .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await?; + + remaining -= len; + } + + Ok(remaining_cap) + } + + /// Wait for any ring buffer write error. + pub async fn wait_write_error(&mut self) -> Result { + self.ringbuf + .wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow())) + .await + } + + /// The current length of the ringbuffer + pub fn len(&mut self) -> Result { + Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) 
+ } + + /// The capacity of the ringbuffer + pub const fn capacity(&self) -> usize { + self.ringbuf.cap() + } + + /// Set a waker to be woken when at least one byte is received. + pub fn set_waker(&mut self, waker: &Waker) { + DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); + } + + /// Request the DMA to stop. + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Request the transfer to pause, keeping the existing configuration for this channel. + /// To restart the transfer, call [`start`](Self::start) again. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_pause(&mut self) { + self.channel.request_pause() + } + + /// Return whether DMA is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). + pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } + + /// Stop the DMA transfer and await until the buffer is full. + /// + /// This disables the DMA transfer's circular mode so that the transfer + /// stops when the buffer is full. + /// + /// This is designed to be used with streaming input data such as the + /// I2S/SAI or ADC. + /// + /// When using the UART, you probably want `request_stop()`. 
+ pub async fn stop(&mut self) { + // wait until cr.susp reads as true + poll_fn(|cx| { + self.set_waker(cx.waker()); + self.channel.poll_stop() + }) + .await + } +} + +impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs index d3b070a6d..030f906d2 100644 --- a/embassy-stm32/src/dma/mod.rs +++ b/embassy-stm32/src/dma/mod.rs @@ -9,6 +9,8 @@ pub use dma_bdma::*; #[cfg(gpdma)] pub(crate) mod gpdma; #[cfg(gpdma)] +pub use gpdma::ringbuffered::*; +#[cfg(gpdma)] pub use gpdma::*; #[cfg(dmamux)] diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs index e462c71d4..99960bc74 100644 --- a/embassy-stm32/src/dma/ringbuffer/mod.rs +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs @@ -92,6 +92,16 @@ impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> { } } + /// The current ring-buffer read index. + pub fn read_index(&self, offset: usize) -> usize { + self.read_index.as_index(self.cap(), offset) + } + + /// The current ring-buffer write index. + pub fn write_index(&self, offset: usize) -> usize { + self.write_index.as_index(self.cap(), offset) + } + /// Reset the ring buffer to its initial state. pub fn reset(&mut self, dma: &mut impl DmaCtrl) { dma.reset_complete_count(); @@ -208,6 +218,16 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { } } + /// The current ring-buffer read index. + pub fn read_index(&self, offset: usize) -> usize { + self.read_index.as_index(self.cap(), offset) + } + + /// The current ring-buffer write index. + pub fn write_index(&self, offset: usize) -> usize { + self.write_index.as_index(self.cap(), offset) + } + /// Reset the ring buffer to its initial state. The buffer after the reset will be full. 
pub fn reset(&mut self, dma: &mut impl DmaCtrl) { dma.reset_complete_count(); diff --git a/embassy-stm32/src/spdifrx/mod.rs b/embassy-stm32/src/spdifrx/mod.rs index 9c42217f0..d3b4a0b10 100644 --- a/embassy-stm32/src/spdifrx/mod.rs +++ b/embassy-stm32/src/spdifrx/mod.rs @@ -8,7 +8,6 @@ use embassy_sync::waitqueue::AtomicWaker; use crate::dma::ringbuffer::Error as RingbufferError; pub use crate::dma::word; -#[cfg(not(gpdma))] use crate::dma::ReadableRingBuffer; use crate::dma::{Channel, TransferOptions}; use crate::gpio::{AfType, AnyPin, Pull, SealedPin as _}; @@ -58,7 +57,6 @@ macro_rules! impl_spdifrx_pin { /// Ring-buffered SPDIFRX driver. /// /// Data is read by DMAs and stored in a ring buffer. -#[cfg(not(gpdma))] pub struct Spdifrx<'d, T: Instance> { _peri: Peri<'d, T>, spdifrx_in: Option>, @@ -118,7 +116,6 @@ impl Default for Config { } } -#[cfg(not(gpdma))] impl<'d, T: Instance> Spdifrx<'d, T> { fn dma_opts() -> TransferOptions { TransferOptions { @@ -236,7 +233,6 @@ impl<'d, T: Instance> Spdifrx<'d, T> { } } -#[cfg(not(gpdma))] impl<'d, T: Instance> Drop for Spdifrx<'d, T> { fn drop(&mut self) { T::info().regs.cr().modify(|cr| cr.set_spdifen(0x00)); diff --git a/embassy-stm32/src/usart/mod.rs b/embassy-stm32/src/usart/mod.rs index 5bece6d66..3d95de897 100644 --- a/embassy-stm32/src/usart/mod.rs +++ b/embassy-stm32/src/usart/mod.rs @@ -1965,9 +1965,7 @@ pub use buffered::*; pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler; mod buffered; -#[cfg(not(gpdma))] mod ringbuffered; -#[cfg(not(gpdma))] pub use ringbuffered::RingBufferedUartRx; #[cfg(any(usart_v1, usart_v2))] -- cgit From 4155adbf8ad2aa8acbc6e94d059739c9f373323b Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: ping-pong buffers --- embassy-stm32/src/dma/gpdma/linked_list.rs | 22 ++++++++++++- embassy-stm32/src/dma/gpdma/mod.rs | 49 ++++++++++++++++++++++++++++- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 37 +++++++++++++--------- 
3 files changed, 92 insertions(+), 16 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index 7de9a1441..3d2114282 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -156,6 +156,15 @@ impl LinearItem { fn unlink(&mut self) { self.llr = regs::ChLlr(0).0; } + + /// The item's transfer count in number of words. + fn transfer_count(&self) -> usize { + let br1 = regs::ChBr1(self.br1); + let tr1 = regs::ChTr1(self.tr1); + let word_size: WordSize = tr1.ddw().into(); + + br1.bndt() as usize / word_size.bytes() + } } /// A table of linked list items. @@ -173,6 +182,7 @@ impl Table { Self { items } } + /// Link the table as given by the run mode. pub fn link(&mut self, run_mode: RunMode) { if matches!(run_mode, RunMode::Once | RunMode::Repeat) { self.link_sequential(); @@ -183,11 +193,21 @@ impl Table { } } - /// The number of linked list items.s + /// The number of linked list items. pub fn len(&self) -> usize { self.items.len() } + /// The total transfer count of the table in number of words. + pub fn transfer_count(&self) -> usize { + let mut count = 0; + for item in self.items { + count += item.transfer_count() as usize + } + + count + } + /// Link items of given indices together: first -> second. pub fn link_indices(&mut self, first: usize, second: usize) { assert!(first < self.len()); diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index f65048d1f..1d2811ab4 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -94,15 +94,31 @@ impl From for WordSize { } } +pub(crate) struct LLiState { + /// The number of linked-list items. + count: AtomicUsize, + /// The index of the current linked-list item. + index: AtomicUsize, + /// The total transfer count of all linked-list items in number of words. 
+ transfer_count: AtomicUsize, +} + pub(crate) struct ChannelState { waker: AtomicWaker, complete_count: AtomicUsize, + lli_state: LLiState, } impl ChannelState { pub(crate) const NEW: Self = Self { waker: AtomicWaker::new(), complete_count: AtomicUsize::new(0), + + lli_state: LLiState { + count: AtomicUsize::new(0), + index: AtomicUsize::new(0), + transfer_count: AtomicUsize::new(0), + }, }; } @@ -161,7 +177,25 @@ impl AnyChannel { if sr.tcf() { ch.fcr().write(|w| w.set_tcf(true)); - state.complete_count.fetch_add(1, Ordering::Release); + + let lli_count = state.lli_state.count.load(Ordering::Relaxed); + let complete = if lli_count > 0 { + let next_lli_index = state.lli_state.index.load(Ordering::Relaxed) + 1; + let complete = next_lli_index >= lli_count; + + state + .lli_state + .index + .store(if complete { 0 } else { next_lli_index }, Ordering::Relaxed); + + complete + } else { + true + }; + + if complete { + state.complete_count.fetch_add(1, Ordering::Release); + } } if sr.suspf() { @@ -242,6 +276,11 @@ impl AnyChannel { w.set_dteie(true); w.set_suspie(true); }); + + let state = &STATE[self.id as usize]; + state.lli_state.count.store(0, Ordering::Relaxed); + state.lli_state.index.store(0, Ordering::Relaxed); + state.lli_state.transfer_count.store(0, Ordering::Relaxed) } unsafe fn configure_linked_list( @@ -286,6 +325,14 @@ impl AnyChannel { w.set_dteie(true); w.set_suspie(true); }); + + let state = &STATE[self.id as usize]; + state.lli_state.count.store(ITEM_COUNT, Ordering::Relaxed); + state.lli_state.index.store(0, Ordering::Relaxed); + state + .lli_state + .transfer_count + .store(table.transfer_count(), Ordering::Relaxed) } fn start(&self) { diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index fd0a98e23..65ba00b3a 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -22,7 +22,19 @@ struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>); impl<'a> 
DmaCtrl for DmaCtrlImpl<'a> { fn get_remaining_transfers(&self) -> usize { - self.0.get_remaining_transfers() as _ + let state = &STATE[self.0.id as usize]; + let current_remaining = self.0.get_remaining_transfers() as usize; + + let lli_count = state.lli_state.count.load(Ordering::Relaxed); + + if lli_count > 0 { + let lli_index = state.lli_state.index.load(Ordering::Relaxed); + let single_transfer_count = state.lli_state.transfer_count.load(Ordering::Relaxed) / lli_count; + + (lli_count - lli_index - 1) * single_transfer_count + current_remaining + } else { + current_remaining + } } fn reset_complete_count(&mut self) -> usize { @@ -56,7 +68,7 @@ impl BufferHalf { pub struct ReadableRingBuffer<'a, W: Word> { channel: PeripheralRef<'a, AnyChannel>, ringbuf: ReadableDmaRingBuffer<'a, W>, - table: Table<1>, + table: Table<2>, user_buffer_half: BufferHalf, } @@ -78,12 +90,10 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { options.half_transfer_ir = true; options.complete_transfer_ir = true; - // let items = [ - // LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options), - // LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options), - // ]; - let items = [LinearItem::new_read(request, peri_addr, buffer, options)]; - + let items = [ + LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options), + LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options), + ]; let table = Table::new(items); let this = Self { @@ -209,7 +219,7 @@ impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { pub struct WritableRingBuffer<'a, W: Word> { channel: PeripheralRef<'a, AnyChannel>, ringbuf: WritableDmaRingBuffer<'a, W>, - table: Table<1>, + table: Table<2>, user_buffer_half: BufferHalf, } @@ -231,11 +241,10 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { options.half_transfer_ir = true; options.complete_transfer_ir = true; - // let items = [ - // LinearItem::new_write(request, &mut buffer[..half_len], peri_addr, 
options), - // LinearItem::new_write(request, &mut buffer[half_len..], peri_addr, options), - // ]; - let items = [LinearItem::new_write(request, buffer, peri_addr, options)]; + let items = [ + LinearItem::new_write(request, &mut buffer[..half_len], peri_addr, options), + LinearItem::new_write(request, &mut buffer[half_len..], peri_addr, options), + ]; let table = Table::new(items); let this = Self { -- cgit From 51b28aaa3162391bcccef1a0fc99f686471d515f Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: style: formatting --- embassy-stm32/src/dma/gpdma/linked_list.rs | 9 ++++----- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 19 +++++++------------ embassy-stm32/src/spdifrx/mod.rs | 3 +-- 3 files changed, 12 insertions(+), 19 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index 3d2114282..b0c0dffad 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -1,13 +1,12 @@ //! Implementation of the GPDMA linked list and linked list items. #![macro_use] -use stm32_metapac::gpdma::{regs, vals::Dreq}; +use stm32_metapac::gpdma::regs; +use stm32_metapac::gpdma::vals::Dreq; use super::TransferOptions; -use crate::dma::{ - word::{Word, WordSize}, - Dir, Request, -}; +use crate::dma::word::{Word, WordSize}; +use crate::dma::{Dir, Request}; /// The mode in which to run the linked list. #[derive(Debug)] diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 65ba00b3a..c5c18930b 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -1,22 +1,17 @@ //! GPDMA ring buffer implementation. //! //! FIXME: add request_pause functionality? 
-use core::{ - future::poll_fn, - sync::atomic::{fence, Ordering}, - task::Waker, -}; +use core::future::poll_fn; +use core::sync::atomic::{fence, Ordering}; +use core::task::Waker; use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; -use crate::dma::{ - gpdma::linked_list::{LinearItem, RunMode, Table}, - ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}, - word::Word, - Channel, Request, -}; - use super::{AnyChannel, TransferOptions, STATE}; +use crate::dma::gpdma::linked_list::{LinearItem, RunMode, Table}; +use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}; +use crate::dma::word::Word; +use crate::dma::{Channel, Request}; struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>); diff --git a/embassy-stm32/src/spdifrx/mod.rs b/embassy-stm32/src/spdifrx/mod.rs index d3b4a0b10..466639e83 100644 --- a/embassy-stm32/src/spdifrx/mod.rs +++ b/embassy-stm32/src/spdifrx/mod.rs @@ -8,8 +8,7 @@ use embassy_sync::waitqueue::AtomicWaker; use crate::dma::ringbuffer::Error as RingbufferError; pub use crate::dma::word; -use crate::dma::ReadableRingBuffer; -use crate::dma::{Channel, TransferOptions}; +use crate::dma::{Channel, ReadableRingBuffer, TransferOptions}; use crate::gpio::{AfType, AnyPin, Pull, SealedPin as _}; use crate::interrupt::typelevel::Interrupt; use crate::pac::spdifrx::Spdifrx as Regs; -- cgit From f0fc1a15da774f2cc6338697e40f9d1fc7975eb5 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: disable half-complete interrupt --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index c5c18930b..6bd48258b 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -82,7 +82,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { let half_len = buffer.len() / 
2; assert_eq!(half_len * 2, buffer.len()); - options.half_transfer_ir = true; + options.half_transfer_ir = false; options.complete_transfer_ir = true; let items = [ @@ -233,7 +233,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); - options.half_transfer_ir = true; + options.half_transfer_ir = false; options.complete_transfer_ir = true; let items = [ -- cgit From be881875917b93a8cdb7a4ab07876e1239fbe1be Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: transfer options --- embassy-stm32/src/dma/gpdma/mod.rs | 3 +++ embassy-stm32/src/dma/gpdma/ringbuffered.rs | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 1d2811ab4..b23c22dfb 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -283,6 +283,9 @@ impl AnyChannel { state.lli_state.transfer_count.store(0, Ordering::Relaxed) } + /// Configure a linked-list transfer. + /// + /// Transfer options apply only to the base register transfer, not the linked-list items. unsafe fn configure_linked_list( &self, table: &Table, diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 6bd48258b..a5b127d08 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -69,6 +69,8 @@ pub struct ReadableRingBuffer<'a, W: Word> { impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// Create a new ring buffer. + /// + /// Transfer options are applied to the individual linked list items. pub unsafe fn new( channel: impl Peripheral

+ 'a, request: Request, @@ -220,6 +222,8 @@ pub struct WritableRingBuffer<'a, W: Word> { impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Create a new ring buffer. + /// + /// Transfer options are applied to the individual linked list items. pub unsafe fn new( channel: impl Peripheral

+ 'a, request: Request, @@ -279,7 +283,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { self.channel.configure_linked_list( &self.table, TransferOptions { - half_transfer_ir: true, + half_transfer_ir: false, complete_transfer_ir: true, ..Default::default() }, -- cgit From 1541f1e0c2c3b2f8d5e5764966393eedac95ebf0 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: chore: clean up transfer options --- embassy-stm32/src/dma/gpdma/linked_list.rs | 18 ++---------------- embassy-stm32/src/dma/gpdma/mod.rs | 2 -- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 22 ++++------------------ 3 files changed, 6 insertions(+), 36 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index b0c0dffad..b0cf96f96 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -4,7 +4,6 @@ use stm32_metapac::gpdma::regs; use stm32_metapac::gpdma::vals::Dreq; -use super::TransferOptions; use crate::dma::word::{Word, WordSize}; use crate::dma::{Dir, Request}; @@ -42,12 +41,7 @@ pub struct LinearItem { impl LinearItem { /// Create a new read DMA transfer (peripheral to memory). - pub unsafe fn new_read<'d, W: Word>( - request: Request, - peri_addr: *mut W, - buf: &'d mut [W], - options: TransferOptions, - ) -> Self { + pub unsafe fn new_read<'d, W: Word>(request: Request, peri_addr: *mut W, buf: &'d mut [W]) -> Self { Self::new_inner( request, Dir::PeripheralToMemory, @@ -57,17 +51,11 @@ impl LinearItem { true, W::size(), W::size(), - options, ) } /// Create a new write DMA transfer (memory to peripheral). 
- pub unsafe fn new_write<'d, MW: Word, PW: Word>( - request: Request, - buf: &'d [MW], - peri_addr: *mut PW, - options: TransferOptions, - ) -> Self { + pub unsafe fn new_write<'d, MW: Word, PW: Word>(request: Request, buf: &'d [MW], peri_addr: *mut PW) -> Self { Self::new_inner( request, Dir::MemoryToPeripheral, @@ -77,7 +65,6 @@ impl LinearItem { true, MW::size(), PW::size(), - options, ) } @@ -90,7 +77,6 @@ impl LinearItem { incr_mem: bool, data_size: WordSize, dst_size: WordSize, - _options: TransferOptions, ) -> Self { // BNDT is specified as bytes, not as number of transfers. let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index b23c22dfb..e906c7559 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -284,8 +284,6 @@ impl AnyChannel { } /// Configure a linked-list transfer. - /// - /// Transfer options apply only to the base register transfer, not the linked-list items. unsafe fn configure_linked_list( &self, table: &Table, diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index a5b127d08..9bee12d99 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -222,14 +222,12 @@ pub struct WritableRingBuffer<'a, W: Word> { impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Create a new ring buffer. - /// - /// Transfer options are applied to the individual linked list items. pub unsafe fn new( channel: impl Peripheral

+ 'a, request: Request, peri_addr: *mut W, buffer: &'a mut [W], - mut options: TransferOptions, + _options: TransferOptions, ) -> Self { into_ref!(channel); let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); @@ -237,12 +235,9 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); - options.half_transfer_ir = false; - options.complete_transfer_ir = true; - let items = [ - LinearItem::new_write(request, &mut buffer[..half_len], peri_addr, options), - LinearItem::new_write(request, &mut buffer[half_len..], peri_addr, options), + LinearItem::new_write(request, &mut buffer[..half_len], peri_addr), + LinearItem::new_write(request, &mut buffer[half_len..], peri_addr), ]; let table = Table::new(items); @@ -279,16 +274,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { /// /// You must call this after creating it for it to work. pub fn start(&mut self) { - unsafe { - self.channel.configure_linked_list( - &self.table, - TransferOptions { - half_transfer_ir: false, - complete_transfer_ir: true, - ..Default::default() - }, - ) - }; + unsafe { self.channel.configure_linked_list(&self.table, Default::default()) }; self.table.link(RunMode::Repeat); self.channel.start(); } -- cgit From bfd82ff82c1a1cc5159fc07997af2ca87622a679 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: read transfer options --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 9bee12d99..c49c6c73d 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -76,7 +76,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { request: Request, peri_addr: *mut W, buffer: &'a mut [W], - mut options: TransferOptions, + _options: TransferOptions, ) -> Self { into_ref!(channel); 
let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); @@ -84,31 +84,26 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); - options.half_transfer_ir = false; - options.complete_transfer_ir = true; - let items = [ - LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options), - LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options), + LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]), + LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]), ]; let table = Table::new(items); - let this = Self { + Self { channel, ringbuf: ReadableDmaRingBuffer::new(buffer), table, user_buffer_half: BufferHalf::First, - }; - - this.channel.configure_linked_list(&this.table, options); - - this + } } /// Start the ring buffer operation. /// /// You must call this after creating it for it to work. pub fn start(&mut self) { + unsafe { self.channel.configure_linked_list(&self.table, Default::default()) }; + self.table.link(RunMode::Repeat); self.channel.start(); } -- cgit From 50224583db79fcbfe340056eef855414c884f281 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: load/store ordering --- embassy-stm32/src/dma/gpdma/mod.rs | 6 +++--- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index e906c7559..a158d30b8 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -178,15 +178,15 @@ impl AnyChannel { if sr.tcf() { ch.fcr().write(|w| w.set_tcf(true)); - let lli_count = state.lli_state.count.load(Ordering::Relaxed); + let lli_count = state.lli_state.count.load(Ordering::Acquire); let complete = if lli_count > 0 { - let next_lli_index = state.lli_state.index.load(Ordering::Relaxed) + 1; + let next_lli_index = 
state.lli_state.index.load(Ordering::Acquire) + 1; let complete = next_lli_index >= lli_count; state .lli_state .index - .store(if complete { 0 } else { next_lli_index }, Ordering::Relaxed); + .store(if complete { 0 } else { next_lli_index }, Ordering::Release); complete } else { diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index c49c6c73d..20f46b103 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -20,11 +20,11 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> { let state = &STATE[self.0.id as usize]; let current_remaining = self.0.get_remaining_transfers() as usize; - let lli_count = state.lli_state.count.load(Ordering::Relaxed); + let lli_count = state.lli_state.count.load(Ordering::Acquire); if lli_count > 0 { - let lli_index = state.lli_state.index.load(Ordering::Relaxed); - let single_transfer_count = state.lli_state.transfer_count.load(Ordering::Relaxed) / lli_count; + let lli_index = state.lli_state.index.load(Ordering::Acquire); + let single_transfer_count = state.lli_state.transfer_count.load(Ordering::Acquire) / lli_count; (lli_count - lli_index - 1) * single_transfer_count + current_remaining } else { -- cgit From 2baa4399a7dc4c38ed478f723bbf3b7417dcc0f5 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: wip gpdma --- embassy-stm32/src/dma/gpdma/mod.rs | 6 ++---- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 25 ++++++++++++++----------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index a158d30b8..9868ce52d 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -199,10 +199,8 @@ impl AnyChannel { } if sr.suspf() { - // disable all xxIEs to prevent the irq from firing again. + // Disable all xxIEs to prevent the irq from firing again. ch.cr().write(|_| {}); - - // Wake the future. 
It'll look at tcf and see it's set. } state.waker.wake(); } @@ -366,7 +364,7 @@ impl AnyChannel { let sr = ch.sr().read(); - !sr.tcf() && !sr.suspf() && !sr.idlef() + !sr.suspf() && !sr.idlef() } fn poll_stop(&self) -> Poll<()> { diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 20f46b103..5ba45358b 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -23,11 +23,14 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> { let lli_count = state.lli_state.count.load(Ordering::Acquire); if lli_count > 0 { + // In linked-list mode, the remaining transfers are the sum of the full lengths of LLIs that follow, + // and the remaining transfers for the current LLI. let lli_index = state.lli_state.index.load(Ordering::Acquire); let single_transfer_count = state.lli_state.transfer_count.load(Ordering::Acquire) / lli_count; (lli_count - lli_index - 1) * single_transfer_count + current_remaining } else { + // No linked-list mode. current_remaining } } @@ -81,6 +84,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { into_ref!(channel); let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + // Buffer halves should be the same length. let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); @@ -227,6 +231,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { into_ref!(channel); let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + // Buffer halves should be the same length. 
let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); @@ -258,8 +263,8 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { self.table.unlink(); match self.user_buffer_half { - BufferHalf::First => self.table.link_indices(0, 1), - BufferHalf::Second => self.table.link_indices(1, 0), + BufferHalf::First => self.table.link_indices(1, 0), + BufferHalf::Second => self.table.link_indices(0, 1), } self.user_buffer_half.toggle(); @@ -298,48 +303,46 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) .await; - let mut remaining = buffer.len(); + let mut writable_length = buffer.len(); let mut remaining_cap = 0; let cap = self.ringbuf.cap(); - while remaining > 0 { + while writable_length > 0 { let dma_buffer_half = self.dma_buffer_half(); if dma_buffer_half == self.user_buffer_half { self.link_next_buffer(); } let write_index = self.ringbuf.write_index(0); - let len = match dma_buffer_half { + let write_length = match dma_buffer_half { BufferHalf::First => { // if write_index < cap / 2 { // error!("write index: {}", write_index); // panic!() // } - info!("Write second"); // Fill up second buffer half when DMA reads the first. - cap - write_index + cap / 2 - write_index } BufferHalf::Second => { // if write_index >= cap / 2 { // error!("write index: {}", write_index); // panic!() // } - info!("Write first"); // Fill up first buffer half when DMA reads the second. 
- cap / 2 - write_index + cap - write_index } } - .min(remaining); + .min(writable_length); remaining_cap = self .ringbuf .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) .await?; - remaining -= len; + writable_length -= write_length; } Ok(remaining_cap) -- cgit From 78364b966eb76c071d5450c2a13cc788d7e5be80 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: chore: change naming --- embassy-stm32/src/dma/gpdma/linked_list.rs | 6 +- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 92 +++++++++++++---------------- embassy-stm32/src/dma/ringbuffer/mod.rs | 46 +++++++++++++-- 3 files changed, 87 insertions(+), 57 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index b0cf96f96..a95e5590e 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -15,7 +15,7 @@ pub enum RunMode { /// The list is linked sequentially and only run once. Once, /// The list is linked sequentially, and the end of the list is linked to the beginning. - Repeat, + Circular, } /// A linked-list item for linear GPDMA transfers. @@ -169,11 +169,11 @@ impl Table { /// Link the table as given by the run mode. pub fn link(&mut self, run_mode: RunMode) { - if matches!(run_mode, RunMode::Once | RunMode::Repeat) { + if matches!(run_mode, RunMode::Once | RunMode::Circular) { self.link_sequential(); } - if matches!(run_mode, RunMode::Repeat) { + if matches!(run_mode, RunMode::Circular) { self.link_repeat(); } } diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 5ba45358b..99c85a221 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -48,6 +48,7 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> { /// The current buffer half (e.g. for DMA or the user application). 
#[derive(Debug, PartialEq, PartialOrd)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] enum BufferHalf { First, Second, @@ -107,7 +108,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// You must call this after creating it for it to work. pub fn start(&mut self) { unsafe { self.channel.configure_linked_list(&self.table, Default::default()) }; - self.table.link(RunMode::Repeat); + self.table.link(RunMode::Circular); self.channel.start(); } @@ -251,31 +252,12 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { this } - fn dma_buffer_half(&self) -> BufferHalf { - if self.ringbuf.read_index(0) < self.ringbuf.cap() / 2 { - BufferHalf::First - } else { - BufferHalf::Second - } - } - - fn link_next_buffer(&mut self) { - self.table.unlink(); - - match self.user_buffer_half { - BufferHalf::First => self.table.link_indices(1, 0), - BufferHalf::Second => self.table.link_indices(0, 1), - } - - self.user_buffer_half.toggle(); - } - /// Start the ring buffer operation. /// /// You must call this after creating it for it to work. 
pub fn start(&mut self) { unsafe { self.channel.configure_linked_list(&self.table, Default::default()) }; - self.table.link(RunMode::Repeat); + self.table.link(RunMode::Circular); self.channel.start(); } @@ -303,47 +285,57 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) .await; - let mut writable_length = buffer.len(); + let mut written_len = 0; + let len = buffer.len(); let mut remaining_cap = 0; let cap = self.ringbuf.cap(); - while writable_length > 0 { - let dma_buffer_half = self.dma_buffer_half(); - if dma_buffer_half == self.user_buffer_half { - self.link_next_buffer(); - } - - let write_index = self.ringbuf.write_index(0); - let write_length = match dma_buffer_half { + let dma = &mut DmaCtrlImpl(self.channel.reborrow()); + let user_buffer_half = &mut self.user_buffer_half; + let ringbuf = &mut self.ringbuf; + let table = &mut self.table; + + while written_len != len { + // info!( + // "read {}, write {}, cap {}", + // ringbuf.read_index(0), + // ringbuf.write_index(0), + // ringbuf.cap() + // ); + + let dma_buffer_half = if ringbuf.read_index(0) < ringbuf.cap() / 2 { + BufferHalf::First + } else { + BufferHalf::Second + }; + + // if dma_buffer_half == *user_buffer_half { + // info!("swap user from {}", user_buffer_half); + // table.unlink(); + + // match user_buffer_half { + // BufferHalf::First => table.link_indices(1, 0), + // BufferHalf::Second => table.link_indices(0, 1), + // } + + // user_buffer_half.toggle(); + // } + + let index = match dma_buffer_half { BufferHalf::First => { - // if write_index < cap / 2 { - // error!("write index: {}", write_index); - // panic!() - // } - // Fill up second buffer half when DMA reads the first. - cap / 2 - write_index + cap - 1 } BufferHalf::Second => { - // if write_index >= cap / 2 { - // error!("write index: {}", write_index); - // panic!() - // } - // Fill up first buffer half when DMA reads the second. 
- cap - write_index + cap / 2 - 1 } - } - .min(writable_length); - - remaining_cap = self - .ringbuf - .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) - .await?; + }; - writable_length -= write_length; + (written_len, remaining_cap) = ringbuf.write_until(dma, &buffer, index).await?; } + info!("done"); Ok(remaining_cap) } diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs index 99960bc74..c4bf4dd60 100644 --- a/embassy-stm32/src/dma/ringbuffer/mod.rs +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs @@ -303,17 +303,19 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { } /// Write an exact number of elements to the ringbuffer. + /// + /// Returns the remaining write capacity in the buffer. pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result { - let mut written_data = 0; + let mut written_len = 0; let buffer_len = buffer.len(); poll_fn(|cx| { dma.set_waker(cx.waker()); - match self.write(dma, &buffer[written_data..buffer_len]) { + match self.write(dma, &buffer[written_len..buffer_len]) { Ok((len, remaining)) => { - written_data += len; - if written_data == buffer_len { + written_len += len; + if written_len == buffer_len { Poll::Ready(Ok(remaining)) } else { Poll::Pending @@ -325,6 +327,42 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { .await } + /// Write until a given write index. + /// + /// Returns a tuple of the written length, and the remaining write capacity in the buffer. 
+ pub async fn write_until( + &mut self, + dma: &mut impl DmaCtrl, + buffer: &[W], + index: usize, + ) -> Result<(usize, usize), Error> { + let mut written_len = 0; + let write_len = index + .saturating_sub(self.write_index.as_index(self.cap(), 0)) + .min(buffer.len()); + + if write_len == 0 { + return Err(Error::Overrun); + } + + poll_fn(|cx| { + dma.set_waker(cx.waker()); + + match self.write(dma, &buffer[written_len..write_len]) { + Ok((len, remaining)) => { + written_len += len; + if written_len == write_len { + Poll::Ready(Ok((written_len, remaining))) + } else { + Poll::Pending + } + } + Err(e) => Poll::Ready(Err(e)), + } + }) + .await + } + fn write_raw(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), Error> { let writable = self.len(dma)?.min(buf.len()); for i in 0..writable { -- cgit From a4d3b4b6ae3f3265ea372e446a6e7b5d3685ea3a Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: wip, write buffer in halves --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 84 ++++------------------------- embassy-stm32/src/dma/ringbuffer/mod.rs | 76 ++++++++++++++------------ embassy-stm32/src/usart/ringbuffered.rs | 2 +- 3 files changed, 54 insertions(+), 108 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 99c85a221..a5d2c700c 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -46,29 +46,11 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> { } } -/// The current buffer half (e.g. for DMA or the user application). -#[derive(Debug, PartialEq, PartialOrd)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -enum BufferHalf { - First, - Second, -} - -impl BufferHalf { - fn toggle(&mut self) { - *self = match *self { - Self::First => Self::Second, - Self::Second => Self::First, - }; - } -} - /// Ringbuffer for receiving data using GPDMA linked-list mode. 
pub struct ReadableRingBuffer<'a, W: Word> { channel: PeripheralRef<'a, AnyChannel>, ringbuf: ReadableDmaRingBuffer<'a, W>, table: Table<2>, - user_buffer_half: BufferHalf, } impl<'a, W: Word> ReadableRingBuffer<'a, W> { @@ -99,7 +81,6 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { channel, ringbuf: ReadableDmaRingBuffer::new(buffer), table, - user_buffer_half: BufferHalf::First, } } @@ -217,7 +198,6 @@ pub struct WritableRingBuffer<'a, W: Word> { channel: PeripheralRef<'a, AnyChannel>, ringbuf: WritableDmaRingBuffer<'a, W>, table: Table<2>, - user_buffer_half: BufferHalf, } impl<'a, W: Word> WritableRingBuffer<'a, W> { @@ -246,7 +226,6 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { channel, ringbuf: WritableDmaRingBuffer::new(buffer), table, - user_buffer_half: BufferHalf::First, }; this @@ -280,62 +259,21 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Write an exact number of elements to the ringbuffer. pub async fn write_exact(&mut self, buffer: &[W]) -> Result { - return self - .ringbuf - .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) - .await; + // return self + // .ringbuf + // .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + // .await; + let mut remaining_cap = 0; let mut written_len = 0; - let len = buffer.len(); - let mut remaining_cap = 0; - let cap = self.ringbuf.cap(); - - let dma = &mut DmaCtrlImpl(self.channel.reborrow()); - let user_buffer_half = &mut self.user_buffer_half; - let ringbuf = &mut self.ringbuf; - let table = &mut self.table; - - while written_len != len { - // info!( - // "read {}, write {}, cap {}", - // ringbuf.read_index(0), - // ringbuf.write_index(0), - // ringbuf.cap() - // ); - - let dma_buffer_half = if ringbuf.read_index(0) < ringbuf.cap() / 2 { - BufferHalf::First - } else { - BufferHalf::Second - }; - - // if dma_buffer_half == *user_buffer_half { - // info!("swap user from {}", user_buffer_half); - // table.unlink(); - - // match user_buffer_half { - // BufferHalf::First => 
table.link_indices(1, 0), - // BufferHalf::Second => table.link_indices(0, 1), - // } - - // user_buffer_half.toggle(); - // } - - let index = match dma_buffer_half { - BufferHalf::First => { - // Fill up second buffer half when DMA reads the first. - cap - 1 - } - BufferHalf::Second => { - // Fill up first buffer half when DMA reads the second. - cap / 2 - 1 - } - }; - - (written_len, remaining_cap) = ringbuf.write_until(dma, &buffer, index).await?; + while written_len < buffer.len() { + (written_len, remaining_cap) = self + .ringbuf + .write_half(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await?; + // info!("Written: {}/{}", written_len, buffer.len()); } - info!("done"); Ok(remaining_cap) } diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs index c4bf4dd60..8d00d822d 100644 --- a/embassy-stm32/src/dma/ringbuffer/mod.rs +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs @@ -3,6 +3,14 @@ use core::task::{Poll, Waker}; use crate::dma::word::Word; +/// The current buffer half (e.g. for DMA or the user application). +#[derive(Debug, PartialEq, PartialOrd)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +enum BufferHalf { + First, + Second, +} + pub trait DmaCtrl { /// Get the NDTR register value, i.e. the space left in the underlying /// buffer until the dma writer wraps. @@ -92,16 +100,6 @@ impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> { } } - /// The current ring-buffer read index. - pub fn read_index(&self, offset: usize) -> usize { - self.read_index.as_index(self.cap(), offset) - } - - /// The current ring-buffer write index. - pub fn write_index(&self, offset: usize) -> usize { - self.write_index.as_index(self.cap(), offset) - } - /// Reset the ring buffer to its initial state. pub fn reset(&mut self, dma: &mut impl DmaCtrl) { dma.reset_complete_count(); @@ -218,14 +216,13 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { } } - /// The current ring-buffer read index. 
- pub fn read_index(&self, offset: usize) -> usize { - self.read_index.as_index(self.cap(), offset) - } - - /// The current ring-buffer write index. - pub fn write_index(&self, offset: usize) -> usize { - self.write_index.as_index(self.cap(), offset) + /// The buffer half that is in use by the DMA. + fn dma_half(&self) -> BufferHalf { + if self.read_index.as_index(self.cap(), 0) < self.cap() / 2 { + BufferHalf::First + } else { + BufferHalf::Second + } } /// Reset the ring buffer to its initial state. The buffer after the reset will be full. @@ -305,6 +302,7 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { /// Write an exact number of elements to the ringbuffer. /// /// Returns the remaining write capacity in the buffer. + #[allow(dead_code)] pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result { let mut written_len = 0; let buffer_len = buffer.len(); @@ -327,31 +325,41 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { .await } - /// Write until a given write index. + /// Write the user's current buffer half - not used by the DMA. /// /// Returns a tuple of the written length, and the remaining write capacity in the buffer. 
- pub async fn write_until( - &mut self, - dma: &mut impl DmaCtrl, - buffer: &[W], - index: usize, - ) -> Result<(usize, usize), Error> { + #[allow(dead_code)] + pub async fn write_half(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<(usize, usize), Error> { let mut written_len = 0; - let write_len = index - .saturating_sub(self.write_index.as_index(self.cap(), 0)) - .min(buffer.len()); - - if write_len == 0 { - return Err(Error::Overrun); - } + let buffer_len = buffer.len(); poll_fn(|cx| { dma.set_waker(cx.waker()); - match self.write(dma, &buffer[written_len..write_len]) { + let dma_half = self.dma_half(); + // let user_half = self.user_half(); + + // if dma_half == user_half { + // info!("ups"); + // return Poll::Ready(Err(Error::Overrun)); + // } + + let write_index = self.write_index.as_index(self.cap(), 0); + let target_write_len = match dma_half { + BufferHalf::First => self.cap().saturating_sub(write_index), + BufferHalf::Second => (self.cap() / 2).saturating_sub(write_index), + }; + let write_end_index = (target_write_len + written_len).min(buffer_len); + + // info!( + // "buf_len: {}, write_len: {}, write_index: {}", + // buffer_len, target_write_len, write_index + // ); + + match self.write(dma, &buffer[written_len..write_end_index]) { Ok((len, remaining)) => { written_len += len; - if written_len == write_len { + if written_len == write_end_index { Poll::Ready(Ok((written_len, remaining))) } else { Poll::Pending diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index 1d4a44896..5f4e87834 100644 --- a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs @@ -381,7 +381,7 @@ impl ReadReady for RingBufferedUartRx<'_> { crate::dma::ringbuffer::Error::Overrun => Self::Error::Overrun, crate::dma::ringbuffer::Error::DmaUnsynced => { error!( - "Ringbuffer error: DmaUNsynced, driver implementation is + "Ringbuffer error: DmaUNsynced, driver implementation is probably bugged 
please open an issue" ); // we report this as overrun since its recoverable in the same way -- cgit From 7a62b8eee8f2f466fbe1878aab42d63aa171ddaa Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: build issues --- embassy-stm32/src/dma/gpdma/mod.rs | 12 +++++------- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 18 ++++++++---------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 9868ce52d..2132f070a 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -382,23 +382,21 @@ impl AnyChannel { /// Linked-list DMA transfer. #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> { - channel: PeripheralRef<'a, AnyChannel>, + channel: Peri<'a, AnyChannel>, } impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { /// Create a new linked-list transfer. pub unsafe fn new_linked_list( - channel: impl Peripheral

+ 'a, + channel: Peri<'a, impl Channel>, table: Table, options: TransferOptions, ) -> Self { - into_ref!(channel); - - Self::new_inner_linked_list(channel.map_into(), table, options) + Self::new_inner_linked_list(channel.into(), table, options) } unsafe fn new_inner_linked_list( - channel: PeripheralRef<'a, AnyChannel>, + channel: Peri<'a, AnyChannel>, table: Table, options: TransferOptions, ) -> Self { @@ -576,7 +574,7 @@ impl<'a> Transfer<'a> { assert!(mem_len > 0 && mem_len <= 0xFFFF); channel.configure( - _request, + request, dir, peri_addr, mem_addr, diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index a5d2c700c..88ec666dc 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -5,7 +5,7 @@ use core::future::poll_fn; use core::sync::atomic::{fence, Ordering}; use core::task::Waker; -use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; +use embassy_hal_internal::Peri; use super::{AnyChannel, TransferOptions, STATE}; use crate::dma::gpdma::linked_list::{LinearItem, RunMode, Table}; @@ -13,7 +13,7 @@ use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaR use crate::dma::word::Word; use crate::dma::{Channel, Request}; -struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>); +struct DmaCtrlImpl<'a>(Peri<'a, AnyChannel>); impl<'a> DmaCtrl for DmaCtrlImpl<'a> { fn get_remaining_transfers(&self) -> usize { @@ -48,7 +48,7 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> { /// Ringbuffer for receiving data using GPDMA linked-list mode. pub struct ReadableRingBuffer<'a, W: Word> { - channel: PeripheralRef<'a, AnyChannel>, + channel: Peri<'a, AnyChannel>, ringbuf: ReadableDmaRingBuffer<'a, W>, table: Table<2>, } @@ -58,14 +58,13 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// /// Transfer options are applied to the individual linked list items. pub unsafe fn new( - channel: impl Peripheral

+ 'a, + channel: Peri<'a, impl Channel>, request: Request, peri_addr: *mut W, buffer: &'a mut [W], _options: TransferOptions, ) -> Self { - into_ref!(channel); - let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + let channel: Peri<'a, AnyChannel> = channel.into(); // Buffer halves should be the same length. let half_len = buffer.len() / 2; @@ -195,7 +194,7 @@ impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { /// Ringbuffer for writing data using DMA circular mode. pub struct WritableRingBuffer<'a, W: Word> { - channel: PeripheralRef<'a, AnyChannel>, + channel: Peri<'a, AnyChannel>, ringbuf: WritableDmaRingBuffer<'a, W>, table: Table<2>, } @@ -203,14 +202,13 @@ pub struct WritableRingBuffer<'a, W: Word> { impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Create a new ring buffer. pub unsafe fn new( - channel: impl Peripheral

+ 'a, + channel: Peri<'a, impl Channel>, request: Request, peri_addr: *mut W, buffer: &'a mut [W], _options: TransferOptions, ) -> Self { - into_ref!(channel); - let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + let channel: Peri<'a, AnyChannel> = channel.into(); // Buffer halves should be the same length. let half_len = buffer.len() / 2; -- cgit From 3d161e98a1a56d6d73473c40431480d53ee67b70 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: simplify --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 23 +++--------- embassy-stm32/src/dma/ringbuffer/mod.rs | 55 ----------------------------- 2 files changed, 5 insertions(+), 73 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 88ec666dc..f9d77ab73 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -1,6 +1,7 @@ //! GPDMA ring buffer implementation. //! -//! FIXME: add request_pause functionality? +//! FIXME: Add request_pause functionality? +//! FIXME: Stop the DMA, if a user does not queue new transfers (chain of linked-list items ends automatically). use core::future::poll_fn; use core::sync::atomic::{fence, Ordering}; use core::task::Waker; @@ -257,23 +258,9 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Write an exact number of elements to the ringbuffer. 
pub async fn write_exact(&mut self, buffer: &[W]) -> Result { - // return self - // .ringbuf - // .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) - // .await; - - let mut remaining_cap = 0; - let mut written_len = 0; - - while written_len < buffer.len() { - (written_len, remaining_cap) = self - .ringbuf - .write_half(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) - .await?; - // info!("Written: {}/{}", written_len, buffer.len()); - } - - Ok(remaining_cap) + self.ringbuf + .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await } /// Wait for any ring buffer write error. diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs index 8d00d822d..2d61204a2 100644 --- a/embassy-stm32/src/dma/ringbuffer/mod.rs +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs @@ -216,15 +216,6 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { } } - /// The buffer half that is in use by the DMA. - fn dma_half(&self) -> BufferHalf { - if self.read_index.as_index(self.cap(), 0) < self.cap() / 2 { - BufferHalf::First - } else { - BufferHalf::Second - } - } - /// Reset the ring buffer to its initial state. The buffer after the reset will be full. pub fn reset(&mut self, dma: &mut impl DmaCtrl) { dma.reset_complete_count(); @@ -325,52 +316,6 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { .await } - /// Write the user's current buffer half - not used by the DMA. - /// - /// Returns a tuple of the written length, and the remaining write capacity in the buffer. 
- #[allow(dead_code)] - pub async fn write_half(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<(usize, usize), Error> { - let mut written_len = 0; - let buffer_len = buffer.len(); - - poll_fn(|cx| { - dma.set_waker(cx.waker()); - - let dma_half = self.dma_half(); - // let user_half = self.user_half(); - - // if dma_half == user_half { - // info!("ups"); - // return Poll::Ready(Err(Error::Overrun)); - // } - - let write_index = self.write_index.as_index(self.cap(), 0); - let target_write_len = match dma_half { - BufferHalf::First => self.cap().saturating_sub(write_index), - BufferHalf::Second => (self.cap() / 2).saturating_sub(write_index), - }; - let write_end_index = (target_write_len + written_len).min(buffer_len); - - // info!( - // "buf_len: {}, write_len: {}, write_index: {}", - // buffer_len, target_write_len, write_index - // ); - - match self.write(dma, &buffer[written_len..write_end_index]) { - Ok((len, remaining)) => { - written_len += len; - if written_len == write_end_index { - Poll::Ready(Ok((written_len, remaining))) - } else { - Poll::Pending - } - } - Err(e) => Poll::Ready(Err(e)), - } - }) - .await - } - fn write_raw(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), Error> { let writable = self.len(dma)?.min(buf.len()); for i in 0..writable { -- cgit From 40a0d5d8f2fc7e2cfc216eb31b47729dd95a24cd Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: build warnings --- embassy-stm32/src/dma/ringbuffer/mod.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs index 2d61204a2..659ffa9e5 100644 --- a/embassy-stm32/src/dma/ringbuffer/mod.rs +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs @@ -3,14 +3,6 @@ use core::task::{Poll, Waker}; use crate::dma::word::Word; -/// The current buffer half (e.g. for DMA or the user application). 
-#[derive(Debug, PartialEq, PartialOrd)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -enum BufferHalf { - First, - Second, -} - pub trait DmaCtrl { /// Get the NDTR register value, i.e. the space left in the underlying /// buffer until the dma writer wraps. -- cgit From 7d224d94c47e4e457aed7c4832be8b4a52dfcef8 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: docstring --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index f9d77ab73..87c482bfb 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -193,7 +193,7 @@ impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { } } -/// Ringbuffer for writing data using DMA circular mode. +/// Ringbuffer for writing data using GPDMA linked-list mode. pub struct WritableRingBuffer<'a, W: Word> { channel: Peri<'a, AnyChannel>, ringbuf: WritableDmaRingBuffer<'a, W>, -- cgit From fec14213ea7b79badc14eae38c4a0b0197499f3f Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: modified dma channel state management See https://github.com/embassy-rs/embassy/pull/3923#discussion_r2094570176 --- embassy-stm32/src/dma/gpdma/mod.rs | 66 ++++++++++++++++++++--------- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 54 +++++++++++++---------- embassy-stm32/src/ucpd.rs | 4 +- embassy-stm32/src/usart/ringbuffered.rs | 2 +- 4 files changed, 80 insertions(+), 46 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 2132f070a..d06eac60e 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -341,21 +341,25 @@ impl AnyChannel { ch.cr().modify(|w| w.set_en(true)); } - fn request_stop(&self) { + fn request_suspend(&self) { let info = self.info(); let ch = 
info.dma.ch(info.num); ch.cr().modify(|w| w.set_susp(true)) } - fn request_pause(&self) { + fn request_resume(&self) { let info = self.info(); let ch = info.dma.ch(info.num); - // Disable the channel without overwriting the existing configuration - ch.cr().modify(|w| { - w.set_en(false); - }); + ch.cr().modify(|w| w.set_susp(false)); + } + + fn request_reset(&self) { + let info = self.info(); + let ch = info.dma.ch(info.num); + + ch.cr().modify(|w| w.set_reset(true)); } fn is_running(&self) -> bool { @@ -406,11 +410,26 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { Self { channel } } - /// Request the transfer to stop. + /// Request the transfer to suspend. + /// + /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - self.channel.request_stop() + pub fn request_suspend(&mut self) { + self.channel.request_suspend() + } + + /// Request the transfer to resume after being suspended. + pub fn request_resume(&mut self) { + self.channel.request_resume() + } + + /// Request the DMA to reset. + /// + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_suspend`](Self::request_suspend) instead. + pub fn request_reset(&mut self) { + self.channel.request_reset() } /// Return whether this transfer is still running. @@ -440,7 +459,7 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> { fn drop(&mut self) { - self.request_stop(); + self.request_suspend(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." @@ -589,21 +608,26 @@ impl<'a> Transfer<'a> { Self { channel } } - /// Request the transfer to stop. 
- /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + /// Request the transfer to suspend. + /// + /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - self.channel.request_stop() + pub fn request_suspend(&mut self) { + self.channel.request_suspend() } - /// Request the transfer to pause, keeping the existing configuration for this channel. - /// To restart the transfer, call [`start`](Self::start) again. + /// Request the transfer to resume after being suspended. + pub fn request_resume(&mut self) { + self.channel.request_resume() + } + + /// Request the DMA to reset. /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_pause(&mut self) { - self.channel.request_pause() + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_suspend`](Self::request_suspend) instead. + pub fn request_reset(&mut self) { + self.channel.request_reset() } /// Return whether this transfer is still running. @@ -633,7 +657,7 @@ impl<'a> Transfer<'a> { impl<'a> Drop for Transfer<'a> { fn drop(&mut self) { - self.request_stop(); + self.request_suspend(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." 
diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 87c482bfb..4532bda57 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -139,21 +139,26 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); } - /// Request the DMA to stop. - /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + /// Request the DMA to suspend. + /// + /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - self.channel.request_stop() + pub fn request_suspend(&mut self) { + self.channel.request_suspend() + } + + /// Request the DMA to resume transfers after being suspended. + pub fn request_resume(&mut self) { + self.channel.request_resume() } - /// Request the transfer to pause, keeping the existing configuration for this channel. - /// To restart the transfer, call [`start`](Self::start) again. + /// Request the DMA to reset. /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_pause(&mut self) { - self.channel.request_pause() + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_suspend`](Self::request_suspend) instead. + pub fn request_reset(&mut self) { + self.channel.request_reset() } /// Return whether DMA is still running. 
@@ -185,7 +190,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { fn drop(&mut self) { - self.request_stop(); + self.request_suspend(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." @@ -285,21 +290,26 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); } - /// Request the DMA to stop. - /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + /// Request the DMA to suspend. + /// + /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - self.channel.request_stop() + pub fn request_suspend(&mut self) { + self.channel.request_suspend() + } + + /// Request the DMA to resume transfers after being suspended. + pub fn request_resume(&mut self) { + self.channel.request_resume() } - /// Request the transfer to pause, keeping the existing configuration for this channel. - /// To restart the transfer, call [`start`](Self::start) again. + /// Request the DMA to reset. /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_pause(&mut self) { - self.channel.request_pause() + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_suspend`](Self::request_suspend) instead. + pub fn request_reset(&mut self) { + self.channel.request_reset() } /// Return whether DMA is still running. 
@@ -331,7 +341,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { fn drop(&mut self) { - self.request_stop(); + self.request_suspend(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." diff --git a/embassy-stm32/src/ucpd.rs b/embassy-stm32/src/ucpd.rs index 0a80adb8f..967e43a8a 100644 --- a/embassy-stm32/src/ucpd.rs +++ b/embassy-stm32/src/ucpd.rs @@ -490,14 +490,14 @@ impl<'d, T: Instance> PdPhy<'d, T> { let sr = r.sr().read(); if sr.rxhrstdet() { - dma.request_stop(); + dma.request_suspend(); // Clean and re-enable hard reset receive interrupt. r.icr().write(|w| w.set_rxhrstdetcf(true)); r.imr().modify(|w| w.set_rxhrstdetie(true)); Poll::Ready(Err(RxError::HardReset)) } else if sr.rxmsgend() { - dma.request_stop(); + dma.request_suspend(); // Should be read immediately on interrupt. rxpaysz = r.rx_payszr().read().rxpaysz().into(); diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index 5f4e87834..bea56c991 100644 --- a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs @@ -165,7 +165,7 @@ impl<'d> RingBufferedUartRx<'d> { /// Stop DMA backed UART receiver fn stop_uart(&mut self) { - self.ring_buf.request_pause(); + self.ring_buf.request_suspend(); let r = self.info.regs; // clear all interrupts and DMA Rx Request -- cgit From c0b8e9c7e5002a424e9ade711b085c2451a58b97 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: writing reserved bits --- embassy-stm32/src/dma/gpdma/mod.rs | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index d06eac60e..7c05715ee 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -237,7 +237,16 @@ impl AnyChannel { fence(Ordering::SeqCst); ch.cr().write(|w| 
w.set_reset(true)); - ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs + ch.fcr().write(|w| { + // Clear all irqs + w.set_dtef(true); + w.set_htf(true); + w.set_suspf(true); + w.set_tcf(true); + w.set_tof(true); + w.set_ulef(true); + w.set_usef(true); + }); ch.llr().write(|_| {}); // no linked list ch.tr1().write(|w| { w.set_sdw(data_size.into()); @@ -294,7 +303,16 @@ impl AnyChannel { fence(Ordering::SeqCst); ch.cr().write(|w| w.set_reset(true)); - ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs + ch.fcr().write(|w| { + // Clear all irqs + w.set_dtef(true); + w.set_htf(true); + w.set_suspf(true); + w.set_tcf(true); + w.set_tof(true); + w.set_ulef(true); + w.set_usef(true); + }); ch.lbar().write(|reg| reg.set_lba(table.base_address())); // Empty LLI0. -- cgit From 277c59857bc577e8565c920861feb5b6721ac9ae Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: custom DMA channel configuration See https://github.com/embassy-rs/embassy/pull/3923#issuecomment-2888810087 The default configuration of the channel which was done in `start()` is now done in `new()` this allows overriding some settings through the new `get_dma_channel` function. Only ringbuffers support this; `LinkedListTransfer` and `Transfer` do not support that yet. 
--- embassy-stm32/src/dma/gpdma/mod.rs | 5 +++++ embassy-stm32/src/dma/gpdma/ringbuffered.rs | 28 ++++++++++++++++++++++------ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 7c05715ee..752b39416 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -352,6 +352,11 @@ impl AnyChannel { .store(table.transfer_count(), Ordering::Relaxed) } + fn get_dma_channel(&self) -> pac::gpdma::Channel { + let info = self.info(); + info.dma.ch(info.num) + } + fn start(&self) { let info = self.info(); let ch = info.dma.ch(info.num); diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 4532bda57..66c4b74ec 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -77,6 +77,9 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { ]; let table = Table::new(items); + // Apply the default configuration to the channel. + unsafe { channel.configure_linked_list(&table, Default::default()) }; + Self { channel, ringbuf: ReadableDmaRingBuffer::new(buffer), @@ -85,14 +88,19 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { } /// Start the ring buffer operation. - /// - /// You must call this after creating it for it to work. pub fn start(&mut self) { - unsafe { self.channel.configure_linked_list(&self.table, Default::default()) }; self.table.link(RunMode::Circular); self.channel.start(); } + /// Get a handle to the GPDMA channel for configuring the DMA. + /// + /// Usually, **this is not needed** as a default configuration is already + /// applied, but it may be useful to setup trigger sources, etc. + pub fn get_dma_channel(&mut self) -> stm32_metapac::gpdma::Channel { + self.channel.get_dma_channel() + } + /// Clear all data in the ring buffer. 
pub fn clear(&mut self) { self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); @@ -226,6 +234,9 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { ]; let table = Table::new(items); + // Apply the default configuration to the channel. + unsafe { channel.configure_linked_list(&table, Default::default()) }; + let this = Self { channel, ringbuf: WritableDmaRingBuffer::new(buffer), @@ -236,14 +247,19 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { } /// Start the ring buffer operation. - /// - /// You must call this after creating it for it to work. pub fn start(&mut self) { - unsafe { self.channel.configure_linked_list(&self.table, Default::default()) }; self.table.link(RunMode::Circular); self.channel.start(); } + /// Get a handle to the GPDMA channel for configuring the DMA. + /// + /// Usually, **this is not needed** as a default configuration is already + /// applied, but it may be useful to setup trigger sources, etc. + pub fn get_dma_channel(&mut self) -> stm32_metapac::gpdma::Channel { + self.channel.get_dma_channel() + } + /// Clear all data in the ring buffer. 
pub fn clear(&mut self) { self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); -- cgit From 49990691985c8814500bf518bdec446bb85587e3 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: use provided TransferOptions instead of defaults --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 66c4b74ec..c74c7bd2b 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -63,7 +63,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { request: Request, peri_addr: *mut W, buffer: &'a mut [W], - _options: TransferOptions, + options: TransferOptions, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); @@ -78,7 +78,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { let table = Table::new(items); // Apply the default configuration to the channel. - unsafe { channel.configure_linked_list(&table, Default::default()) }; + unsafe { channel.configure_linked_list(&table, options) }; Self { channel, @@ -220,7 +220,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { request: Request, peri_addr: *mut W, buffer: &'a mut [W], - _options: TransferOptions, + options: TransferOptions, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); @@ -235,7 +235,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { let table = Table::new(items); // Apply the default configuration to the channel. - unsafe { channel.configure_linked_list(&table, Default::default()) }; + unsafe { channel.configure_linked_list(&table, options) }; let this = Self { channel, -- cgit From f67365a067634b62747c819253fb461624c29568 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: suspend before reset This follows the procedure outlined in the STM32U5 reference manual at page 696. 
--- embassy-stm32/src/dma/gpdma/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 752b39416..604db2852 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -382,6 +382,9 @@ impl AnyChannel { let info = self.info(); let ch = info.dma.ch(info.num); + self.request_suspend(); + while self.is_running() {} + ch.cr().modify(|w| w.set_reset(true)); } -- cgit From 2f24568de08e846d4bfafff90a5b9ba352d86431 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: custom dma configuration using RegisterUpdaters struct See this PR comment: https://github.com/embassy-rs/embassy/pull/3923#issuecomment-2889283939 --- embassy-stm32/src/dma/gpdma/linked_list.rs | 20 ++++++++++++++++++-- embassy-stm32/src/dma/gpdma/mod.rs | 22 ++++++++++++++++++++++ embassy-stm32/src/dma/gpdma/ringbuffered.rs | 16 +++++++++++----- embassy-stm32/src/sai/mod.rs | 5 +++-- embassy-stm32/src/usart/ringbuffered.rs | 3 ++- 5 files changed, 56 insertions(+), 10 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index a95e5590e..76381def3 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -4,6 +4,7 @@ use stm32_metapac::gpdma::regs; use stm32_metapac::gpdma::vals::Dreq; +use super::RegisterUpdaters; use crate::dma::word::{Word, WordSize}; use crate::dma::{Dir, Request}; @@ -41,7 +42,12 @@ pub struct LinearItem { impl LinearItem { /// Create a new read DMA transfer (peripheral to memory). 
- pub unsafe fn new_read<'d, W: Word>(request: Request, peri_addr: *mut W, buf: &'d mut [W]) -> Self { + pub unsafe fn new_read<'d, W: Word>( + request: Request, + peri_addr: *mut W, + buf: &'d mut [W], + register_updaters: &RegisterUpdaters, + ) -> Self { Self::new_inner( request, Dir::PeripheralToMemory, @@ -51,11 +57,17 @@ impl LinearItem { true, W::size(), W::size(), + register_updaters, ) } /// Create a new write DMA transfer (memory to peripheral). - pub unsafe fn new_write<'d, MW: Word, PW: Word>(request: Request, buf: &'d [MW], peri_addr: *mut PW) -> Self { + pub unsafe fn new_write<'d, MW: Word, PW: Word>( + request: Request, + buf: &'d [MW], + peri_addr: *mut PW, + register_updaters: &RegisterUpdaters, + ) -> Self { Self::new_inner( request, Dir::MemoryToPeripheral, @@ -65,6 +77,7 @@ impl LinearItem { true, MW::size(), PW::size(), + register_updaters, ) } @@ -77,6 +90,7 @@ impl LinearItem { incr_mem: bool, data_size: WordSize, dst_size: WordSize, + register_updaters: &RegisterUpdaters, ) -> Self { // BNDT is specified as bytes, not as number of transfers. let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { @@ -91,6 +105,7 @@ impl LinearItem { tr1.set_ddw(dst_size.into()); tr1.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); tr1.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); + (register_updaters.tr1)(&mut tr1); let mut tr2 = regs::ChTr2(0); tr2.set_dreq(match dir { @@ -98,6 +113,7 @@ impl LinearItem { Dir::PeripheralToMemory => Dreq::SOURCE_PERIPHERAL, }); tr2.set_reqsel(request); + (register_updaters.tr2)(&mut tr2); let (sar, dar) = match dir { Dir::MemoryToPeripheral => (mem_addr as _, peri_addr as _), diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 604db2852..58f93ffb0 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -73,6 +73,28 @@ impl Default for TransferOptions { } } +/// GPDMA linked-list item register updater functions. 
+#[derive(Debug, Copy, Clone)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct RegisterUpdaters { + /// Function used to overwrite transfer register 1. + pub tr1: fn(&mut pac::gpdma::regs::ChTr1), + /// Function used to overwrite transfer register 2. + pub tr2: fn(&mut pac::gpdma::regs::ChTr2), + /// Function used to overwrite transfer register 3. + pub tr3: fn(&mut pac::gpdma::regs::ChTr3), +} + +impl Default for RegisterUpdaters { + fn default() -> Self { + Self { + tr1: |_| {}, + tr2: |_| {}, + tr3: |_| {}, + } + } +} + impl From for vals::Dw { fn from(raw: WordSize) -> Self { match raw { diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index c74c7bd2b..dfc031627 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -8,7 +8,7 @@ use core::task::Waker; use embassy_hal_internal::Peri; -use super::{AnyChannel, TransferOptions, STATE}; +use super::{AnyChannel, RegisterUpdaters, TransferOptions, STATE}; use crate::dma::gpdma::linked_list::{LinearItem, RunMode, Table}; use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}; use crate::dma::word::Word; @@ -52,6 +52,7 @@ pub struct ReadableRingBuffer<'a, W: Word> { channel: Peri<'a, AnyChannel>, ringbuf: ReadableDmaRingBuffer<'a, W>, table: Table<2>, + options: TransferOptions, } impl<'a, W: Word> ReadableRingBuffer<'a, W> { @@ -64,6 +65,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { peri_addr: *mut W, buffer: &'a mut [W], options: TransferOptions, + register_updaters: RegisterUpdaters, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); @@ -72,8 +74,8 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { assert_eq!(half_len * 2, buffer.len()); let items = [ - LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]), - LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]), + LinearItem::new_read(request, peri_addr, &mut 
buffer[..half_len], ®ister_updaters), + LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], ®ister_updaters), ]; let table = Table::new(items); @@ -84,6 +86,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { channel, ringbuf: ReadableDmaRingBuffer::new(buffer), table, + options, } } @@ -211,6 +214,7 @@ pub struct WritableRingBuffer<'a, W: Word> { channel: Peri<'a, AnyChannel>, ringbuf: WritableDmaRingBuffer<'a, W>, table: Table<2>, + options: TransferOptions, } impl<'a, W: Word> WritableRingBuffer<'a, W> { @@ -221,6 +225,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { peri_addr: *mut W, buffer: &'a mut [W], options: TransferOptions, + register_updaters: RegisterUpdaters, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); @@ -229,8 +234,8 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { assert_eq!(half_len * 2, buffer.len()); let items = [ - LinearItem::new_write(request, &mut buffer[..half_len], peri_addr), - LinearItem::new_write(request, &mut buffer[half_len..], peri_addr), + LinearItem::new_write(request, &mut buffer[..half_len], peri_addr, ®ister_updaters), + LinearItem::new_write(request, &mut buffer[half_len..], peri_addr, ®ister_updaters), ]; let table = Table::new(items); @@ -241,6 +246,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { channel, ringbuf: WritableDmaRingBuffer::new(buffer), table, + options, }; this diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs index 88cc225dd..ac1ab2505 100644 --- a/embassy-stm32/src/sai/mod.rs +++ b/embassy-stm32/src/sai/mod.rs @@ -687,12 +687,13 @@ fn get_ring_buffer<'d, T: Instance, W: word::Word>( //the new_write() and new_read() always use circular mode ..Default::default() }; + let updaters = Default::default(); match tx_rx { TxRx::Transmitter => RingBuffer::Writable(unsafe { - WritableRingBuffer::new(dma, request, dr(T::REGS, sub_block), dma_buf, opts) + WritableRingBuffer::new(dma, request, dr(T::REGS, sub_block), dma_buf, opts, updaters) }), TxRx::Receiver => 
RingBuffer::Readable(unsafe { - ReadableRingBuffer::new(dma, request, dr(T::REGS, sub_block), dma_buf, opts) + ReadableRingBuffer::new(dma, request, dr(T::REGS, sub_block), dma_buf, opts, updaters) }), } } diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index bea56c991..78bf4b72f 100644 --- a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs @@ -103,6 +103,7 @@ impl<'d> UartRx<'d, Async> { assert!(!dma_buf.is_empty() && dma_buf.len() <= 0xFFFF); let opts = Default::default(); + let updaters = Default::default(); // Safety: we forget the struct before this function returns. let rx_dma = self.rx_dma.as_mut().unwrap(); @@ -112,7 +113,7 @@ impl<'d> UartRx<'d, Async> { let info = self.info; let state = self.state; let kernel_clock = self.kernel_clock; - let ring_buf = unsafe { ReadableRingBuffer::new(rx_dma, request, rdr(info.regs), dma_buf, opts) }; + let ring_buf = unsafe { ReadableRingBuffer::new(rx_dma, request, rdr(info.regs), dma_buf, opts, updaters) }; let rx = unsafe { self.rx.as_ref().map(|x| x.clone_unchecked()) }; let rts = unsafe { self.rts.as_ref().map(|x| x.clone_unchecked()) }; -- cgit From a2daa9739f375301425a4581601b65470ba5f459 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: removed functions exposing channel registers These functions could be used to cause UB. 
--- embassy-stm32/src/dma/gpdma/mod.rs | 5 ----- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 16 ---------------- 2 files changed, 21 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 58f93ffb0..4893fed94 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -374,11 +374,6 @@ impl AnyChannel { .store(table.transfer_count(), Ordering::Relaxed) } - fn get_dma_channel(&self) -> pac::gpdma::Channel { - let info = self.info(); - info.dma.ch(info.num) - } - fn start(&self) { let info = self.info(); let ch = info.dma.ch(info.num); diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index dfc031627..3a7ceb292 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -96,14 +96,6 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { self.channel.start(); } - /// Get a handle to the GPDMA channel for configuring the DMA. - /// - /// Usually, **this is not needed** as a default configuration is already - /// applied, but it may be useful to setup trigger sources, etc. - pub fn get_dma_channel(&mut self) -> stm32_metapac::gpdma::Channel { - self.channel.get_dma_channel() - } - /// Clear all data in the ring buffer. pub fn clear(&mut self) { self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); @@ -258,14 +250,6 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { self.channel.start(); } - /// Get a handle to the GPDMA channel for configuring the DMA. - /// - /// Usually, **this is not needed** as a default configuration is already - /// applied, but it may be useful to setup trigger sources, etc. - pub fn get_dma_channel(&mut self) -> stm32_metapac::gpdma::Channel { - self.channel.get_dma_channel() - } - /// Clear all data in the ring buffer. 
pub fn clear(&mut self) { self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); -- cgit From 4291a092bedb0f45d236a1847a9b85fd093d3af9 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: moved channel configuration from new() to start() See this PR comment explaining why configuration in `new()` is a bad idea: https://github.com/embassy-rs/embassy/pull/3923#issuecomment-2889193736 --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 3a7ceb292..359bc83b3 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -79,9 +79,6 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { ]; let table = Table::new(items); - // Apply the default configuration to the channel. - unsafe { channel.configure_linked_list(&table, options) }; - Self { channel, ringbuf: ReadableDmaRingBuffer::new(buffer), @@ -92,6 +89,8 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// Start the ring buffer operation. pub fn start(&mut self) { + // Apply the default configuration to the channel. + unsafe { self.channel.configure_linked_list(&self.table, self.options) }; self.table.link(RunMode::Circular); self.channel.start(); } @@ -231,9 +230,6 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { ]; let table = Table::new(items); - // Apply the default configuration to the channel. - unsafe { channel.configure_linked_list(&table, options) }; - let this = Self { channel, ringbuf: WritableDmaRingBuffer::new(buffer), @@ -246,6 +242,8 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Start the ring buffer operation. pub fn start(&mut self) { + // Apply the default configuration to the channel. 
+ unsafe { self.channel.configure_linked_list(&self.table, self.options) }; self.table.link(RunMode::Circular); self.channel.start(); } -- cgit From 50e2e2ec60ca32a2da53b91f4a30c3a71d4e9f30 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: add new_with_table() initializer for ring-buffers and removal of RegisterUpdaters - It is now possible to pass a linked-list table to the ring-buffer with the `new_with_table()` function or use the `new()` function for a basic ring-buffer setup. - A `simple_ring_buffer_table()` function was added to the read and write ring-buffers to generate the same table as the one created by `new()` in case the user only wants to customize the default table options. - RegisterUpdaters have been removed as the user now has direct access to the table and its items if needed. See: https://github.com/elagil/embassy/pull/1#issuecomment-2891997294 --- embassy-stm32/src/dma/gpdma/linked_list.rs | 21 +----- embassy-stm32/src/dma/gpdma/mod.rs | 22 ------ embassy-stm32/src/dma/gpdma/ringbuffered.rs | 112 ++++++++++++++++++++-------- embassy-stm32/src/sai/mod.rs | 9 +-- embassy-stm32/src/usart/ringbuffered.rs | 5 +- 5 files changed, 90 insertions(+), 79 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index 76381def3..ca2d4fb7f 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -4,7 +4,6 @@ use stm32_metapac::gpdma::regs; use stm32_metapac::gpdma::vals::Dreq; -use super::RegisterUpdaters; use crate::dma::word::{Word, WordSize}; use crate::dma::{Dir, Request}; @@ -23,7 +22,6 @@ pub enum RunMode { /// /// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities. #[derive(Debug, Copy, Clone, Default)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] #[repr(C)] pub struct LinearItem { /// Transfer register 1. 
@@ -42,12 +40,7 @@ pub struct LinearItem { impl LinearItem { /// Create a new read DMA transfer (peripheral to memory). - pub unsafe fn new_read<'d, W: Word>( - request: Request, - peri_addr: *mut W, - buf: &'d mut [W], - register_updaters: &RegisterUpdaters, - ) -> Self { + pub unsafe fn new_read<'d, W: Word>(request: Request, peri_addr: *mut W, buf: &'d mut [W]) -> Self { Self::new_inner( request, Dir::PeripheralToMemory, @@ -57,17 +50,11 @@ impl LinearItem { true, W::size(), W::size(), - register_updaters, ) } /// Create a new write DMA transfer (memory to peripheral). - pub unsafe fn new_write<'d, MW: Word, PW: Word>( - request: Request, - buf: &'d [MW], - peri_addr: *mut PW, - register_updaters: &RegisterUpdaters, - ) -> Self { + pub unsafe fn new_write<'d, MW: Word, PW: Word>(request: Request, buf: &'d [MW], peri_addr: *mut PW) -> Self { Self::new_inner( request, Dir::MemoryToPeripheral, @@ -77,7 +64,6 @@ impl LinearItem { true, MW::size(), PW::size(), - register_updaters, ) } @@ -90,7 +76,6 @@ impl LinearItem { incr_mem: bool, data_size: WordSize, dst_size: WordSize, - register_updaters: &RegisterUpdaters, ) -> Self { // BNDT is specified as bytes, not as number of transfers. 
let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { @@ -105,7 +90,6 @@ impl LinearItem { tr1.set_ddw(dst_size.into()); tr1.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); tr1.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); - (register_updaters.tr1)(&mut tr1); let mut tr2 = regs::ChTr2(0); tr2.set_dreq(match dir { @@ -113,7 +97,6 @@ impl LinearItem { Dir::PeripheralToMemory => Dreq::SOURCE_PERIPHERAL, }); tr2.set_reqsel(request); - (register_updaters.tr2)(&mut tr2); let (sar, dar) = match dir { Dir::MemoryToPeripheral => (mem_addr as _, peri_addr as _), diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 4893fed94..074447148 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -73,28 +73,6 @@ impl Default for TransferOptions { } } -/// GPDMA linked-list item register updater functions. -#[derive(Debug, Copy, Clone)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -pub struct RegisterUpdaters { - /// Function used to overwrite transfer register 1. - pub tr1: fn(&mut pac::gpdma::regs::ChTr1), - /// Function used to overwrite transfer register 2. - pub tr2: fn(&mut pac::gpdma::regs::ChTr2), - /// Function used to overwrite transfer register 3. 
- pub tr3: fn(&mut pac::gpdma::regs::ChTr3), -} - -impl Default for RegisterUpdaters { - fn default() -> Self { - Self { - tr1: |_| {}, - tr2: |_| {}, - tr3: |_| {}, - } - } -} - impl From for vals::Dw { fn from(raw: WordSize) -> Self { match raw { diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 359bc83b3..d7451285e 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -8,7 +8,7 @@ use core::task::Waker; use embassy_hal_internal::Peri; -use super::{AnyChannel, RegisterUpdaters, TransferOptions, STATE}; +use super::{AnyChannel, TransferOptions, STATE}; use crate::dma::gpdma::linked_list::{LinearItem, RunMode, Table}; use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}; use crate::dma::word::Word; @@ -48,14 +48,14 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> { } /// Ringbuffer for receiving data using GPDMA linked-list mode. -pub struct ReadableRingBuffer<'a, W: Word> { +pub struct ReadableRingBuffer<'a, W: Word, const L: usize> { channel: Peri<'a, AnyChannel>, ringbuf: ReadableDmaRingBuffer<'a, W>, - table: Table<2>, + table: Table, options: TransferOptions, } -impl<'a, W: Word> ReadableRingBuffer<'a, W> { +impl<'a, W: Word> ReadableRingBuffer<'a, W, 2> { /// Create a new ring buffer. /// /// Transfer options are applied to the individual linked list items. @@ -65,19 +65,30 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { peri_addr: *mut W, buffer: &'a mut [W], options: TransferOptions, - register_updaters: RegisterUpdaters, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); + let table = Self::simple_ring_buffer_table(request, peri_addr, buffer); - // Buffer halves should be the same length. 
- let half_len = buffer.len() / 2; - assert_eq!(half_len * 2, buffer.len()); + Self { + channel, + ringbuf: ReadableDmaRingBuffer::new(buffer), + table, + options, + } + } +} - let items = [ - LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], ®ister_updaters), - LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], ®ister_updaters), - ]; - let table = Table::new(items); +impl<'a, W: Word, const L: usize> ReadableRingBuffer<'a, W, L> { + /// Create a new ring buffer with a provided linked-list table. + /// + /// Transfer options are applied to the individual linked list items. + pub fn new_with_table( + channel: Peri<'a, impl Channel>, + buffer: &'a mut [W], + options: TransferOptions, + table: Table, + ) -> Self { + let channel: Peri<'a, AnyChannel> = channel.into(); Self { channel, @@ -87,6 +98,21 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { } } + /// Create a new simple linked-list table. + /// + /// This uses two linked-list items, one for each half of the buffer. + pub unsafe fn simple_ring_buffer_table(request: Request, peri_addr: *mut W, buffer: &mut [W]) -> Table<2> { + // Buffer halves should be the same length. + let half_len = buffer.len() / 2; + assert_eq!(half_len * 2, buffer.len()); + + let items = [ + LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]), + LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]), + ]; + Table::new(items) + } + /// Start the ring buffer operation. pub fn start(&mut self) { // Apply the default configuration to the channel. @@ -190,7 +216,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { } } -impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { +impl<'a, W: Word, const L: usize> Drop for ReadableRingBuffer<'a, W, L> { fn drop(&mut self) { self.request_suspend(); while self.is_running() {} @@ -201,43 +227,69 @@ impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { } /// Ringbuffer for writing data using GPDMA linked-list mode. 
-pub struct WritableRingBuffer<'a, W: Word> { +pub struct WritableRingBuffer<'a, W: Word, const L: usize> { channel: Peri<'a, AnyChannel>, ringbuf: WritableDmaRingBuffer<'a, W>, - table: Table<2>, + table: Table, options: TransferOptions, } -impl<'a, W: Word> WritableRingBuffer<'a, W> { +impl<'a, W: Word> WritableRingBuffer<'a, W, 2> { /// Create a new ring buffer. + /// + /// Transfer options are applied to the individual linked list items. pub unsafe fn new( channel: Peri<'a, impl Channel>, request: Request, peri_addr: *mut W, buffer: &'a mut [W], options: TransferOptions, - register_updaters: RegisterUpdaters, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); + let table = Self::simple_ring_buffer_table(request, peri_addr, buffer); - // Buffer halves should be the same length. - let half_len = buffer.len() / 2; - assert_eq!(half_len * 2, buffer.len()); + Self { + channel, + ringbuf: WritableDmaRingBuffer::new(buffer), + table, + options, + } + } +} - let items = [ - LinearItem::new_write(request, &mut buffer[..half_len], peri_addr, ®ister_updaters), - LinearItem::new_write(request, &mut buffer[half_len..], peri_addr, ®ister_updaters), - ]; - let table = Table::new(items); +impl<'a, W: Word, const L: usize> WritableRingBuffer<'a, W, L> { + /// Create a new ring buffer with a provided linked-list table. + /// + /// Transfer options are applied to the individual linked list items. + pub fn new_with_table( + channel: Peri<'a, impl Channel>, + buffer: &'a mut [W], + options: TransferOptions, + table: Table, + ) -> Self { + let channel: Peri<'a, AnyChannel> = channel.into(); - let this = Self { + Self { channel, ringbuf: WritableDmaRingBuffer::new(buffer), table, options, - }; + } + } - this + /// Create a new simple linked-list table. + /// + /// This uses two linked-list items, one for each half of the buffer. 
+ pub unsafe fn simple_ring_buffer_table(request: Request, peri_addr: *mut W, buffer: &mut [W]) -> Table<2> { + // Buffer halves should be the same length. + let half_len = buffer.len() / 2; + assert_eq!(half_len * 2, buffer.len()); + + let items = [ + LinearItem::new_write(request, &mut buffer[..half_len], peri_addr), + LinearItem::new_write(request, &mut buffer[half_len..], peri_addr), + ]; + Table::new(items) } /// Start the ring buffer operation. @@ -343,7 +395,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { } } -impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { +impl<'a, W: Word, const L: usize> Drop for WritableRingBuffer<'a, W, L> { fn drop(&mut self) { self.request_suspend(); while self.is_running() {} diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs index ac1ab2505..410b2243c 100644 --- a/embassy-stm32/src/sai/mod.rs +++ b/embassy-stm32/src/sai/mod.rs @@ -650,8 +650,8 @@ impl Config { } enum RingBuffer<'d, W: word::Word> { - Writable(WritableRingBuffer<'d, W>), - Readable(ReadableRingBuffer<'d, W>), + Writable(WritableRingBuffer<'d, W, 2>), + Readable(ReadableRingBuffer<'d, W, 2>), } fn dr(w: crate::pac::sai::Sai, sub_block: WhichSubBlock) -> *mut W { @@ -687,13 +687,12 @@ fn get_ring_buffer<'d, T: Instance, W: word::Word>( //the new_write() and new_read() always use circular mode ..Default::default() }; - let updaters = Default::default(); match tx_rx { TxRx::Transmitter => RingBuffer::Writable(unsafe { - WritableRingBuffer::new(dma, request, dr(T::REGS, sub_block), dma_buf, opts, updaters) + WritableRingBuffer::new(dma, request, dr(T::REGS, sub_block), dma_buf, opts) }), TxRx::Receiver => RingBuffer::Readable(unsafe { - ReadableRingBuffer::new(dma, request, dr(T::REGS, sub_block), dma_buf, opts, updaters) + ReadableRingBuffer::new(dma, request, dr(T::REGS, sub_block), dma_buf, opts) }), } } diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index 78bf4b72f..8a607a31a 100644 --- 
a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs @@ -83,7 +83,7 @@ pub struct RingBufferedUartRx<'d> { kernel_clock: Hertz, rx: Option>, rts: Option>, - ring_buf: ReadableRingBuffer<'d, u8>, + ring_buf: ReadableRingBuffer<'d, u8, 2>, } impl<'d> SetConfig for RingBufferedUartRx<'d> { @@ -103,7 +103,6 @@ impl<'d> UartRx<'d, Async> { assert!(!dma_buf.is_empty() && dma_buf.len() <= 0xFFFF); let opts = Default::default(); - let updaters = Default::default(); // Safety: we forget the struct before this function returns. let rx_dma = self.rx_dma.as_mut().unwrap(); @@ -113,7 +112,7 @@ impl<'d> UartRx<'d, Async> { let info = self.info; let state = self.state; let kernel_clock = self.kernel_clock; - let ring_buf = unsafe { ReadableRingBuffer::new(rx_dma, request, rdr(info.regs), dma_buf, opts, updaters) }; + let ring_buf = unsafe { ReadableRingBuffer::new(rx_dma, request, rdr(info.regs), dma_buf, opts) }; let rx = unsafe { self.rx.as_ref().map(|x| x.clone_unchecked()) }; let rts = unsafe { self.rts.as_ref().map(|x| x.clone_unchecked()) }; -- cgit From 47bb14514f63a713600d7fa1c6cec2cbd1493591 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: use register wrappers instead of u32 for LinearItem Since the register structs are no-field structs with `repr(transparent)`, we can use them in the LinearItem with `repr(C)`. This allows the user to call the convenient named setter functions for the registers instead of manually changing the bits of the u32. 
--- embassy-stm32/src/dma/gpdma/linked_list.rs | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index ca2d4fb7f..f494bd5f5 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -25,17 +25,17 @@ pub enum RunMode { #[repr(C)] pub struct LinearItem { /// Transfer register 1. - pub tr1: u32, + pub tr1: regs::ChTr1, /// Transfer register 2. - pub tr2: u32, + pub tr2: regs::ChTr2, /// Block register 2. - pub br1: u32, + pub br1: regs::ChBr1, /// Source address register. pub sar: u32, /// Destination address register. pub dar: u32, /// Linked-list address register. - pub llr: u32, + pub llr: regs::ChLlr, } impl LinearItem { @@ -106,12 +106,12 @@ impl LinearItem { let llr = regs::ChLlr(0); Self { - tr1: tr1.0, - tr2: tr2.0, - br1: br1.0, + tr1, + tr2, + br1, sar, dar, - llr: llr.0, + llr, } } @@ -131,23 +131,20 @@ impl LinearItem { // Lower two bits are ignored: 32 bit aligned. llr.set_la(next >> 2); - self.llr = llr.0; + self.llr = llr; } /// Unlink the next linear item. /// /// Disables channel update bits. fn unlink(&mut self) { - self.llr = regs::ChLlr(0).0; + self.llr = regs::ChLlr(0); } /// The item's transfer count in number of words. 
fn transfer_count(&self) -> usize { - let br1 = regs::ChBr1(self.br1); - let tr1 = regs::ChTr1(self.tr1); - let word_size: WordSize = tr1.ddw().into(); - - br1.bndt() as usize / word_size.bytes() + let word_size: WordSize = self.tr1.ddw().into(); + self.br1.bndt() as usize / word_size.bytes() } } -- cgit From 51e7fafc3c9f5dfd432a7b4d112cd8e54092b9ef Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: removed unnecessary mut reference --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index d7451285e..c98f2bb80 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -280,14 +280,14 @@ impl<'a, W: Word, const L: usize> WritableRingBuffer<'a, W, L> { /// Create a new simple linked-list table. /// /// This uses two linked-list items, one for each half of the buffer. - pub unsafe fn simple_ring_buffer_table(request: Request, peri_addr: *mut W, buffer: &mut [W]) -> Table<2> { + pub unsafe fn simple_ring_buffer_table(request: Request, peri_addr: *mut W, buffer: &[W]) -> Table<2> { // Buffer halves should be the same length. 
let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); let items = [ - LinearItem::new_write(request, &mut buffer[..half_len], peri_addr), - LinearItem::new_write(request, &mut buffer[half_len..], peri_addr), + LinearItem::new_write(request, &buffer[..half_len], peri_addr), + LinearItem::new_write(request, &buffer[half_len..], peri_addr), ]; Table::new(items) } -- cgit From d3718c6d4e0a8485cdef8ecf6deb05c3eff5af08 Mon Sep 17 00:00:00 2001 From: etiennecollin Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: renamed simple table as per ST nomenclature Co-authored-by: elagil --- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index c98f2bb80..136eca1c3 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -67,7 +67,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W, 2> { options: TransferOptions, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); - let table = Self::simple_ring_buffer_table(request, peri_addr, buffer); + let table = Self::new_ping_pong_table(request, peri_addr, buffer); Self { channel, @@ -101,7 +101,7 @@ impl<'a, W: Word, const L: usize> ReadableRingBuffer<'a, W, L> { /// Create a new simple linked-list table. /// /// This uses two linked-list items, one for each half of the buffer. - pub unsafe fn simple_ring_buffer_table(request: Request, peri_addr: *mut W, buffer: &mut [W]) -> Table<2> { + pub unsafe fn new_ping_pong_table(request: Request, peri_addr: *mut W, buffer: &mut [W]) -> Table<2> { // Buffer halves should be the same length. 
let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); @@ -246,7 +246,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W, 2> { options: TransferOptions, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); - let table = Self::simple_ring_buffer_table(request, peri_addr, buffer); + let table = Self::new_ping_pong_table(request, peri_addr, buffer); Self { channel, @@ -280,7 +280,7 @@ impl<'a, W: Word, const L: usize> WritableRingBuffer<'a, W, L> { /// Create a new simple linked-list table. /// /// This uses two linked-list items, one for each half of the buffer. - pub unsafe fn simple_ring_buffer_table(request: Request, peri_addr: *mut W, buffer: &[W]) -> Table<2> { + pub unsafe fn new_ping_pong_table(request: Request, peri_addr: *mut W, buffer: &[W]) -> Table<2> { // Buffer halves should be the same length. let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); -- cgit From e9783ee28e9bdd89ffaeffb24bbff207c1ceb837 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: build --- embassy-stm32/src/dma/gpdma/linked_list.rs | 15 +++++ embassy-stm32/src/dma/gpdma/ringbuffered.rs | 100 ++++------------------------ embassy-stm32/src/sai/mod.rs | 4 +- embassy-stm32/src/ucpd.rs | 4 +- embassy-stm32/src/usart/ringbuffered.rs | 4 +- 5 files changed, 35 insertions(+), 92 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index f494bd5f5..627da0055 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -163,6 +163,21 @@ impl Table { Self { items } } + /// Create a ping-pong linked-list table. + /// + /// This uses two linked-list items, one for each half of the buffer. + pub unsafe fn new_ping_pong(request: Request, peri_addr: *mut W, buffer: &mut [W]) -> Table<2> { + // Buffer halves should be the same length. 
+ let half_len = buffer.len() / 2; + assert_eq!(half_len * 2, buffer.len()); + + let items = [ + LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]), + LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]), + ]; + Table::new(items) + } + /// Link the table as given by the run mode. pub fn link(&mut self, run_mode: RunMode) { if matches!(run_mode, RunMode::Once | RunMode::Circular) { diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 136eca1c3..2f17a0587 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -9,7 +9,7 @@ use core::task::Waker; use embassy_hal_internal::Peri; use super::{AnyChannel, TransferOptions, STATE}; -use crate::dma::gpdma::linked_list::{LinearItem, RunMode, Table}; +use crate::dma::gpdma::linked_list::{RunMode, Table}; use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}; use crate::dma::word::Word; use crate::dma::{Channel, Request}; @@ -48,14 +48,14 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> { } /// Ringbuffer for receiving data using GPDMA linked-list mode. -pub struct ReadableRingBuffer<'a, W: Word, const L: usize> { +pub struct ReadableRingBuffer<'a, W: Word> { channel: Peri<'a, AnyChannel>, ringbuf: ReadableDmaRingBuffer<'a, W>, - table: Table, + table: Table<2>, options: TransferOptions, } -impl<'a, W: Word> ReadableRingBuffer<'a, W, 2> { +impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// Create a new ring buffer. /// /// Transfer options are applied to the individual linked list items. 
@@ -67,28 +67,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W, 2> { options: TransferOptions, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); - let table = Self::new_ping_pong_table(request, peri_addr, buffer); - - Self { - channel, - ringbuf: ReadableDmaRingBuffer::new(buffer), - table, - options, - } - } -} - -impl<'a, W: Word, const L: usize> ReadableRingBuffer<'a, W, L> { - /// Create a new ring buffer with a provided linked-list table. - /// - /// Transfer options are applied to the individual linked list items. - pub fn new_with_table( - channel: Peri<'a, impl Channel>, - buffer: &'a mut [W], - options: TransferOptions, - table: Table, - ) -> Self { - let channel: Peri<'a, AnyChannel> = channel.into(); + let table = Table::<2>::new_ping_pong::(request, peri_addr, buffer); Self { channel, @@ -98,21 +77,6 @@ impl<'a, W: Word, const L: usize> ReadableRingBuffer<'a, W, L> { } } - /// Create a new simple linked-list table. - /// - /// This uses two linked-list items, one for each half of the buffer. - pub unsafe fn new_ping_pong_table(request: Request, peri_addr: *mut W, buffer: &mut [W]) -> Table<2> { - // Buffer halves should be the same length. - let half_len = buffer.len() / 2; - assert_eq!(half_len * 2, buffer.len()); - - let items = [ - LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]), - LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]), - ]; - Table::new(items) - } - /// Start the ring buffer operation. pub fn start(&mut self) { // Apply the default configuration to the channel. @@ -172,7 +136,7 @@ impl<'a, W: Word, const L: usize> ReadableRingBuffer<'a, W, L> { /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 
- pub fn request_suspend(&mut self) { + pub fn request_pause(&mut self) { self.channel.request_suspend() } @@ -216,9 +180,9 @@ impl<'a, W: Word, const L: usize> ReadableRingBuffer<'a, W, L> { } } -impl<'a, W: Word, const L: usize> Drop for ReadableRingBuffer<'a, W, L> { +impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { fn drop(&mut self) { - self.request_suspend(); + self.request_pause(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." @@ -227,14 +191,14 @@ impl<'a, W: Word, const L: usize> Drop for ReadableRingBuffer<'a, W, L> { } /// Ringbuffer for writing data using GPDMA linked-list mode. -pub struct WritableRingBuffer<'a, W: Word, const L: usize> { +pub struct WritableRingBuffer<'a, W: Word> { channel: Peri<'a, AnyChannel>, ringbuf: WritableDmaRingBuffer<'a, W>, - table: Table, + table: Table<2>, options: TransferOptions, } -impl<'a, W: Word> WritableRingBuffer<'a, W, 2> { +impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Create a new ring buffer. /// /// Transfer options are applied to the individual linked list items. @@ -246,7 +210,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W, 2> { options: TransferOptions, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); - let table = Self::new_ping_pong_table(request, peri_addr, buffer); + let table = Table::<2>::new_ping_pong::(request, peri_addr, buffer); Self { channel, @@ -255,42 +219,6 @@ impl<'a, W: Word> WritableRingBuffer<'a, W, 2> { options, } } -} - -impl<'a, W: Word, const L: usize> WritableRingBuffer<'a, W, L> { - /// Create a new ring buffer with a provided linked-list table. - /// - /// Transfer options are applied to the individual linked list items. 
- pub fn new_with_table( - channel: Peri<'a, impl Channel>, - buffer: &'a mut [W], - options: TransferOptions, - table: Table, - ) -> Self { - let channel: Peri<'a, AnyChannel> = channel.into(); - - Self { - channel, - ringbuf: WritableDmaRingBuffer::new(buffer), - table, - options, - } - } - - /// Create a new simple linked-list table. - /// - /// This uses two linked-list items, one for each half of the buffer. - pub unsafe fn new_ping_pong_table(request: Request, peri_addr: *mut W, buffer: &[W]) -> Table<2> { - // Buffer halves should be the same length. - let half_len = buffer.len() / 2; - assert_eq!(half_len * 2, buffer.len()); - - let items = [ - LinearItem::new_write(request, &buffer[..half_len], peri_addr), - LinearItem::new_write(request, &buffer[half_len..], peri_addr), - ]; - Table::new(items) - } /// Start the ring buffer operation. pub fn start(&mut self) { @@ -351,7 +279,7 @@ impl<'a, W: Word, const L: usize> WritableRingBuffer<'a, W, L> { /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 
- pub fn request_suspend(&mut self) { + pub fn request_pause(&mut self) { self.channel.request_suspend() } @@ -395,7 +323,7 @@ impl<'a, W: Word, const L: usize> WritableRingBuffer<'a, W, L> { } } -impl<'a, W: Word, const L: usize> Drop for WritableRingBuffer<'a, W, L> { +impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { fn drop(&mut self) { self.request_suspend(); while self.is_running() {} diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs index 410b2243c..88cc225dd 100644 --- a/embassy-stm32/src/sai/mod.rs +++ b/embassy-stm32/src/sai/mod.rs @@ -650,8 +650,8 @@ impl Config { } enum RingBuffer<'d, W: word::Word> { - Writable(WritableRingBuffer<'d, W, 2>), - Readable(ReadableRingBuffer<'d, W, 2>), + Writable(WritableRingBuffer<'d, W>), + Readable(ReadableRingBuffer<'d, W>), } fn dr(w: crate::pac::sai::Sai, sub_block: WhichSubBlock) -> *mut W { diff --git a/embassy-stm32/src/ucpd.rs b/embassy-stm32/src/ucpd.rs index 967e43a8a..18aff4fbd 100644 --- a/embassy-stm32/src/ucpd.rs +++ b/embassy-stm32/src/ucpd.rs @@ -490,14 +490,14 @@ impl<'d, T: Instance> PdPhy<'d, T> { let sr = r.sr().read(); if sr.rxhrstdet() { - dma.request_suspend(); + dma.request_pause(); // Clean and re-enable hard reset receive interrupt. r.icr().write(|w| w.set_rxhrstdetcf(true)); r.imr().modify(|w| w.set_rxhrstdetie(true)); Poll::Ready(Err(RxError::HardReset)) } else if sr.rxmsgend() { - dma.request_suspend(); + dma.request_pause(); // Should be read immediately on interrupt. 
rxpaysz = r.rx_payszr().read().rxpaysz().into(); diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index 8a607a31a..5f4e87834 100644 --- a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs @@ -83,7 +83,7 @@ pub struct RingBufferedUartRx<'d> { kernel_clock: Hertz, rx: Option>, rts: Option>, - ring_buf: ReadableRingBuffer<'d, u8, 2>, + ring_buf: ReadableRingBuffer<'d, u8>, } impl<'d> SetConfig for RingBufferedUartRx<'d> { @@ -165,7 +165,7 @@ impl<'d> RingBufferedUartRx<'d> { /// Stop DMA backed UART receiver fn stop_uart(&mut self) { - self.ring_buf.request_suspend(); + self.ring_buf.request_pause(); let r = self.info.regs; // clear all interrupts and DMA Rx Request -- cgit From db7828538f43d4ebf39ca4291057bd67876bbfb3 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: fix: consolidate naming --- embassy-stm32/src/dma/gpdma/mod.rs | 20 ++++++++++---------- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 10 +++++----- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 074447148..7b317d4bb 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -359,7 +359,7 @@ impl AnyChannel { ch.cr().modify(|w| w.set_en(true)); } - fn request_suspend(&self) { + fn request_pause(&self) { let info = self.info(); let ch = info.dma.ch(info.num); @@ -377,7 +377,7 @@ impl AnyChannel { let info = self.info(); let ch = info.dma.ch(info.num); - self.request_suspend(); + self.request_pause(); while self.is_running() {} ch.cr().modify(|w| w.set_reset(true)); @@ -436,8 +436,8 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 
- pub fn request_suspend(&mut self) { - self.channel.request_suspend() + pub fn request_pause(&mut self) { + self.channel.request_pause() } /// Request the transfer to resume after being suspended. @@ -448,7 +448,7 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { /// Request the DMA to reset. /// /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_suspend`](Self::request_suspend) instead. + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. pub fn request_reset(&mut self) { self.channel.request_reset() } @@ -480,7 +480,7 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> { fn drop(&mut self) { - self.request_suspend(); + self.request_pause(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." @@ -634,8 +634,8 @@ impl<'a> Transfer<'a> { /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_suspend(&mut self) { - self.channel.request_suspend() + pub fn request_pause(&mut self) { + self.channel.request_pause() } /// Request the transfer to resume after being suspended. @@ -646,7 +646,7 @@ impl<'a> Transfer<'a> { /// Request the DMA to reset. /// /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_suspend`](Self::request_suspend) instead. + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. 
pub fn request_reset(&mut self) { self.channel.request_reset() } @@ -678,7 +678,7 @@ impl<'a> Transfer<'a> { impl<'a> Drop for Transfer<'a> { fn drop(&mut self) { - self.request_suspend(); + self.request_pause(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 2f17a0587..0f1c42a8b 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -137,7 +137,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. pub fn request_pause(&mut self) { - self.channel.request_suspend() + self.channel.request_pause() } /// Request the DMA to resume transfers after being suspended. @@ -148,7 +148,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// Request the DMA to reset. /// /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_suspend`](Self::request_suspend) instead. + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. pub fn request_reset(&mut self) { self.channel.request_reset() } @@ -280,7 +280,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. pub fn request_pause(&mut self) { - self.channel.request_suspend() + self.channel.request_pause() } /// Request the DMA to resume transfers after being suspended. @@ -291,7 +291,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Request the DMA to reset. /// /// The configuration for this channel will **not be preserved**. 
If you need to restart the transfer - /// at a later point with the same configuration, see [`request_suspend`](Self::request_suspend) instead. + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. pub fn request_reset(&mut self) { self.channel.request_reset() } @@ -325,7 +325,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { fn drop(&mut self) { - self.request_suspend(); + self.request_pause(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." -- cgit From 1e627cab29297be5be81d1033ca3a87b92e0c746 Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: refactor: make dma implementations match in interface --- embassy-stm32/src/dma/dma_bdma.rs | 102 ++++++++++++++++------------ embassy-stm32/src/dma/gpdma/mod.rs | 17 ++--- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 11 ++- embassy-stm32/src/i2c/v2.rs | 2 +- 4 files changed, 70 insertions(+), 62 deletions(-) diff --git a/embassy-stm32/src/dma/dma_bdma.rs b/embassy-stm32/src/dma/dma_bdma.rs index 464823bfc..73ecab070 100644 --- a/embassy-stm32/src/dma/dma_bdma.rs +++ b/embassy-stm32/src/dma/dma_bdma.rs @@ -498,46 +498,52 @@ impl AnyChannel { } } - fn request_stop(&self) { + fn request_pause(&self) { let info = self.info(); match self.info().dma { #[cfg(dma)] DmaInfo::Dma(r) => { - // Disable the channel. Keep the IEs enabled so the irqs still fire. - r.st(info.num).cr().write(|w| { - w.set_teie(true); - w.set_tcie(true); + // Disable the channel without overwriting the existing configuration + r.st(info.num).cr().modify(|w| { + w.set_en(false); }); } #[cfg(bdma)] DmaInfo::Bdma(r) => { - // Disable the channel. Keep the IEs enabled so the irqs still fire. 
- r.ch(info.num).cr().write(|w| { - w.set_teie(true); - w.set_tcie(true); + // Disable the channel without overwriting the existing configuration + r.ch(info.num).cr().modify(|w| { + w.set_en(false); }); } } } - fn request_pause(&self) { + fn request_resume(&self) { + self.start() + } + + fn request_reset(&self) { let info = self.info(); match self.info().dma { #[cfg(dma)] DmaInfo::Dma(r) => { - // Disable the channel without overwriting the existing configuration - r.st(info.num).cr().modify(|w| { - w.set_en(false); + // Disable the channel. Keep the IEs enabled so the irqs still fire. + r.st(info.num).cr().write(|w| { + w.set_teie(true); + w.set_tcie(true); }); } #[cfg(bdma)] DmaInfo::Bdma(r) => { - // Disable the channel without overwriting the existing configuration - r.ch(info.num).cr().modify(|w| { - w.set_en(false); + // Disable the channel. Keep the IEs enabled so the irqs still fire. + r.ch(info.num).cr().write(|w| { + w.set_teie(true); + w.set_tcie(true); }); } } + + while self.is_running() {} } fn is_running(&self) -> bool { @@ -710,27 +716,31 @@ impl<'a> Transfer<'a> { Self { channel } } - /// Request the transfer to stop. - /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. - /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - self.channel.request_stop() - } - /// Request the transfer to pause, keeping the existing configuration for this channel. - /// To restart the transfer, call [`start`](Self::start) again. /// + /// To resume the transfer, call [`request_resume`](Self::request_resume) again. /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 
pub fn request_pause(&mut self) { self.channel.request_pause() } + /// Request the transfer to resume after having been paused. + pub fn request_resume(&mut self) { + self.channel.request_resume() + } + + /// Request the DMA to reset. + /// + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + pub fn request_reset(&mut self) { + self.channel.request_reset() + } + /// Return whether this transfer is still running. /// /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). + /// it was requested to stop early with [`request_pause`](Self::request_pause). pub fn is_running(&mut self) -> bool { self.channel.is_running() } @@ -754,7 +764,7 @@ impl<'a> Transfer<'a> { impl<'a> Drop for Transfer<'a> { fn drop(&mut self) { - self.request_stop(); + self.request_reset(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." @@ -901,15 +911,6 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); } - /// Request the DMA to stop. - /// The configuration for this channel will **not be preserved**. If you need to restart the transfer - /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. - /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - self.channel.request_stop() - } - /// Request the transfer to pause, keeping the existing configuration for this channel. /// To restart the transfer, call [`start`](Self::start) again. 
/// @@ -918,10 +919,23 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { self.channel.request_pause() } + /// Request the transfer to resume after having been paused. + pub fn request_resume(&mut self) { + self.channel.request_resume() + } + + /// Request the DMA to reset. + /// + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + pub fn request_reset(&mut self) { + self.channel.request_reset() + } + /// Return whether DMA is still running. /// /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). + /// it was requested to stop early with [`request_reset`](Self::request_reset). pub fn is_running(&mut self) -> bool { self.channel.is_running() } @@ -934,7 +948,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// This is designed to be used with streaming input data such as the /// I2S/SAI or ADC. /// - /// When using the UART, you probably want `request_stop()`. + /// When using the UART, you probably want `request_reset()`. pub async fn stop(&mut self) { self.channel.disable_circular_mode(); //wait until cr.susp reads as true @@ -948,7 +962,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { fn drop(&mut self) { - self.request_stop(); + self.request_reset(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." @@ -1058,8 +1072,8 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 
- pub fn request_stop(&mut self) { - self.channel.request_stop() + pub fn request_reset(&mut self) { + self.channel.request_reset() } /// Request the transfer to pause, keeping the existing configuration for this channel. @@ -1073,7 +1087,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { /// Return whether DMA is still running. /// /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). + /// it was requested to stop early with [`request_reset`](Self::request_reset). pub fn is_running(&mut self) -> bool { self.channel.is_running() } @@ -1098,7 +1112,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { fn drop(&mut self) { - self.request_stop(); + self.request_reset(); while self.is_running() {} // "Subsequent reads and writes cannot be moved ahead of preceding reads." diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs index 7b317d4bb..4a14c2a8e 100644 --- a/embassy-stm32/src/dma/gpdma/mod.rs +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -431,16 +431,15 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { Self { channel } } - /// Request the transfer to suspend. + /// Request the transfer to pause, keeping the existing configuration for this channel. /// /// To resume the transfer, call [`request_resume`](Self::request_resume) again. - /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. pub fn request_pause(&mut self) { self.channel.request_pause() } - /// Request the transfer to resume after being suspended. + /// Request the transfer to resume after having been paused. pub fn request_resume(&mut self) { self.channel.request_resume() } @@ -456,7 +455,7 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { /// Return whether this transfer is still running. 
/// /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). + /// it was requested to stop early with [`request_pause`](Self::request_pause). pub fn is_running(&mut self) -> bool { self.channel.is_running() } @@ -480,8 +479,7 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> { fn drop(&mut self) { - self.request_pause(); - while self.is_running() {} + self.request_reset(); // "Subsequent reads and writes cannot be moved ahead of preceding reads." fence(Ordering::SeqCst); @@ -629,9 +627,8 @@ impl<'a> Transfer<'a> { Self { channel } } - /// Request the transfer to suspend. - /// - /// To resume the transfer, call [`request_resume`](Self::request_resume) again. + /// Request the transfer to pause, keeping the existing configuration for this channel. + /// To restart the transfer, call [`start`](Self::start) again. /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. pub fn request_pause(&mut self) { @@ -654,7 +651,7 @@ impl<'a> Transfer<'a> { /// Return whether this transfer is still running. /// /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). + /// it was requested to stop early with [`request_pause`](Self::request_pause). pub fn is_running(&mut self) -> bool { self.channel.is_running() } diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 0f1c42a8b..55486d5cc 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -131,16 +131,15 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); } - /// Request the DMA to suspend. 
+ /// Request the transfer to pause, keeping the existing configuration for this channel. /// /// To resume the transfer, call [`request_resume`](Self::request_resume) again. - /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. pub fn request_pause(&mut self) { self.channel.request_pause() } - /// Request the DMA to resume transfers after being suspended. + /// Request the transfer to resume after having been paused. pub fn request_resume(&mut self) { self.channel.request_resume() } @@ -153,10 +152,10 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { self.channel.request_reset() } - /// Return whether DMA is still running. + /// Return whether this transfer is still running. /// /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). + /// it was requested to stop early with [`request_pause`](Self::request_pause). pub fn is_running(&mut self) -> bool { self.channel.is_running() } @@ -168,8 +167,6 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { /// /// This is designed to be used with streaming input data such as the /// I2S/SAI or ADC. - /// - /// When using the UART, you probably want `request_stop()`. 
pub async fn stop(&mut self) { // wait until cr.susp reads as true poll_fn(|cx| { diff --git a/embassy-stm32/src/i2c/v2.rs b/embassy-stm32/src/i2c/v2.rs index 6b20a601b..0bfc795ac 100644 --- a/embassy-stm32/src/i2c/v2.rs +++ b/embassy-stm32/src/i2c/v2.rs @@ -1283,7 +1283,7 @@ impl<'d> I2c<'d, Async, MultiMaster> { } else if isr.stopf() { self.info.regs.icr().write(|reg| reg.set_stopcf(true)); if remaining_len > 0 { - dma_transfer.request_stop(); + dma_transfer.request_pause(); Poll::Ready(Ok(SendStatus::LeftoverBytes(remaining_len as usize))) } else { Poll::Ready(Ok(SendStatus::Done)) -- cgit From 40eb5576824d45dfbe2a0609e69743a230475253 Mon Sep 17 00:00:00 2001 From: Adrian Figueroa Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: SAI example --- examples/stm32h5/src/bin/sai.rs | 53 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 examples/stm32h5/src/bin/sai.rs diff --git a/examples/stm32h5/src/bin/sai.rs b/examples/stm32h5/src/bin/sai.rs new file mode 100644 index 000000000..086a847f7 --- /dev/null +++ b/examples/stm32h5/src/bin/sai.rs @@ -0,0 +1,53 @@ +#![no_std] +#![no_main] + +use defmt::info; +use embassy_executor::Spawner; + +use embassy_stm32::{sai, Config}; +use {defmt_rtt as _, panic_probe as _}; + +#[embassy_executor::main] +async fn main(_spawner: Spawner) { + info!("Hello world."); + + let mut config = Config::default(); + { + use embassy_stm32::rcc::*; + + config.rcc.pll2 = Some(Pll { + source: PllSource::HSI, + prediv: PllPreDiv::DIV16, + mul: PllMul::MUL32, + divp: Some(PllDiv::DIV16), // 8 MHz SAI clock + divq: None, + divr: None, + }); + + config.rcc.mux.sai1sel = mux::Saisel::PLL2_P; + } + let p = embassy_stm32::init(config); + + let mut write_buffer = [0u16; 1024]; + let (_, sai_b) = sai::split_subblocks(p.SAI1); + + let mut sai_b = sai::Sai::new_asynchronous( + sai_b, + p.PF8, + p.PE3, + p.PF9, + p.GPDMA1_CH0, + &mut write_buffer, + Default::default(), + ); + + // Populate arbitrary data. 
+ let mut data = [0u16; 256]; + for (index, sample) in data.iter_mut().enumerate() { + *sample = index as u16; + } + + loop { + sai_b.write(&data).await.unwrap(); + } +} -- cgit From 1e54841632fc12399f9e54f54a6190c6a4b6eb7e Mon Sep 17 00:00:00 2001 From: Adrian Figueroa Date: Mon, 25 Aug 2025 21:14:16 +0200 Subject: chore: add changelog entry --- embassy-stm32/CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/embassy-stm32/CHANGELOG.md b/embassy-stm32/CHANGELOG.md index 4cc48ed97..8ed4dbd65 100644 --- a/embassy-stm32/CHANGELOG.md +++ b/embassy-stm32/CHANGELOG.md @@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - feat: stm32/adc/v3: allow DMA reads to loop through enable channels - fix: Fix XSPI not disabling alternate bytes when they were previously enabled - fix: Fix stm32h7rs init when using external flash via XSPI +- feat: Add GPDMA linked-list + ringbuffer support ([#3923](https://github.com/embassy-rs/embassy/pull/3923)) ## 0.3.0 - 2025-08-12 @@ -135,7 +136,7 @@ GPIO: - Refactor AfType ([#3031](https://github.com/embassy-rs/embassy/pull/3031)) - Gpiov1: Do not call set_speed for AFType::Input ([#2996](https://github.com/embassy-rs/embassy/pull/2996)) -UART: +UART: - Add embedded-io impls ([#2739](https://github.com/embassy-rs/embassy/pull/2739)) - Add support for changing baud rate ([#3512](https://github.com/embassy-rs/embassy/pull/3512)) - Add split_ref ([#3500](https://github.com/embassy-rs/embassy/pull/3500)) @@ -159,7 +160,7 @@ UART: - Wake receive task for each received byte ([#2722](https://github.com/embassy-rs/embassy/pull/2722)) - Fix dma and idle line detection in ringbuffereduartrx ([#3319](https://github.com/embassy-rs/embassy/pull/3319)) -SPI: +SPI: - Add MISO pullup configuration option ([#2943](https://github.com/embassy-rs/embassy/pull/2943)) - Add slew rate configuration options ([#3669](https://github.com/embassy-rs/embassy/pull/3669)) - Fix blocking_write on 
nosck spi. ([#3035](https://github.com/embassy-rs/embassy/pull/3035)) -- cgit From c487034dc707282497f4ff2450493a07f76d3027 Mon Sep 17 00:00:00 2001 From: Adrian Figueroa Date: Mon, 25 Aug 2025 21:21:54 +0200 Subject: style: formatting --- examples/stm32h5/src/bin/sai.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/stm32h5/src/bin/sai.rs b/examples/stm32h5/src/bin/sai.rs index 086a847f7..0e182f9cf 100644 --- a/examples/stm32h5/src/bin/sai.rs +++ b/examples/stm32h5/src/bin/sai.rs @@ -3,7 +3,6 @@ use defmt::info; use embassy_executor::Spawner; - use embassy_stm32::{sai, Config}; use {defmt_rtt as _, panic_probe as _}; -- cgit From 2e2562d8dc38844b9907e282f6c098e6ac2fd096 Mon Sep 17 00:00:00 2001 From: Adrian Figueroa Date: Tue, 26 Aug 2025 23:11:18 +0200 Subject: fix: ping-pong helper DMA direction --- embassy-stm32/src/dma/gpdma/linked_list.rs | 22 +++++++++++++++++----- embassy-stm32/src/dma/gpdma/ringbuffered.rs | 7 ++++--- embassy-stm32/src/dma/mod.rs | 5 ++++- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs index 627da0055..f7c1fbbed 100644 --- a/embassy-stm32/src/dma/gpdma/linked_list.rs +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -166,15 +166,27 @@ impl Table { /// Create a ping-pong linked-list table. /// /// This uses two linked-list items, one for each half of the buffer. - pub unsafe fn new_ping_pong(request: Request, peri_addr: *mut W, buffer: &mut [W]) -> Table<2> { + pub unsafe fn new_ping_pong( + request: Request, + peri_addr: *mut W, + buffer: &mut [W], + direction: Dir, + ) -> Table<2> { // Buffer halves should be the same length. 
let half_len = buffer.len() / 2; assert_eq!(half_len * 2, buffer.len()); - let items = [ - LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]), - LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]), - ]; + let items = match direction { + Dir::MemoryToPeripheral => [ + LinearItem::new_write(request, &mut buffer[..half_len], peri_addr), + LinearItem::new_write(request, &mut buffer[half_len..], peri_addr), + ], + Dir::PeripheralToMemory => [ + LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]), + LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]), + ], + }; + Table::new(items) } diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs index 55486d5cc..9ee52193b 100644 --- a/embassy-stm32/src/dma/gpdma/ringbuffered.rs +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs @@ -12,7 +12,7 @@ use super::{AnyChannel, TransferOptions, STATE}; use crate::dma::gpdma::linked_list::{RunMode, Table}; use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}; use crate::dma::word::Word; -use crate::dma::{Channel, Request}; +use crate::dma::{Channel, Dir, Request}; struct DmaCtrlImpl<'a>(Peri<'a, AnyChannel>); @@ -67,7 +67,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { options: TransferOptions, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); - let table = Table::<2>::new_ping_pong::(request, peri_addr, buffer); + let table = Table::<2>::new_ping_pong::(request, peri_addr, buffer, Dir::PeripheralToMemory); Self { channel, @@ -207,7 +207,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { options: TransferOptions, ) -> Self { let channel: Peri<'a, AnyChannel> = channel.into(); - let table = Table::<2>::new_ping_pong::(request, peri_addr, buffer); + let table = Table::<2>::new_ping_pong::(request, peri_addr, buffer, Dir::MemoryToPeripheral); Self { channel, @@ -222,6 +222,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { // Apply the 
default configuration to the channel. unsafe { self.channel.configure_linked_list(&self.table, self.options) }; self.table.link(RunMode::Circular); + self.channel.start(); } diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs index 030f906d2..5989bfd7c 100644 --- a/embassy-stm32/src/dma/mod.rs +++ b/embassy-stm32/src/dma/mod.rs @@ -28,10 +28,13 @@ use embassy_hal_internal::{impl_peripheral, PeripheralType}; use crate::interrupt; +/// The direction of a DMA transfer. #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] -enum Dir { +pub enum Dir { + /// Transfer from memory to a peripheral. MemoryToPeripheral, + /// Transfer from a peripheral to memory. PeripheralToMemory, } -- cgit