From 3c3b43fb00355a5db64a34416dc2f19042a3fc5a Mon Sep 17 00:00:00 2001 From: elagil Date: Mon, 25 Aug 2025 21:10:59 +0200 Subject: feat: GPDAM linked-list + ringbuffer support --- embassy-stm32/src/dma/gpdma.rs | 339 ----------------- embassy-stm32/src/dma/gpdma/linked_list.rs | 236 ++++++++++++ embassy-stm32/src/dma/gpdma/mod.rs | 572 +++++++++++++++++++++++++++++ embassy-stm32/src/dma/gpdma/ringbuffer.rs | 283 ++++++++++++++ embassy-stm32/src/dma/ringbuffer/mod.rs | 2 - embassy-stm32/src/sai/mod.rs | 9 - 6 files changed, 1091 insertions(+), 350 deletions(-) delete mode 100644 embassy-stm32/src/dma/gpdma.rs create mode 100644 embassy-stm32/src/dma/gpdma/linked_list.rs create mode 100644 embassy-stm32/src/dma/gpdma/mod.rs create mode 100644 embassy-stm32/src/dma/gpdma/ringbuffer.rs diff --git a/embassy-stm32/src/dma/gpdma.rs b/embassy-stm32/src/dma/gpdma.rs deleted file mode 100644 index 151e4ab9f..000000000 --- a/embassy-stm32/src/dma/gpdma.rs +++ /dev/null @@ -1,339 +0,0 @@ -#![macro_use] - -use core::future::Future; -use core::pin::Pin; -use core::sync::atomic::{fence, Ordering}; -use core::task::{Context, Poll}; - -use embassy_hal_internal::Peri; -use embassy_sync::waitqueue::AtomicWaker; - -use super::word::{Word, WordSize}; -use super::{AnyChannel, Channel, Dir, Request, STATE}; -use crate::interrupt::typelevel::Interrupt; -use crate::interrupt::Priority; -use crate::pac; -use crate::pac::gpdma::vals; - -pub(crate) struct ChannelInfo { - pub(crate) dma: pac::gpdma::Gpdma, - pub(crate) num: usize, - #[cfg(feature = "_dual-core")] - pub(crate) irq: pac::Interrupt, -} - -/// GPDMA transfer options. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -#[non_exhaustive] -pub struct TransferOptions {} - -impl Default for TransferOptions { - fn default() -> Self { - Self {} - } -} - -impl From for vals::Dw { - fn from(raw: WordSize) -> Self { - match raw { - WordSize::OneByte => Self::BYTE, - WordSize::TwoBytes => Self::HALF_WORD, - WordSize::FourBytes => Self::WORD, - } - } -} - -pub(crate) struct ChannelState { - waker: AtomicWaker, -} - -impl ChannelState { - pub(crate) const NEW: Self = Self { - waker: AtomicWaker::new(), - }; -} - -/// safety: must be called only once -pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) { - foreach_interrupt! { - ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => { - crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority); - #[cfg(not(feature = "_dual-core"))] - crate::interrupt::typelevel::$irq::enable(); - }; - } - crate::_generated::init_gpdma(); -} - -impl AnyChannel { - /// Safety: Must be called with a matching set of parameters for a valid dma channel - pub(crate) unsafe fn on_irq(&self) { - let info = self.info(); - #[cfg(feature = "_dual-core")] - { - use embassy_hal_internal::interrupt::InterruptExt as _; - info.irq.enable(); - } - - let state = &STATE[self.id as usize]; - - let ch = info.dma.ch(info.num); - let sr = ch.sr().read(); - - if sr.dtef() { - panic!( - "DMA: data transfer error on DMA@{:08x} channel {}", - info.dma.as_ptr() as u32, - info.num - ); - } - if sr.usef() { - panic!( - "DMA: user settings error on DMA@{:08x} channel {}", - info.dma.as_ptr() as u32, - info.num - ); - } - - if sr.suspf() || sr.tcf() { - // disable all xxIEs to prevent the irq from firing again. - ch.cr().write(|_| {}); - - // Wake the future. It'll look at tcf and see it's set. - state.waker.wake(); - } - } -} - -/// DMA transfer. 
-#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Transfer<'a> { - channel: Peri<'a, AnyChannel>, -} - -impl<'a> Transfer<'a> { - /// Create a new read DMA transfer (peripheral to memory). - pub unsafe fn new_read( - channel: Peri<'a, impl Channel>, - request: Request, - peri_addr: *mut W, - buf: &'a mut [W], - options: TransferOptions, - ) -> Self { - Self::new_read_raw(channel, request, peri_addr, buf, options) - } - - /// Create a new read DMA transfer (peripheral to memory), using raw pointers. - pub unsafe fn new_read_raw( - channel: Peri<'a, impl Channel>, - request: Request, - peri_addr: *mut PW, - buf: *mut [MW], - options: TransferOptions, - ) -> Self { - Self::new_inner( - channel.into(), - request, - Dir::PeripheralToMemory, - peri_addr as *const u32, - buf as *mut MW as *mut u32, - buf.len(), - true, - PW::size(), - MW::size(), - options, - ) - } - - /// Create a new write DMA transfer (memory to peripheral). - pub unsafe fn new_write( - channel: Peri<'a, impl Channel>, - request: Request, - buf: &'a [MW], - peri_addr: *mut PW, - options: TransferOptions, - ) -> Self { - Self::new_write_raw(channel, request, buf, peri_addr, options) - } - - /// Create a new write DMA transfer (memory to peripheral), using raw pointers. - pub unsafe fn new_write_raw( - channel: Peri<'a, impl Channel>, - request: Request, - buf: *const [MW], - peri_addr: *mut PW, - options: TransferOptions, - ) -> Self { - Self::new_inner( - channel.into(), - request, - Dir::MemoryToPeripheral, - peri_addr as *const u32, - buf as *const MW as *mut u32, - buf.len(), - true, - MW::size(), - PW::size(), - options, - ) - } - - /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly. 
- pub unsafe fn new_write_repeated( - channel: Peri<'a, impl Channel>, - request: Request, - repeated: &'a MW, - count: usize, - peri_addr: *mut PW, - options: TransferOptions, - ) -> Self { - Self::new_inner( - channel.into(), - request, - Dir::MemoryToPeripheral, - peri_addr as *const u32, - repeated as *const MW as *mut u32, - count, - false, - MW::size(), - PW::size(), - options, - ) - } - - unsafe fn new_inner( - channel: Peri<'a, AnyChannel>, - request: Request, - dir: Dir, - peri_addr: *const u32, - mem_addr: *mut u32, - mem_len: usize, - incr_mem: bool, - data_size: WordSize, - dst_size: WordSize, - _options: TransferOptions, - ) -> Self { - // BNDT is specified as bytes, not as number of transfers. - let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { - panic!("DMA transfers may not be larger than 65535 bytes."); - }; - - let info = channel.info(); - let ch = info.dma.ch(info.num); - - // "Preceding reads and writes cannot be moved past subsequent writes." - fence(Ordering::SeqCst); - - let this = Self { channel }; - - ch.cr().write(|w| w.set_reset(true)); - ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs - ch.llr().write(|_| {}); // no linked list - ch.tr1().write(|w| { - w.set_sdw(data_size.into()); - w.set_ddw(dst_size.into()); - w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); - w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); - }); - ch.tr2().write(|w| { - w.set_dreq(match dir { - Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL, - Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL, - }); - w.set_reqsel(request); - }); - ch.tr3().write(|_| {}); // no address offsets. 
- ch.br1().write(|w| w.set_bndt(bndt)); - - match dir { - Dir::MemoryToPeripheral => { - ch.sar().write_value(mem_addr as _); - ch.dar().write_value(peri_addr as _); - } - Dir::PeripheralToMemory => { - ch.sar().write_value(peri_addr as _); - ch.dar().write_value(mem_addr as _); - } - } - - ch.cr().write(|w| { - // Enable interrupts - w.set_tcie(true); - w.set_useie(true); - w.set_dteie(true); - w.set_suspie(true); - - // Start it - w.set_en(true); - }); - - this - } - - /// Request the transfer to stop. - /// - /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. - pub fn request_stop(&mut self) { - let info = self.channel.info(); - let ch = info.dma.ch(info.num); - - ch.cr().modify(|w| w.set_susp(true)) - } - - /// Return whether this transfer is still running. - /// - /// If this returns `false`, it can be because either the transfer finished, or - /// it was requested to stop early with [`request_stop`](Self::request_stop). - pub fn is_running(&mut self) -> bool { - let info = self.channel.info(); - let ch = info.dma.ch(info.num); - - let sr = ch.sr().read(); - !sr.tcf() && !sr.suspf() - } - - /// Gets the total remaining transfers for the channel - /// Note: this will be zero for transfers that completed without cancellation. - pub fn get_remaining_transfers(&self) -> u16 { - let info = self.channel.info(); - let ch = info.dma.ch(info.num); - - ch.br1().read().bndt() - } - - /// Blocking wait until the transfer finishes. - pub fn blocking_wait(mut self) { - while self.is_running() {} - - // "Subsequent reads and writes cannot be moved ahead of preceding reads." - fence(Ordering::SeqCst); - - core::mem::forget(self); - } -} - -impl<'a> Drop for Transfer<'a> { - fn drop(&mut self) { - self.request_stop(); - while self.is_running() {} - - // "Subsequent reads and writes cannot be moved ahead of preceding reads." 
- fence(Ordering::SeqCst); - } -} - -impl<'a> Unpin for Transfer<'a> {} -impl<'a> Future for Transfer<'a> { - type Output = (); - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let state = &STATE[self.channel.id as usize]; - state.waker.register(cx.waker()); - - if self.is_running() { - Poll::Pending - } else { - Poll::Ready(()) - } - } -} diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs new file mode 100644 index 000000000..b24b2e7eb --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs @@ -0,0 +1,236 @@ +//! Implementation of the GPDMA linked list and linked list items. +#![macro_use] + +use stm32_metapac::gpdma::{regs, vals::Dreq}; + +use super::TransferOptions; +use crate::dma::{ + word::{Word, WordSize}, + Dir, Request, +}; +use core::{ + ptr, + sync::atomic::{AtomicUsize, Ordering}, +}; + +/// The mode in which to run the linked list. +#[derive(Debug)] +pub enum RunMode { + /// List items are not linked together. + Unlinked, + /// The list is linked sequentially and only run once. + Once, + /// The list is linked sequentially, and the end of the list is linked to the beginning. + Repeat, +} + +/// A linked-list item for linear GPDMA transfers. +/// +/// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities. +#[derive(Debug, Copy, Clone, Default)] +#[repr(C)] +pub struct LinearItem { + /// Transfer register 1. + pub tr1: u32, + /// Transfer register 2. + pub tr2: u32, + /// Block register 2. + pub br1: u32, + /// Source address register. + pub sar: u32, + /// Destination address register. + pub dar: u32, + /// Linked-list address register. + pub llr: u32, +} + +impl LinearItem { + /// Create a new read DMA transfer (peripheral to memory). 
+ pub unsafe fn new_read<'d, W: Word>( + request: Request, + peri_addr: *mut W, + buf: &'d mut [W], + options: TransferOptions, + ) -> Self { + Self::new_inner( + request, + Dir::PeripheralToMemory, + peri_addr as *const u32, + buf as *mut [W] as *mut W as *mut u32, + buf.len(), + true, + W::size(), + W::size(), + options, + ) + } + + /// Create a new write DMA transfer (memory to peripheral). + pub unsafe fn new_write<'d, MW: Word, PW: Word>( + request: Request, + buf: &'d [MW], + peri_addr: *mut PW, + options: TransferOptions, + ) -> Self { + Self::new_inner( + request, + Dir::MemoryToPeripheral, + peri_addr as *const u32, + buf as *const [MW] as *const MW as *mut u32, + buf.len(), + true, + MW::size(), + PW::size(), + options, + ) + } + + unsafe fn new_inner( + request: Request, + dir: Dir, + peri_addr: *const u32, + mem_addr: *mut u32, + mem_len: usize, + incr_mem: bool, + data_size: WordSize, + dst_size: WordSize, + _options: TransferOptions, + ) -> Self { + // BNDT is specified as bytes, not as number of transfers. + let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { + panic!("DMA transfers may not be larger than 65535 bytes."); + }; + + let mut br1 = regs::ChBr1(0); + br1.set_bndt(bndt); + + let mut tr1 = regs::ChTr1(0); + tr1.set_sdw(data_size.into()); + tr1.set_ddw(dst_size.into()); + tr1.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); + tr1.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); + + let mut tr2 = regs::ChTr2(0); + tr2.set_dreq(match dir { + Dir::MemoryToPeripheral => Dreq::DESTINATION_PERIPHERAL, + Dir::PeripheralToMemory => Dreq::SOURCE_PERIPHERAL, + }); + tr2.set_reqsel(request); + + let (sar, dar) = match dir { + Dir::MemoryToPeripheral => (mem_addr as _, peri_addr as _), + Dir::PeripheralToMemory => (peri_addr as _, mem_addr as _), + }; + + let llr = regs::ChLlr(0); + + Self { + tr1: tr1.0, + tr2: tr2.0, + br1: br1.0, + sar, + dar, + llr: llr.0, + } + } + + /// Link to the next linear item at the given address. 
+ /// + /// Enables channel update bits. + fn link_to(&mut self, next: u16) { + let mut llr = regs::ChLlr(0); + + llr.set_ut1(true); + llr.set_ut2(true); + llr.set_ub1(true); + llr.set_usa(true); + llr.set_uda(true); + llr.set_ull(true); + llr.set_la(next); + + self.llr = llr.0; + } + + /// Unlink the next linear item. + /// + /// Disables channel update bits. + fn unlink(&mut self) { + self.llr = regs::ChLlr(0).0; + } +} + +pub struct Table { + current_index: AtomicUsize, + items: [LinearItem; ITEM_COUNT], +} + +impl Table { + /// Create a new table. + pub fn new(items: [LinearItem; ITEM_COUNT], run_mode: RunMode) -> Self { + assert!(!items.is_empty()); + + let mut this = Self { + current_index: AtomicUsize::new(0), + items, + }; + + if matches!(run_mode, RunMode::Once | RunMode::Repeat) { + this.link_sequential(); + } + + if matches!(run_mode, RunMode::Repeat) { + this.link_repeat(); + } + + this + } + + pub fn len(&self) -> usize { + self.items.len() + } + + /// Items are linked together sequentially. + pub fn link_sequential(&mut self) { + if self.items.len() > 1 { + for index in 0..(self.items.len() - 1) { + let next = ptr::addr_of!(self.items[index + 1]) as u16; + self.items[index].link_to(next); + } + } + } + + /// Last item links to first item. + pub fn link_repeat(&mut self) { + let first_item = self.items.first().unwrap(); + let first_address = ptr::addr_of!(first_item) as u16; + self.items.last_mut().unwrap().link_to(first_address); + } + + /// The index of the next item. + pub fn next_index(&self) -> usize { + let mut next_index = self.current_index.load(Ordering::Relaxed) + 1; + if next_index >= self.len() { + next_index = 0; + } + + next_index + } + + /// Unlink the next item. + pub fn unlink_next(&mut self) { + let next_index = self.next_index(); + self.items[next_index].unlink(); + } + + /// Linked list base address (upper 16 address bits). 
+ pub fn base_address(&self) -> u16 { + ((ptr::addr_of!(self.items) as u32) >> 16) as _ + } + + /// Linked list offset address (lower 16 address bits) at the selected index. + pub fn offset_address(&self, index: usize) -> u16 { + assert!(self.items.len() > index); + + (ptr::addr_of!(self.items[index]) as u32) as _ + } +} diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs new file mode 100644 index 000000000..07acd2cf0 --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/mod.rs @@ -0,0 +1,572 @@ +#![macro_use] + +use core::future::Future; +use core::pin::Pin; +use core::sync::atomic::{fence, AtomicUsize, Ordering}; +use core::task::{Context, Poll}; + +use embassy_hal_internal::Peri; +use embassy_sync::waitqueue::AtomicWaker; +use linked_list::Table; +use stm32_metapac::gpdma::regs; + +use super::word::{Word, WordSize}; +use super::{AnyChannel, Channel, Dir, Request, STATE}; +use crate::interrupt::typelevel::Interrupt; +use crate::pac; +use crate::pac::gpdma::vals; + +mod linked_list; +mod ringbuffer; + +pub(crate) struct ChannelInfo { + pub(crate) dma: pac::gpdma::Gpdma, + pub(crate) num: usize, + #[cfg(feature = "_dual-core")] + pub(crate) irq: pac::Interrupt, +} + +/// DMA request priority +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Priority { + /// Low Priority + Low, + /// Medium Priority + Medium, + /// High Priority + High, + /// Very High Priority + VeryHigh, +} + +impl From for pac::gpdma::vals::Prio { + fn from(value: Priority) -> Self { + match value { + Priority::Low => pac::gpdma::vals::Prio::LOW_WITH_LOWH_WEIGHT, + Priority::Medium => pac::gpdma::vals::Prio::LOW_WITH_MID_WEIGHT, + Priority::High => pac::gpdma::vals::Prio::LOW_WITH_HIGH_WEIGHT, + Priority::VeryHigh => pac::gpdma::vals::Prio::HIGH, + } + } +} + +/// GPDMA transfer options. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +#[non_exhaustive] +pub struct TransferOptions { + priority: Priority, + half_transfer_ir: bool, + complete_transfer_ir: bool, +} + +impl Default for TransferOptions { + fn default() -> Self { + Self { + priority: Priority::VeryHigh, + half_transfer_ir: false, + complete_transfer_ir: true, + } + } +} + +impl From for vals::Dw { + fn from(raw: WordSize) -> Self { + match raw { + WordSize::OneByte => Self::BYTE, + WordSize::TwoBytes => Self::HALF_WORD, + WordSize::FourBytes => Self::WORD, + } + } +} + +pub(crate) struct ChannelState { + waker: AtomicWaker, + complete_count: AtomicUsize, +} + +impl ChannelState { + pub(crate) const NEW: Self = Self { + waker: AtomicWaker::new(), + complete_count: AtomicUsize::new(0), + }; +} + +/// safety: must be called only once +pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) { + foreach_interrupt! { + ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => { + crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority); + #[cfg(not(feature = "_dual-core"))] + crate::interrupt::typelevel::$irq::enable(); + }; + } + crate::_generated::init_gpdma(); +} + +impl AnyChannel { + /// Safety: Must be called with a matching set of parameters for a valid dma channel + pub(crate) unsafe fn on_irq(&self) { + let info = self.info(); + #[cfg(feature = "_dual-core")] + { + use embassy_hal_internal::interrupt::InterruptExt as _; + info.irq.enable(); + } + + let state = &STATE[self.id as usize]; + + let ch = info.dma.ch(info.num); + let sr = ch.sr().read(); + + if sr.dtef() { + panic!( + "DMA: data transfer error on DMA@{:08x} channel {}", + info.dma.as_ptr() as u32, + info.num + ); + } + if sr.usef() { + panic!( + "DMA: user settings error on DMA@{:08x} channel {}", + info.dma.as_ptr() as u32, + info.num + ); + } + if sr.ulef() { + panic!( + "DMA: link transfer error on DMA@{:08x} 
channel {}", + info.dma.as_ptr() as u32, + info.num + ); + } + + if sr.tcf() { + state.complete_count.fetch_add(1, Ordering::Release); + } + + if sr.suspf() || sr.tcf() { + // disable all xxIEs to prevent the irq from firing again. + ch.cr().write(|_| {}); + + // Wake the future. It'll look at tcf and see it's set. + state.waker.wake(); + } + } + + fn get_remaining_transfers(&self) -> u16 { + let info = self.info(); + let ch = info.dma.ch(info.num); + + ch.br1().read().bndt() + } + + unsafe fn configure( + &self, + request: Request, + dir: Dir, + peri_addr: *const u32, + mem_addr: *mut u32, + mem_len: usize, + incr_mem: bool, + data_size: WordSize, + dst_size: WordSize, + options: TransferOptions, + ) { + // BNDT is specified as bytes, not as number of transfers. + let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { + panic!("DMA transfers may not be larger than 65535 bytes."); + }; + + let info = self.info(); + let ch = info.dma.ch(info.num); + + // "Preceding reads and writes cannot be moved past subsequent writes." + fence(Ordering::SeqCst); + + ch.cr().write(|w| w.set_reset(true)); + ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs + ch.llr().write(|_| {}); // no linked list + ch.tr1().write(|w| { + w.set_sdw(data_size.into()); + w.set_ddw(dst_size.into()); + w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); + w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); + }); + ch.tr2().write(|w| { + w.set_dreq(match dir { + Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL, + Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL, + }); + w.set_reqsel(request); + }); + ch.tr3().write(|_| {}); // no address offsets. 
+ ch.br1().write(|w| w.set_bndt(bndt)); + + match dir { + Dir::MemoryToPeripheral => { + ch.sar().write_value(mem_addr as _); + ch.dar().write_value(peri_addr as _); + } + Dir::PeripheralToMemory => { + ch.sar().write_value(peri_addr as _); + ch.dar().write_value(mem_addr as _); + } + } + + ch.cr().write(|w| { + w.set_prio(options.priority.into()); + w.set_htie(options.half_transfer_ir); + w.set_tcie(options.complete_transfer_ir); + w.set_useie(true); + w.set_dteie(true); + w.set_suspie(true); + }); + } + + unsafe fn configure_linked_list( + &self, + table: &Table, + options: TransferOptions, + ) { + let info = self.info(); + let ch = info.dma.ch(info.num); + + // "Preceding reads and writes cannot be moved past subsequent writes." + fence(Ordering::SeqCst); + + ch.cr().write(|w| w.set_reset(true)); + ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs + + ch.lbar().write(|reg| reg.set_lba(table.base_address())); + + // Enable all linked-list field updates. + let mut llr = regs::ChLlr(0); + llr.set_ut1(true); + llr.set_ut2(true); + llr.set_ub1(true); + llr.set_usa(true); + llr.set_uda(true); + llr.set_ull(true); + + llr.set_la(table.offset_address(0)); + + ch.llr().write(|_| llr.0); + + ch.tr3().write(|_| {}); // no address offsets. 
+ + ch.cr().write(|w| { + w.set_prio(options.priority.into()); + w.set_htie(options.half_transfer_ir); + w.set_tcie(options.complete_transfer_ir); + w.set_useie(true); + w.set_uleie(true); + w.set_dteie(true); + w.set_suspie(true); + }); + } + + fn start(&self) { + let info = self.info(); + let ch = info.dma.ch(info.num); + + ch.cr().modify(|w| w.set_en(true)); + } + + fn request_stop(&self) { + let info = self.info(); + let ch = info.dma.ch(info.num); + + ch.cr().modify(|w| w.set_susp(true)) + } + + fn is_running(&self) -> bool { + let info = self.info(); + let ch = info.dma.ch(info.num); + + let sr = ch.sr().read(); + !sr.tcf() && !sr.suspf() + } + + fn poll_stop(&self) -> Poll<()> { + use core::sync::atomic::compiler_fence; + compiler_fence(Ordering::SeqCst); + + if !self.is_running() { + Poll::Ready(()) + } else { + Poll::Pending + } + } +} + +/// Linked-list DMA transfer. +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> { + channel: PeripheralRef<'a, AnyChannel>, + table: Table, +} + +impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { + /// Create a new linked-list transfer. + pub unsafe fn new_linked_list( + channel: impl Peripheral

+ 'a, + table: Table, + options: TransferOptions, + ) -> Self { + into_ref!(channel); + + Self::new_inner_linked_list(channel.map_into(), table, options) + } + + unsafe fn new_inner_linked_list( + channel: PeripheralRef<'a, AnyChannel>, + table: Table, + options: TransferOptions, + ) -> Self { + channel.configure_linked_list(&table, options); + channel.start(); + + Self { channel, table } + } + + /// Request the transfer to stop. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Return whether this transfer is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). + pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } + + /// Gets the total remaining transfers for the channel + /// Note: this will be zero for transfers that completed without cancellation. + pub fn get_remaining_transfers(&self) -> u16 { + self.channel.get_remaining_transfers() + } + + /// Blocking wait until the transfer finishes. + pub fn blocking_wait(mut self) { + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + + core::mem::forget(self); + } +} + +impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." 
+ fence(Ordering::SeqCst); + } +} + +impl<'a, const ITEM_COUNT: usize> Unpin for LinkedListTransfer<'a, ITEM_COUNT> {} +impl<'a, const ITEM_COUNT: usize> Future for LinkedListTransfer<'a, ITEM_COUNT> { + type Output = (); + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let state = &STATE[self.channel.id as usize]; + state.waker.register(cx.waker()); + + if self.is_running() { + Poll::Pending + } else { + Poll::Ready(()) + } + } +} + +/// DMA transfer. +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct Transfer<'a> { + channel: Peri<'a, AnyChannel>, +} + +impl<'a> Transfer<'a> { + /// Create a new read DMA transfer (peripheral to memory). + pub unsafe fn new_read( + channel: Peri<'a, impl Channel>, + request: Request, + peri_addr: *mut W, + buf: &'a mut [W], + options: TransferOptions, + ) -> Self { + Self::new_read_raw(channel, request, peri_addr, buf, options) + } + + /// Create a new read DMA transfer (peripheral to memory), using raw pointers. + pub unsafe fn new_read_raw( + channel: Peri<'a, impl Channel>, + request: Request, + peri_addr: *mut PW, + buf: *mut [MW], + options: TransferOptions, + ) -> Self { + Self::new_inner( + channel.into(), + request, + Dir::PeripheralToMemory, + peri_addr as *const u32, + buf as *mut MW as *mut u32, + buf.len(), + true, + PW::size(), + MW::size(), + options, + ) + } + + /// Create a new write DMA transfer (memory to peripheral). + pub unsafe fn new_write( + channel: Peri<'a, impl Channel>, + request: Request, + buf: &'a [MW], + peri_addr: *mut PW, + options: TransferOptions, + ) -> Self { + Self::new_write_raw(channel, request, buf, peri_addr, options) + } + + /// Create a new write DMA transfer (memory to peripheral), using raw pointers. 
+ pub unsafe fn new_write_raw( + channel: Peri<'a, impl Channel>, + request: Request, + buf: *const [MW], + peri_addr: *mut PW, + options: TransferOptions, + ) -> Self { + Self::new_inner( + channel.into(), + request, + Dir::MemoryToPeripheral, + peri_addr as *const u32, + buf as *const MW as *mut u32, + buf.len(), + true, + MW::size(), + PW::size(), + options, + ) + } + + /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly. + pub unsafe fn new_write_repeated( + channel: Peri<'a, impl Channel>, + request: Request, + repeated: &'a MW, + count: usize, + peri_addr: *mut PW, + options: TransferOptions, + ) -> Self { + Self::new_inner( + channel.into(), + request, + Dir::MemoryToPeripheral, + peri_addr as *const u32, + repeated as *const MW as *mut u32, + count, + false, + MW::size(), + PW::size(), + options, + ) + } + + unsafe fn new_inner( + channel: Peri<'a, AnyChannel>, + request: Request, + dir: Dir, + peri_addr: *const u32, + mem_addr: *mut u32, + mem_len: usize, + incr_mem: bool, + data_size: WordSize, + peripheral_size: WordSize, + options: TransferOptions, + ) -> Self { + assert!(mem_len > 0 && mem_len <= 0xFFFF); + + channel.configure( + _request, + dir, + peri_addr, + mem_addr, + mem_len, + incr_mem, + data_size, + peripheral_size, + options, + ); + channel.start(); + + Self { channel } + } + + /// Request the transfer to stop. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Return whether this transfer is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). 
+ pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } + + /// Gets the total remaining transfers for the channel + /// Note: this will be zero for transfers that completed without cancellation. + pub fn get_remaining_transfers(&self) -> u16 { + self.channel.get_remaining_transfers() + } + + /// Blocking wait until the transfer finishes. + pub fn blocking_wait(mut self) { + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + + core::mem::forget(self); + } +} + +impl<'a> Drop for Transfer<'a> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} + +impl<'a> Unpin for Transfer<'a> {} +impl<'a> Future for Transfer<'a> { + type Output = (); + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let state = &STATE[self.channel.id as usize]; + state.waker.register(cx.waker()); + + if self.is_running() { + Poll::Pending + } else { + Poll::Ready(()) + } + } +} diff --git a/embassy-stm32/src/dma/gpdma/ringbuffer.rs b/embassy-stm32/src/dma/gpdma/ringbuffer.rs new file mode 100644 index 000000000..c327e811e --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/ringbuffer.rs @@ -0,0 +1,283 @@ +//! GPDMA ring buffer implementation. +//! +//! FIXME: add request_pause functionality? 
+use core::{ + sync::atomic::{fence, Ordering}, + task::Waker, +}; + +use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; + +use crate::dma::{ + gpdma::linked_list::{LinearItem, RunMode, Table}, + ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}, + word::Word, + Channel, Dir, Request, +}; + +use super::{AnyChannel, TransferOptions, STATE}; + +struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>); + +impl<'a> DmaCtrl for DmaCtrlImpl<'a> { + fn get_remaining_transfers(&self) -> usize { + self.0.get_remaining_transfers() as _ + } + + fn reset_complete_count(&mut self) -> usize { + let state = &STATE[self.0.id as usize]; + + return state.complete_count.swap(0, Ordering::AcqRel); + } + + fn set_waker(&mut self, waker: &Waker) { + STATE[self.0.id as usize].waker.register(waker); + } +} + +/// Ringbuffer for receiving data using GPDMA linked-list mode. +pub struct ReadableRingBuffer<'a, W: Word> { + channel: PeripheralRef<'a, AnyChannel>, + ringbuf: ReadableDmaRingBuffer<'a, W>, + table: Table<2>, +} + +impl<'a, W: Word> ReadableRingBuffer<'a, W> { + /// Create a new ring buffer. + pub unsafe fn new( + channel: impl Peripheral

+ 'a, + request: Request, + peri_addr: *mut W, + buffer: &'a mut [W], + mut options: TransferOptions, + ) -> Self { + into_ref!(channel); + let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + + let half_len = buffer.len() / 2; + assert_eq!(half_len * 2, buffer.len()); + + options.half_transfer_ir = false; + options.complete_transfer_ir = true; + + let items = [ + LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options), + LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options), + ]; + + let table = Table::new(items, RunMode::Once); + + let this = Self { + channel, + ringbuf: ReadableDmaRingBuffer::new(buffer), + table, + }; + + this.channel.configure_linked_list(&this.table, options); + + this + } + + /// Start the ring buffer operation. + /// + /// You must call this after creating it for it to work. + pub fn start(&mut self) { + self.channel.start(); + } + + /// Clear all data in the ring buffer. + pub fn clear(&mut self) { + self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); + } + + /// Read elements from the ring buffer + /// Return a tuple of the length read and the length remaining in the buffer + /// If not all of the elements were read, then there will be some elements in the buffer remaining + /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read + /// Error is returned if the portion to be read was overwritten by the DMA controller. + pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> { + self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf) + } + + /// Read an exact number of elements from the ringbuffer. + /// + /// Returns the remaining number of elements available for immediate reading. + /// Error is returned if the portion to be read was overwritten by the DMA controller. 
+ /// + /// Async/Wake Behavior: + /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, + /// and when it wraps around. This means that when called with a buffer of length 'M', when this + /// ring buffer was created with a buffer of size 'N': + /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. + /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. + pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result { + self.ringbuf + .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await + } + + /// The current length of the ringbuffer + pub fn len(&mut self) -> Result { + Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) + } + + /// The capacity of the ringbuffer + pub const fn capacity(&self) -> usize { + self.ringbuf.cap() + } + + /// Set a waker to be woken when at least one byte is received. + pub fn set_waker(&mut self, waker: &Waker) { + DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); + } + + /// Request the DMA to stop. + /// The configuration for this channel will **not be preserved**. If you need to restart the transfer + /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Return whether DMA is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). 
+ pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } +} + +impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} + +/// Ringbuffer for writing data using DMA circular mode. +pub struct WritableRingBuffer<'a, W: Word> { + channel: PeripheralRef<'a, AnyChannel>, + ringbuf: WritableDmaRingBuffer<'a, W>, +} + +impl<'a, W: Word> WritableRingBuffer<'a, W> { + /// Create a new ring buffer. + pub unsafe fn new( + channel: impl Peripheral
<P = impl Channel>
+ 'a, + _request: Request, + peri_addr: *mut W, + buffer: &'a mut [W], + mut options: TransferOptions, + ) -> Self { + into_ref!(channel); + let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); + + let len = buffer.len(); + let dir = Dir::MemoryToPeripheral; + let data_size = W::size(); + let buffer_ptr = buffer.as_mut_ptr(); + + options.half_transfer_ir = true; + options.complete_transfer_ir = true; + + channel.configure( + _request, + dir, + peri_addr as *mut u32, + buffer_ptr as *mut u32, + len, + true, + data_size, + data_size, + options, + ); + + Self { + channel, + ringbuf: WritableDmaRingBuffer::new(buffer), + } + } + + /// Start the ring buffer operation. + /// + /// You must call this after creating it for it to work. + pub fn start(&mut self) { + self.channel.start(); + } + + /// Clear all data in the ring buffer. + pub fn clear(&mut self) { + self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); + } + + /// Write elements directly to the raw buffer. + /// This can be used to fill the buffer before starting the DMA transfer. + pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { + self.ringbuf.write_immediate(buf) + } + + /// Write elements from the ring buffer + /// Return a tuple of the length written and the length remaining in the buffer + pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { + self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf) + } + + /// Write an exact number of elements to the ringbuffer. + pub async fn write_exact(&mut self, buffer: &[W]) -> Result { + self.ringbuf + .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) + .await + } + + /// Wait for any ring buffer write error. 
+ pub async fn wait_write_error(&mut self) -> Result { + self.ringbuf + .wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow())) + .await + } + + /// The current length of the ringbuffer + pub fn len(&mut self) -> Result { + Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) + } + + /// The capacity of the ringbuffer + pub const fn capacity(&self) -> usize { + self.ringbuf.cap() + } + + /// Set a waker to be woken when at least one byte is received. + pub fn set_waker(&mut self, waker: &Waker) { + DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); + } + + /// Request the DMA to stop. + /// + /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. + pub fn request_stop(&mut self) { + self.channel.request_stop() + } + + /// Return whether DMA is still running. + /// + /// If this returns `false`, it can be because either the transfer finished, or + /// it was requested to stop early with [`request_stop`](Self::request_stop). + pub fn is_running(&mut self) -> bool { + self.channel.is_running() + } +} + +impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs index 44ea497fe..e462c71d4 100644 --- a/embassy-stm32/src/dma/ringbuffer/mod.rs +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs @@ -1,5 +1,3 @@ -#![cfg_attr(gpdma, allow(unused))] - use core::future::poll_fn; use core::task::{Poll, Waker}; diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs index 4965f8b04..88cc225dd 100644 --- a/embassy-stm32/src/sai/mod.rs +++ b/embassy-stm32/src/sai/mod.rs @@ -1,13 +1,11 @@ //! 
Serial Audio Interface (SAI) #![macro_use] -#![cfg_attr(gpdma, allow(unused))] use core::marker::PhantomData; use embassy_hal_internal::PeripheralType; pub use crate::dma::word; -#[cfg(not(gpdma))] use crate::dma::{ringbuffer, Channel, ReadableRingBuffer, Request, TransferOptions, WritableRingBuffer}; use crate::gpio::{AfType, AnyPin, OutputType, Pull, SealedPin as _, Speed}; use crate::pac::sai::{vals, Sai as Regs}; @@ -26,7 +24,6 @@ pub enum Error { Overrun, } -#[cfg(not(gpdma))] impl From for Error { fn from(#[allow(unused)] err: ringbuffer::Error) -> Self { #[cfg(feature = "defmt")] @@ -652,7 +649,6 @@ impl Config { } } -#[cfg(not(gpdma))] enum RingBuffer<'d, W: word::Word> { Writable(WritableRingBuffer<'d, W>), Readable(ReadableRingBuffer<'d, W>), @@ -679,7 +675,6 @@ fn get_af_types(mode: Mode, tx_rx: TxRx) -> (AfType, AfType) { ) } -#[cfg(not(gpdma))] fn get_ring_buffer<'d, T: Instance, W: word::Word>( dma: Peri<'d, impl Channel>, dma_buf: &'d mut [W], @@ -750,14 +745,10 @@ pub struct Sai<'d, T: Instance, W: word::Word> { fs: Option>, sck: Option>, mclk: Option>, - #[cfg(gpdma)] - ring_buffer: PhantomData, - #[cfg(not(gpdma))] ring_buffer: RingBuffer<'d, W>, sub_block: WhichSubBlock, } -#[cfg(not(gpdma))] impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> { /// Create a new SAI driver in asynchronous mode with MCLK. /// -- cgit