aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorelagil <[email protected]>2025-08-25 21:10:59 +0200
committerDario Nieuwenhuis <[email protected]>2025-09-05 14:43:29 +0200
commit3c3b43fb00355a5db64a34416dc2f19042a3fc5a (patch)
treef3cfef9a81d56f6e86524bd83a7df6ea8d386749
parent70aaa82e490a8c5637de7e38cb636f04c1d187f7 (diff)
feat: GPDMA linked-list + ringbuffer support
-rw-r--r--embassy-stm32/src/dma/gpdma/linked_list.rs236
-rw-r--r--embassy-stm32/src/dma/gpdma/mod.rs (renamed from embassy-stm32/src/dma/gpdma.rs)379
-rw-r--r--embassy-stm32/src/dma/gpdma/ringbuffer.rs283
-rw-r--r--embassy-stm32/src/dma/ringbuffer/mod.rs2
-rw-r--r--embassy-stm32/src/sai/mod.rs9
5 files changed, 825 insertions, 84 deletions
diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs
new file mode 100644
index 000000000..b24b2e7eb
--- /dev/null
+++ b/embassy-stm32/src/dma/gpdma/linked_list.rs
@@ -0,0 +1,236 @@
1//! Implementation of the GPDMA linked list and linked list items.
2#![macro_use]
3
4use stm32_metapac::gpdma::{regs, vals::Dreq};
5
6use super::TransferOptions;
7use crate::dma::{
8 word::{Word, WordSize},
9 Dir, Request,
10};
11use core::{
12 ptr,
13 sync::atomic::{AtomicUsize, Ordering},
14};
15
/// The mode in which to run the linked list.
#[derive(Debug)]
pub enum RunMode {
    /// List items are not linked together.
    Unlinked,
    /// The list is linked sequentially and only run once.
    Once,
    /// The list is linked sequentially, and the end of the list is linked back to
    /// the beginning, so the list repeats indefinitely (circular operation).
    Repeat,
}
26
/// A linked-list item for linear GPDMA transfers.
///
/// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities.
///
/// `#[repr(C)]` field order mirrors the GPDMA channel register set
/// (TR1, TR2, BR1, SAR, DAR, LLR) that the update bits in `link_to` refer to.
#[derive(Debug, Copy, Clone, Default)]
#[repr(C)]
pub struct LinearItem {
    /// Transfer register 1.
    pub tr1: u32,
    /// Transfer register 2.
    pub tr2: u32,
    /// Block register 1.
    pub br1: u32,
    /// Source address register.
    pub sar: u32,
    /// Destination address register.
    pub dar: u32,
    /// Linked-list address register.
    pub llr: u32,
}
46
impl LinearItem {
    /// Create a new read DMA transfer (peripheral to memory).
    ///
    /// # Safety
    /// `peri_addr` must be a valid peripheral data-register address, and `buf`
    /// must remain valid (and must not move) for as long as the DMA may write
    /// to it — the item only captures raw addresses.
    pub unsafe fn new_read<'d, W: Word>(
        request: Request,
        peri_addr: *mut W,
        buf: &'d mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_inner(
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            buf as *mut [W] as *mut W as *mut u32,
            buf.len(),
            true, // increment the memory-side address after each transfer
            W::size(),
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
    ///
    /// Memory (`MW`) and peripheral (`PW`) word sizes may differ.
    ///
    /// # Safety
    /// `peri_addr` must be a valid peripheral data-register address, and `buf`
    /// must remain valid (and must not move) for as long as the DMA may read
    /// from it — the item only captures raw addresses.
    pub unsafe fn new_write<'d, MW: Word, PW: Word>(
        request: Request,
        buf: &'d [MW],
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        Self::new_inner(
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            buf as *const [MW] as *const MW as *mut u32,
            buf.len(),
            true, // increment the memory-side address after each transfer
            MW::size(),
            PW::size(),
            options,
        )
    }

    /// Build the register images shared by reads and writes.
    ///
    /// `_options` is currently unused here: priority and interrupt settings
    /// are applied on the channel, not per list item.
    unsafe fn new_inner(
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        dst_size: WordSize,
        _options: TransferOptions,
    ) -> Self {
        // BNDT is specified as bytes, not as number of transfers.
        let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
            panic!("DMA transfers may not be larger than 65535 bytes.");
        };

        let mut br1 = regs::ChBr1(0);
        br1.set_bndt(bndt);

        let mut tr1 = regs::ChTr1(0);
        tr1.set_sdw(data_size.into());
        tr1.set_ddw(dst_size.into());
        // Only the memory side of the transfer is incremented; the peripheral
        // side always targets a fixed data register.
        tr1.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
        tr1.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);

        let mut tr2 = regs::ChTr2(0);
        // The hardware request is tied to whichever side is the peripheral.
        tr2.set_dreq(match dir {
            Dir::MemoryToPeripheral => Dreq::DESTINATION_PERIPHERAL,
            Dir::PeripheralToMemory => Dreq::SOURCE_PERIPHERAL,
        });
        tr2.set_reqsel(request);

        let (sar, dar) = match dir {
            Dir::MemoryToPeripheral => (mem_addr as _, peri_addr as _),
            Dir::PeripheralToMemory => (peri_addr as _, mem_addr as _),
        };

        // An all-zero LLR means "not linked"; see `link_to`/`unlink`.
        let llr = regs::ChLlr(0);

        Self {
            tr1: tr1.0,
            tr2: tr2.0,
            br1: br1.0,
            sar,
            dar,
            llr: llr.0,
        }
    }

    /// Link to the next linear item at the given address (lower 16 address bits).
    ///
    /// Enables all channel update bits so the next item reloads every
    /// transfer register (TR1, TR2, BR1, SAR, DAR, LLR).
    fn link_to(&mut self, next: u16) {
        let mut llr = regs::ChLlr(0);

        llr.set_ut1(true);
        llr.set_ut2(true);
        llr.set_ub1(true);
        llr.set_usa(true);
        llr.set_uda(true);
        llr.set_ull(true);
        llr.set_la(next);

        self.llr = llr.0;
    }

    /// Unlink the next linear item.
    ///
    /// Disables all channel update bits; the list ends at this item.
    fn unlink(&mut self) {
        self.llr = regs::ChLlr(0).0;
    }
}
161
/// A table of linked-list items used as a GPDMA transfer descriptor list.
pub struct Table<const ITEM_COUNT: usize> {
    // Index of the item assumed to be active.
    // NOTE(review): this is read by `next_index` but never advanced in this
    // file — presumably updated by the consuming driver; confirm.
    current_index: AtomicUsize,
    // The items themselves; linked according to the chosen `RunMode`.
    items: [LinearItem; ITEM_COUNT],
}
166
167impl<const ITEM_COUNT: usize> Table<ITEM_COUNT> {
168 /// Create a new table.
169 pub fn new(items: [LinearItem; ITEM_COUNT], run_mode: RunMode) -> Self {
170 assert!(!items.is_empty());
171
172 let mut this = Self {
173 current_index: AtomicUsize::new(0),
174 items,
175 };
176
177 if matches!(run_mode, RunMode::Once | RunMode::Repeat) {
178 this.link_sequential();
179 }
180
181 if matches!(run_mode, RunMode::Repeat) {
182 this.link_repeat();
183 }
184
185 this
186 }
187
188 pub fn len(&self) -> usize {
189 self.items.len()
190 }
191
192 /// Items are linked together sequentially.
193 pub fn link_sequential(&mut self) {
194 if self.items.len() > 1 {
195 for index in 0..(self.items.len() - 1) {
196 let next = ptr::addr_of!(self.items[index + 1]) as u16;
197 self.items[index].link_to(next);
198 }
199 }
200 }
201
202 /// Last item links to first item.
203 pub fn link_repeat(&mut self) {
204 let first_item = self.items.first().unwrap();
205 let first_address = ptr::addr_of!(first_item) as u16;
206 self.items.last_mut().unwrap().link_to(first_address);
207 }
208
209 /// The index of the next item.
210 pub fn next_index(&self) -> usize {
211 let mut next_index = self.current_index.load(Ordering::Relaxed) + 1;
212 if next_index >= self.len() {
213 next_index = 0;
214 }
215
216 next_index
217 }
218
219 /// Unlink the next item.
220 pub fn unlink_next(&mut self) {
221 let next_index = self.next_index();
222 self.items[next_index].unlink();
223 }
224
225 /// Linked list base address (upper 16 address bits).
226 pub fn base_address(&self) -> u16 {
227 ((ptr::addr_of!(self.items) as u32) >> 16) as _
228 }
229
230 /// Linked list offset address (lower 16 address bits) at the selected index.
231 pub fn offset_address(&self, index: usize) -> u16 {
232 assert!(self.items.len() > index);
233
234 (ptr::addr_of!(self.items[index]) as u32) as _
235 }
236}
diff --git a/embassy-stm32/src/dma/gpdma.rs b/embassy-stm32/src/dma/gpdma/mod.rs
index 151e4ab9f..07acd2cf0 100644
--- a/embassy-stm32/src/dma/gpdma.rs
+++ b/embassy-stm32/src/dma/gpdma/mod.rs
@@ -2,19 +2,23 @@
2 2
3use core::future::Future; 3use core::future::Future;
4use core::pin::Pin; 4use core::pin::Pin;
5use core::sync::atomic::{fence, Ordering}; 5use core::sync::atomic::{fence, AtomicUsize, Ordering};
6use core::task::{Context, Poll}; 6use core::task::{Context, Poll};
7 7
8use embassy_hal_internal::Peri; 8use embassy_hal_internal::Peri;
9use embassy_sync::waitqueue::AtomicWaker; 9use embassy_sync::waitqueue::AtomicWaker;
10use linked_list::Table;
11use stm32_metapac::gpdma::regs;
10 12
11use super::word::{Word, WordSize}; 13use super::word::{Word, WordSize};
12use super::{AnyChannel, Channel, Dir, Request, STATE}; 14use super::{AnyChannel, Channel, Dir, Request, STATE};
13use crate::interrupt::typelevel::Interrupt; 15use crate::interrupt::typelevel::Interrupt;
14use crate::interrupt::Priority;
15use crate::pac; 16use crate::pac;
16use crate::pac::gpdma::vals; 17use crate::pac::gpdma::vals;
17 18
19mod linked_list;
20mod ringbuffer;
21
18pub(crate) struct ChannelInfo { 22pub(crate) struct ChannelInfo {
19 pub(crate) dma: pac::gpdma::Gpdma, 23 pub(crate) dma: pac::gpdma::Gpdma,
20 pub(crate) num: usize, 24 pub(crate) num: usize,
@@ -22,15 +26,48 @@ pub(crate) struct ChannelInfo {
22 pub(crate) irq: pac::Interrupt, 26 pub(crate) irq: pac::Interrupt,
23} 27}
24 28
/// DMA request priority
///
/// Mapped onto the hardware `PRIO` field; see the `From<Priority>` impl.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
    /// Low Priority
    Low,
    /// Medium Priority
    Medium,
    /// High Priority
    High,
    /// Very High Priority
    VeryHigh,
}
42
43impl From<Priority> for pac::gpdma::vals::Prio {
44 fn from(value: Priority) -> Self {
45 match value {
46 Priority::Low => pac::gpdma::vals::Prio::LOW_WITH_LOWH_WEIGHT,
47 Priority::Medium => pac::gpdma::vals::Prio::LOW_WITH_MID_WEIGHT,
48 Priority::High => pac::gpdma::vals::Prio::LOW_WITH_HIGH_WEIGHT,
49 Priority::VeryHigh => pac::gpdma::vals::Prio::HIGH,
50 }
51 }
52}
53
25/// GPDMA transfer options. 54/// GPDMA transfer options.
26#[derive(Debug, Copy, Clone, PartialEq, Eq)] 55#[derive(Debug, Copy, Clone, PartialEq, Eq)]
27#[cfg_attr(feature = "defmt", derive(defmt::Format))] 56#[cfg_attr(feature = "defmt", derive(defmt::Format))]
28#[non_exhaustive] 57#[non_exhaustive]
29pub struct TransferOptions {} 58pub struct TransferOptions {
59 priority: Priority,
60 half_transfer_ir: bool,
61 complete_transfer_ir: bool,
62}
30 63
31impl Default for TransferOptions { 64impl Default for TransferOptions {
32 fn default() -> Self { 65 fn default() -> Self {
33 Self {} 66 Self {
67 priority: Priority::VeryHigh,
68 half_transfer_ir: false,
69 complete_transfer_ir: true,
70 }
34 } 71 }
35} 72}
36 73
@@ -46,11 +83,13 @@ impl From<WordSize> for vals::Dw {
46 83
47pub(crate) struct ChannelState { 84pub(crate) struct ChannelState {
48 waker: AtomicWaker, 85 waker: AtomicWaker,
86 complete_count: AtomicUsize,
49} 87}
50 88
51impl ChannelState { 89impl ChannelState {
52 pub(crate) const NEW: Self = Self { 90 pub(crate) const NEW: Self = Self {
53 waker: AtomicWaker::new(), 91 waker: AtomicWaker::new(),
92 complete_count: AtomicUsize::new(0),
54 }; 93 };
55} 94}
56 95
@@ -95,6 +134,17 @@ impl AnyChannel {
95 info.num 134 info.num
96 ); 135 );
97 } 136 }
137 if sr.ulef() {
138 panic!(
139 "DMA: link transfer error on DMA@{:08x} channel {}",
140 info.dma.as_ptr() as u32,
141 info.num
142 );
143 }
144
145 if sr.tcf() {
146 state.complete_count.fetch_add(1, Ordering::Release);
147 }
98 148
99 if sr.suspf() || sr.tcf() { 149 if sr.suspf() || sr.tcf() {
100 // disable all xxIEs to prevent the irq from firing again. 150 // disable all xxIEs to prevent the irq from firing again.
@@ -104,6 +154,238 @@ impl AnyChannel {
104 state.waker.wake(); 154 state.waker.wake();
105 } 155 }
106 } 156 }
157
    /// Read BNDT: the number of bytes left in the current block transfer.
    ///
    /// NOTE(review): `configure` programs BNDT in *bytes*, not transfers —
    /// callers treating this value as a transfer count should divide by the
    /// word size; confirm against the ring-buffer consumer.
    fn get_remaining_transfers(&self) -> u16 {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        ch.br1().read().bndt()
    }
164
    /// Configure the channel for a single (non-linked-list) transfer.
    ///
    /// Resets the channel, clears pending flags and programs all transfer
    /// registers. The channel is *not* enabled here — call `start` afterwards.
    ///
    /// # Safety
    /// `peri_addr` must be a valid peripheral register address, and the memory
    /// region starting at `mem_addr` (`mem_len` words of `data_size`) must
    /// stay valid for the duration of the transfer.
    unsafe fn configure(
        &self,
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        dst_size: WordSize,
        options: TransferOptions,
    ) {
        // BNDT is specified as bytes, not as number of transfers.
        let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
            panic!("DMA transfers may not be larger than 65535 bytes.");
        };

        let info = self.info();
        let ch = info.dma.ch(info.num);

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::SeqCst);

        ch.cr().write(|w| w.set_reset(true));
        ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
        ch.llr().write(|_| {}); // no linked list
        ch.tr1().write(|w| {
            w.set_sdw(data_size.into());
            w.set_ddw(dst_size.into());
            // Only the memory side is incremented; the peripheral side always
            // targets a fixed data register.
            w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
            w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
        });
        ch.tr2().write(|w| {
            // The hardware request is tied to whichever side is the peripheral.
            w.set_dreq(match dir {
                Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
                Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
            });
            w.set_reqsel(request);
        });
        ch.tr3().write(|_| {}); // no address offsets.
        ch.br1().write(|w| w.set_bndt(bndt));

        match dir {
            Dir::MemoryToPeripheral => {
                ch.sar().write_value(mem_addr as _);
                ch.dar().write_value(peri_addr as _);
            }
            Dir::PeripheralToMemory => {
                ch.sar().write_value(peri_addr as _);
                ch.dar().write_value(mem_addr as _);
            }
        }

        ch.cr().write(|w| {
            w.set_prio(options.priority.into());
            w.set_htie(options.half_transfer_ir);
            w.set_tcie(options.complete_transfer_ir);
            // Always report usage/data-transfer errors and suspension.
            w.set_useie(true);
            w.set_dteie(true);
            w.set_suspie(true);
        });
    }
227
228 unsafe fn configure_linked_list<const ITEM_COUNT: usize>(
229 &self,
230 table: &Table<ITEM_COUNT>,
231 options: TransferOptions,
232 ) {
233 let info = self.info();
234 let ch = info.dma.ch(info.num);
235
236 // "Preceding reads and writes cannot be moved past subsequent writes."
237 fence(Ordering::SeqCst);
238
239 ch.cr().write(|w| w.set_reset(true));
240 ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
241
242 ch.lbar().write(|reg| reg.set_lba(table.base_address()));
243
244 // Enable all linked-list field updates.
245 let mut llr = regs::ChLlr(0);
246 llr.set_ut1(true);
247 llr.set_ut2(true);
248 llr.set_ub1(true);
249 llr.set_usa(true);
250 llr.set_uda(true);
251 llr.set_ull(true);
252
253 llr.set_la(table.offset_address(0));
254
255 ch.llr().write(|_| llr.0);
256
257 ch.tr3().write(|_| {}); // no address offsets.
258
259 ch.cr().write(|w| {
260 w.set_prio(options.priority.into());
261 w.set_htie(options.half_transfer_ir);
262 w.set_tcie(options.complete_transfer_ir);
263 w.set_useie(true);
264 w.set_uleie(true);
265 w.set_dteie(true);
266 w.set_suspie(true);
267 });
268 }
269
    /// Enable the channel, starting the previously configured transfer.
    fn start(&self) {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        ch.cr().modify(|w| w.set_en(true));
    }
276
    /// Request the channel to suspend.
    ///
    /// The transfer keeps running until the suspend takes effect; poll
    /// `is_running` to observe it stopping.
    fn request_stop(&self) {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        ch.cr().modify(|w| w.set_susp(true))
    }
283
    /// Whether the channel is neither complete (TCF) nor suspended (SUSPF).
    fn is_running(&self) -> bool {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        let sr = ch.sr().read();
        !sr.tcf() && !sr.suspf()
    }
291
292 fn poll_stop(&self) -> Poll<()> {
293 use core::sync::atomic::compiler_fence;
294 compiler_fence(Ordering::SeqCst);
295
296 if !self.is_running() {
297 Poll::Ready(())
298 } else {
299 Poll::Pending
300 }
301 }
302}
303
/// Linked-list DMA transfer.
///
/// Owns the channel and the item table for the duration of the transfer.
///
/// NOTE(review): this type uses `PeripheralRef`/`Peripheral`, while the rest
/// of this module imports `Peri` — confirm these APIs are consistent with the
/// current embassy-hal-internal version.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> {
    channel: PeripheralRef<'a, AnyChannel>,
    // Kept alive here because the hardware reads the items in place.
    table: Table<ITEM_COUNT>,
}
310
impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> {
    /// Create a new linked-list transfer and start it immediately.
    ///
    /// NOTE(review): the `const N: usize` parameter is unused — consider
    /// removing it; callers relying on inference would be unaffected.
    ///
    /// # Safety
    /// The addresses captured in the table items must remain valid for the
    /// whole duration of the transfer.
    pub unsafe fn new_linked_list<const N: usize>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        table: Table<ITEM_COUNT>,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner_linked_list(channel.map_into(), table, options)
    }

    /// Configure the channel from the table and enable it.
    unsafe fn new_inner_linked_list(
        channel: PeripheralRef<'a, AnyChannel>,
        table: Table<ITEM_COUNT>,
        options: TransferOptions,
    ) -> Self {
        // NOTE(review): the hardware is pointed at `table`'s current address
        // here, but `table` is then moved into the returned struct (and the
        // struct may move again). Confirm the programmed list address stays
        // valid, or that the list is re-pointed before the channel fetches it.
        channel.configure_linked_list(&table, options);
        channel.start();

        Self { channel, table }
    }

    /// Request the transfer to stop.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Gets the total remaining transfers for the channel
    /// Note: this will be zero for transfers that completed without cancellation.
    pub fn get_remaining_transfers(&self) -> u16 {
        self.channel.get_remaining_transfers()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        // Skip Drop: the transfer already finished, no stop request is needed.
        core::mem::forget(self);
    }
}
365
impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> {
    /// Stop the channel and busy-wait until it has actually stopped, so the
    /// hardware never outlives the buffers/table this transfer borrows.
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}
375
376impl<'a, const ITEM_COUNT: usize> Unpin for LinkedListTransfer<'a, ITEM_COUNT> {}
impl<'a, const ITEM_COUNT: usize> Future for LinkedListTransfer<'a, ITEM_COUNT> {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Register the waker before checking, so a completion that lands
        // between the check and returning Pending still wakes this task.
        let state = &STATE[self.channel.id as usize];
        state.waker.register(cx.waker());

        if self.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}
108 390
109/// DMA transfer. 391/// DMA transfer.
@@ -211,74 +493,32 @@ impl<'a> Transfer<'a> {
211 mem_len: usize, 493 mem_len: usize,
212 incr_mem: bool, 494 incr_mem: bool,
213 data_size: WordSize, 495 data_size: WordSize,
214 dst_size: WordSize, 496 peripheral_size: WordSize,
215 _options: TransferOptions, 497 options: TransferOptions,
216 ) -> Self { 498 ) -> Self {
217 // BNDT is specified as bytes, not as number of transfers. 499 assert!(mem_len > 0 && mem_len <= 0xFFFF);
218 let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { 500
219 panic!("DMA transfers may not be larger than 65535 bytes."); 501 channel.configure(
220 }; 502 _request,
221 503 dir,
222 let info = channel.info(); 504 peri_addr,
223 let ch = info.dma.ch(info.num); 505 mem_addr,
224 506 mem_len,
225 // "Preceding reads and writes cannot be moved past subsequent writes." 507 incr_mem,
226 fence(Ordering::SeqCst); 508 data_size,
227 509 peripheral_size,
228 let this = Self { channel }; 510 options,
229 511 );
230 ch.cr().write(|w| w.set_reset(true)); 512 channel.start();
231 ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
232 ch.llr().write(|_| {}); // no linked list
233 ch.tr1().write(|w| {
234 w.set_sdw(data_size.into());
235 w.set_ddw(dst_size.into());
236 w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
237 w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
238 });
239 ch.tr2().write(|w| {
240 w.set_dreq(match dir {
241 Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
242 Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
243 });
244 w.set_reqsel(request);
245 });
246 ch.tr3().write(|_| {}); // no address offsets.
247 ch.br1().write(|w| w.set_bndt(bndt));
248
249 match dir {
250 Dir::MemoryToPeripheral => {
251 ch.sar().write_value(mem_addr as _);
252 ch.dar().write_value(peri_addr as _);
253 }
254 Dir::PeripheralToMemory => {
255 ch.sar().write_value(peri_addr as _);
256 ch.dar().write_value(mem_addr as _);
257 }
258 }
259
260 ch.cr().write(|w| {
261 // Enable interrupts
262 w.set_tcie(true);
263 w.set_useie(true);
264 w.set_dteie(true);
265 w.set_suspie(true);
266
267 // Start it
268 w.set_en(true);
269 });
270 513
271 this 514 Self { channel }
272 } 515 }
273 516
274 /// Request the transfer to stop. 517 /// Request the transfer to stop.
275 /// 518 ///
276 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 519 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
277 pub fn request_stop(&mut self) { 520 pub fn request_stop(&mut self) {
278 let info = self.channel.info(); 521 self.channel.request_stop()
279 let ch = info.dma.ch(info.num);
280
281 ch.cr().modify(|w| w.set_susp(true))
282 } 522 }
283 523
284 /// Return whether this transfer is still running. 524 /// Return whether this transfer is still running.
@@ -286,20 +526,13 @@ impl<'a> Transfer<'a> {
286 /// If this returns `false`, it can be because either the transfer finished, or 526 /// If this returns `false`, it can be because either the transfer finished, or
287 /// it was requested to stop early with [`request_stop`](Self::request_stop). 527 /// it was requested to stop early with [`request_stop`](Self::request_stop).
288 pub fn is_running(&mut self) -> bool { 528 pub fn is_running(&mut self) -> bool {
289 let info = self.channel.info(); 529 self.channel.is_running()
290 let ch = info.dma.ch(info.num);
291
292 let sr = ch.sr().read();
293 !sr.tcf() && !sr.suspf()
294 } 530 }
295 531
296 /// Gets the total remaining transfers for the channel 532 /// Gets the total remaining transfers for the channel
297 /// Note: this will be zero for transfers that completed without cancellation. 533 /// Note: this will be zero for transfers that completed without cancellation.
298 pub fn get_remaining_transfers(&self) -> u16 { 534 pub fn get_remaining_transfers(&self) -> u16 {
299 let info = self.channel.info(); 535 self.channel.get_remaining_transfers()
300 let ch = info.dma.ch(info.num);
301
302 ch.br1().read().bndt()
303 } 536 }
304 537
305 /// Blocking wait until the transfer finishes. 538 /// Blocking wait until the transfer finishes.
diff --git a/embassy-stm32/src/dma/gpdma/ringbuffer.rs b/embassy-stm32/src/dma/gpdma/ringbuffer.rs
new file mode 100644
index 000000000..c327e811e
--- /dev/null
+++ b/embassy-stm32/src/dma/gpdma/ringbuffer.rs
@@ -0,0 +1,283 @@
1//! GPDMA ring buffer implementation.
2//!
3//! FIXME: add request_pause functionality?
4use core::{
5 sync::atomic::{fence, Ordering},
6 task::Waker,
7};
8
9use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
10
11use crate::dma::{
12 gpdma::linked_list::{LinearItem, RunMode, Table},
13 ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer},
14 word::Word,
15 Channel, Dir, Request,
16};
17
18use super::{AnyChannel, TransferOptions, STATE};
19
/// Adapter exposing a GPDMA channel through the generic `DmaCtrl` trait used
/// by the shared ring-buffer implementation.
struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>);
21
22impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
23 fn get_remaining_transfers(&self) -> usize {
24 self.0.get_remaining_transfers() as _
25 }
26
27 fn reset_complete_count(&mut self) -> usize {
28 let state = &STATE[self.0.id as usize];
29
30 return state.complete_count.swap(0, Ordering::AcqRel);
31 }
32
33 fn set_waker(&mut self, waker: &Waker) {
34 STATE[self.0.id as usize].waker.register(waker);
35 }
36}
37
/// Ringbuffer for receiving data using GPDMA linked-list mode.
pub struct ReadableRingBuffer<'a, W: Word> {
    // Channel owned for the lifetime of the ring buffer.
    channel: PeripheralRef<'a, AnyChannel>,
    // Shared ring-buffer bookkeeping over the DMA buffer.
    ringbuf: ReadableDmaRingBuffer<'a, W>,
    // Two linked-list items, one per half of the buffer.
    table: Table<2>,
}
44
impl<'a, W: Word> ReadableRingBuffer<'a, W> {
    /// Create a new ring buffer.
    ///
    /// The buffer is split into two halves, each described by one linked-list
    /// item, so a transfer-complete interrupt fires once per half (explicit
    /// half-transfer interrupts are therefore disabled below).
    ///
    /// NOTE(review): the table is built with `RunMode::Once`, so the list does
    /// not wrap from the second half back to the first — the DMA stops after
    /// one pass over the buffer. A circular ring buffer would appear to need
    /// `RunMode::Repeat`; confirm this is intended.
    ///
    /// NOTE(review): `configure_linked_list` captures the address of
    /// `this.table`, but `this` is then moved by the return (and again by the
    /// caller). Confirm the programmed list address stays valid.
    ///
    /// # Safety
    /// `peri_addr` must be a valid peripheral data-register address for the
    /// lifetime of the ring buffer.
    pub unsafe fn new(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        mut options: TransferOptions,
    ) -> Self {
        into_ref!(channel);
        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();

        // The buffer must split into two equal halves.
        let half_len = buffer.len() / 2;
        assert_eq!(half_len * 2, buffer.len());

        // One list item per half: completion of each item already signals
        // "half the buffer done", so the half-transfer irq is not needed.
        options.half_transfer_ir = false;
        options.complete_transfer_ir = true;

        let items = [
            LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options),
            LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options),
        ];

        let table = Table::new(items, RunMode::Once);

        let this = Self {
            channel,
            ringbuf: ReadableDmaRingBuffer::new(buffer),
            table,
        };

        this.channel.configure_linked_list(&this.table, options);

        this
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        self.channel.start();
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Read elements from the ring buffer
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// Error is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> {
        self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// Error is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
    pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, Error> {
        self.ringbuf
            .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// The current length of the ringbuffer
    pub fn len(&mut self) -> Result<usize, Error> {
        Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
    }

    /// The capacity of the ringbuffer
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request the DMA to stop.
    ///
    /// The configuration for this channel will **not be preserved**. There is
    /// currently no pause API that would allow restarting with the same
    /// configuration (see the module-level FIXME).
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
}
151
impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
    /// Stop the DMA and busy-wait until it has stopped, so the hardware never
    /// outlives the borrowed buffer.
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}
161
/// Ringbuffer for writing data using DMA circular mode.
///
/// NOTE(review): see `new` — the current implementation programs a plain
/// one-shot transfer; confirm circular operation is actually achieved.
pub struct WritableRingBuffer<'a, W: Word> {
    // Channel owned for the lifetime of the ring buffer.
    channel: PeripheralRef<'a, AnyChannel>,
    // Shared ring-buffer bookkeeping over the DMA buffer.
    ringbuf: WritableDmaRingBuffer<'a, W>,
}
167
impl<'a, W: Word> WritableRingBuffer<'a, W> {
    /// Create a new ring buffer.
    ///
    /// NOTE(review): unlike [`ReadableRingBuffer`], this uses a plain
    /// `configure` call — no linked list and no wrap-around is set up here,
    /// so the transfer ends after one pass over the buffer. Confirm the
    /// "circular mode" described on the struct is intended/achieved.
    ///
    /// # Safety
    /// `peri_addr` must be a valid peripheral data-register address for the
    /// lifetime of the ring buffer.
    pub unsafe fn new(
        channel: impl Peripheral<P = impl Channel> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        mut options: TransferOptions,
    ) -> Self {
        into_ref!(channel);
        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();

        let len = buffer.len();
        let dir = Dir::MemoryToPeripheral;
        let data_size = W::size();
        let buffer_ptr = buffer.as_mut_ptr();

        // Wake on both half-transfer and transfer-complete so the writer can
        // refill each half of the buffer as it drains.
        options.half_transfer_ir = true;
        options.complete_transfer_ir = true;

        channel.configure(
            _request,
            dir,
            peri_addr as *mut u32,
            buffer_ptr as *mut u32,
            len,
            true, // increment the memory-side address
            data_size,
            data_size,
            options,
        );

        Self {
            channel,
            ringbuf: WritableDmaRingBuffer::new(buffer),
        }
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        self.channel.start();
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Write elements directly to the raw buffer.
    /// This can be used to fill the buffer before starting the DMA transfer.
    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
        self.ringbuf.write_immediate(buf)
    }

    /// Write elements from the ring buffer
    /// Return a tuple of the length written and the length remaining in the buffer
    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
        self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> {
        self.ringbuf
            .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// Wait for any ring buffer write error.
    pub async fn wait_write_error(&mut self) -> Result<usize, Error> {
        self.ringbuf
            .wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow()))
            .await
    }

    /// The current length of the ringbuffer
    pub fn len(&mut self) -> Result<usize, Error> {
        Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
    }

    /// The capacity of the ringbuffer
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request the DMA to stop.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
}
274
impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
    /// Stop the DMA and busy-wait until it has stopped, so the hardware never
    /// outlives the borrowed buffer.
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}
diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs
index 44ea497fe..e462c71d4 100644
--- a/embassy-stm32/src/dma/ringbuffer/mod.rs
+++ b/embassy-stm32/src/dma/ringbuffer/mod.rs
@@ -1,5 +1,3 @@
1#![cfg_attr(gpdma, allow(unused))]
2
3use core::future::poll_fn; 1use core::future::poll_fn;
4use core::task::{Poll, Waker}; 2use core::task::{Poll, Waker};
5 3
diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs
index 4965f8b04..88cc225dd 100644
--- a/embassy-stm32/src/sai/mod.rs
+++ b/embassy-stm32/src/sai/mod.rs
@@ -1,13 +1,11 @@
1//! Serial Audio Interface (SAI) 1//! Serial Audio Interface (SAI)
2#![macro_use] 2#![macro_use]
3#![cfg_attr(gpdma, allow(unused))]
4 3
5use core::marker::PhantomData; 4use core::marker::PhantomData;
6 5
7use embassy_hal_internal::PeripheralType; 6use embassy_hal_internal::PeripheralType;
8 7
9pub use crate::dma::word; 8pub use crate::dma::word;
10#[cfg(not(gpdma))]
11use crate::dma::{ringbuffer, Channel, ReadableRingBuffer, Request, TransferOptions, WritableRingBuffer}; 9use crate::dma::{ringbuffer, Channel, ReadableRingBuffer, Request, TransferOptions, WritableRingBuffer};
12use crate::gpio::{AfType, AnyPin, OutputType, Pull, SealedPin as _, Speed}; 10use crate::gpio::{AfType, AnyPin, OutputType, Pull, SealedPin as _, Speed};
13use crate::pac::sai::{vals, Sai as Regs}; 11use crate::pac::sai::{vals, Sai as Regs};
@@ -26,7 +24,6 @@ pub enum Error {
26 Overrun, 24 Overrun,
27} 25}
28 26
29#[cfg(not(gpdma))]
30impl From<ringbuffer::Error> for Error { 27impl From<ringbuffer::Error> for Error {
31 fn from(#[allow(unused)] err: ringbuffer::Error) -> Self { 28 fn from(#[allow(unused)] err: ringbuffer::Error) -> Self {
32 #[cfg(feature = "defmt")] 29 #[cfg(feature = "defmt")]
@@ -652,7 +649,6 @@ impl Config {
652 } 649 }
653} 650}
654 651
655#[cfg(not(gpdma))]
656enum RingBuffer<'d, W: word::Word> { 652enum RingBuffer<'d, W: word::Word> {
657 Writable(WritableRingBuffer<'d, W>), 653 Writable(WritableRingBuffer<'d, W>),
658 Readable(ReadableRingBuffer<'d, W>), 654 Readable(ReadableRingBuffer<'d, W>),
@@ -679,7 +675,6 @@ fn get_af_types(mode: Mode, tx_rx: TxRx) -> (AfType, AfType) {
679 ) 675 )
680} 676}
681 677
682#[cfg(not(gpdma))]
683fn get_ring_buffer<'d, T: Instance, W: word::Word>( 678fn get_ring_buffer<'d, T: Instance, W: word::Word>(
684 dma: Peri<'d, impl Channel>, 679 dma: Peri<'d, impl Channel>,
685 dma_buf: &'d mut [W], 680 dma_buf: &'d mut [W],
@@ -750,14 +745,10 @@ pub struct Sai<'d, T: Instance, W: word::Word> {
750 fs: Option<Peri<'d, AnyPin>>, 745 fs: Option<Peri<'d, AnyPin>>,
751 sck: Option<Peri<'d, AnyPin>>, 746 sck: Option<Peri<'d, AnyPin>>,
752 mclk: Option<Peri<'d, AnyPin>>, 747 mclk: Option<Peri<'d, AnyPin>>,
753 #[cfg(gpdma)]
754 ring_buffer: PhantomData<W>,
755 #[cfg(not(gpdma))]
756 ring_buffer: RingBuffer<'d, W>, 748 ring_buffer: RingBuffer<'d, W>,
757 sub_block: WhichSubBlock, 749 sub_block: WhichSubBlock,
758} 750}
759 751
760#[cfg(not(gpdma))]
761impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> { 752impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> {
762 /// Create a new SAI driver in asynchronous mode with MCLK. 753 /// Create a new SAI driver in asynchronous mode with MCLK.
763 /// 754 ///