aboutsummaryrefslogtreecommitdiff
path: root/embassy-stm32/src
diff options
context:
space:
mode:
authorelagil <[email protected]>2025-08-25 21:10:59 +0200
committerDario Nieuwenhuis <[email protected]>2025-09-05 14:43:29 +0200
commitcf5b1ea9f593d1d80b718b88330f041b59d071f1 (patch)
tree7d48e58a7549406f964f44c4659bf9c9dec5a44e /embassy-stm32/src
parent3c3b43fb00355a5db64a34416dc2f19042a3fc5a (diff)
feat: gpdma support (wip)
Diffstat (limited to 'embassy-stm32/src')
-rw-r--r--embassy-stm32/src/cryp/mod.rs3
-rw-r--r--embassy-stm32/src/dma/gpdma/linked_list.rs77
-rw-r--r--embassy-stm32/src/dma/gpdma/mod.rs91
-rw-r--r--embassy-stm32/src/dma/gpdma/ringbuffered.rs (renamed from embassy-stm32/src/dma/gpdma/ringbuffer.rs)208
-rw-r--r--embassy-stm32/src/dma/mod.rs2
-rw-r--r--embassy-stm32/src/dma/ringbuffer/mod.rs20
-rw-r--r--embassy-stm32/src/spdifrx/mod.rs4
-rw-r--r--embassy-stm32/src/usart/mod.rs2
8 files changed, 307 insertions, 100 deletions
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs
index 35d9f8cce..0173b2b5d 100644
--- a/embassy-stm32/src/cryp/mod.rs
+++ b/embassy-stm32/src/cryp/mod.rs
@@ -1814,7 +1814,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
1814 // Configure DMA to transfer input to crypto core. 1814 // Configure DMA to transfer input to crypto core.
1815 let dst_ptr: *mut u32 = T::regs().din().as_ptr(); 1815 let dst_ptr: *mut u32 = T::regs().din().as_ptr();
1816 let options = TransferOptions { 1816 let options = TransferOptions {
1817 #[cfg(not(gpdma))]
1818 priority: crate::dma::Priority::High, 1817 priority: crate::dma::Priority::High,
1819 ..Default::default() 1818 ..Default::default()
1820 }; 1819 };
@@ -1834,7 +1833,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
1834 // Configure DMA to transfer input to crypto core. 1833 // Configure DMA to transfer input to crypto core.
1835 let dst_ptr: *mut u32 = T::regs().din().as_ptr(); 1834 let dst_ptr: *mut u32 = T::regs().din().as_ptr();
1836 let options = TransferOptions { 1835 let options = TransferOptions {
1837 #[cfg(not(gpdma))]
1838 priority: crate::dma::Priority::High, 1836 priority: crate::dma::Priority::High,
1839 ..Default::default() 1837 ..Default::default()
1840 }; 1838 };
@@ -1853,7 +1851,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
1853 // Configure DMA to get output from crypto core. 1851 // Configure DMA to get output from crypto core.
1854 let src_ptr = T::regs().dout().as_ptr(); 1852 let src_ptr = T::regs().dout().as_ptr();
1855 let options = TransferOptions { 1853 let options = TransferOptions {
1856 #[cfg(not(gpdma))]
1857 priority: crate::dma::Priority::VeryHigh, 1854 priority: crate::dma::Priority::VeryHigh,
1858 ..Default::default() 1855 ..Default::default()
1859 }; 1856 };
diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs
index b24b2e7eb..7de9a1441 100644
--- a/embassy-stm32/src/dma/gpdma/linked_list.rs
+++ b/embassy-stm32/src/dma/gpdma/linked_list.rs
@@ -8,10 +8,6 @@ use crate::dma::{
8 word::{Word, WordSize}, 8 word::{Word, WordSize},
9 Dir, Request, 9 Dir, Request,
10}; 10};
11use core::{
12 ptr,
13 sync::atomic::{AtomicUsize, Ordering},
14};
15 11
16/// The mode in which to run the linked list. 12/// The mode in which to run the linked list.
17#[derive(Debug)] 13#[derive(Debug)]
@@ -28,6 +24,7 @@ pub enum RunMode {
28/// 24///
29/// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities. 25/// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities.
30#[derive(Debug, Copy, Clone, Default)] 26#[derive(Debug, Copy, Clone, Default)]
27#[cfg_attr(feature = "defmt", derive(defmt::Format))]
31#[repr(C)] 28#[repr(C)]
32pub struct LinearItem { 29pub struct LinearItem {
33 /// Transfer register 1. 30 /// Transfer register 1.
@@ -146,7 +143,9 @@ impl LinearItem {
146 llr.set_usa(true); 143 llr.set_usa(true);
147 llr.set_uda(true); 144 llr.set_uda(true);
148 llr.set_ull(true); 145 llr.set_ull(true);
149 llr.set_la(next); 146
147 // Lower two bits are ignored: 32 bit aligned.
148 llr.set_la(next >> 2);
150 149
151 self.llr = llr.0; 150 self.llr = llr.0;
152 } 151 }
@@ -159,78 +158,82 @@ impl LinearItem {
159 } 158 }
160} 159}
161 160
161/// A table of linked list items.
162#[repr(C)]
162pub struct Table<const ITEM_COUNT: usize> { 163pub struct Table<const ITEM_COUNT: usize> {
163 current_index: AtomicUsize, 164 /// The items.
164 items: [LinearItem; ITEM_COUNT], 165 pub items: [LinearItem; ITEM_COUNT],
165} 166}
166 167
167impl<const ITEM_COUNT: usize> Table<ITEM_COUNT> { 168impl<const ITEM_COUNT: usize> Table<ITEM_COUNT> {
168 /// Create a new table. 169 /// Create a new table.
169 pub fn new(items: [LinearItem; ITEM_COUNT], run_mode: RunMode) -> Self { 170 pub fn new(items: [LinearItem; ITEM_COUNT]) -> Self {
170 assert!(!items.is_empty()); 171 assert!(!items.is_empty());
171 172
172 let mut this = Self { 173 Self { items }
173 current_index: AtomicUsize::new(0), 174 }
174 items,
175 };
176 175
176 pub fn link(&mut self, run_mode: RunMode) {
177 if matches!(run_mode, RunMode::Once | RunMode::Repeat) { 177 if matches!(run_mode, RunMode::Once | RunMode::Repeat) {
178 this.link_sequential(); 178 self.link_sequential();
179 } 179 }
180 180
181 if matches!(run_mode, RunMode::Repeat) { 181 if matches!(run_mode, RunMode::Repeat) {
182 this.link_repeat(); 182 self.link_repeat();
183 } 183 }
184
185 this
186 } 184 }
187 185
 186 /// The number of linked list items.
188 pub fn len(&self) -> usize { 187 pub fn len(&self) -> usize {
189 self.items.len() 188 self.items.len()
190 } 189 }
191 190
192 /// Items are linked together sequentially. 191 /// Link items of given indices together: first -> second.
192 pub fn link_indices(&mut self, first: usize, second: usize) {
193 assert!(first < self.len());
194 assert!(second < self.len());
195
196 let second_item = self.offset_address(second);
197 self.items[first].link_to(second_item);
198 }
199
200 /// Link items sequentially.
193 pub fn link_sequential(&mut self) { 201 pub fn link_sequential(&mut self) {
194 if self.items.len() > 1 { 202 if self.len() > 1 {
195 for index in 0..(self.items.len() - 1) { 203 for index in 0..(self.items.len() - 1) {
196 let next = ptr::addr_of!(self.items[index + 1]) as u16; 204 let next = self.offset_address(index + 1);
197 self.items[index].link_to(next); 205 self.items[index].link_to(next);
198 } 206 }
199 } 207 }
200 } 208 }
201 209
202 /// Last item links to first item. 210 /// Link last to first item.
203 pub fn link_repeat(&mut self) { 211 pub fn link_repeat(&mut self) {
204 let first_item = self.items.first().unwrap(); 212 let first_address = self.offset_address(0);
205 let first_address = ptr::addr_of!(first_item) as u16;
206 self.items.last_mut().unwrap().link_to(first_address); 213 self.items.last_mut().unwrap().link_to(first_address);
207 } 214 }
208 215
209 /// The index of the next item. 216 /// Unlink all items.
210 pub fn next_index(&self) -> usize { 217 pub fn unlink(&mut self) {
211 let mut next_index = self.current_index.load(Ordering::Relaxed) + 1; 218 for item in self.items.iter_mut() {
212 if next_index >= self.len() { 219 item.unlink();
213 next_index = 0;
214 } 220 }
215
216 next_index
217 }
218
219 /// Unlink the next item.
220 pub fn unlink_next(&mut self) {
221 let next_index = self.next_index();
222 self.items[next_index].unlink();
223 } 221 }
224 222
225 /// Linked list base address (upper 16 address bits). 223 /// Linked list base address (upper 16 address bits).
226 pub fn base_address(&self) -> u16 { 224 pub fn base_address(&self) -> u16 {
227 ((ptr::addr_of!(self.items) as u32) >> 16) as _ 225 ((&raw const self.items as u32) >> 16) as _
228 } 226 }
229 227
230 /// Linked list offset address (lower 16 address bits) at the selected index. 228 /// Linked list offset address (lower 16 address bits) at the selected index.
231 pub fn offset_address(&self, index: usize) -> u16 { 229 pub fn offset_address(&self, index: usize) -> u16 {
232 assert!(self.items.len() > index); 230 assert!(self.items.len() > index);
233 231
234 (ptr::addr_of!(self.items[index]) as u32) as _ 232 let address = &raw const self.items[index] as _;
233
234 // Ensure 32 bit address alignment.
235 assert_eq!(address & 0b11, 0);
236
237 address
235 } 238 }
236} 239}
diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs
index 07acd2cf0..f65048d1f 100644
--- a/embassy-stm32/src/dma/gpdma/mod.rs
+++ b/embassy-stm32/src/dma/gpdma/mod.rs
@@ -8,7 +8,6 @@ use core::task::{Context, Poll};
8use embassy_hal_internal::Peri; 8use embassy_hal_internal::Peri;
9use embassy_sync::waitqueue::AtomicWaker; 9use embassy_sync::waitqueue::AtomicWaker;
10use linked_list::Table; 10use linked_list::Table;
11use stm32_metapac::gpdma::regs;
12 11
13use super::word::{Word, WordSize}; 12use super::word::{Word, WordSize};
14use super::{AnyChannel, Channel, Dir, Request, STATE}; 13use super::{AnyChannel, Channel, Dir, Request, STATE};
@@ -16,8 +15,8 @@ use crate::interrupt::typelevel::Interrupt;
16use crate::pac; 15use crate::pac;
17use crate::pac::gpdma::vals; 16use crate::pac::gpdma::vals;
18 17
19mod linked_list; 18pub mod linked_list;
20mod ringbuffer; 19pub mod ringbuffered;
21 20
22pub(crate) struct ChannelInfo { 21pub(crate) struct ChannelInfo {
23 pub(crate) dma: pac::gpdma::Gpdma, 22 pub(crate) dma: pac::gpdma::Gpdma,
@@ -56,9 +55,12 @@ impl From<Priority> for pac::gpdma::vals::Prio {
56#[cfg_attr(feature = "defmt", derive(defmt::Format))] 55#[cfg_attr(feature = "defmt", derive(defmt::Format))]
57#[non_exhaustive] 56#[non_exhaustive]
58pub struct TransferOptions { 57pub struct TransferOptions {
59 priority: Priority, 58 /// Request priority level.
60 half_transfer_ir: bool, 59 pub priority: Priority,
61 complete_transfer_ir: bool, 60 /// Enable half transfer interrupt.
61 pub half_transfer_ir: bool,
62 /// Enable transfer complete interrupt.
63 pub complete_transfer_ir: bool,
62} 64}
63 65
64impl Default for TransferOptions { 66impl Default for TransferOptions {
@@ -81,6 +83,17 @@ impl From<WordSize> for vals::Dw {
81 } 83 }
82} 84}
83 85
86impl From<vals::Dw> for WordSize {
87 fn from(raw: vals::Dw) -> Self {
88 match raw {
89 vals::Dw::BYTE => Self::OneByte,
90 vals::Dw::HALF_WORD => Self::TwoBytes,
91 vals::Dw::WORD => Self::FourBytes,
92 _ => panic!("Invalid word size"),
93 }
94 }
95}
96
84pub(crate) struct ChannelState { 97pub(crate) struct ChannelState {
85 waker: AtomicWaker, 98 waker: AtomicWaker,
86 complete_count: AtomicUsize, 99 complete_count: AtomicUsize,
@@ -94,7 +107,7 @@ impl ChannelState {
94} 107}
95 108
96/// safety: must be called only once 109/// safety: must be called only once
97pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) { 110pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: crate::interrupt::Priority) {
98 foreach_interrupt! { 111 foreach_interrupt! {
99 ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => { 112 ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
100 crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority); 113 crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
@@ -142,24 +155,30 @@ impl AnyChannel {
142 ); 155 );
143 } 156 }
144 157
158 if sr.htf() {
159 ch.fcr().write(|w| w.set_htf(true));
160 }
161
145 if sr.tcf() { 162 if sr.tcf() {
163 ch.fcr().write(|w| w.set_tcf(true));
146 state.complete_count.fetch_add(1, Ordering::Release); 164 state.complete_count.fetch_add(1, Ordering::Release);
147 } 165 }
148 166
149 if sr.suspf() || sr.tcf() { 167 if sr.suspf() {
150 // disable all xxIEs to prevent the irq from firing again. 168 // disable all xxIEs to prevent the irq from firing again.
151 ch.cr().write(|_| {}); 169 ch.cr().write(|_| {});
152 170
153 // Wake the future. It'll look at tcf and see it's set. 171 // Wake the future. It'll look at tcf and see it's set.
154 state.waker.wake();
155 } 172 }
173 state.waker.wake();
156 } 174 }
157 175
158 fn get_remaining_transfers(&self) -> u16 { 176 fn get_remaining_transfers(&self) -> u16 {
159 let info = self.info(); 177 let info = self.info();
160 let ch = info.dma.ch(info.num); 178 let ch = info.dma.ch(info.num);
179 let word_size: WordSize = ch.tr1().read().ddw().into();
161 180
162 ch.br1().read().bndt() 181 ch.br1().read().bndt() / word_size.bytes() as u16
163 } 182 }
164 183
165 unsafe fn configure( 184 unsafe fn configure(
@@ -238,21 +257,23 @@ impl AnyChannel {
238 257
239 ch.cr().write(|w| w.set_reset(true)); 258 ch.cr().write(|w| w.set_reset(true));
240 ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs 259 ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
241
242 ch.lbar().write(|reg| reg.set_lba(table.base_address())); 260 ch.lbar().write(|reg| reg.set_lba(table.base_address()));
243 261
244 // Enable all linked-list field updates. 262 // Empty LLI0.
245 let mut llr = regs::ChLlr(0); 263 ch.br1().write(|w| w.set_bndt(0));
246 llr.set_ut1(true);
247 llr.set_ut2(true);
248 llr.set_ub1(true);
249 llr.set_usa(true);
250 llr.set_uda(true);
251 llr.set_ull(true);
252
253 llr.set_la(table.offset_address(0));
254 264
255 ch.llr().write(|_| llr.0); 265 // Enable all linked-list field updates.
266 ch.llr().write(|w| {
267 w.set_ut1(true);
268 w.set_ut2(true);
269 w.set_ub1(true);
270 w.set_usa(true);
271 w.set_uda(true);
272 w.set_ull(true);
273
274 // Lower two bits are ignored: 32 bit aligned.
275 w.set_la(table.offset_address(0) >> 2);
276 });
256 277
257 ch.tr3().write(|_| {}); // no address offsets. 278 ch.tr3().write(|_| {}); // no address offsets.
258 279
@@ -281,12 +302,23 @@ impl AnyChannel {
281 ch.cr().modify(|w| w.set_susp(true)) 302 ch.cr().modify(|w| w.set_susp(true))
282 } 303 }
283 304
305 fn request_pause(&self) {
306 let info = self.info();
307 let ch = info.dma.ch(info.num);
308
309 // Disable the channel without overwriting the existing configuration
310 ch.cr().modify(|w| {
311 w.set_en(false);
312 });
313 }
314
284 fn is_running(&self) -> bool { 315 fn is_running(&self) -> bool {
285 let info = self.info(); 316 let info = self.info();
286 let ch = info.dma.ch(info.num); 317 let ch = info.dma.ch(info.num);
287 318
288 let sr = ch.sr().read(); 319 let sr = ch.sr().read();
289 !sr.tcf() && !sr.suspf() 320
321 !sr.tcf() && !sr.suspf() && !sr.idlef()
290 } 322 }
291 323
292 fn poll_stop(&self) -> Poll<()> { 324 fn poll_stop(&self) -> Poll<()> {
@@ -305,7 +337,6 @@ impl AnyChannel {
305#[must_use = "futures do nothing unless you `.await` or poll them"] 337#[must_use = "futures do nothing unless you `.await` or poll them"]
306pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> { 338pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> {
307 channel: PeripheralRef<'a, AnyChannel>, 339 channel: PeripheralRef<'a, AnyChannel>,
308 table: Table<ITEM_COUNT>,
309} 340}
310 341
311impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> { 342impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> {
@@ -328,7 +359,7 @@ impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> {
328 channel.configure_linked_list(&table, options); 359 channel.configure_linked_list(&table, options);
329 channel.start(); 360 channel.start();
330 361
331 Self { channel, table } 362 Self { channel }
332 } 363 }
333 364
334 /// Request the transfer to stop. 365 /// Request the transfer to stop.
@@ -515,12 +546,22 @@ impl<'a> Transfer<'a> {
515 } 546 }
516 547
517 /// Request the transfer to stop. 548 /// Request the transfer to stop.
549 /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
550 /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
518 /// 551 ///
519 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 552 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
520 pub fn request_stop(&mut self) { 553 pub fn request_stop(&mut self) {
521 self.channel.request_stop() 554 self.channel.request_stop()
522 } 555 }
523 556
557 /// Request the transfer to pause, keeping the existing configuration for this channel.
558 /// To restart the transfer, call [`start`](Self::start) again.
559 ///
560 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
561 pub fn request_pause(&mut self) {
562 self.channel.request_pause()
563 }
564
524 /// Return whether this transfer is still running. 565 /// Return whether this transfer is still running.
525 /// 566 ///
526 /// If this returns `false`, it can be because either the transfer finished, or 567 /// If this returns `false`, it can be because either the transfer finished, or
diff --git a/embassy-stm32/src/dma/gpdma/ringbuffer.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs
index c327e811e..fd0a98e23 100644
--- a/embassy-stm32/src/dma/gpdma/ringbuffer.rs
+++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs
@@ -2,6 +2,7 @@
2//! 2//!
3//! FIXME: add request_pause functionality? 3//! FIXME: add request_pause functionality?
4use core::{ 4use core::{
5 future::poll_fn,
5 sync::atomic::{fence, Ordering}, 6 sync::atomic::{fence, Ordering},
6 task::Waker, 7 task::Waker,
7}; 8};
@@ -12,7 +13,7 @@ use crate::dma::{
12 gpdma::linked_list::{LinearItem, RunMode, Table}, 13 gpdma::linked_list::{LinearItem, RunMode, Table},
13 ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}, 14 ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer},
14 word::Word, 15 word::Word,
15 Channel, Dir, Request, 16 Channel, Request,
16}; 17};
17 18
18use super::{AnyChannel, TransferOptions, STATE}; 19use super::{AnyChannel, TransferOptions, STATE};
@@ -27,7 +28,7 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
27 fn reset_complete_count(&mut self) -> usize { 28 fn reset_complete_count(&mut self) -> usize {
28 let state = &STATE[self.0.id as usize]; 29 let state = &STATE[self.0.id as usize];
29 30
30 return state.complete_count.swap(0, Ordering::AcqRel); 31 state.complete_count.swap(0, Ordering::AcqRel)
31 } 32 }
32 33
33 fn set_waker(&mut self, waker: &Waker) { 34 fn set_waker(&mut self, waker: &Waker) {
@@ -35,11 +36,28 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
35 } 36 }
36} 37}
37 38
39/// The current buffer half (e.g. for DMA or the user application).
40#[derive(Debug, PartialEq, PartialOrd)]
41enum BufferHalf {
42 First,
43 Second,
44}
45
46impl BufferHalf {
47 fn toggle(&mut self) {
48 *self = match *self {
49 Self::First => Self::Second,
50 Self::Second => Self::First,
51 };
52 }
53}
54
38/// Ringbuffer for receiving data using GPDMA linked-list mode. 55/// Ringbuffer for receiving data using GPDMA linked-list mode.
39pub struct ReadableRingBuffer<'a, W: Word> { 56pub struct ReadableRingBuffer<'a, W: Word> {
40 channel: PeripheralRef<'a, AnyChannel>, 57 channel: PeripheralRef<'a, AnyChannel>,
41 ringbuf: ReadableDmaRingBuffer<'a, W>, 58 ringbuf: ReadableDmaRingBuffer<'a, W>,
42 table: Table<2>, 59 table: Table<1>,
60 user_buffer_half: BufferHalf,
43} 61}
44 62
45impl<'a, W: Word> ReadableRingBuffer<'a, W> { 63impl<'a, W: Word> ReadableRingBuffer<'a, W> {
@@ -57,20 +75,22 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
57 let half_len = buffer.len() / 2; 75 let half_len = buffer.len() / 2;
58 assert_eq!(half_len * 2, buffer.len()); 76 assert_eq!(half_len * 2, buffer.len());
59 77
60 options.half_transfer_ir = false; 78 options.half_transfer_ir = true;
61 options.complete_transfer_ir = true; 79 options.complete_transfer_ir = true;
62 80
63 let items = [ 81 // let items = [
64 LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options), 82 // LinearItem::new_read(request, peri_addr, &mut buffer[..half_len], options),
65 LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options), 83 // LinearItem::new_read(request, peri_addr, &mut buffer[half_len..], options),
66 ]; 84 // ];
85 let items = [LinearItem::new_read(request, peri_addr, buffer, options)];
67 86
68 let table = Table::new(items, RunMode::Once); 87 let table = Table::new(items);
69 88
70 let this = Self { 89 let this = Self {
71 channel, 90 channel,
72 ringbuf: ReadableDmaRingBuffer::new(buffer), 91 ringbuf: ReadableDmaRingBuffer::new(buffer),
73 table, 92 table,
93 user_buffer_half: BufferHalf::First,
74 }; 94 };
75 95
76 this.channel.configure_linked_list(&this.table, options); 96 this.channel.configure_linked_list(&this.table, options);
@@ -140,6 +160,14 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
140 self.channel.request_stop() 160 self.channel.request_stop()
141 } 161 }
142 162
163 /// Request the transfer to pause, keeping the existing configuration for this channel.
164 /// To restart the transfer, call [`start`](Self::start) again.
165 ///
166 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
167 pub fn request_pause(&mut self) {
168 self.channel.request_pause()
169 }
170
143 /// Return whether DMA is still running. 171 /// Return whether DMA is still running.
144 /// 172 ///
145 /// If this returns `false`, it can be because either the transfer finished, or 173 /// If this returns `false`, it can be because either the transfer finished, or
@@ -147,6 +175,24 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
147 pub fn is_running(&mut self) -> bool { 175 pub fn is_running(&mut self) -> bool {
148 self.channel.is_running() 176 self.channel.is_running()
149 } 177 }
178
179 /// Stop the DMA transfer and await until the buffer is full.
180 ///
181 /// This disables the DMA transfer's circular mode so that the transfer
182 /// stops when the buffer is full.
183 ///
184 /// This is designed to be used with streaming input data such as the
185 /// I2S/SAI or ADC.
186 ///
187 /// When using the UART, you probably want `request_stop()`.
188 pub async fn stop(&mut self) {
189 // wait until cr.susp reads as true
190 poll_fn(|cx| {
191 self.set_waker(cx.waker());
192 self.channel.poll_stop()
193 })
194 .await
195 }
150} 196}
151 197
152impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { 198impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
@@ -163,13 +209,15 @@ impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
163pub struct WritableRingBuffer<'a, W: Word> { 209pub struct WritableRingBuffer<'a, W: Word> {
164 channel: PeripheralRef<'a, AnyChannel>, 210 channel: PeripheralRef<'a, AnyChannel>,
165 ringbuf: WritableDmaRingBuffer<'a, W>, 211 ringbuf: WritableDmaRingBuffer<'a, W>,
212 table: Table<1>,
213 user_buffer_half: BufferHalf,
166} 214}
167 215
168impl<'a, W: Word> WritableRingBuffer<'a, W> { 216impl<'a, W: Word> WritableRingBuffer<'a, W> {
169 /// Create a new ring buffer. 217 /// Create a new ring buffer.
170 pub unsafe fn new( 218 pub unsafe fn new(
171 channel: impl Peripheral<P = impl Channel> + 'a, 219 channel: impl Peripheral<P = impl Channel> + 'a,
172 _request: Request, 220 request: Request,
173 peri_addr: *mut W, 221 peri_addr: *mut W,
174 buffer: &'a mut [W], 222 buffer: &'a mut [W],
175 mut options: TransferOptions, 223 mut options: TransferOptions,
@@ -177,36 +225,63 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
177 into_ref!(channel); 225 into_ref!(channel);
178 let channel: PeripheralRef<'a, AnyChannel> = channel.map_into(); 226 let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();
179 227
180 let len = buffer.len(); 228 let half_len = buffer.len() / 2;
181 let dir = Dir::MemoryToPeripheral; 229 assert_eq!(half_len * 2, buffer.len());
182 let data_size = W::size();
183 let buffer_ptr = buffer.as_mut_ptr();
184 230
185 options.half_transfer_ir = true; 231 options.half_transfer_ir = true;
186 options.complete_transfer_ir = true; 232 options.complete_transfer_ir = true;
187 233
188 channel.configure( 234 // let items = [
189 _request, 235 // LinearItem::new_write(request, &mut buffer[..half_len], peri_addr, options),
190 dir, 236 // LinearItem::new_write(request, &mut buffer[half_len..], peri_addr, options),
191 peri_addr as *mut u32, 237 // ];
192 buffer_ptr as *mut u32, 238 let items = [LinearItem::new_write(request, buffer, peri_addr, options)];
193 len, 239 let table = Table::new(items);
194 true, 240
195 data_size, 241 let this = Self {
196 data_size,
197 options,
198 );
199
200 Self {
201 channel, 242 channel,
202 ringbuf: WritableDmaRingBuffer::new(buffer), 243 ringbuf: WritableDmaRingBuffer::new(buffer),
244 table,
245 user_buffer_half: BufferHalf::First,
246 };
247
248 this
249 }
250
251 fn dma_buffer_half(&self) -> BufferHalf {
252 if self.ringbuf.read_index(0) < self.ringbuf.cap() / 2 {
253 BufferHalf::First
254 } else {
255 BufferHalf::Second
256 }
257 }
258
259 fn link_next_buffer(&mut self) {
260 self.table.unlink();
261
262 match self.user_buffer_half {
263 BufferHalf::First => self.table.link_indices(0, 1),
264 BufferHalf::Second => self.table.link_indices(1, 0),
203 } 265 }
266
267 self.user_buffer_half.toggle();
204 } 268 }
205 269
206 /// Start the ring buffer operation. 270 /// Start the ring buffer operation.
207 /// 271 ///
208 /// You must call this after creating it for it to work. 272 /// You must call this after creating it for it to work.
209 pub fn start(&mut self) { 273 pub fn start(&mut self) {
274 unsafe {
275 self.channel.configure_linked_list(
276 &self.table,
277 TransferOptions {
278 half_transfer_ir: true,
279 complete_transfer_ir: true,
280 ..Default::default()
281 },
282 )
283 };
284 self.table.link(RunMode::Repeat);
210 self.channel.start(); 285 self.channel.start();
211 } 286 }
212 287
@@ -229,9 +304,56 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
229 304
230 /// Write an exact number of elements to the ringbuffer. 305 /// Write an exact number of elements to the ringbuffer.
231 pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> { 306 pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> {
232 self.ringbuf 307 return self
308 .ringbuf
233 .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) 309 .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
234 .await 310 .await;
311
312 let mut remaining = buffer.len();
313
314 let mut remaining_cap = 0;
315 let cap = self.ringbuf.cap();
316
317 while remaining > 0 {
318 let dma_buffer_half = self.dma_buffer_half();
319 if dma_buffer_half == self.user_buffer_half {
320 self.link_next_buffer();
321 }
322
323 let write_index = self.ringbuf.write_index(0);
324 let len = match dma_buffer_half {
325 BufferHalf::First => {
326 // if write_index < cap / 2 {
327 // error!("write index: {}", write_index);
328 // panic!()
329 // }
330 info!("Write second");
331
332 // Fill up second buffer half when DMA reads the first.
333 cap - write_index
334 }
335 BufferHalf::Second => {
336 // if write_index >= cap / 2 {
337 // error!("write index: {}", write_index);
338 // panic!()
339 // }
340 info!("Write first");
341
342 // Fill up first buffer half when DMA reads the second.
343 cap / 2 - write_index
344 }
345 }
346 .min(remaining);
347
348 remaining_cap = self
349 .ringbuf
350 .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
351 .await?;
352
353 remaining -= len;
354 }
355
356 Ok(remaining_cap)
235 } 357 }
236 358
237 /// Wait for any ring buffer write error. 359 /// Wait for any ring buffer write error.
@@ -257,12 +379,22 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
257 } 379 }
258 380
259 /// Request the DMA to stop. 381 /// Request the DMA to stop.
382 /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
383 /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
260 /// 384 ///
261 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 385 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
262 pub fn request_stop(&mut self) { 386 pub fn request_stop(&mut self) {
263 self.channel.request_stop() 387 self.channel.request_stop()
264 } 388 }
265 389
390 /// Request the transfer to pause, keeping the existing configuration for this channel.
391 /// To restart the transfer, call [`start`](Self::start) again.
392 ///
393 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
394 pub fn request_pause(&mut self) {
395 self.channel.request_pause()
396 }
397
266 /// Return whether DMA is still running. 398 /// Return whether DMA is still running.
267 /// 399 ///
268 /// If this returns `false`, it can be because either the transfer finished, or 400 /// If this returns `false`, it can be because either the transfer finished, or
@@ -270,6 +402,24 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
270 pub fn is_running(&mut self) -> bool { 402 pub fn is_running(&mut self) -> bool {
271 self.channel.is_running() 403 self.channel.is_running()
272 } 404 }
405
406 /// Stop the DMA transfer and await until the buffer is full.
407 ///
408 /// This disables the DMA transfer's circular mode so that the transfer
409 /// stops when the buffer is full.
410 ///
411 /// This is designed to be used with streaming input data such as the
412 /// I2S/SAI or ADC.
413 ///
414 /// When using the UART, you probably want `request_stop()`.
415 pub async fn stop(&mut self) {
416 // wait until cr.susp reads as true
417 poll_fn(|cx| {
418 self.set_waker(cx.waker());
419 self.channel.poll_stop()
420 })
421 .await
422 }
273} 423}
274 424
275impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { 425impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs
index d3b070a6d..030f906d2 100644
--- a/embassy-stm32/src/dma/mod.rs
+++ b/embassy-stm32/src/dma/mod.rs
@@ -9,6 +9,8 @@ pub use dma_bdma::*;
9#[cfg(gpdma)] 9#[cfg(gpdma)]
10pub(crate) mod gpdma; 10pub(crate) mod gpdma;
11#[cfg(gpdma)] 11#[cfg(gpdma)]
12pub use gpdma::ringbuffered::*;
13#[cfg(gpdma)]
12pub use gpdma::*; 14pub use gpdma::*;
13 15
14#[cfg(dmamux)] 16#[cfg(dmamux)]
diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs
index e462c71d4..99960bc74 100644
--- a/embassy-stm32/src/dma/ringbuffer/mod.rs
+++ b/embassy-stm32/src/dma/ringbuffer/mod.rs
@@ -92,6 +92,16 @@ impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
92 } 92 }
93 } 93 }
94 94
95 /// The current ring-buffer read index.
96 pub fn read_index(&self, offset: usize) -> usize {
97 self.read_index.as_index(self.cap(), offset)
98 }
99
100 /// The current ring-buffer write index.
101 pub fn write_index(&self, offset: usize) -> usize {
102 self.write_index.as_index(self.cap(), offset)
103 }
104
95 /// Reset the ring buffer to its initial state. 105 /// Reset the ring buffer to its initial state.
96 pub fn reset(&mut self, dma: &mut impl DmaCtrl) { 106 pub fn reset(&mut self, dma: &mut impl DmaCtrl) {
97 dma.reset_complete_count(); 107 dma.reset_complete_count();
@@ -208,6 +218,16 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
208 } 218 }
209 } 219 }
210 220
221 /// The current ring-buffer read index.
222 pub fn read_index(&self, offset: usize) -> usize {
223 self.read_index.as_index(self.cap(), offset)
224 }
225
226 /// The current ring-buffer write index.
227 pub fn write_index(&self, offset: usize) -> usize {
228 self.write_index.as_index(self.cap(), offset)
229 }
230
211 /// Reset the ring buffer to its initial state. The buffer after the reset will be full. 231 /// Reset the ring buffer to its initial state. The buffer after the reset will be full.
212 pub fn reset(&mut self, dma: &mut impl DmaCtrl) { 232 pub fn reset(&mut self, dma: &mut impl DmaCtrl) {
213 dma.reset_complete_count(); 233 dma.reset_complete_count();
diff --git a/embassy-stm32/src/spdifrx/mod.rs b/embassy-stm32/src/spdifrx/mod.rs
index 9c42217f0..d3b4a0b10 100644
--- a/embassy-stm32/src/spdifrx/mod.rs
+++ b/embassy-stm32/src/spdifrx/mod.rs
@@ -8,7 +8,6 @@ use embassy_sync::waitqueue::AtomicWaker;
8 8
9use crate::dma::ringbuffer::Error as RingbufferError; 9use crate::dma::ringbuffer::Error as RingbufferError;
10pub use crate::dma::word; 10pub use crate::dma::word;
11#[cfg(not(gpdma))]
12use crate::dma::ReadableRingBuffer; 11use crate::dma::ReadableRingBuffer;
13use crate::dma::{Channel, TransferOptions}; 12use crate::dma::{Channel, TransferOptions};
14use crate::gpio::{AfType, AnyPin, Pull, SealedPin as _}; 13use crate::gpio::{AfType, AnyPin, Pull, SealedPin as _};
@@ -58,7 +57,6 @@ macro_rules! impl_spdifrx_pin {
58/// Ring-buffered SPDIFRX driver. 57/// Ring-buffered SPDIFRX driver.
59/// 58///
60/// Data is read by DMAs and stored in a ring buffer. 59/// Data is read by DMAs and stored in a ring buffer.
61#[cfg(not(gpdma))]
62pub struct Spdifrx<'d, T: Instance> { 60pub struct Spdifrx<'d, T: Instance> {
63 _peri: Peri<'d, T>, 61 _peri: Peri<'d, T>,
64 spdifrx_in: Option<Peri<'d, AnyPin>>, 62 spdifrx_in: Option<Peri<'d, AnyPin>>,
@@ -118,7 +116,6 @@ impl Default for Config {
118 } 116 }
119} 117}
120 118
121#[cfg(not(gpdma))]
122impl<'d, T: Instance> Spdifrx<'d, T> { 119impl<'d, T: Instance> Spdifrx<'d, T> {
123 fn dma_opts() -> TransferOptions { 120 fn dma_opts() -> TransferOptions {
124 TransferOptions { 121 TransferOptions {
@@ -236,7 +233,6 @@ impl<'d, T: Instance> Spdifrx<'d, T> {
236 } 233 }
237} 234}
238 235
239#[cfg(not(gpdma))]
240impl<'d, T: Instance> Drop for Spdifrx<'d, T> { 236impl<'d, T: Instance> Drop for Spdifrx<'d, T> {
241 fn drop(&mut self) { 237 fn drop(&mut self) {
242 T::info().regs.cr().modify(|cr| cr.set_spdifen(0x00)); 238 T::info().regs.cr().modify(|cr| cr.set_spdifen(0x00));
diff --git a/embassy-stm32/src/usart/mod.rs b/embassy-stm32/src/usart/mod.rs
index 5bece6d66..3d95de897 100644
--- a/embassy-stm32/src/usart/mod.rs
+++ b/embassy-stm32/src/usart/mod.rs
@@ -1965,9 +1965,7 @@ pub use buffered::*;
1965pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler; 1965pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler;
1966mod buffered; 1966mod buffered;
1967 1967
1968#[cfg(not(gpdma))]
1969mod ringbuffered; 1968mod ringbuffered;
1970#[cfg(not(gpdma))]
1971pub use ringbuffered::RingBufferedUartRx; 1969pub use ringbuffered::RingBufferedUartRx;
1972 1970
1973#[cfg(any(usart_v1, usart_v2))] 1971#[cfg(any(usart_v1, usart_v2))]