aboutsummaryrefslogtreecommitdiff
path: root/embassy-stm32
diff options
context:
space:
mode:
authorDario Nieuwenhuis <[email protected]>2024-02-24 02:38:31 +0100
committerDario Nieuwenhuis <[email protected]>2024-02-24 02:41:41 +0100
commite67dfcb04f79aebed52a357b867d418e0ff476af (patch)
tree82d1fb2b40b71a6dd62bfbe79596d61cbb6ce7c8 /embassy-stm32
parentf77d59500e9bbc0282f1ba4b6b27507f83f9d974 (diff)
stm32/dma: add AnyChannel, add support for BDMA on H7.
Diffstat (limited to 'embassy-stm32')
-rw-r--r--embassy-stm32/build.rs179
-rw-r--r--embassy-stm32/src/dcmi.rs122
-rw-r--r--embassy-stm32/src/dma/bdma.rs740
-rw-r--r--embassy-stm32/src/dma/dma.rs1012
-rw-r--r--embassy-stm32/src/dma/dma_bdma.rs913
-rw-r--r--embassy-stm32/src/dma/dmamux.rs34
-rw-r--r--embassy-stm32/src/dma/gpdma.rs168
-rw-r--r--embassy-stm32/src/dma/mod.rs104
-rw-r--r--embassy-stm32/src/sai/mod.rs49
-rw-r--r--embassy-stm32/src/sdmmc/mod.rs15
-rw-r--r--embassy-stm32/src/usart/ringbuffered.rs20
11 files changed, 1207 insertions, 2149 deletions
diff --git a/embassy-stm32/build.rs b/embassy-stm32/build.rs
index 068a74733..4ecc97536 100644
--- a/embassy-stm32/build.rs
+++ b/embassy-stm32/build.rs
@@ -354,50 +354,6 @@ fn main() {
354 g.extend(quote! { pub mod flash_regions { #flash_regions } }); 354 g.extend(quote! { pub mod flash_regions { #flash_regions } });
355 355
356 // ======== 356 // ========
357 // Generate DMA IRQs.
358
359 let mut dma_irqs: BTreeMap<&str, Vec<(&str, &str, &str)>> = BTreeMap::new();
360
361 for p in METADATA.peripherals {
362 if let Some(r) = &p.registers {
363 if r.kind == "dma" || r.kind == "bdma" || r.kind == "gpdma" {
364 if p.name == "BDMA1" {
365 // BDMA1 in H7 doesn't use DMAMUX, which breaks
366 continue;
367 }
368 for irq in p.interrupts {
369 dma_irqs
370 .entry(irq.interrupt)
371 .or_default()
372 .push((r.kind, p.name, irq.signal));
373 }
374 }
375 }
376 }
377
378 let dma_irqs: TokenStream = dma_irqs
379 .iter()
380 .map(|(irq, channels)| {
381 let irq = format_ident!("{}", irq);
382
383 let xdma = format_ident!("{}", channels[0].0);
384 let channels = channels.iter().map(|(_, dma, ch)| format_ident!("{}_{}", dma, ch));
385
386 quote! {
387 #[cfg(feature = "rt")]
388 #[crate::interrupt]
389 unsafe fn #irq () {
390 #(
391 <crate::peripherals::#channels as crate::dma::#xdma::sealed::Channel>::on_irq();
392 )*
393 }
394 }
395 })
396 .collect();
397
398 g.extend(dma_irqs);
399
400 // ========
401 // Extract the rcc registers 357 // Extract the rcc registers
402 let rcc_registers = METADATA 358 let rcc_registers = METADATA
403 .peripherals 359 .peripherals
@@ -664,7 +620,7 @@ fn main() {
664 620
665 #[rustfmt::skip] 621 #[rustfmt::skip]
666 let signals: HashMap<_, _> = [ 622 let signals: HashMap<_, _> = [
667 // (kind, signal) => trait 623 // (kind, signal) => trait
668 (("usart", "TX"), quote!(crate::usart::TxPin)), 624 (("usart", "TX"), quote!(crate::usart::TxPin)),
669 (("usart", "RX"), quote!(crate::usart::RxPin)), 625 (("usart", "RX"), quote!(crate::usart::RxPin)),
670 (("usart", "CTS"), quote!(crate::usart::CtsPin)), 626 (("usart", "CTS"), quote!(crate::usart::CtsPin)),
@@ -897,7 +853,7 @@ fn main() {
897 (("quadspi", "BK2_IO3"), quote!(crate::qspi::BK2D3Pin)), 853 (("quadspi", "BK2_IO3"), quote!(crate::qspi::BK2D3Pin)),
898 (("quadspi", "BK2_NCS"), quote!(crate::qspi::BK2NSSPin)), 854 (("quadspi", "BK2_NCS"), quote!(crate::qspi::BK2NSSPin)),
899 (("quadspi", "CLK"), quote!(crate::qspi::SckPin)), 855 (("quadspi", "CLK"), quote!(crate::qspi::SckPin)),
900 ].into(); 856 ].into();
901 857
902 for p in METADATA.peripherals { 858 for p in METADATA.peripherals {
903 if let Some(regs) = &p.registers { 859 if let Some(regs) = &p.registers {
@@ -959,7 +915,7 @@ fn main() {
959 }; 915 };
960 if let Some(ch) = ch { 916 if let Some(ch) = ch {
961 g.extend(quote! { 917 g.extend(quote! {
962 impl_adc_pin!( #peri, #pin_name, #ch); 918 impl_adc_pin!( #peri, #pin_name, #ch);
963 }) 919 })
964 } 920 }
965 } 921 }
@@ -991,7 +947,7 @@ fn main() {
991 let ch: u8 = pin.signal.strip_prefix("OUT").unwrap().parse().unwrap(); 947 let ch: u8 = pin.signal.strip_prefix("OUT").unwrap().parse().unwrap();
992 948
993 g.extend(quote! { 949 g.extend(quote! {
994 impl_dac_pin!( #peri, #pin_name, #ch); 950 impl_dac_pin!( #peri, #pin_name, #ch);
995 }) 951 })
996 } 952 }
997 } 953 }
@@ -1189,7 +1145,6 @@ fn main() {
1189 let mut interrupts_table: Vec<Vec<String>> = Vec::new(); 1145 let mut interrupts_table: Vec<Vec<String>> = Vec::new();
1190 let mut peripherals_table: Vec<Vec<String>> = Vec::new(); 1146 let mut peripherals_table: Vec<Vec<String>> = Vec::new();
1191 let mut pins_table: Vec<Vec<String>> = Vec::new(); 1147 let mut pins_table: Vec<Vec<String>> = Vec::new();
1192 let mut dma_channels_table: Vec<Vec<String>> = Vec::new();
1193 let mut adc_common_table: Vec<Vec<String>> = Vec::new(); 1148 let mut adc_common_table: Vec<Vec<String>> = Vec::new();
1194 1149
1195 /* 1150 /*
@@ -1283,51 +1238,108 @@ fn main() {
1283 } 1238 }
1284 } 1239 }
1285 1240
1286 let mut dma_channel_count: usize = 0; 1241 let mut dmas = TokenStream::new();
1287 let mut bdma_channel_count: usize = 0; 1242 let has_dmamux = METADATA
1288 let mut gpdma_channel_count: usize = 0; 1243 .peripherals
1244 .iter()
1245 .flat_map(|p| &p.registers)
1246 .any(|p| p.kind == "dmamux");
1247
1248 for (ch_idx, ch) in METADATA.dma_channels.iter().enumerate() {
1249 // Some H7 chips have BDMA1 hardcoded for DFSDM, ie no DMAMUX. It's unsupported, skip it.
1250 if has_dmamux && ch.dmamux.is_none() {
1251 continue;
1252 }
1253
1254 let name = format_ident!("{}", ch.name);
1255 let idx = ch_idx as u8;
1256 g.extend(quote!(dma_channel_impl!(#name, #idx);));
1257
1258 let dma = format_ident!("{}", ch.dma);
1259 let ch_num = ch.channel as usize;
1289 1260
1290 for ch in METADATA.dma_channels {
1291 let mut row = Vec::new();
1292 let dma_peri = METADATA.peripherals.iter().find(|p| p.name == ch.dma).unwrap(); 1261 let dma_peri = METADATA.peripherals.iter().find(|p| p.name == ch.dma).unwrap();
1293 let bi = dma_peri.registers.as_ref().unwrap(); 1262 let bi = dma_peri.registers.as_ref().unwrap();
1294 1263
1295 let num; 1264 let dma_info = match bi.kind {
1296 match bi.kind { 1265 "dma" => quote!(crate::dma::DmaInfo::Dma(crate::pac::#dma)),
1297 "dma" => { 1266 "bdma" => quote!(crate::dma::DmaInfo::Bdma(crate::pac::#dma)),
1298 num = dma_channel_count; 1267 "gpdma" => quote!(crate::pac::#dma),
1299 dma_channel_count += 1; 1268 _ => panic!("bad dma channel kind {}", bi.kind),
1300 } 1269 };
1301 "bdma" => { 1270
1302 num = bdma_channel_count; 1271 let dmamux = match &ch.dmamux {
1303 bdma_channel_count += 1; 1272 Some(dmamux) => {
1273 let dmamux = format_ident!("{}", dmamux);
1274 let num = ch.dmamux_channel.unwrap() as usize;
1275
1276 g.extend(quote!(dmamux_channel_impl!(#name, #dmamux);));
1277
1278 quote! {
1279 dmamux: crate::dma::DmamuxInfo {
1280 mux: crate::pac::#dmamux,
1281 num: #num,
1282 },
1283 }
1304 } 1284 }
1305 "gpdma" => { 1285 None => quote!(),
1306 num = gpdma_channel_count; 1286 };
1307 gpdma_channel_count += 1; 1287
1288 dmas.extend(quote! {
1289 crate::dma::ChannelInfo {
1290 dma: #dma_info,
1291 num: #ch_num,
1292 #dmamux
1293 },
1294 });
1295 }
1296
1297 // ========
1298 // Generate DMA IRQs.
1299
1300 let mut dma_irqs: BTreeMap<&str, Vec<String>> = BTreeMap::new();
1301
1302 for p in METADATA.peripherals {
1303 if let Some(r) = &p.registers {
1304 if r.kind == "dma" || r.kind == "bdma" || r.kind == "gpdma" {
1305 for irq in p.interrupts {
1306 let ch_name = format!("{}_{}", p.name, irq.signal);
1307 let ch = METADATA.dma_channels.iter().find(|c| c.name == ch_name).unwrap();
1308
1309 // Some H7 chips have BDMA1 hardcoded for DFSDM, ie no DMAMUX. It's unsupported, skip it.
1310 if has_dmamux && ch.dmamux.is_none() {
1311 continue;
1312 }
1313
1314 dma_irqs.entry(irq.interrupt).or_default().push(ch_name);
1315 }
1308 } 1316 }
1309 _ => panic!("bad dma channel kind {}", bi.kind),
1310 } 1317 }
1318 }
1311 1319
1312 row.push(ch.name.to_string()); 1320 let dma_irqs: TokenStream = dma_irqs
1313 row.push(ch.dma.to_string()); 1321 .iter()
1314 row.push(bi.kind.to_string()); 1322 .map(|(irq, channels)| {
1315 row.push(ch.channel.to_string()); 1323 let irq = format_ident!("{}", irq);
1316 row.push(num.to_string());
1317 if let Some(dmamux) = &ch.dmamux {
1318 let dmamux_channel = ch.dmamux_channel.unwrap();
1319 row.push(format!("{{dmamux: {}, dmamux_channel: {}}}", dmamux, dmamux_channel));
1320 } else {
1321 row.push("{}".to_string());
1322 }
1323 1324
1324 dma_channels_table.push(row); 1325 let channels = channels.iter().map(|c| format_ident!("{}", c));
1325 } 1326
1327 quote! {
1328 #[cfg(feature = "rt")]
1329 #[crate::interrupt]
1330 unsafe fn #irq () {
1331 #(
1332 <crate::peripherals::#channels as crate::dma::sealed::ChannelInterrupt>::on_irq();
1333 )*
1334 }
1335 }
1336 })
1337 .collect();
1338
1339 g.extend(dma_irqs);
1326 1340
1327 g.extend(quote! { 1341 g.extend(quote! {
1328 pub(crate) const DMA_CHANNEL_COUNT: usize = #dma_channel_count; 1342 pub(crate) const DMA_CHANNELS: &[crate::dma::ChannelInfo] = &[#dmas];
1329 pub(crate) const BDMA_CHANNEL_COUNT: usize = #bdma_channel_count;
1330 pub(crate) const GPDMA_CHANNEL_COUNT: usize = #gpdma_channel_count;
1331 }); 1343 });
1332 1344
1333 for irq in METADATA.interrupts { 1345 for irq in METADATA.interrupts {
@@ -1347,7 +1359,6 @@ fn main() {
1347 make_table(&mut m, "foreach_interrupt", &interrupts_table); 1359 make_table(&mut m, "foreach_interrupt", &interrupts_table);
1348 make_table(&mut m, "foreach_peripheral", &peripherals_table); 1360 make_table(&mut m, "foreach_peripheral", &peripherals_table);
1349 make_table(&mut m, "foreach_pin", &pins_table); 1361 make_table(&mut m, "foreach_pin", &pins_table);
1350 make_table(&mut m, "foreach_dma_channel", &dma_channels_table);
1351 make_table(&mut m, "foreach_adc", &adc_common_table); 1362 make_table(&mut m, "foreach_adc", &adc_common_table);
1352 1363
1353 let out_dir = &PathBuf::from(env::var_os("OUT_DIR").unwrap()); 1364 let out_dir = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
diff --git a/embassy-stm32/src/dcmi.rs b/embassy-stm32/src/dcmi.rs
index 4d02284b2..826b04a4b 100644
--- a/embassy-stm32/src/dcmi.rs
+++ b/embassy-stm32/src/dcmi.rs
@@ -394,19 +394,7 @@ where
394 394
395 /// This method starts the capture and finishes when both the dma transfer and DCMI finish the frame transfer. 395 /// This method starts the capture and finishes when both the dma transfer and DCMI finish the frame transfer.
396 /// The implication is that the input buffer size must be exactly the size of the captured frame. 396 /// The implication is that the input buffer size must be exactly the size of the captured frame.
397 ///
398 /// Note that when `buffer.len() > 0xffff` the capture future requires some real-time guarantees to be upheld
399 /// (must be polled fast enough so the buffers get switched before data is overwritten).
400 /// It is therefore recommended that it is run on higher priority executor.
401 pub async fn capture(&mut self, buffer: &mut [u32]) -> Result<(), Error> { 397 pub async fn capture(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
402 if buffer.len() <= 0xffff {
403 return self.capture_small(buffer).await;
404 } else {
405 return self.capture_giant(buffer).await;
406 }
407 }
408
409 async fn capture_small(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
410 let r = self.inner.regs(); 398 let r = self.inner.regs();
411 let src = r.dr().as_ptr() as *mut u32; 399 let src = r.dr().as_ptr() as *mut u32;
412 let request = self.dma.request(); 400 let request = self.dma.request();
@@ -441,116 +429,6 @@ where
441 429
442 result 430 result
443 } 431 }
444
445 #[cfg(not(dma))]
446 async fn capture_giant(&mut self, _buffer: &mut [u32]) -> Result<(), Error> {
447 panic!("capturing to buffers larger than 0xffff is only supported on DMA for now, not on BDMA or GPDMA.");
448 }
449
450 #[cfg(dma)]
451 async fn capture_giant(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
452 use crate::dma::TransferOptions;
453
454 let data_len = buffer.len();
455 let chunk_estimate = data_len / 0xffff;
456
457 let mut chunks = chunk_estimate + 1;
458 while data_len % chunks != 0 {
459 chunks += 1;
460 }
461
462 let chunk_size = data_len / chunks;
463
464 let mut remaining_chunks = chunks - 2;
465
466 let mut m0ar = buffer.as_mut_ptr();
467 let mut m1ar = unsafe { buffer.as_mut_ptr().add(chunk_size) };
468
469 let channel = &mut self.dma;
470 let request = channel.request();
471
472 let r = self.inner.regs();
473 let src = r.dr().as_ptr() as *mut u32;
474
475 let mut transfer = unsafe {
476 crate::dma::DoubleBuffered::new_read(
477 &mut self.dma,
478 request,
479 src,
480 m0ar,
481 m1ar,
482 chunk_size,
483 TransferOptions::default(),
484 )
485 };
486
487 let mut last_chunk_set_for_transfer = false;
488 let mut buffer0_last_accessible = false;
489 let dma_result = poll_fn(|cx| {
490 transfer.set_waker(cx.waker());
491
492 let buffer0_currently_accessible = transfer.is_buffer0_accessible();
493
494 // check if the accessible buffer changed since last poll
495 if buffer0_last_accessible == buffer0_currently_accessible {
496 return Poll::Pending;
497 }
498 buffer0_last_accessible = !buffer0_last_accessible;
499
500 if remaining_chunks != 0 {
501 if remaining_chunks % 2 == 0 && buffer0_currently_accessible {
502 m0ar = unsafe { m0ar.add(2 * chunk_size) };
503 unsafe { transfer.set_buffer0(m0ar) }
504 remaining_chunks -= 1;
505 } else if !buffer0_currently_accessible {
506 m1ar = unsafe { m1ar.add(2 * chunk_size) };
507 unsafe { transfer.set_buffer1(m1ar) };
508 remaining_chunks -= 1;
509 }
510 } else {
511 if buffer0_currently_accessible {
512 unsafe { transfer.set_buffer0(buffer.as_mut_ptr()) }
513 } else {
514 unsafe { transfer.set_buffer1(buffer.as_mut_ptr()) }
515 }
516 if last_chunk_set_for_transfer {
517 transfer.request_stop();
518 return Poll::Ready(());
519 }
520 last_chunk_set_for_transfer = true;
521 }
522 Poll::Pending
523 });
524
525 Self::clear_interrupt_flags();
526 Self::enable_irqs();
527
528 let result = poll_fn(|cx| {
529 STATE.waker.register(cx.waker());
530
531 let ris = crate::pac::DCMI.ris().read();
532 if ris.err_ris() {
533 crate::pac::DCMI.icr().write(|r| r.set_err_isc(true));
534 Poll::Ready(Err(Error::PeripheralError))
535 } else if ris.ovr_ris() {
536 crate::pac::DCMI.icr().write(|r| r.set_ovr_isc(true));
537 Poll::Ready(Err(Error::Overrun))
538 } else if ris.frame_ris() {
539 crate::pac::DCMI.icr().write(|r| r.set_frame_isc(true));
540 Poll::Ready(Ok(()))
541 } else {
542 Poll::Pending
543 }
544 });
545
546 Self::toggle(true);
547
548 let (_, result) = embassy_futures::join::join(dma_result, result).await;
549
550 Self::toggle(false);
551
552 result
553 }
554} 432}
555 433
556mod sealed { 434mod sealed {
diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs
deleted file mode 100644
index 994bdb1e6..000000000
--- a/embassy-stm32/src/dma/bdma.rs
+++ /dev/null
@@ -1,740 +0,0 @@
1//! Basic Direct Memory Acccess (BDMA)
2
3use core::future::Future;
4use core::pin::Pin;
5use core::sync::atomic::{fence, AtomicUsize, Ordering};
6use core::task::{Context, Poll, Waker};
7
8use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
9use embassy_sync::waitqueue::AtomicWaker;
10
11use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
12use super::word::{Word, WordSize};
13use super::Dir;
14use crate::_generated::BDMA_CHANNEL_COUNT;
15use crate::interrupt::typelevel::Interrupt;
16use crate::interrupt::Priority;
17use crate::pac;
18use crate::pac::bdma::{regs, vals};
19
20/// BDMA transfer options.
21#[derive(Debug, Copy, Clone, PartialEq, Eq)]
22#[cfg_attr(feature = "defmt", derive(defmt::Format))]
23#[non_exhaustive]
24pub struct TransferOptions {
25 /// Enable circular DMA
26 ///
27 /// Note:
28 /// If you enable circular mode manually, you may want to build and `.await` the `Transfer` in a separate task.
29 /// Since DMA in circular mode need manually stop, `.await` in current task would block the task forever.
30 pub circular: bool,
31 /// Enable half transfer interrupt
32 pub half_transfer_ir: bool,
33 /// Enable transfer complete interrupt
34 pub complete_transfer_ir: bool,
35}
36
37impl Default for TransferOptions {
38 fn default() -> Self {
39 Self {
40 circular: false,
41 half_transfer_ir: false,
42 complete_transfer_ir: true,
43 }
44 }
45}
46
47impl From<WordSize> for vals::Size {
48 fn from(raw: WordSize) -> Self {
49 match raw {
50 WordSize::OneByte => Self::BITS8,
51 WordSize::TwoBytes => Self::BITS16,
52 WordSize::FourBytes => Self::BITS32,
53 }
54 }
55}
56
57impl From<Dir> for vals::Dir {
58 fn from(raw: Dir) -> Self {
59 match raw {
60 Dir::MemoryToPeripheral => Self::FROMMEMORY,
61 Dir::PeripheralToMemory => Self::FROMPERIPHERAL,
62 }
63 }
64}
65
66struct State {
67 ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
68 complete_count: [AtomicUsize; BDMA_CHANNEL_COUNT],
69}
70
71impl State {
72 const fn new() -> Self {
73 const ZERO: AtomicUsize = AtomicUsize::new(0);
74 const AW: AtomicWaker = AtomicWaker::new();
75 Self {
76 ch_wakers: [AW; BDMA_CHANNEL_COUNT],
77 complete_count: [ZERO; BDMA_CHANNEL_COUNT],
78 }
79 }
80}
81
82static STATE: State = State::new();
83
84/// safety: must be called only once
85pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
86 foreach_interrupt! {
87 ($peri:ident, bdma, $block:ident, $signal_name:ident, $irq:ident) => {
88 crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
89 crate::interrupt::typelevel::$irq::enable();
90 };
91 }
92 crate::_generated::init_bdma();
93}
94
95foreach_dma_channel! {
96 ($channel_peri:ident, BDMA1, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
97 // BDMA1 in H7 doesn't use DMAMUX, which breaks
98 };
99 ($channel_peri:ident, $dma_peri:ident, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
100 impl sealed::Channel for crate::peripherals::$channel_peri {
101 fn regs(&self) -> pac::bdma::Dma {
102 pac::$dma_peri
103 }
104 fn num(&self) -> usize {
105 $channel_num
106 }
107 fn index(&self) -> usize {
108 $index
109 }
110 fn on_irq() {
111 unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
112 }
113 }
114
115 impl Channel for crate::peripherals::$channel_peri {}
116 };
117}
118
119/// Safety: Must be called with a matching set of parameters for a valid dma channel
120pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index: usize) {
121 let isr = dma.isr().read();
122 let cr = dma.ch(channel_num).cr();
123
124 if isr.teif(channel_num) {
125 panic!("DMA: error on BDMA@{:08x} channel {}", dma.as_ptr() as u32, channel_num);
126 }
127
128 if isr.htif(channel_num) && cr.read().htie() {
129 // Acknowledge half transfer complete interrupt
130 dma.ifcr().write(|w| w.set_htif(channel_num, true));
131 } else if isr.tcif(channel_num) && cr.read().tcie() {
132 // Acknowledge transfer complete interrupt
133 dma.ifcr().write(|w| w.set_tcif(channel_num, true));
134 #[cfg(not(armv6m))]
135 STATE.complete_count[index].fetch_add(1, Ordering::Release);
136 #[cfg(armv6m)]
137 critical_section::with(|_| {
138 let x = STATE.complete_count[index].load(Ordering::Relaxed);
139 STATE.complete_count[index].store(x + 1, Ordering::Release);
140 })
141 } else {
142 return;
143 }
144
145 STATE.ch_wakers[index].wake();
146}
147
148/// DMA request type alias.
149#[cfg(any(bdma_v2, dmamux))]
150pub type Request = u8;
151/// DMA request type alias.
152#[cfg(not(any(bdma_v2, dmamux)))]
153pub type Request = ();
154
155/// DMA channel.
156#[cfg(dmamux)]
157pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
158/// DMA channel.
159#[cfg(not(dmamux))]
160pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
161
162pub(crate) mod sealed {
163 use super::*;
164
165 pub trait Channel {
166 fn regs(&self) -> pac::bdma::Dma;
167 fn num(&self) -> usize;
168 fn index(&self) -> usize;
169 fn on_irq();
170 }
171}
172
173/// DMA transfer.
174#[must_use = "futures do nothing unless you `.await` or poll them"]
175pub struct Transfer<'a, C: Channel> {
176 channel: PeripheralRef<'a, C>,
177}
178
179impl<'a, C: Channel> Transfer<'a, C> {
180 /// Create a new read DMA transfer (peripheral to memory).
181 pub unsafe fn new_read<W: Word>(
182 channel: impl Peripheral<P = C> + 'a,
183 request: Request,
184 peri_addr: *mut W,
185 buf: &'a mut [W],
186 options: TransferOptions,
187 ) -> Self {
188 Self::new_read_raw(channel, request, peri_addr, buf, options)
189 }
190
191 /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
192 pub unsafe fn new_read_raw<W: Word>(
193 channel: impl Peripheral<P = C> + 'a,
194 request: Request,
195 peri_addr: *mut W,
196 buf: *mut [W],
197 options: TransferOptions,
198 ) -> Self {
199 into_ref!(channel);
200
201 let (ptr, len) = super::slice_ptr_parts_mut(buf);
202 assert!(len > 0 && len <= 0xFFFF);
203
204 Self::new_inner(
205 channel,
206 request,
207 Dir::PeripheralToMemory,
208 peri_addr as *const u32,
209 ptr as *mut u32,
210 len,
211 true,
212 W::size(),
213 options,
214 )
215 }
216
217 /// Create a new write DMA transfer (memory to peripheral).
218 pub unsafe fn new_write<W: Word>(
219 channel: impl Peripheral<P = C> + 'a,
220 request: Request,
221 buf: &'a [W],
222 peri_addr: *mut W,
223 options: TransferOptions,
224 ) -> Self {
225 Self::new_write_raw(channel, request, buf, peri_addr, options)
226 }
227
228 /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
229 pub unsafe fn new_write_raw<W: Word>(
230 channel: impl Peripheral<P = C> + 'a,
231 request: Request,
232 buf: *const [W],
233 peri_addr: *mut W,
234 options: TransferOptions,
235 ) -> Self {
236 into_ref!(channel);
237
238 let (ptr, len) = super::slice_ptr_parts(buf);
239 assert!(len > 0 && len <= 0xFFFF);
240
241 Self::new_inner(
242 channel,
243 request,
244 Dir::MemoryToPeripheral,
245 peri_addr as *const u32,
246 ptr as *mut u32,
247 len,
248 true,
249 W::size(),
250 options,
251 )
252 }
253
254 /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
255 pub unsafe fn new_write_repeated<W: Word>(
256 channel: impl Peripheral<P = C> + 'a,
257 request: Request,
258 repeated: &'a W,
259 count: usize,
260 peri_addr: *mut W,
261 options: TransferOptions,
262 ) -> Self {
263 into_ref!(channel);
264
265 Self::new_inner(
266 channel,
267 request,
268 Dir::MemoryToPeripheral,
269 peri_addr as *const u32,
270 repeated as *const W as *mut u32,
271 count,
272 false,
273 W::size(),
274 options,
275 )
276 }
277
278 unsafe fn new_inner(
279 channel: PeripheralRef<'a, C>,
280 _request: Request,
281 dir: Dir,
282 peri_addr: *const u32,
283 mem_addr: *mut u32,
284 mem_len: usize,
285 incr_mem: bool,
286 data_size: WordSize,
287 options: TransferOptions,
288 ) -> Self {
289 let ch = channel.regs().ch(channel.num());
290
291 // "Preceding reads and writes cannot be moved past subsequent writes."
292 fence(Ordering::SeqCst);
293
294 #[cfg(bdma_v2)]
295 critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
296
297 let mut this = Self { channel };
298 this.clear_irqs();
299 STATE.complete_count[this.channel.index()].store(0, Ordering::Release);
300
301 #[cfg(dmamux)]
302 super::dmamux::configure_dmamux(&*this.channel, _request);
303
304 ch.par().write_value(peri_addr as u32);
305 ch.mar().write_value(mem_addr as u32);
306 ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
307 ch.cr().write(|w| {
308 w.set_psize(data_size.into());
309 w.set_msize(data_size.into());
310 w.set_minc(incr_mem);
311 w.set_dir(dir.into());
312 w.set_teie(true);
313 w.set_tcie(options.complete_transfer_ir);
314 w.set_htie(options.half_transfer_ir);
315 w.set_circ(options.circular);
316 if options.circular {
317 debug!("Setting circular mode");
318 }
319 w.set_pl(vals::Pl::VERYHIGH);
320 w.set_en(true);
321 });
322
323 this
324 }
325
326 fn clear_irqs(&mut self) {
327 self.channel.regs().ifcr().write(|w| {
328 w.set_tcif(self.channel.num(), true);
329 w.set_teif(self.channel.num(), true);
330 });
331 }
332
333 /// Request the transfer to stop.
334 ///
335 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
336 pub fn request_stop(&mut self) {
337 let ch = self.channel.regs().ch(self.channel.num());
338
339 // Disable the channel. Keep the IEs enabled so the irqs still fire.
340 ch.cr().write(|w| {
341 w.set_teie(true);
342 w.set_tcie(true);
343 });
344 }
345
346 /// Return whether this transfer is still running.
347 ///
348 /// If this returns `false`, it can be because either the transfer finished, or
349 /// it was requested to stop early with [`request_stop`](Self::request_stop).
350 pub fn is_running(&mut self) -> bool {
351 let ch = self.channel.regs().ch(self.channel.num());
352 let en = ch.cr().read().en();
353 let circular = ch.cr().read().circ();
354 let tcif = STATE.complete_count[self.channel.index()].load(Ordering::Acquire) != 0;
355 en && (circular || !tcif)
356 }
357
358 /// Get the total remaining transfers for the channel.
359 ///
360 /// This will be zero for transfers that completed instead of being canceled with [`request_stop`](Self::request_stop).
361 pub fn get_remaining_transfers(&self) -> u16 {
362 let ch = self.channel.regs().ch(self.channel.num());
363 ch.ndtr().read().ndt()
364 }
365
366 /// Blocking wait until the transfer finishes.
367 pub fn blocking_wait(mut self) {
368 while self.is_running() {}
369 self.request_stop();
370
371 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
372 fence(Ordering::SeqCst);
373
374 core::mem::forget(self);
375 }
376}
377
378impl<'a, C: Channel> Drop for Transfer<'a, C> {
379 fn drop(&mut self) {
380 self.request_stop();
381 while self.is_running() {}
382
383 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
384 fence(Ordering::SeqCst);
385 }
386}
387
388impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
389impl<'a, C: Channel> Future for Transfer<'a, C> {
390 type Output = ();
391 fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
392 STATE.ch_wakers[self.channel.index()].register(cx.waker());
393
394 if self.is_running() {
395 Poll::Pending
396 } else {
397 Poll::Ready(())
398 }
399 }
400}
401
402// ==============================
403
404struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
405
406impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
407 fn get_remaining_transfers(&self) -> usize {
408 let ch = self.0.regs().ch(self.0.num());
409 ch.ndtr().read().ndt() as usize
410 }
411
412 fn get_complete_count(&self) -> usize {
413 STATE.complete_count[self.0.index()].load(Ordering::Acquire)
414 }
415
416 fn reset_complete_count(&mut self) -> usize {
417 #[cfg(not(armv6m))]
418 return STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel);
419 #[cfg(armv6m)]
420 return critical_section::with(|_| {
421 let x = STATE.complete_count[self.0.index()].load(Ordering::Acquire);
422 STATE.complete_count[self.0.index()].store(0, Ordering::Release);
423 x
424 });
425 }
426
427 fn set_waker(&mut self, waker: &Waker) {
428 STATE.ch_wakers[self.0.index()].register(waker);
429 }
430}
431
432/// Ringbuffer for reading data using DMA circular mode.
433pub struct ReadableRingBuffer<'a, C: Channel, W: Word> {
434 cr: regs::Cr,
435 channel: PeripheralRef<'a, C>,
436 ringbuf: ReadableDmaRingBuffer<'a, W>,
437}
438
439impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> {
440 /// Create a new ring buffer.
441 pub unsafe fn new(
442 channel: impl Peripheral<P = C> + 'a,
443 _request: Request,
444 peri_addr: *mut W,
445 buffer: &'a mut [W],
446 _options: TransferOptions,
447 ) -> Self {
448 into_ref!(channel);
449
450 let len = buffer.len();
451 assert!(len > 0 && len <= 0xFFFF);
452
453 let dir = Dir::PeripheralToMemory;
454 let data_size = W::size();
455
456 let channel_number = channel.num();
457 let dma = channel.regs();
458
459 // "Preceding reads and writes cannot be moved past subsequent writes."
460 fence(Ordering::SeqCst);
461
462 #[cfg(bdma_v2)]
463 critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
464
465 let mut w = regs::Cr(0);
466 w.set_psize(data_size.into());
467 w.set_msize(data_size.into());
468 w.set_minc(true);
469 w.set_dir(dir.into());
470 w.set_teie(true);
471 w.set_htie(true);
472 w.set_tcie(true);
473 w.set_circ(true);
474 w.set_pl(vals::Pl::VERYHIGH);
475 w.set_en(true);
476
477 let buffer_ptr = buffer.as_mut_ptr();
478 let mut this = Self {
479 channel,
480 cr: w,
481 ringbuf: ReadableDmaRingBuffer::new(buffer),
482 };
483 this.clear_irqs();
484
485 #[cfg(dmamux)]
486 super::dmamux::configure_dmamux(&*this.channel, _request);
487
488 let ch = dma.ch(channel_number);
489 ch.par().write_value(peri_addr as u32);
490 ch.mar().write_value(buffer_ptr as u32);
491 ch.ndtr().write(|w| w.set_ndt(len as u16));
492
493 this
494 }
495
496 /// Start the ring buffer operation.
497 ///
498 /// You must call this after creating it for it to work.
499 pub fn start(&mut self) {
500 let ch = self.channel.regs().ch(self.channel.num());
501 ch.cr().write_value(self.cr)
502 }
503
504 /// Clear all data in the ring buffer.
505 pub fn clear(&mut self) {
506 self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
507 }
508
509 /// Read elements from the ring buffer
510 /// Return a tuple of the length read and the length remaining in the buffer
511 /// If not all of the elements were read, then there will be some elements in the buffer remaining
512 /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
513 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
514 pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
515 self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
516 }
517
518 /// Read an exact number of elements from the ringbuffer.
519 ///
520 /// Returns the remaining number of elements available for immediate reading.
521 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
522 ///
523 /// Async/Wake Behavior:
524 /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
525 /// and when it wraps around. This means that when called with a buffer of length 'M', when this
526 /// ring buffer was created with a buffer of size 'N':
527 /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
528 /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
529 pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> {
530 self.ringbuf
531 .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
532 .await
533 }
534
535 /// The capacity of the ringbuffer.
536 pub const fn capacity(&self) -> usize {
537 self.ringbuf.cap()
538 }
539
540 /// Set a waker to be woken when at least one byte is received.
541 pub fn set_waker(&mut self, waker: &Waker) {
542 DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
543 }
544
545 fn clear_irqs(&mut self) {
546 let dma = self.channel.regs();
547 dma.ifcr().write(|w| {
548 w.set_htif(self.channel.num(), true);
549 w.set_tcif(self.channel.num(), true);
550 w.set_teif(self.channel.num(), true);
551 });
552 }
553
554 /// Request DMA to stop.
555 ///
556 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
557 pub fn request_stop(&mut self) {
558 let ch = self.channel.regs().ch(self.channel.num());
559
560 // Disable the channel. Keep the IEs enabled so the irqs still fire.
561 // If the channel is enabled and transfer is not completed, we need to perform
562 // two separate write access to the CR register to disable the channel.
563 ch.cr().write(|w| {
564 w.set_teie(true);
565 w.set_htie(true);
566 w.set_tcie(true);
567 });
568 }
569
570 /// Return whether DMA is still running.
571 ///
572 /// If this returns `false`, it can be because either the transfer finished, or
573 /// it was requested to stop early with [`request_stop`](Self::request_stop).
574 pub fn is_running(&mut self) -> bool {
575 let ch = self.channel.regs().ch(self.channel.num());
576 ch.cr().read().en()
577 }
578}
579
580impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> {
581 fn drop(&mut self) {
582 self.request_stop();
583 while self.is_running() {}
584
585 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
586 fence(Ordering::SeqCst);
587 }
588}
589
590/// Ringbuffer for writing data using DMA circular mode.
591pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
592 cr: regs::Cr,
593 channel: PeripheralRef<'a, C>,
594 ringbuf: WritableDmaRingBuffer<'a, W>,
595}
596
597impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
598 /// Create a new ring buffer.
599 pub unsafe fn new(
600 channel: impl Peripheral<P = C> + 'a,
601 _request: Request,
602 peri_addr: *mut W,
603 buffer: &'a mut [W],
604 _options: TransferOptions,
605 ) -> Self {
606 into_ref!(channel);
607
608 let len = buffer.len();
609 assert!(len > 0 && len <= 0xFFFF);
610
611 let dir = Dir::MemoryToPeripheral;
612 let data_size = W::size();
613
614 let channel_number = channel.num();
615 let dma = channel.regs();
616
617 // "Preceding reads and writes cannot be moved past subsequent writes."
618 fence(Ordering::SeqCst);
619
620 #[cfg(bdma_v2)]
621 critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
622
623 let mut w = regs::Cr(0);
624 w.set_psize(data_size.into());
625 w.set_msize(data_size.into());
626 w.set_minc(true);
627 w.set_dir(dir.into());
628 w.set_teie(true);
629 w.set_htie(true);
630 w.set_tcie(true);
631 w.set_circ(true);
632 w.set_pl(vals::Pl::VERYHIGH);
633 w.set_en(true);
634
635 let buffer_ptr = buffer.as_mut_ptr();
636 let mut this = Self {
637 channel,
638 cr: w,
639 ringbuf: WritableDmaRingBuffer::new(buffer),
640 };
641 this.clear_irqs();
642
643 #[cfg(dmamux)]
644 super::dmamux::configure_dmamux(&*this.channel, _request);
645
646 let ch = dma.ch(channel_number);
647 ch.par().write_value(peri_addr as u32);
648 ch.mar().write_value(buffer_ptr as u32);
649 ch.ndtr().write(|w| w.set_ndt(len as u16));
650
651 this
652 }
653
654 /// Start the ring buffer operation.
655 ///
656 /// You must call this after creating it for it to work.
657 pub fn start(&mut self) {
658 let ch = self.channel.regs().ch(self.channel.num());
659 ch.cr().write_value(self.cr)
660 }
661
662 /// Clear all data in the ring buffer.
663 pub fn clear(&mut self) {
664 self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
665 }
666
667 /// Write elements directly to the raw buffer.
668 /// This can be used to fill the buffer before starting the DMA transfer.
669 #[allow(dead_code)]
670 pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
671 self.ringbuf.write_immediate(buf)
672 }
673
674 /// Write elements to the ring buffer
675 /// Return a tuple of the length written and the length remaining in the buffer
676 pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
677 self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
678 }
679
680 /// Write an exact number of elements to the ringbuffer.
681 pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
682 self.ringbuf
683 .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
684 .await
685 }
686
687 /// The capacity of the ringbuffer.
688 pub const fn capacity(&self) -> usize {
689 self.ringbuf.cap()
690 }
691
692 /// Set a waker to be woken when at least one byte is sent.
693 pub fn set_waker(&mut self, waker: &Waker) {
694 DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
695 }
696
697 fn clear_irqs(&mut self) {
698 let dma = self.channel.regs();
699 dma.ifcr().write(|w| {
700 w.set_htif(self.channel.num(), true);
701 w.set_tcif(self.channel.num(), true);
702 w.set_teif(self.channel.num(), true);
703 });
704 }
705
706 /// Request DMA to stop.
707 ///
708 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
709 pub fn request_stop(&mut self) {
710 let ch = self.channel.regs().ch(self.channel.num());
711
712 // Disable the channel. Keep the IEs enabled so the irqs still fire.
713 // If the channel is enabled and transfer is not completed, we need to perform
714 // two separate write access to the CR register to disable the channel.
715 ch.cr().write(|w| {
716 w.set_teie(true);
717 w.set_htie(true);
718 w.set_tcie(true);
719 });
720 }
721
722 /// Return whether DMA is still running.
723 ///
724 /// If this returns `false`, it can be because either the transfer finished, or
725 /// it was requested to stop early with [`request_stop`](Self::request_stop).
726 pub fn is_running(&mut self) -> bool {
727 let ch = self.channel.regs().ch(self.channel.num());
728 ch.cr().read().en()
729 }
730}
731
732impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> {
733 fn drop(&mut self) {
734 self.request_stop();
735 while self.is_running() {}
736
737 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
738 fence(Ordering::SeqCst);
739 }
740}
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs
deleted file mode 100644
index e762b1bde..000000000
--- a/embassy-stm32/src/dma/dma.rs
+++ /dev/null
@@ -1,1012 +0,0 @@
1use core::future::Future;
2use core::marker::PhantomData;
3use core::pin::Pin;
4use core::sync::atomic::{fence, AtomicUsize, Ordering};
5use core::task::{Context, Poll, Waker};
6
7use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
8use embassy_sync::waitqueue::AtomicWaker;
9
10use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
11use super::word::{Word, WordSize};
12use super::Dir;
13use crate::_generated::DMA_CHANNEL_COUNT;
14use crate::interrupt::typelevel::Interrupt;
15use crate::interrupt::Priority;
16use crate::pac::dma::{regs, vals};
17use crate::{interrupt, pac};
18
19/// DMA transfer options.
20#[derive(Debug, Copy, Clone, PartialEq, Eq)]
21#[cfg_attr(feature = "defmt", derive(defmt::Format))]
22#[non_exhaustive]
23pub struct TransferOptions {
24 /// Peripheral burst transfer configuration
25 pub pburst: Burst,
26 /// Memory burst transfer configuration
27 pub mburst: Burst,
28 /// Flow control configuration
29 pub flow_ctrl: FlowControl,
30 /// FIFO threshold for DMA FIFO mode. If none, direct mode is used.
31 pub fifo_threshold: Option<FifoThreshold>,
32 /// Enable circular DMA
33 ///
34 /// Note:
35 /// If you enable circular mode manually, you may want to build and `.await` the `Transfer` in a separate task.
36 /// Since DMA in circular mode need manually stop, `.await` in current task would block the task forever.
37 pub circular: bool,
38 /// Enable half transfer interrupt
39 pub half_transfer_ir: bool,
40 /// Enable transfer complete interrupt
41 pub complete_transfer_ir: bool,
42}
43
44impl Default for TransferOptions {
45 fn default() -> Self {
46 Self {
47 pburst: Burst::Single,
48 mburst: Burst::Single,
49 flow_ctrl: FlowControl::Dma,
50 fifo_threshold: None,
51 circular: false,
52 half_transfer_ir: false,
53 complete_transfer_ir: true,
54 }
55 }
56}
57
58impl From<WordSize> for vals::Size {
59 fn from(raw: WordSize) -> Self {
60 match raw {
61 WordSize::OneByte => Self::BITS8,
62 WordSize::TwoBytes => Self::BITS16,
63 WordSize::FourBytes => Self::BITS32,
64 }
65 }
66}
67
68impl From<Dir> for vals::Dir {
69 fn from(raw: Dir) -> Self {
70 match raw {
71 Dir::MemoryToPeripheral => Self::MEMORYTOPERIPHERAL,
72 Dir::PeripheralToMemory => Self::PERIPHERALTOMEMORY,
73 }
74 }
75}
76
77/// DMA transfer burst setting.
78#[derive(Debug, Copy, Clone, PartialEq, Eq)]
79#[cfg_attr(feature = "defmt", derive(defmt::Format))]
80pub enum Burst {
81 /// Single transfer
82 Single,
83 /// Incremental burst of 4 beats
84 Incr4,
85 /// Incremental burst of 8 beats
86 Incr8,
87 /// Incremental burst of 16 beats
88 Incr16,
89}
90
91impl From<Burst> for vals::Burst {
92 fn from(burst: Burst) -> Self {
93 match burst {
94 Burst::Single => vals::Burst::SINGLE,
95 Burst::Incr4 => vals::Burst::INCR4,
96 Burst::Incr8 => vals::Burst::INCR8,
97 Burst::Incr16 => vals::Burst::INCR16,
98 }
99 }
100}
101
102/// DMA flow control setting.
103#[derive(Debug, Copy, Clone, PartialEq, Eq)]
104#[cfg_attr(feature = "defmt", derive(defmt::Format))]
105pub enum FlowControl {
106 /// Flow control by DMA
107 Dma,
108 /// Flow control by peripheral
109 Peripheral,
110}
111
112impl From<FlowControl> for vals::Pfctrl {
113 fn from(flow: FlowControl) -> Self {
114 match flow {
115 FlowControl::Dma => vals::Pfctrl::DMA,
116 FlowControl::Peripheral => vals::Pfctrl::PERIPHERAL,
117 }
118 }
119}
120
121/// DMA FIFO threshold.
122#[derive(Debug, Copy, Clone, PartialEq, Eq)]
123#[cfg_attr(feature = "defmt", derive(defmt::Format))]
124pub enum FifoThreshold {
125 /// 1/4 full FIFO
126 Quarter,
127 /// 1/2 full FIFO
128 Half,
129 /// 3/4 full FIFO
130 ThreeQuarters,
131 /// Full FIFO
132 Full,
133}
134
135impl From<FifoThreshold> for vals::Fth {
136 fn from(value: FifoThreshold) -> Self {
137 match value {
138 FifoThreshold::Quarter => vals::Fth::QUARTER,
139 FifoThreshold::Half => vals::Fth::HALF,
140 FifoThreshold::ThreeQuarters => vals::Fth::THREEQUARTERS,
141 FifoThreshold::Full => vals::Fth::FULL,
142 }
143 }
144}
145
146struct State {
147 ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT],
148 complete_count: [AtomicUsize; DMA_CHANNEL_COUNT],
149}
150
151impl State {
152 const fn new() -> Self {
153 const ZERO: AtomicUsize = AtomicUsize::new(0);
154 const AW: AtomicWaker = AtomicWaker::new();
155 Self {
156 ch_wakers: [AW; DMA_CHANNEL_COUNT],
157 complete_count: [ZERO; DMA_CHANNEL_COUNT],
158 }
159 }
160}
161
162static STATE: State = State::new();
163
164/// safety: must be called only once
165pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
166 foreach_interrupt! {
167 ($peri:ident, dma, $block:ident, $signal_name:ident, $irq:ident) => {
168 interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
169 interrupt::typelevel::$irq::enable();
170 };
171 }
172 crate::_generated::init_dma();
173}
174
175foreach_dma_channel! {
176 ($channel_peri:ident, $dma_peri:ident, dma, $channel_num:expr, $index:expr, $dmamux:tt) => {
177 impl sealed::Channel for crate::peripherals::$channel_peri {
178 fn regs(&self) -> pac::dma::Dma {
179 pac::$dma_peri
180 }
181 fn num(&self) -> usize {
182 $channel_num
183 }
184 fn index(&self) -> usize {
185 $index
186 }
187 fn on_irq() {
188 unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
189 }
190 }
191
192 impl Channel for crate::peripherals::$channel_peri {}
193 };
194}
195
196/// Safety: Must be called with a matching set of parameters for a valid dma channel
197pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index: usize) {
198 let cr = dma.st(channel_num).cr();
199 let isr = dma.isr(channel_num / 4).read();
200
201 if isr.teif(channel_num % 4) {
202 panic!("DMA: error on DMA@{:08x} channel {}", dma.as_ptr() as u32, channel_num);
203 }
204
205 if isr.htif(channel_num % 4) && cr.read().htie() {
206 // Acknowledge half transfer complete interrupt
207 dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true));
208 } else if isr.tcif(channel_num % 4) && cr.read().tcie() {
209 // Acknowledge transfer complete interrupt
210 dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
211 STATE.complete_count[index].fetch_add(1, Ordering::Release);
212 } else {
213 return;
214 }
215
216 STATE.ch_wakers[index].wake();
217}
218
219/// DMA request type alias. (also known as DMA channel number in some chips)
220#[cfg(any(dma_v2, dmamux))]
221pub type Request = u8;
222/// DMA request type alias. (also known as DMA channel number in some chips)
223#[cfg(not(any(dma_v2, dmamux)))]
224pub type Request = ();
225
226/// DMA channel.
227#[cfg(dmamux)]
228pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
229/// DMA channel.
230#[cfg(not(dmamux))]
231pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
232
233pub(crate) mod sealed {
234 use super::*;
235
236 pub trait Channel {
237 fn regs(&self) -> pac::dma::Dma;
238 fn num(&self) -> usize;
239 fn index(&self) -> usize;
240 fn on_irq();
241 }
242}
243
244/// DMA transfer.
245#[must_use = "futures do nothing unless you `.await` or poll them"]
246pub struct Transfer<'a, C: Channel> {
247 channel: PeripheralRef<'a, C>,
248}
249
250impl<'a, C: Channel> Transfer<'a, C> {
251 /// Create a new read DMA transfer (peripheral to memory).
252 pub unsafe fn new_read<W: Word>(
253 channel: impl Peripheral<P = C> + 'a,
254 request: Request,
255 peri_addr: *mut W,
256 buf: &'a mut [W],
257 options: TransferOptions,
258 ) -> Self {
259 Self::new_read_raw(channel, request, peri_addr, buf, options)
260 }
261
262 /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
263 pub unsafe fn new_read_raw<W: Word>(
264 channel: impl Peripheral<P = C> + 'a,
265 request: Request,
266 peri_addr: *mut W,
267 buf: *mut [W],
268 options: TransferOptions,
269 ) -> Self {
270 into_ref!(channel);
271
272 let (ptr, len) = super::slice_ptr_parts_mut(buf);
273 assert!(len > 0 && len <= 0xFFFF);
274
275 Self::new_inner(
276 channel,
277 request,
278 Dir::PeripheralToMemory,
279 peri_addr as *const u32,
280 ptr as *mut u32,
281 len,
282 true,
283 W::size(),
284 options,
285 )
286 }
287
288 /// Create a new write DMA transfer (memory to peripheral).
289 pub unsafe fn new_write<W: Word>(
290 channel: impl Peripheral<P = C> + 'a,
291 request: Request,
292 buf: &'a [W],
293 peri_addr: *mut W,
294 options: TransferOptions,
295 ) -> Self {
296 Self::new_write_raw(channel, request, buf, peri_addr, options)
297 }
298
299 /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
300 pub unsafe fn new_write_raw<W: Word>(
301 channel: impl Peripheral<P = C> + 'a,
302 request: Request,
303 buf: *const [W],
304 peri_addr: *mut W,
305 options: TransferOptions,
306 ) -> Self {
307 into_ref!(channel);
308
309 let (ptr, len) = super::slice_ptr_parts(buf);
310 assert!(len > 0 && len <= 0xFFFF);
311
312 Self::new_inner(
313 channel,
314 request,
315 Dir::MemoryToPeripheral,
316 peri_addr as *const u32,
317 ptr as *mut u32,
318 len,
319 true,
320 W::size(),
321 options,
322 )
323 }
324
325 /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
326 pub unsafe fn new_write_repeated<W: Word>(
327 channel: impl Peripheral<P = C> + 'a,
328 request: Request,
329 repeated: &'a W,
330 count: usize,
331 peri_addr: *mut W,
332 options: TransferOptions,
333 ) -> Self {
334 into_ref!(channel);
335
336 Self::new_inner(
337 channel,
338 request,
339 Dir::MemoryToPeripheral,
340 peri_addr as *const u32,
341 repeated as *const W as *mut u32,
342 count,
343 false,
344 W::size(),
345 options,
346 )
347 }
348
349 unsafe fn new_inner(
350 channel: PeripheralRef<'a, C>,
351 _request: Request,
352 dir: Dir,
353 peri_addr: *const u32,
354 mem_addr: *mut u32,
355 mem_len: usize,
356 incr_mem: bool,
357 data_size: WordSize,
358 options: TransferOptions,
359 ) -> Self {
360 let ch = channel.regs().st(channel.num());
361
362 // "Preceding reads and writes cannot be moved past subsequent writes."
363 fence(Ordering::SeqCst);
364
365 let mut this = Self { channel };
366 this.clear_irqs();
367
368 #[cfg(dmamux)]
369 super::dmamux::configure_dmamux(&*this.channel, _request);
370
371 ch.par().write_value(peri_addr as u32);
372 ch.m0ar().write_value(mem_addr as u32);
373 ch.ndtr().write_value(regs::Ndtr(mem_len as _));
374 ch.fcr().write(|w| {
375 if let Some(fth) = options.fifo_threshold {
376 // FIFO mode
377 w.set_dmdis(vals::Dmdis::DISABLED);
378 w.set_fth(fth.into());
379 } else {
380 // Direct mode
381 w.set_dmdis(vals::Dmdis::ENABLED);
382 }
383 });
384 ch.cr().write(|w| {
385 w.set_dir(dir.into());
386 w.set_msize(data_size.into());
387 w.set_psize(data_size.into());
388 w.set_pl(vals::Pl::VERYHIGH);
389 w.set_minc(incr_mem);
390 w.set_pinc(false);
391 w.set_teie(true);
392 w.set_tcie(options.complete_transfer_ir);
393 w.set_circ(options.circular);
394 if options.circular {
395 debug!("Setting circular mode");
396 }
397 #[cfg(dma_v1)]
398 w.set_trbuff(true);
399
400 #[cfg(dma_v2)]
401 w.set_chsel(_request);
402
403 w.set_pburst(options.pburst.into());
404 w.set_mburst(options.mburst.into());
405 w.set_pfctrl(options.flow_ctrl.into());
406
407 w.set_en(true);
408 });
409
410 this
411 }
412
413 fn clear_irqs(&mut self) {
414 let isrn = self.channel.num() / 4;
415 let isrbit = self.channel.num() % 4;
416
417 self.channel.regs().ifcr(isrn).write(|w| {
418 w.set_tcif(isrbit, true);
419 w.set_teif(isrbit, true);
420 });
421 }
422
423 /// Request the transfer to stop.
424 ///
425 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
426 pub fn request_stop(&mut self) {
427 let ch = self.channel.regs().st(self.channel.num());
428
429 // Disable the channel. Keep the IEs enabled so the irqs still fire.
430 ch.cr().write(|w| {
431 w.set_teie(true);
432 w.set_tcie(true);
433 });
434 }
435
436 /// Return whether this transfer is still running.
437 ///
438 /// If this returns `false`, it can be because either the transfer finished, or
439 /// it was requested to stop early with [`request_stop`](Self::request_stop).
440 pub fn is_running(&mut self) -> bool {
441 let ch = self.channel.regs().st(self.channel.num());
442 ch.cr().read().en()
443 }
444
445 /// Gets the total remaining transfers for the channel
446 /// Note: this will be zero for transfers that completed without cancellation.
447 pub fn get_remaining_transfers(&self) -> u16 {
448 let ch = self.channel.regs().st(self.channel.num());
449 ch.ndtr().read().ndt()
450 }
451
452 /// Blocking wait until the transfer finishes.
453 pub fn blocking_wait(mut self) {
454 while self.is_running() {}
455
456 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
457 fence(Ordering::SeqCst);
458
459 core::mem::forget(self);
460 }
461}
462
463impl<'a, C: Channel> Drop for Transfer<'a, C> {
464 fn drop(&mut self) {
465 self.request_stop();
466 while self.is_running() {}
467
468 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
469 fence(Ordering::SeqCst);
470 }
471}
472
473impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
474impl<'a, C: Channel> Future for Transfer<'a, C> {
475 type Output = ();
476 fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
477 STATE.ch_wakers[self.channel.index()].register(cx.waker());
478
479 if self.is_running() {
480 Poll::Pending
481 } else {
482 Poll::Ready(())
483 }
484 }
485}
486
487// ==================================
488
489/// Double-buffered DMA transfer.
490pub struct DoubleBuffered<'a, C: Channel, W: Word> {
491 channel: PeripheralRef<'a, C>,
492 _phantom: PhantomData<W>,
493}
494
495impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
496 /// Create a new read DMA transfer (peripheral to memory).
497 pub unsafe fn new_read(
498 channel: impl Peripheral<P = C> + 'a,
499 _request: Request,
500 peri_addr: *mut W,
501 buf0: *mut W,
502 buf1: *mut W,
503 len: usize,
504 options: TransferOptions,
505 ) -> Self {
506 into_ref!(channel);
507 assert!(len > 0 && len <= 0xFFFF);
508
509 let dir = Dir::PeripheralToMemory;
510 let data_size = W::size();
511
512 let channel_number = channel.num();
513 let dma = channel.regs();
514
515 // "Preceding reads and writes cannot be moved past subsequent writes."
516 fence(Ordering::SeqCst);
517
518 let mut this = Self {
519 channel,
520 _phantom: PhantomData,
521 };
522 this.clear_irqs();
523
524 #[cfg(dmamux)]
525 super::dmamux::configure_dmamux(&*this.channel, _request);
526
527 let ch = dma.st(channel_number);
528 ch.par().write_value(peri_addr as u32);
529 ch.m0ar().write_value(buf0 as u32);
530 ch.m1ar().write_value(buf1 as u32);
531 ch.ndtr().write_value(regs::Ndtr(len as _));
532 ch.fcr().write(|w| {
533 if let Some(fth) = options.fifo_threshold {
534 // FIFO mode
535 w.set_dmdis(vals::Dmdis::DISABLED);
536 w.set_fth(fth.into());
537 } else {
538 // Direct mode
539 w.set_dmdis(vals::Dmdis::ENABLED);
540 }
541 });
542 ch.cr().write(|w| {
543 w.set_dir(dir.into());
544 w.set_msize(data_size.into());
545 w.set_psize(data_size.into());
546 w.set_pl(vals::Pl::VERYHIGH);
547 w.set_minc(true);
548 w.set_pinc(false);
549 w.set_teie(true);
550 w.set_tcie(true);
551 #[cfg(dma_v1)]
552 w.set_trbuff(true);
553
554 #[cfg(dma_v2)]
555 w.set_chsel(_request);
556
557 w.set_pburst(options.pburst.into());
558 w.set_mburst(options.mburst.into());
559 w.set_pfctrl(options.flow_ctrl.into());
560
561 w.set_en(true);
562 });
563
564 this
565 }
566
567 fn clear_irqs(&mut self) {
568 let channel_number = self.channel.num();
569 let dma = self.channel.regs();
570 let isrn = channel_number / 4;
571 let isrbit = channel_number % 4;
572
573 dma.ifcr(isrn).write(|w| {
574 w.set_htif(isrbit, true);
575 w.set_tcif(isrbit, true);
576 w.set_teif(isrbit, true);
577 });
578 }
579
580 /// Set the first buffer address.
581 ///
582 /// You may call this while DMA is transferring the other buffer.
583 pub unsafe fn set_buffer0(&mut self, buffer: *mut W) {
584 let ch = self.channel.regs().st(self.channel.num());
585 ch.m0ar().write_value(buffer as _);
586 }
587
588 /// Set the second buffer address.
589 ///
590 /// You may call this while DMA is transferring the other buffer.
591 pub unsafe fn set_buffer1(&mut self, buffer: *mut W) {
592 let ch = self.channel.regs().st(self.channel.num());
593 ch.m1ar().write_value(buffer as _);
594 }
595
596 /// Returh whether buffer0 is accessible (i.e. whether DMA is transferring buffer1 now)
597 pub fn is_buffer0_accessible(&mut self) -> bool {
598 let ch = self.channel.regs().st(self.channel.num());
599 ch.cr().read().ct() == vals::Ct::MEMORY1
600 }
601
602 /// Set a waker to be woken when one of the buffers is being transferred.
603 pub fn set_waker(&mut self, waker: &Waker) {
604 STATE.ch_wakers[self.channel.index()].register(waker);
605 }
606
607 /// Request the transfer to stop.
608 ///
609 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
610 pub fn request_stop(&mut self) {
611 let ch = self.channel.regs().st(self.channel.num());
612
613 // Disable the channel. Keep the IEs enabled so the irqs still fire.
614 ch.cr().write(|w| {
615 w.set_teie(true);
616 w.set_tcie(true);
617 });
618 }
619
620 /// Return whether this transfer is still running.
621 ///
622 /// If this returns `false`, it can be because either the transfer finished, or
623 /// it was requested to stop early with [`request_stop`](Self::request_stop).
624 pub fn is_running(&mut self) -> bool {
625 let ch = self.channel.regs().st(self.channel.num());
626 ch.cr().read().en()
627 }
628
629 /// Gets the total remaining transfers for the channel
630 /// Note: this will be zero for transfers that completed without cancellation.
631 pub fn get_remaining_transfers(&self) -> u16 {
632 let ch = self.channel.regs().st(self.channel.num());
633 ch.ndtr().read().ndt()
634 }
635}
636
637impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
638 fn drop(&mut self) {
639 self.request_stop();
640 while self.is_running() {}
641
642 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
643 fence(Ordering::SeqCst);
644 }
645}
646
647// ==============================
648
649struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
650
651impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
652 fn get_remaining_transfers(&self) -> usize {
653 let ch = self.0.regs().st(self.0.num());
654 ch.ndtr().read().ndt() as usize
655 }
656
657 fn get_complete_count(&self) -> usize {
658 STATE.complete_count[self.0.index()].load(Ordering::Acquire)
659 }
660
661 fn reset_complete_count(&mut self) -> usize {
662 STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel)
663 }
664
665 fn set_waker(&mut self, waker: &Waker) {
666 STATE.ch_wakers[self.0.index()].register(waker);
667 }
668}
669
670/// Ringbuffer for receiving data using DMA circular mode.
671pub struct ReadableRingBuffer<'a, C: Channel, W: Word> {
672 cr: regs::Cr,
673 channel: PeripheralRef<'a, C>,
674 ringbuf: ReadableDmaRingBuffer<'a, W>,
675}
676
677impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> {
678 /// Create a new ring buffer.
679 pub unsafe fn new(
680 channel: impl Peripheral<P = C> + 'a,
681 _request: Request,
682 peri_addr: *mut W,
683 buffer: &'a mut [W],
684 options: TransferOptions,
685 ) -> Self {
686 into_ref!(channel);
687
688 let len = buffer.len();
689 assert!(len > 0 && len <= 0xFFFF);
690
691 let dir = Dir::PeripheralToMemory;
692 let data_size = W::size();
693
694 let channel_number = channel.num();
695 let dma = channel.regs();
696
697 // "Preceding reads and writes cannot be moved past subsequent writes."
698 fence(Ordering::SeqCst);
699
700 let mut w = regs::Cr(0);
701 w.set_dir(dir.into());
702 w.set_msize(data_size.into());
703 w.set_psize(data_size.into());
704 w.set_pl(vals::Pl::VERYHIGH);
705 w.set_minc(true);
706 w.set_pinc(false);
707 w.set_teie(true);
708 w.set_htie(options.half_transfer_ir);
709 w.set_tcie(true);
710 w.set_circ(true);
711 #[cfg(dma_v1)]
712 w.set_trbuff(true);
713 #[cfg(dma_v2)]
714 w.set_chsel(_request);
715 w.set_pburst(options.pburst.into());
716 w.set_mburst(options.mburst.into());
717 w.set_pfctrl(options.flow_ctrl.into());
718 w.set_en(true);
719
720 let buffer_ptr = buffer.as_mut_ptr();
721 let mut this = Self {
722 channel,
723 cr: w,
724 ringbuf: ReadableDmaRingBuffer::new(buffer),
725 };
726 this.clear_irqs();
727
728 #[cfg(dmamux)]
729 super::dmamux::configure_dmamux(&*this.channel, _request);
730
731 let ch = dma.st(channel_number);
732 ch.par().write_value(peri_addr as u32);
733 ch.m0ar().write_value(buffer_ptr as u32);
734 ch.ndtr().write_value(regs::Ndtr(len as _));
735 ch.fcr().write(|w| {
736 if let Some(fth) = options.fifo_threshold {
737 // FIFO mode
738 w.set_dmdis(vals::Dmdis::DISABLED);
739 w.set_fth(fth.into());
740 } else {
741 // Direct mode
742 w.set_dmdis(vals::Dmdis::ENABLED);
743 }
744 });
745
746 this
747 }
748
749 /// Start the ring buffer operation.
750 ///
751 /// You must call this after creating it for it to work.
752 pub fn start(&mut self) {
753 let ch = self.channel.regs().st(self.channel.num());
754 ch.cr().write_value(self.cr);
755 }
756
757 /// Clear all data in the ring buffer.
758 pub fn clear(&mut self) {
759 self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
760 }
761
762 /// Read elements from the ring buffer
763 /// Return a tuple of the length read and the length remaining in the buffer
764 /// If not all of the elements were read, then there will be some elements in the buffer remaining
765 /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
766 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
767 pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
768 self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
769 }
770
771 /// Read an exact number of elements from the ringbuffer.
772 ///
773 /// Returns the remaining number of elements available for immediate reading.
774 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
775 ///
776 /// Async/Wake Behavior:
777 /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
778 /// and when it wraps around. This means that when called with a buffer of length 'M', when this
779 /// ring buffer was created with a buffer of size 'N':
780 /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
781 /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
782 pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> {
783 self.ringbuf
784 .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
785 .await
786 }
787
788 /// The capacity of the ringbuffer
789 pub const fn capacity(&self) -> usize {
790 self.ringbuf.cap()
791 }
792
793 /// Set a waker to be woken when at least one byte is received.
794 pub fn set_waker(&mut self, waker: &Waker) {
795 DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
796 }
797
798 fn clear_irqs(&mut self) {
799 let channel_number = self.channel.num();
800 let dma = self.channel.regs();
801 let isrn = channel_number / 4;
802 let isrbit = channel_number % 4;
803
804 dma.ifcr(isrn).write(|w| {
805 w.set_htif(isrbit, true);
806 w.set_tcif(isrbit, true);
807 w.set_teif(isrbit, true);
808 });
809 }
810
811 /// Request DMA to stop.
812 ///
813 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
814 pub fn request_stop(&mut self) {
815 let ch = self.channel.regs().st(self.channel.num());
816
817 // Disable the channel. Keep the IEs enabled so the irqs still fire.
818 ch.cr().write(|w| {
819 w.set_teie(true);
820 w.set_htie(true);
821 w.set_tcie(true);
822 });
823 }
824
825 /// Return whether DMA is still running.
826 ///
827 /// If this returns `false`, it can be because either the transfer finished, or
828 /// it was requested to stop early with [`request_stop`](Self::request_stop).
829 pub fn is_running(&mut self) -> bool {
830 let ch = self.channel.regs().st(self.channel.num());
831 ch.cr().read().en()
832 }
833}
834
835impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> {
836 fn drop(&mut self) {
837 self.request_stop();
838 while self.is_running() {}
839
840 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
841 fence(Ordering::SeqCst);
842 }
843}
844
845/// Ringbuffer for writing data using DMA circular mode.
846pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
847 cr: regs::Cr,
848 channel: PeripheralRef<'a, C>,
849 ringbuf: WritableDmaRingBuffer<'a, W>,
850}
851
852impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
853 /// Create a new ring buffer.
854 pub unsafe fn new(
855 channel: impl Peripheral<P = C> + 'a,
856 _request: Request,
857 peri_addr: *mut W,
858 buffer: &'a mut [W],
859 options: TransferOptions,
860 ) -> Self {
861 into_ref!(channel);
862
863 let len = buffer.len();
864 assert!(len > 0 && len <= 0xFFFF);
865
866 let dir = Dir::MemoryToPeripheral;
867 let data_size = W::size();
868
869 let channel_number = channel.num();
870 let dma = channel.regs();
871
872 // "Preceding reads and writes cannot be moved past subsequent writes."
873 fence(Ordering::SeqCst);
874
875 let mut w = regs::Cr(0);
876 w.set_dir(dir.into());
877 w.set_msize(data_size.into());
878 w.set_psize(data_size.into());
879 w.set_pl(vals::Pl::VERYHIGH);
880 w.set_minc(true);
881 w.set_pinc(false);
882 w.set_teie(true);
883 w.set_htie(options.half_transfer_ir);
884 w.set_tcie(true);
885 w.set_circ(true);
886 #[cfg(dma_v1)]
887 w.set_trbuff(true);
888 #[cfg(dma_v2)]
889 w.set_chsel(_request);
890 w.set_pburst(options.pburst.into());
891 w.set_mburst(options.mburst.into());
892 w.set_pfctrl(options.flow_ctrl.into());
893 w.set_en(true);
894
895 let buffer_ptr = buffer.as_mut_ptr();
896 let mut this = Self {
897 channel,
898 cr: w,
899 ringbuf: WritableDmaRingBuffer::new(buffer),
900 };
901 this.clear_irqs();
902
903 #[cfg(dmamux)]
904 super::dmamux::configure_dmamux(&*this.channel, _request);
905
906 let ch = dma.st(channel_number);
907 ch.par().write_value(peri_addr as u32);
908 ch.m0ar().write_value(buffer_ptr as u32);
909 ch.ndtr().write_value(regs::Ndtr(len as _));
910 ch.fcr().write(|w| {
911 if let Some(fth) = options.fifo_threshold {
912 // FIFO mode
913 w.set_dmdis(vals::Dmdis::DISABLED);
914 w.set_fth(fth.into());
915 } else {
916 // Direct mode
917 w.set_dmdis(vals::Dmdis::ENABLED);
918 }
919 });
920
921 this
922 }
923
924 /// Start the ring buffer operation.
925 ///
926 /// You must call this after creating it for it to work.
927 pub fn start(&mut self) {
928 let ch = self.channel.regs().st(self.channel.num());
929 ch.cr().write_value(self.cr);
930 }
931
932 /// Clear all data in the ring buffer.
933 pub fn clear(&mut self) {
934 self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
935 }
936
937 /// Write elements directly to the raw buffer.
938 /// This can be used to fill the buffer before starting the DMA transfer.
939 #[allow(dead_code)]
940 pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
941 self.ringbuf.write_immediate(buf)
942 }
943
944 /// Write elements from the ring buffer
945 /// Return a tuple of the length written and the length remaining in the buffer
946 pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
947 self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
948 }
949
950 /// Write an exact number of elements to the ringbuffer.
951 pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
952 self.ringbuf
953 .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
954 .await
955 }
956
957 /// The capacity of the ringbuffer
958 pub const fn capacity(&self) -> usize {
959 self.ringbuf.cap()
960 }
961
962 /// Set a waker to be woken when at least one byte is received.
963 pub fn set_waker(&mut self, waker: &Waker) {
964 DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
965 }
966
967 fn clear_irqs(&mut self) {
968 let channel_number = self.channel.num();
969 let dma = self.channel.regs();
970 let isrn = channel_number / 4;
971 let isrbit = channel_number % 4;
972
973 dma.ifcr(isrn).write(|w| {
974 w.set_htif(isrbit, true);
975 w.set_tcif(isrbit, true);
976 w.set_teif(isrbit, true);
977 });
978 }
979
980 /// Request DMA to stop.
981 ///
982 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
983 pub fn request_stop(&mut self) {
984 let ch = self.channel.regs().st(self.channel.num());
985
986 // Disable the channel. Keep the IEs enabled so the irqs still fire.
987 ch.cr().write(|w| {
988 w.set_teie(true);
989 w.set_htie(true);
990 w.set_tcie(true);
991 });
992 }
993
994 /// Return whether DMA is still running.
995 ///
996 /// If this returns `false`, it can be because either the transfer finished, or
997 /// it was requested to stop early with [`request_stop`](Self::request_stop).
998 pub fn is_running(&mut self) -> bool {
999 let ch = self.channel.regs().st(self.channel.num());
1000 ch.cr().read().en()
1001 }
1002}
1003
1004impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> {
1005 fn drop(&mut self) {
1006 self.request_stop();
1007 while self.is_running() {}
1008
1009 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
1010 fence(Ordering::SeqCst);
1011 }
1012}
diff --git a/embassy-stm32/src/dma/dma_bdma.rs b/embassy-stm32/src/dma/dma_bdma.rs
new file mode 100644
index 000000000..08aba2795
--- /dev/null
+++ b/embassy-stm32/src/dma/dma_bdma.rs
@@ -0,0 +1,913 @@
1use core::future::Future;
2use core::pin::Pin;
3use core::sync::atomic::{fence, AtomicUsize, Ordering};
4use core::task::{Context, Poll, Waker};
5
6use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
7use embassy_sync::waitqueue::AtomicWaker;
8
9use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
10use super::word::{Word, WordSize};
11use super::{AnyChannel, Channel, Dir, Request, STATE};
12use crate::interrupt::typelevel::Interrupt;
13use crate::interrupt::Priority;
14use crate::pac;
15
/// Resolved hardware identity of one DMA channel: which engine it belongs to
/// and its index within that engine.
pub(crate) struct ChannelInfo {
    /// Register block of the engine (DMA or BDMA) backing this channel.
    pub(crate) dma: DmaInfo,
    /// Channel/stream index within the engine.
    pub(crate) num: usize,
    /// DMAMUX routing info, on parts that have a DMAMUX.
    #[cfg(dmamux)]
    pub(crate) dmamux: super::DmamuxInfo,
}

/// Register block for the engine backing a channel. Allows one `AnyChannel`
/// type to drive both the DMA and BDMA IPs (e.g. on H7, which has both).
#[derive(Clone, Copy)]
pub(crate) enum DmaInfo {
    #[cfg(dma)]
    Dma(pac::dma::Dma),
    #[cfg(bdma)]
    Bdma(pac::bdma::Dma),
}
30
/// DMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Peripheral burst transfer configuration
    #[cfg(dma)]
    pub pburst: Burst,
    /// Memory burst transfer configuration
    #[cfg(dma)]
    pub mburst: Burst,
    /// Flow control configuration
    #[cfg(dma)]
    pub flow_ctrl: FlowControl,
    /// FIFO threshold for DMA FIFO mode. If none, direct mode is used.
    #[cfg(dma)]
    pub fifo_threshold: Option<FifoThreshold>,
    /// Enable circular DMA
    ///
    /// Note:
    /// If you enable circular mode manually, you may want to build and `.await` the `Transfer` in a separate task.
    /// Since circular-mode DMA must be stopped manually, `.await`ing it in the current task would block that task forever.
    pub circular: bool,
    /// Enable half transfer interrupt
    pub half_transfer_ir: bool,
    /// Enable transfer complete interrupt
    pub complete_transfer_ir: bool,
}
59
60impl Default for TransferOptions {
61 fn default() -> Self {
62 Self {
63 #[cfg(dma)]
64 pburst: Burst::Single,
65 #[cfg(dma)]
66 mburst: Burst::Single,
67 #[cfg(dma)]
68 flow_ctrl: FlowControl::Dma,
69 #[cfg(dma)]
70 fifo_threshold: None,
71 circular: false,
72 half_transfer_ir: false,
73 complete_transfer_ir: true,
74 }
75 }
76}
77
#[cfg(dma)]
pub use dma_only::*;
/// Options and register-value conversions that only exist on the DMA
/// (stream-based) IP, not on BDMA: bursts, flow control, FIFO thresholds.
#[cfg(dma)]
mod dma_only {
    use pac::dma::vals;

    use super::*;

    // Map the HAL word size onto the PSIZE/MSIZE register encoding.
    impl From<WordSize> for vals::Size {
        fn from(raw: WordSize) -> Self {
            match raw {
                WordSize::OneByte => Self::BITS8,
                WordSize::TwoBytes => Self::BITS16,
                WordSize::FourBytes => Self::BITS32,
            }
        }
    }

    // Map the HAL transfer direction onto the DIR register encoding.
    impl From<Dir> for vals::Dir {
        fn from(raw: Dir) -> Self {
            match raw {
                Dir::MemoryToPeripheral => Self::MEMORYTOPERIPHERAL,
                Dir::PeripheralToMemory => Self::PERIPHERALTOMEMORY,
            }
        }
    }

    /// DMA transfer burst setting.
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    #[cfg_attr(feature = "defmt", derive(defmt::Format))]
    pub enum Burst {
        /// Single transfer
        Single,
        /// Incremental burst of 4 beats
        Incr4,
        /// Incremental burst of 8 beats
        Incr8,
        /// Incremental burst of 16 beats
        Incr16,
    }

    impl From<Burst> for vals::Burst {
        fn from(burst: Burst) -> Self {
            match burst {
                Burst::Single => vals::Burst::SINGLE,
                Burst::Incr4 => vals::Burst::INCR4,
                Burst::Incr8 => vals::Burst::INCR8,
                Burst::Incr16 => vals::Burst::INCR16,
            }
        }
    }

    /// DMA flow control setting.
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    #[cfg_attr(feature = "defmt", derive(defmt::Format))]
    pub enum FlowControl {
        /// Flow control by DMA
        Dma,
        /// Flow control by peripheral
        Peripheral,
    }

    impl From<FlowControl> for vals::Pfctrl {
        fn from(flow: FlowControl) -> Self {
            match flow {
                FlowControl::Dma => vals::Pfctrl::DMA,
                FlowControl::Peripheral => vals::Pfctrl::PERIPHERAL,
            }
        }
    }

    /// DMA FIFO threshold.
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    #[cfg_attr(feature = "defmt", derive(defmt::Format))]
    pub enum FifoThreshold {
        /// 1/4 full FIFO
        Quarter,
        /// 1/2 full FIFO
        Half,
        /// 3/4 full FIFO
        ThreeQuarters,
        /// Full FIFO
        Full,
    }

    impl From<FifoThreshold> for vals::Fth {
        fn from(value: FifoThreshold) -> Self {
            match value {
                FifoThreshold::Quarter => vals::Fth::QUARTER,
                FifoThreshold::Half => vals::Fth::HALF,
                FifoThreshold::ThreeQuarters => vals::Fth::THREEQUARTERS,
                FifoThreshold::Full => vals::Fth::FULL,
            }
        }
    }
}
174
/// BDMA-specific register-value conversions. Note the BDMA `Dir` encoding
/// (FROMMEMORY/FROMPERIPHERAL) differs from the DMA one, hence the separate
/// impls in this module.
#[cfg(bdma)]
mod bdma_only {
    use pac::bdma::vals;

    use super::*;

    // Map the HAL word size onto the BDMA PSIZE/MSIZE register encoding.
    impl From<WordSize> for vals::Size {
        fn from(raw: WordSize) -> Self {
            match raw {
                WordSize::OneByte => Self::BITS8,
                WordSize::TwoBytes => Self::BITS16,
                WordSize::FourBytes => Self::BITS32,
            }
        }
    }

    // Map the HAL transfer direction onto the BDMA DIR register encoding.
    impl From<Dir> for vals::Dir {
        fn from(raw: Dir) -> Self {
            match raw {
                Dir::MemoryToPeripheral => Self::FROMMEMORY,
                Dir::PeripheralToMemory => Self::FROMPERIPHERAL,
            }
        }
    }
}
200
/// Per-channel state shared between user code and the interrupt handler
/// (stored in the static `STATE` array, indexed by channel id).
pub(crate) struct ChannelState {
    // Woken by `on_irq` on half-transfer / transfer-complete.
    waker: AtomicWaker,
    // Count of transfer completions since `configure`; the ring buffers use
    // it to detect wrap-around.
    complete_count: AtomicUsize,
}

impl ChannelState {
    // Const initializer, usable to build the static per-channel array.
    pub(crate) const NEW: Self = Self {
        waker: AtomicWaker::new(),
        complete_count: AtomicUsize::new(0),
    };
}
212
/// Initialize DMA/BDMA interrupt handling: set the priority of, and unmask,
/// every DMA and BDMA interrupt vector, then run chip-specific generated init.
///
/// safety: must be called only once
pub(crate) unsafe fn init(
    cs: critical_section::CriticalSection,
    #[cfg(dma)] dma_priority: Priority,
    #[cfg(bdma)] bdma_priority: Priority,
) {
    foreach_interrupt! {
        ($peri:ident, dma, $block:ident, $signal_name:ident, $irq:ident) => {
            crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, dma_priority);
            crate::interrupt::typelevel::$irq::enable();
        };
        ($peri:ident, bdma, $block:ident, $signal_name:ident, $irq:ident) => {
            crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, bdma_priority);
            crate::interrupt::typelevel::$irq::enable();
        };
    }
    // Chip-specific generated initialization for the DMA/BDMA blocks.
    crate::_generated::init_dma();
    crate::_generated::init_bdma();
}
232
impl AnyChannel {
    /// Interrupt handler for this channel: acknowledges half-transfer /
    /// transfer-complete flags, bumps `complete_count` on completion, and
    /// wakes the registered waker. Panics on a transfer error.
    ///
    /// Safety: Must be called with a matching set of parameters for a valid dma channel
    pub(crate) unsafe fn on_irq(&self) {
        let info = self.info();
        let state = &STATE[self.id as usize];
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                // DMA packs flags for 4 streams per ISR/IFCR register:
                // register index is num / 4, bit group is num % 4.
                let cr = r.st(info.num).cr();
                let isr = r.isr(info.num / 4).read();

                if isr.teif(info.num % 4) {
                    panic!("DMA: error on DMA@{:08x} channel {}", r.as_ptr() as u32, info.num);
                }

                if isr.htif(info.num % 4) && cr.read().htie() {
                    // Acknowledge half transfer complete interrupt
                    r.ifcr(info.num / 4).write(|w| w.set_htif(info.num % 4, true));
                } else if isr.tcif(info.num % 4) && cr.read().tcie() {
                    // Acknowledge transfer complete interrupt
                    r.ifcr(info.num / 4).write(|w| w.set_tcif(info.num % 4, true));
                    state.complete_count.fetch_add(1, Ordering::Release);
                } else {
                    // Spurious/shared interrupt with nothing for us: don't wake.
                    return;
                }

                state.waker.wake();
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                // BDMA has one shared ISR/IFCR indexed directly by channel.
                let isr = r.isr().read();
                let cr = r.ch(info.num).cr();

                if isr.teif(info.num) {
                    panic!("DMA: error on BDMA@{:08x} channel {}", r.as_ptr() as u32, info.num);
                }

                if isr.htif(info.num) && cr.read().htie() {
                    // Acknowledge half transfer complete interrupt
                    r.ifcr().write(|w| w.set_htif(info.num, true));
                } else if isr.tcif(info.num) && cr.read().tcie() {
                    // Acknowledge transfer complete interrupt
                    r.ifcr().write(|w| w.set_tcif(info.num, true));
                    // armv6m (thumbv6) has no atomic read-modify-write, so the
                    // increment is emulated with a critical section there.
                    #[cfg(not(armv6m))]
                    state.complete_count.fetch_add(1, Ordering::Release);
                    #[cfg(armv6m)]
                    critical_section::with(|_| {
                        let x = state.complete_count.load(Ordering::Relaxed);
                        state.complete_count.store(x + 1, Ordering::Release);
                    })
                } else {
                    // Spurious/shared interrupt with nothing for us: don't wake.
                    return;
                }

                state.waker.wake();
            }
        }
    }

    /// Program the channel registers for a transfer, but leave EN clear —
    /// the transfer only begins once `start` is called.
    unsafe fn configure(
        &self,
        _request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        options: TransferOptions,
    ) {
        let info = self.info();

        // Route the peripheral's request line to this channel first, where a
        // DMAMUX sits in between.
        #[cfg(dmamux)]
        super::dmamux::configure_dmamux(&info.dmamux, _request);

        // NDTR is a 16-bit counter; zero-length transfers are invalid.
        assert!(mem_len > 0 && mem_len <= 0xFFFF);

        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                let ch = r.st(info.num);

                // "Preceding reads and writes cannot be moved past subsequent writes."
                fence(Ordering::SeqCst);

                self.clear_irqs();

                ch.par().write_value(peri_addr as u32);
                ch.m0ar().write_value(mem_addr as u32);
                ch.ndtr().write_value(pac::dma::regs::Ndtr(mem_len as _));
                ch.fcr().write(|w| {
                    if let Some(fth) = options.fifo_threshold {
                        // FIFO mode
                        w.set_dmdis(pac::dma::vals::Dmdis::DISABLED);
                        w.set_fth(fth.into());
                    } else {
                        // Direct mode
                        w.set_dmdis(pac::dma::vals::Dmdis::ENABLED);
                    }
                });
                ch.cr().write(|w| {
                    w.set_dir(dir.into());
                    w.set_msize(data_size.into());
                    w.set_psize(data_size.into());
                    w.set_pl(pac::dma::vals::Pl::VERYHIGH);
                    w.set_minc(incr_mem);
                    w.set_pinc(false);
                    w.set_teie(true);
                    w.set_htie(options.half_transfer_ir);
                    w.set_tcie(options.complete_transfer_ir);
                    w.set_circ(options.circular);
                    #[cfg(dma_v1)]
                    w.set_trbuff(true);
                    // On dma_v2 (no DMAMUX) the request is selected via CHSEL.
                    #[cfg(dma_v2)]
                    w.set_chsel(_request);
                    w.set_pburst(options.pburst.into());
                    w.set_mburst(options.mburst.into());
                    w.set_pfctrl(options.flow_ctrl.into());
                    w.set_en(false); // don't start yet
                });
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                // On bdma_v2 the request is selected via the shared CSELR
                // register, hence the critical section.
                #[cfg(bdma_v2)]
                critical_section::with(|_| r.cselr().modify(|w| w.set_cs(info.num, _request)));

                let state: &ChannelState = &STATE[self.id as usize];
                let ch = r.ch(info.num);

                // Reset the completion counter so ring buffers start fresh.
                state.complete_count.store(0, Ordering::Release);
                self.clear_irqs();

                ch.par().write_value(peri_addr as u32);
                ch.mar().write_value(mem_addr as u32);
                ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
                ch.cr().write(|w| {
                    w.set_psize(data_size.into());
                    w.set_msize(data_size.into());
                    w.set_minc(incr_mem);
                    w.set_dir(dir.into());
                    w.set_teie(true);
                    w.set_tcie(options.complete_transfer_ir);
                    w.set_htie(options.half_transfer_ir);
                    w.set_circ(options.circular);
                    w.set_pl(pac::bdma::vals::Pl::VERYHIGH);
                    w.set_en(false); // don't start yet
                });
            }
        }
    }

    /// Set EN to start the previously configured transfer.
    fn start(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                let ch = r.st(info.num);
                ch.cr().modify(|w| w.set_en(true))
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                let ch = r.ch(info.num);
                ch.cr().modify(|w| w.set_en(true));
            }
        }
    }

    /// Clear pending half-transfer, transfer-complete and transfer-error
    /// flags for this channel.
    fn clear_irqs(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                // 4 streams per IFCR register; see `on_irq` for the layout.
                let isrn = info.num / 4;
                let isrbit = info.num % 4;

                r.ifcr(isrn).write(|w| {
                    w.set_htif(isrbit, true);
                    w.set_tcif(isrbit, true);
                    w.set_teif(isrbit, true);
                });
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                r.ifcr().write(|w| {
                    w.set_htif(info.num, true);
                    w.set_tcif(info.num, true);
                    w.set_teif(info.num, true);
                });
            }
        }
    }

    /// Request the channel to stop by clearing EN (via a full CR write that
    /// leaves only TEIE/TCIE set). Completion is signalled by `is_running`
    /// turning false.
    fn request_stop(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                // Disable the channel. Keep the IEs enabled so the irqs still fire.
                // NOTE(review): unlike `configure`, HTIE is not preserved here —
                // presumably only the completion irq matters once stopping; confirm.
                r.st(info.num).cr().write(|w| {
                    w.set_teie(true);
                    w.set_tcie(true);
                });
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                // Disable the channel. Keep the IEs enabled so the irqs still fire.
                r.ch(info.num).cr().write(|w| {
                    w.set_teie(true);
                    w.set_tcie(true);
                });
            }
        }
    }

    /// Whether the channel is still actively transferring.
    fn is_running(&self) -> bool {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => r.st(info.num).cr().read().en(),
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                // BDMA keeps EN set after a non-circular transfer completes,
                // so also treat "completed at least once and not circular"
                // as not running.
                let state: &ChannelState = &STATE[self.id as usize];
                let ch = r.ch(info.num);
                let en = ch.cr().read().en();
                let circular = ch.cr().read().circ();
                let tcif = state.complete_count.load(Ordering::Acquire) != 0;
                en && (circular || !tcif)
            }
        }
    }

    /// Remaining number of transfers (NDTR) for the channel.
    fn get_remaining_transfers(&self) -> u16 {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => r.st(info.num).ndtr().read().ndt(),
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => r.ch(info.num).ndtr().read().ndt(),
        }
    }
}
474
/// DMA transfer.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    // Type-erased channel; dropped (and thus stopped) with the transfer.
    channel: PeripheralRef<'a, AnyChannel>,
}

impl<'a> Transfer<'a> {
    /// Create a new read DMA transfer (peripheral to memory).
    pub unsafe fn new_read<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: &'a mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_read_raw(channel, request, peri_addr, buf, options)
    }

    /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
    pub unsafe fn new_read_raw<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: *mut [W],
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        let (ptr, len) = super::slice_ptr_parts_mut(buf);
        // NDTR is 16-bit, so the element count must fit in it.
        assert!(len > 0 && len <= 0xFFFF);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            ptr as *mut u32,
            len,
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
    pub unsafe fn new_write<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: &'a [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        Self::new_write_raw(channel, request, buf, peri_addr, options)
    }

    /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
    pub unsafe fn new_write_raw<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: *const [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        let (ptr, len) = super::slice_ptr_parts(buf);
        // NDTR is 16-bit, so the element count must fit in it.
        assert!(len > 0 && len <= 0xFFFF);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            ptr as *mut u32,
            len,
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
    pub unsafe fn new_write_repeated<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        repeated: &'a W,
        count: usize,
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            repeated as *const W as *mut u32,
            count,
            false, // incr_mem = false: re-read the same memory word each beat
            W::size(),
            options,
        )
    }

    // Common constructor: program the channel and immediately start it.
    unsafe fn new_inner(
        channel: PeripheralRef<'a, AnyChannel>,
        _request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        options: TransferOptions,
    ) -> Self {
        channel.configure(
            _request, dir, peri_addr, mem_addr, mem_len, incr_mem, data_size, options,
        );
        channel.start();

        Self { channel }
    }

    /// Request the transfer to stop.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Gets the total remaining transfers for the channel
    /// Note: this will be zero for transfers that completed without cancellation.
    pub fn get_remaining_transfers(&self) -> u16 {
        self.channel.get_remaining_transfers()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        // The channel is already idle; skip Drop (which would request a stop
        // and spin again).
        core::mem::forget(self);
    }
}
630
631impl<'a> Drop for Transfer<'a> {
632 fn drop(&mut self) {
633 self.request_stop();
634 while self.is_running() {}
635
636 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
637 fence(Ordering::SeqCst);
638 }
639}
640
impl<'a> Unpin for Transfer<'a> {}
impl<'a> Future for Transfer<'a> {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state: &ChannelState = &STATE[self.channel.id as usize];

        // Register the waker *before* checking the running flag, so a
        // completion interrupt that races with this poll still wakes us.
        state.waker.register(cx.waker());

        if self.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}
656
657// ==============================
658
/// Adapter exposing an `AnyChannel` through the generic `DmaCtrl` interface
/// that the ring buffer implementation consumes.
struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>);

impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
    fn get_remaining_transfers(&self) -> usize {
        self.0.get_remaining_transfers() as _
    }

    fn get_complete_count(&self) -> usize {
        STATE[self.0.id as usize].complete_count.load(Ordering::Acquire)
    }

    fn reset_complete_count(&mut self) -> usize {
        let state = &STATE[self.0.id as usize];
        // `swap` is an atomic read-modify-write, which armv6m (thumbv6)
        // doesn't have; emulate it with a critical section there.
        #[cfg(not(armv6m))]
        return state.complete_count.swap(0, Ordering::AcqRel);
        #[cfg(armv6m)]
        return critical_section::with(|_| {
            let x = state.complete_count.load(Ordering::Acquire);
            state.complete_count.store(0, Ordering::Release);
            x
        });
    }

    fn set_waker(&mut self, waker: &Waker) {
        STATE[self.0.id as usize].waker.register(waker);
    }
}
686
/// Ringbuffer for receiving data using DMA circular mode.
pub struct ReadableRingBuffer<'a, W: Word> {
    channel: PeripheralRef<'a, AnyChannel>,
    ringbuf: ReadableDmaRingBuffer<'a, W>,
}

impl<'a, W: Word> ReadableRingBuffer<'a, W> {
    /// Create a new ring buffer.
    ///
    /// Configures (but does not start) the channel in circular
    /// peripheral-to-memory mode over `buffer`. Call [`start`](Self::start)
    /// to begin receiving.
    pub unsafe fn new(
        channel: impl Peripheral<P = impl Channel> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        mut options: TransferOptions,
    ) -> Self {
        into_ref!(channel);
        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();

        let buffer_ptr = buffer.as_mut_ptr();
        let len = buffer.len();
        let dir = Dir::PeripheralToMemory;
        let data_size = W::size();

        // The ring buffer requires circular mode and the completion irq to
        // count wrap-arounds, regardless of what the caller passed.
        options.complete_transfer_ir = true;
        options.circular = true;

        channel.configure(
            _request,
            dir,
            peri_addr as *mut u32,
            buffer_ptr as *mut u32,
            len,
            true,
            data_size,
            options,
        );

        Self {
            channel,
            ringbuf: ReadableDmaRingBuffer::new(buffer),
        }
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        self.channel.start()
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Read elements from the ring buffer
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
        self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
    pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> {
        self.ringbuf
            .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// The capacity of the ringbuffer
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request DMA to stop.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
}
793
794impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
795 fn drop(&mut self) {
796 self.request_stop();
797 while self.is_running() {}
798
799 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
800 fence(Ordering::SeqCst);
801 }
802}
803
/// Ringbuffer for writing data using DMA circular mode.
pub struct WritableRingBuffer<'a, W: Word> {
    channel: PeripheralRef<'a, AnyChannel>,
    ringbuf: WritableDmaRingBuffer<'a, W>,
}

impl<'a, W: Word> WritableRingBuffer<'a, W> {
    /// Create a new ring buffer.
    ///
    /// Configures (but does not start) the channel in circular
    /// memory-to-peripheral mode over `buffer`. Call [`start`](Self::start)
    /// to begin transmitting.
    pub unsafe fn new(
        channel: impl Peripheral<P = impl Channel> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        mut options: TransferOptions,
    ) -> Self {
        into_ref!(channel);
        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();

        let len = buffer.len();
        let dir = Dir::MemoryToPeripheral;
        let data_size = W::size();
        let buffer_ptr = buffer.as_mut_ptr();

        // The ring buffer requires circular mode and the completion irq to
        // count wrap-arounds, regardless of what the caller passed.
        options.complete_transfer_ir = true;
        options.circular = true;

        channel.configure(
            _request,
            dir,
            peri_addr as *mut u32,
            buffer_ptr as *mut u32,
            len,
            true,
            data_size,
            options,
        );

        Self {
            channel,
            ringbuf: WritableDmaRingBuffer::new(buffer),
        }
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        self.channel.start()
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Write elements directly to the raw buffer.
    /// This can be used to fill the buffer before starting the DMA transfer.
    #[allow(dead_code)]
    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        self.ringbuf.write_immediate(buf)
    }

    /// Write elements from the ring buffer
    /// Return a tuple of the length written and the length remaining in the buffer
    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
        self.ringbuf
            .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// The capacity of the ringbuffer
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request DMA to stop.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
}
904
905impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
906 fn drop(&mut self) {
907 self.request_stop();
908 while self.is_running() {}
909
910 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
911 fence(Ordering::SeqCst);
912 }
913}
diff --git a/embassy-stm32/src/dma/dmamux.rs b/embassy-stm32/src/dma/dmamux.rs
index ac6f44107..1e9ab5944 100644
--- a/embassy-stm32/src/dma/dmamux.rs
+++ b/embassy-stm32/src/dma/dmamux.rs
@@ -1,9 +1,14 @@
1#![macro_use] 1#![macro_use]
2 2
3use crate::{pac, peripherals}; 3use crate::pac;
4 4
5pub(crate) fn configure_dmamux<M: MuxChannel>(channel: &M, request: u8) { 5pub(crate) struct DmamuxInfo {
6 let ch_mux_regs = channel.mux_regs().ccr(channel.mux_num()); 6 pub(crate) mux: pac::dmamux::Dmamux,
7 pub(crate) num: usize,
8}
9
10pub(crate) fn configure_dmamux(info: &DmamuxInfo, request: u8) {
11 let ch_mux_regs = info.mux.ccr(info.num);
7 ch_mux_regs.write(|reg| { 12 ch_mux_regs.write(|reg| {
8 reg.set_nbreq(0); 13 reg.set_nbreq(0);
9 reg.set_dmareq_id(request); 14 reg.set_dmareq_id(request);
@@ -15,11 +20,7 @@ pub(crate) fn configure_dmamux<M: MuxChannel>(channel: &M, request: u8) {
15} 20}
16 21
17pub(crate) mod dmamux_sealed { 22pub(crate) mod dmamux_sealed {
18 use super::*; 23 pub trait MuxChannel {}
19 pub trait MuxChannel {
20 fn mux_regs(&self) -> pac::dmamux::Dmamux;
21 fn mux_num(&self) -> usize;
22 }
23} 24}
24 25
25/// DMAMUX1 instance. 26/// DMAMUX1 instance.
@@ -34,18 +35,11 @@ pub trait MuxChannel: dmamux_sealed::MuxChannel {
34 type Mux; 35 type Mux;
35} 36}
36 37
37foreach_dma_channel! { 38macro_rules! dmamux_channel_impl {
38 ($channel_peri:ident, $dma_peri:ident, $version:ident, $channel_num:expr, $index:expr, {dmamux: $dmamux:ident, dmamux_channel: $dmamux_channel:expr}) => { 39 ($channel_peri:ident, $dmamux:ident) => {
39 impl dmamux_sealed::MuxChannel for peripherals::$channel_peri { 40 impl crate::dma::dmamux_sealed::MuxChannel for crate::peripherals::$channel_peri {}
40 fn mux_regs(&self) -> pac::dmamux::Dmamux { 41 impl crate::dma::MuxChannel for crate::peripherals::$channel_peri {
41 pac::$dmamux 42 type Mux = crate::dma::$dmamux;
42 }
43 fn mux_num(&self) -> usize {
44 $dmamux_channel
45 }
46 }
47 impl MuxChannel for peripherals::$channel_peri {
48 type Mux = $dmamux;
49 } 43 }
50 }; 44 };
51} 45}
diff --git a/embassy-stm32/src/dma/gpdma.rs b/embassy-stm32/src/dma/gpdma.rs
index 337e7b309..ef03970ef 100644
--- a/embassy-stm32/src/dma/gpdma.rs
+++ b/embassy-stm32/src/dma/gpdma.rs
@@ -9,13 +9,17 @@ use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
9use embassy_sync::waitqueue::AtomicWaker; 9use embassy_sync::waitqueue::AtomicWaker;
10 10
11use super::word::{Word, WordSize}; 11use super::word::{Word, WordSize};
12use super::Dir; 12use super::{AnyChannel, Channel, Dir, Request, STATE};
13use crate::_generated::GPDMA_CHANNEL_COUNT;
14use crate::interrupt::typelevel::Interrupt; 13use crate::interrupt::typelevel::Interrupt;
15use crate::interrupt::Priority; 14use crate::interrupt::Priority;
16use crate::pac; 15use crate::pac;
17use crate::pac::gpdma::vals; 16use crate::pac::gpdma::vals;
18 17
18pub(crate) struct ChannelInfo {
19 pub(crate) dma: pac::gpdma::Gpdma,
20 pub(crate) num: usize,
21}
22
19/// GPDMA transfer options. 23/// GPDMA transfer options.
20#[derive(Debug, Copy, Clone, PartialEq, Eq)] 24#[derive(Debug, Copy, Clone, PartialEq, Eq)]
21#[cfg_attr(feature = "defmt", derive(defmt::Format))] 25#[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -38,21 +42,16 @@ impl From<WordSize> for vals::ChTr1Dw {
38 } 42 }
39} 43}
40 44
41struct State { 45pub(crate) struct ChannelState {
42 ch_wakers: [AtomicWaker; GPDMA_CHANNEL_COUNT], 46 waker: AtomicWaker,
43} 47}
44 48
45impl State { 49impl ChannelState {
46 const fn new() -> Self { 50 pub(crate) const NEW: Self = Self {
47 const AW: AtomicWaker = AtomicWaker::new(); 51 waker: AtomicWaker::new(),
48 Self { 52 };
49 ch_wakers: [AW; GPDMA_CHANNEL_COUNT],
50 }
51 }
52} 53}
53 54
54static STATE: State = State::new();
55
56/// safety: must be called only once 55/// safety: must be called only once
57pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) { 56pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
58 foreach_interrupt! { 57 foreach_interrupt! {
@@ -64,87 +63,50 @@ pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: P
64 crate::_generated::init_gpdma(); 63 crate::_generated::init_gpdma();
65} 64}
66 65
67foreach_dma_channel! { 66impl AnyChannel {
68 ($channel_peri:ident, $dma_peri:ident, gpdma, $channel_num:expr, $index:expr, $dmamux:tt) => { 67 /// Safety: Must be called with a matching set of parameters for a valid dma channel
69 impl sealed::Channel for crate::peripherals::$channel_peri { 68 pub(crate) unsafe fn on_irq(&self) {
70 fn regs(&self) -> pac::gpdma::Gpdma { 69 let info = self.info();
71 pac::$dma_peri 70 let state = &STATE[self.id as usize];
72 }
73 fn num(&self) -> usize {
74 $channel_num
75 }
76 fn index(&self) -> usize {
77 $index
78 }
79 fn on_irq() {
80 unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
81 }
82 }
83
84 impl Channel for crate::peripherals::$channel_peri {}
85 };
86}
87
88/// Safety: Must be called with a matching set of parameters for a valid dma channel
89pub(crate) unsafe fn on_irq_inner(dma: pac::gpdma::Gpdma, channel_num: usize, index: usize) {
90 let ch = dma.ch(channel_num);
91 let sr = ch.sr().read();
92
93 if sr.dtef() {
94 panic!(
95 "DMA: data transfer error on DMA@{:08x} channel {}",
96 dma.as_ptr() as u32,
97 channel_num
98 );
99 }
100 if sr.usef() {
101 panic!(
102 "DMA: user settings error on DMA@{:08x} channel {}",
103 dma.as_ptr() as u32,
104 channel_num
105 );
106 }
107
108 if sr.suspf() || sr.tcf() {
109 // disable all xxIEs to prevent the irq from firing again.
110 ch.cr().write(|_| {});
111
112 // Wake the future. It'll look at tcf and see it's set.
113 STATE.ch_wakers[index].wake();
114 }
115}
116 71
117/// DMA request type alias. (also known as DMA channel number in some chips) 72 let ch = info.dma.ch(info.num);
118pub type Request = u8; 73 let sr = ch.sr().read();
119 74
120/// DMA channel. 75 if sr.dtef() {
121#[cfg(dmamux)] 76 panic!(
122pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {} 77 "DMA: data transfer error on DMA@{:08x} channel {}",
123/// DMA channel. 78 info.dma.as_ptr() as u32,
124#[cfg(not(dmamux))] 79 info.num
125pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {} 80 );
81 }
82 if sr.usef() {
83 panic!(
84 "DMA: user settings error on DMA@{:08x} channel {}",
85 info.dma.as_ptr() as u32,
86 info.num
87 );
88 }
126 89
127pub(crate) mod sealed { 90 if sr.suspf() || sr.tcf() {
128 use super::*; 91 // disable all xxIEs to prevent the irq from firing again.
92 ch.cr().write(|_| {});
129 93
130 pub trait Channel { 94 // Wake the future. It'll look at tcf and see it's set.
131 fn regs(&self) -> pac::gpdma::Gpdma; 95 state.waker.wake();
132 fn num(&self) -> usize; 96 }
133 fn index(&self) -> usize;
134 fn on_irq();
135 } 97 }
136} 98}
137 99
138/// DMA transfer. 100/// DMA transfer.
139#[must_use = "futures do nothing unless you `.await` or poll them"] 101#[must_use = "futures do nothing unless you `.await` or poll them"]
140pub struct Transfer<'a, C: Channel> { 102pub struct Transfer<'a> {
141 channel: PeripheralRef<'a, C>, 103 channel: PeripheralRef<'a, AnyChannel>,
142} 104}
143 105
144impl<'a, C: Channel> Transfer<'a, C> { 106impl<'a> Transfer<'a> {
145 /// Create a new read DMA transfer (peripheral to memory). 107 /// Create a new read DMA transfer (peripheral to memory).
146 pub unsafe fn new_read<W: Word>( 108 pub unsafe fn new_read<W: Word>(
147 channel: impl Peripheral<P = C> + 'a, 109 channel: impl Peripheral<P = impl Channel> + 'a,
148 request: Request, 110 request: Request,
149 peri_addr: *mut W, 111 peri_addr: *mut W,
150 buf: &'a mut [W], 112 buf: &'a mut [W],
@@ -155,7 +117,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
155 117
156 /// Create a new read DMA transfer (peripheral to memory), using raw pointers. 118 /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
157 pub unsafe fn new_read_raw<W: Word>( 119 pub unsafe fn new_read_raw<W: Word>(
158 channel: impl Peripheral<P = C> + 'a, 120 channel: impl Peripheral<P = impl Channel> + 'a,
159 request: Request, 121 request: Request,
160 peri_addr: *mut W, 122 peri_addr: *mut W,
161 buf: *mut [W], 123 buf: *mut [W],
@@ -167,7 +129,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
167 assert!(len > 0 && len <= 0xFFFF); 129 assert!(len > 0 && len <= 0xFFFF);
168 130
169 Self::new_inner( 131 Self::new_inner(
170 channel, 132 channel.map_into(),
171 request, 133 request,
172 Dir::PeripheralToMemory, 134 Dir::PeripheralToMemory,
173 peri_addr as *const u32, 135 peri_addr as *const u32,
@@ -181,7 +143,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
181 143
182 /// Create a new write DMA transfer (memory to peripheral). 144 /// Create a new write DMA transfer (memory to peripheral).
183 pub unsafe fn new_write<W: Word>( 145 pub unsafe fn new_write<W: Word>(
184 channel: impl Peripheral<P = C> + 'a, 146 channel: impl Peripheral<P = impl Channel> + 'a,
185 request: Request, 147 request: Request,
186 buf: &'a [W], 148 buf: &'a [W],
187 peri_addr: *mut W, 149 peri_addr: *mut W,
@@ -192,7 +154,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
192 154
193 /// Create a new write DMA transfer (memory to peripheral), using raw pointers. 155 /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
194 pub unsafe fn new_write_raw<W: Word>( 156 pub unsafe fn new_write_raw<W: Word>(
195 channel: impl Peripheral<P = C> + 'a, 157 channel: impl Peripheral<P = impl Channel> + 'a,
196 request: Request, 158 request: Request,
197 buf: *const [W], 159 buf: *const [W],
198 peri_addr: *mut W, 160 peri_addr: *mut W,
@@ -204,7 +166,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
204 assert!(len > 0 && len <= 0xFFFF); 166 assert!(len > 0 && len <= 0xFFFF);
205 167
206 Self::new_inner( 168 Self::new_inner(
207 channel, 169 channel.map_into(),
208 request, 170 request,
209 Dir::MemoryToPeripheral, 171 Dir::MemoryToPeripheral,
210 peri_addr as *const u32, 172 peri_addr as *const u32,
@@ -218,7 +180,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
218 180
219 /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly. 181 /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
220 pub unsafe fn new_write_repeated<W: Word>( 182 pub unsafe fn new_write_repeated<W: Word>(
221 channel: impl Peripheral<P = C> + 'a, 183 channel: impl Peripheral<P = impl Channel> + 'a,
222 request: Request, 184 request: Request,
223 repeated: &'a W, 185 repeated: &'a W,
224 count: usize, 186 count: usize,
@@ -228,7 +190,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
228 into_ref!(channel); 190 into_ref!(channel);
229 191
230 Self::new_inner( 192 Self::new_inner(
231 channel, 193 channel.map_into(),
232 request, 194 request,
233 Dir::MemoryToPeripheral, 195 Dir::MemoryToPeripheral,
234 peri_addr as *const u32, 196 peri_addr as *const u32,
@@ -241,7 +203,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
241 } 203 }
242 204
243 unsafe fn new_inner( 205 unsafe fn new_inner(
244 channel: PeripheralRef<'a, C>, 206 channel: PeripheralRef<'a, AnyChannel>,
245 request: Request, 207 request: Request,
246 dir: Dir, 208 dir: Dir,
247 peri_addr: *const u32, 209 peri_addr: *const u32,
@@ -251,7 +213,8 @@ impl<'a, C: Channel> Transfer<'a, C> {
251 data_size: WordSize, 213 data_size: WordSize,
252 _options: TransferOptions, 214 _options: TransferOptions,
253 ) -> Self { 215 ) -> Self {
254 let ch = channel.regs().ch(channel.num()); 216 let info = channel.info();
217 let ch = info.dma.ch(info.num);
255 218
256 // "Preceding reads and writes cannot be moved past subsequent writes." 219 // "Preceding reads and writes cannot be moved past subsequent writes."
257 fence(Ordering::SeqCst); 220 fence(Ordering::SeqCst);
@@ -311,10 +274,10 @@ impl<'a, C: Channel> Transfer<'a, C> {
311 /// 274 ///
312 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. 275 /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
313 pub fn request_stop(&mut self) { 276 pub fn request_stop(&mut self) {
314 let ch = self.channel.regs().ch(self.channel.num()); 277 let info = self.channel.info();
315 ch.cr().modify(|w| { 278 let ch = info.dma.ch(info.num);
316 w.set_susp(true); 279
317 }) 280 ch.cr().modify(|w| w.set_susp(true))
318 } 281 }
319 282
320 /// Return whether this transfer is still running. 283 /// Return whether this transfer is still running.
@@ -322,7 +285,9 @@ impl<'a, C: Channel> Transfer<'a, C> {
322 /// If this returns `false`, it can be because either the transfer finished, or 285 /// If this returns `false`, it can be because either the transfer finished, or
323 /// it was requested to stop early with [`request_stop`](Self::request_stop). 286 /// it was requested to stop early with [`request_stop`](Self::request_stop).
324 pub fn is_running(&mut self) -> bool { 287 pub fn is_running(&mut self) -> bool {
325 let ch = self.channel.regs().ch(self.channel.num()); 288 let info = self.channel.info();
289 let ch = info.dma.ch(info.num);
290
326 let sr = ch.sr().read(); 291 let sr = ch.sr().read();
327 !sr.tcf() && !sr.suspf() 292 !sr.tcf() && !sr.suspf()
328 } 293 }
@@ -330,7 +295,9 @@ impl<'a, C: Channel> Transfer<'a, C> {
330 /// Gets the total remaining transfers for the channel 295 /// Gets the total remaining transfers for the channel
331 /// Note: this will be zero for transfers that completed without cancellation. 296 /// Note: this will be zero for transfers that completed without cancellation.
332 pub fn get_remaining_transfers(&self) -> u16 { 297 pub fn get_remaining_transfers(&self) -> u16 {
333 let ch = self.channel.regs().ch(self.channel.num()); 298 let info = self.channel.info();
299 let ch = info.dma.ch(info.num);
300
334 ch.br1().read().bndt() 301 ch.br1().read().bndt()
335 } 302 }
336 303
@@ -345,7 +312,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
345 } 312 }
346} 313}
347 314
348impl<'a, C: Channel> Drop for Transfer<'a, C> { 315impl<'a> Drop for Transfer<'a> {
349 fn drop(&mut self) { 316 fn drop(&mut self) {
350 self.request_stop(); 317 self.request_stop();
351 while self.is_running() {} 318 while self.is_running() {}
@@ -355,11 +322,12 @@ impl<'a, C: Channel> Drop for Transfer<'a, C> {
355 } 322 }
356} 323}
357 324
358impl<'a, C: Channel> Unpin for Transfer<'a, C> {} 325impl<'a> Unpin for Transfer<'a> {}
359impl<'a, C: Channel> Future for Transfer<'a, C> { 326impl<'a> Future for Transfer<'a> {
360 type Output = (); 327 type Output = ();
361 fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { 328 fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
362 STATE.ch_wakers[self.channel.index()].register(cx.waker()); 329 let state = &STATE[self.channel.id as usize];
330 state.waker.register(cx.waker());
363 331
364 if self.is_running() { 332 if self.is_running() {
365 Poll::Pending 333 Poll::Pending
diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs
index 38945ac33..6b1ac6207 100644
--- a/embassy-stm32/src/dma/mod.rs
+++ b/embassy-stm32/src/dma/mod.rs
@@ -1,19 +1,10 @@
1//! Direct Memory Access (DMA) 1//! Direct Memory Access (DMA)
2#![macro_use]
2 3
3#[cfg(dma)] 4#[cfg(any(bdma, dma))]
4pub(crate) mod dma; 5mod dma_bdma;
5#[cfg(dma)] 6#[cfg(any(bdma, dma))]
6pub use dma::*; 7pub use dma_bdma::*;
7
8// stm32h7 has both dma and bdma. In that case, we export dma as "main" dma,
9// and bdma as "secondary", under `embassy_stm32::dma::bdma`.
10#[cfg(all(bdma, dma))]
11pub mod bdma;
12
13#[cfg(all(bdma, not(dma)))]
14pub(crate) mod bdma;
15#[cfg(all(bdma, not(dma)))]
16pub use bdma::*;
17 8
18#[cfg(gpdma)] 9#[cfg(gpdma)]
19pub(crate) mod gpdma; 10pub(crate) mod gpdma;
@@ -22,16 +13,16 @@ pub use gpdma::*;
22 13
23#[cfg(dmamux)] 14#[cfg(dmamux)]
24mod dmamux; 15mod dmamux;
16#[cfg(dmamux)]
17pub use dmamux::*;
25 18
26pub(crate) mod ringbuffer; 19pub(crate) mod ringbuffer;
27pub mod word; 20pub mod word;
28 21
29use core::mem; 22use core::mem;
30 23
31use embassy_hal_internal::impl_peripheral; 24use embassy_hal_internal::{impl_peripheral, Peripheral};
32 25
33#[cfg(dmamux)]
34pub use self::dmamux::*;
35use crate::interrupt::Priority; 26use crate::interrupt::Priority;
36 27
37#[derive(Debug, Copy, Clone, PartialEq, Eq)] 28#[derive(Debug, Copy, Clone, PartialEq, Eq)]
@@ -41,6 +32,73 @@ enum Dir {
41 PeripheralToMemory, 32 PeripheralToMemory,
42} 33}
43 34
35/// DMA request type alias. (also known as DMA channel number in some chips)
36#[cfg(any(dma_v2, bdma_v2, gpdma, dmamux))]
37pub type Request = u8;
38/// DMA request type alias. (also known as DMA channel number in some chips)
39#[cfg(not(any(dma_v2, bdma_v2, gpdma, dmamux)))]
40pub type Request = ();
41
42pub(crate) mod sealed {
43 pub trait Channel {
44 fn id(&self) -> u8;
45 }
46 pub trait ChannelInterrupt {
47 unsafe fn on_irq();
48 }
49}
50
51/// DMA channel.
52pub trait Channel: sealed::Channel + Peripheral<P = Self> + Into<AnyChannel> + 'static {
53 /// Type-erase (degrade) this pin into an `AnyChannel`.
54 ///
55 /// This converts DMA channel singletons (`DMA1_CH3`, `DMA2_CH1`, ...), which
56 /// are all different types, into the same type. It is useful for
57 /// creating arrays of channels, or avoiding generics.
58 #[inline]
59 fn degrade(self) -> AnyChannel {
60 AnyChannel { id: self.id() }
61 }
62}
63
64macro_rules! dma_channel_impl {
65 ($channel_peri:ident, $index:expr) => {
66 impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri {
67 fn id(&self) -> u8 {
68 $index
69 }
70 }
71 impl crate::dma::sealed::ChannelInterrupt for crate::peripherals::$channel_peri {
72 unsafe fn on_irq() {
73 crate::dma::AnyChannel { id: $index }.on_irq();
74 }
75 }
76
77 impl crate::dma::Channel for crate::peripherals::$channel_peri {}
78
79 impl From<crate::peripherals::$channel_peri> for crate::dma::AnyChannel {
80 fn from(x: crate::peripherals::$channel_peri) -> Self {
81 crate::dma::Channel::degrade(x)
82 }
83 }
84 };
85}
86
87/// Type-erased DMA channel.
88pub struct AnyChannel {
89 pub(crate) id: u8,
90}
91impl_peripheral!(AnyChannel);
92
93impl AnyChannel {
94 fn info(&self) -> &ChannelInfo {
95 &crate::_generated::DMA_CHANNELS[self.id as usize]
96 }
97}
98
99const CHANNEL_COUNT: usize = crate::_generated::DMA_CHANNELS.len();
100static STATE: [ChannelState; CHANNEL_COUNT] = [ChannelState::NEW; CHANNEL_COUNT];
101
44/// "No DMA" placeholder. 102/// "No DMA" placeholder.
45/// 103///
46/// You may pass this in place of a real DMA channel when creating a driver 104/// You may pass this in place of a real DMA channel when creating a driver
@@ -70,10 +128,14 @@ pub(crate) unsafe fn init(
70 #[cfg(dma)] dma_priority: Priority, 128 #[cfg(dma)] dma_priority: Priority,
71 #[cfg(gpdma)] gpdma_priority: Priority, 129 #[cfg(gpdma)] gpdma_priority: Priority,
72) { 130) {
73 #[cfg(bdma)] 131 #[cfg(any(dma, bdma))]
74 bdma::init(cs, bdma_priority); 132 dma_bdma::init(
75 #[cfg(dma)] 133 cs,
76 dma::init(cs, dma_priority); 134 #[cfg(dma)]
135 dma_priority,
136 #[cfg(bdma)]
137 bdma_priority,
138 );
77 #[cfg(gpdma)] 139 #[cfg(gpdma)]
78 gpdma::init(cs, gpdma_priority); 140 gpdma::init(cs, gpdma_priority);
79 #[cfg(dmamux)] 141 #[cfg(dmamux)]
diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs
index 5e647612c..b6c3e4028 100644
--- a/embassy-stm32/src/sai/mod.rs
+++ b/embassy-stm32/src/sai/mod.rs
@@ -501,9 +501,9 @@ impl Config {
501 } 501 }
502} 502}
503 503
504enum RingBuffer<'d, C: Channel, W: word::Word> { 504enum RingBuffer<'d, W: word::Word> {
505 Writable(WritableRingBuffer<'d, C, W>), 505 Writable(WritableRingBuffer<'d, W>),
506 Readable(ReadableRingBuffer<'d, C, W>), 506 Readable(ReadableRingBuffer<'d, W>),
507} 507}
508 508
509#[cfg(any(sai_v1, sai_v2, sai_v3, sai_v4))] 509#[cfg(any(sai_v1, sai_v2, sai_v3, sai_v4))]
@@ -528,13 +528,13 @@ fn get_af_types(mode: Mode, tx_rx: TxRx) -> (AFType, AFType) {
528 ) 528 )
529} 529}
530 530
531fn get_ring_buffer<'d, T: Instance, C: Channel, W: word::Word>( 531fn get_ring_buffer<'d, T: Instance, W: word::Word>(
532 dma: impl Peripheral<P = C> + 'd, 532 dma: impl Peripheral<P = impl Channel> + 'd,
533 dma_buf: &'d mut [W], 533 dma_buf: &'d mut [W],
534 request: Request, 534 request: Request,
535 sub_block: WhichSubBlock, 535 sub_block: WhichSubBlock,
536 tx_rx: TxRx, 536 tx_rx: TxRx,
537) -> RingBuffer<'d, C, W> { 537) -> RingBuffer<'d, W> {
538 let opts = TransferOptions { 538 let opts = TransferOptions {
539 half_transfer_ir: true, 539 half_transfer_ir: true,
540 //the new_write() and new_read() always use circular mode 540 //the new_write() and new_read() always use circular mode
@@ -593,17 +593,17 @@ pub fn split_subblocks<'d, T: Instance>(peri: impl Peripheral<P = T> + 'd) -> (S
593} 593}
594 594
595/// SAI sub-block driver. 595/// SAI sub-block driver.
596pub struct Sai<'d, T: Instance, C: Channel, W: word::Word> { 596pub struct Sai<'d, T: Instance, W: word::Word> {
597 _peri: PeripheralRef<'d, T>, 597 _peri: PeripheralRef<'d, T>,
598 sd: Option<PeripheralRef<'d, AnyPin>>, 598 sd: Option<PeripheralRef<'d, AnyPin>>,
599 fs: Option<PeripheralRef<'d, AnyPin>>, 599 fs: Option<PeripheralRef<'d, AnyPin>>,
600 sck: Option<PeripheralRef<'d, AnyPin>>, 600 sck: Option<PeripheralRef<'d, AnyPin>>,
601 mclk: Option<PeripheralRef<'d, AnyPin>>, 601 mclk: Option<PeripheralRef<'d, AnyPin>>,
602 ring_buffer: RingBuffer<'d, C, W>, 602 ring_buffer: RingBuffer<'d, W>,
603 sub_block: WhichSubBlock, 603 sub_block: WhichSubBlock,
604} 604}
605 605
606impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> { 606impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> {
607 /// Create a new SAI driver in asynchronous mode with MCLK. 607 /// Create a new SAI driver in asynchronous mode with MCLK.
608 /// 608 ///
609 /// You can obtain the [`SubBlock`] with [`split_subblocks`]. 609 /// You can obtain the [`SubBlock`] with [`split_subblocks`].
@@ -613,13 +613,10 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
613 sd: impl Peripheral<P = impl SdPin<T, S>> + 'd, 613 sd: impl Peripheral<P = impl SdPin<T, S>> + 'd,
614 fs: impl Peripheral<P = impl FsPin<T, S>> + 'd, 614 fs: impl Peripheral<P = impl FsPin<T, S>> + 'd,
615 mclk: impl Peripheral<P = impl MclkPin<T, S>> + 'd, 615 mclk: impl Peripheral<P = impl MclkPin<T, S>> + 'd,
616 dma: impl Peripheral<P = C> + 'd, 616 dma: impl Peripheral<P = impl Channel + Dma<T, S>> + 'd,
617 dma_buf: &'d mut [W], 617 dma_buf: &'d mut [W],
618 mut config: Config, 618 mut config: Config,
619 ) -> Self 619 ) -> Self {
620 where
621 C: Channel + Dma<T, S>,
622 {
623 into_ref!(mclk); 620 into_ref!(mclk);
624 621
625 let (_sd_af_type, ck_af_type) = get_af_types(config.mode, config.tx_rx); 622 let (_sd_af_type, ck_af_type) = get_af_types(config.mode, config.tx_rx);
@@ -642,13 +639,10 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
642 sck: impl Peripheral<P = impl SckPin<T, S>> + 'd, 639 sck: impl Peripheral<P = impl SckPin<T, S>> + 'd,
643 sd: impl Peripheral<P = impl SdPin<T, S>> + 'd, 640 sd: impl Peripheral<P = impl SdPin<T, S>> + 'd,
644 fs: impl Peripheral<P = impl FsPin<T, S>> + 'd, 641 fs: impl Peripheral<P = impl FsPin<T, S>> + 'd,
645 dma: impl Peripheral<P = C> + 'd, 642 dma: impl Peripheral<P = impl Channel + Dma<T, S>> + 'd,
646 dma_buf: &'d mut [W], 643 dma_buf: &'d mut [W],
647 config: Config, 644 config: Config,
648 ) -> Self 645 ) -> Self {
649 where
650 C: Channel + Dma<T, S>,
651 {
652 let peri = peri.peri; 646 let peri = peri.peri;
653 into_ref!(peri, dma, sck, sd, fs); 647 into_ref!(peri, dma, sck, sd, fs);
654 648
@@ -671,7 +665,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
671 None, 665 None,
672 Some(sd.map_into()), 666 Some(sd.map_into()),
673 Some(fs.map_into()), 667 Some(fs.map_into()),
674 get_ring_buffer::<T, C, W>(dma, dma_buf, request, sub_block, config.tx_rx), 668 get_ring_buffer::<T, W>(dma, dma_buf, request, sub_block, config.tx_rx),
675 config, 669 config,
676 ) 670 )
677 } 671 }
@@ -682,13 +676,10 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
682 pub fn new_synchronous<S: SubBlockInstance>( 676 pub fn new_synchronous<S: SubBlockInstance>(
683 peri: SubBlock<'d, T, S>, 677 peri: SubBlock<'d, T, S>,
684 sd: impl Peripheral<P = impl SdPin<T, S>> + 'd, 678 sd: impl Peripheral<P = impl SdPin<T, S>> + 'd,
685 dma: impl Peripheral<P = C> + 'd, 679 dma: impl Peripheral<P = impl Channel + Dma<T, S>> + 'd,
686 dma_buf: &'d mut [W], 680 dma_buf: &'d mut [W],
687 mut config: Config, 681 mut config: Config,
688 ) -> Self 682 ) -> Self {
689 where
690 C: Channel + Dma<T, S>,
691 {
692 update_synchronous_config(&mut config); 683 update_synchronous_config(&mut config);
693 684
694 let peri = peri.peri; 685 let peri = peri.peri;
@@ -709,7 +700,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
709 None, 700 None,
710 Some(sd.map_into()), 701 Some(sd.map_into()),
711 None, 702 None,
712 get_ring_buffer::<T, C, W>(dma, dma_buf, request, sub_block, config.tx_rx), 703 get_ring_buffer::<T, W>(dma, dma_buf, request, sub_block, config.tx_rx),
713 config, 704 config,
714 ) 705 )
715 } 706 }
@@ -721,7 +712,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
721 mclk: Option<PeripheralRef<'d, AnyPin>>, 712 mclk: Option<PeripheralRef<'d, AnyPin>>,
722 sd: Option<PeripheralRef<'d, AnyPin>>, 713 sd: Option<PeripheralRef<'d, AnyPin>>,
723 fs: Option<PeripheralRef<'d, AnyPin>>, 714 fs: Option<PeripheralRef<'d, AnyPin>>,
724 ring_buffer: RingBuffer<'d, C, W>, 715 ring_buffer: RingBuffer<'d, W>,
725 config: Config, 716 config: Config,
726 ) -> Self { 717 ) -> Self {
727 #[cfg(any(sai_v1, sai_v2, sai_v3, sai_v4))] 718 #[cfg(any(sai_v1, sai_v2, sai_v3, sai_v4))]
@@ -830,7 +821,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
830 } 821 }
831 } 822 }
832 823
833 fn is_transmitter(ring_buffer: &RingBuffer<C, W>) -> bool { 824 fn is_transmitter(ring_buffer: &RingBuffer<W>) -> bool {
834 match ring_buffer { 825 match ring_buffer {
835 RingBuffer::Writable(_) => true, 826 RingBuffer::Writable(_) => true,
836 _ => false, 827 _ => false,
@@ -889,7 +880,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
889 } 880 }
890} 881}
891 882
892impl<'d, T: Instance, C: Channel, W: word::Word> Drop for Sai<'d, T, C, W> { 883impl<'d, T: Instance, W: word::Word> Drop for Sai<'d, T, W> {
893 fn drop(&mut self) { 884 fn drop(&mut self) {
894 let ch = T::REGS.ch(self.sub_block as usize); 885 let ch = T::REGS.ch(self.sub_block as usize);
895 ch.cr1().modify(|w| w.set_saien(false)); 886 ch.cr1().modify(|w| w.set_saien(false));
diff --git a/embassy-stm32/src/sdmmc/mod.rs b/embassy-stm32/src/sdmmc/mod.rs
index 61589a215..bf1d2ca9b 100644
--- a/embassy-stm32/src/sdmmc/mod.rs
+++ b/embassy-stm32/src/sdmmc/mod.rs
@@ -228,10 +228,10 @@ fn clk_div(ker_ck: Hertz, sdmmc_ck: u32) -> Result<(bool, u16, Hertz), Error> {
228} 228}
229 229
230#[cfg(sdmmc_v1)] 230#[cfg(sdmmc_v1)]
231type Transfer<'a, C> = crate::dma::Transfer<'a, C>; 231type Transfer<'a> = crate::dma::Transfer<'a>;
232#[cfg(sdmmc_v2)] 232#[cfg(sdmmc_v2)]
233struct Transfer<'a, C> { 233struct Transfer<'a> {
234 _dummy: core::marker::PhantomData<&'a mut C>, 234 _dummy: PhantomData<&'a ()>,
235} 235}
236 236
237#[cfg(all(sdmmc_v1, dma))] 237#[cfg(all(sdmmc_v1, dma))]
@@ -548,7 +548,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
548 buffer: &'a mut [u32], 548 buffer: &'a mut [u32],
549 length_bytes: u32, 549 length_bytes: u32,
550 block_size: u8, 550 block_size: u8,
551 ) -> Transfer<'a, Dma> { 551 ) -> Transfer<'a> {
552 assert!(block_size <= 14, "Block size up to 2^14 bytes"); 552 assert!(block_size <= 14, "Block size up to 2^14 bytes");
553 let regs = T::regs(); 553 let regs = T::regs();
554 554
@@ -596,12 +596,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
596 /// # Safety 596 /// # Safety
597 /// 597 ///
598 /// `buffer` must be valid for the whole transfer and word aligned 598 /// `buffer` must be valid for the whole transfer and word aligned
599 fn prepare_datapath_write<'a>( 599 fn prepare_datapath_write<'a>(&'a mut self, buffer: &'a [u32], length_bytes: u32, block_size: u8) -> Transfer<'a> {
600 &'a mut self,
601 buffer: &'a [u32],
602 length_bytes: u32,
603 block_size: u8,
604 ) -> Transfer<'a, Dma> {
605 assert!(block_size <= 14, "Block size up to 2^14 bytes"); 600 assert!(block_size <= 14, "Block size up to 2^14 bytes");
606 let regs = T::regs(); 601 let regs = T::regs();
607 602
diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs
index a0ab060a3..b852f0176 100644
--- a/embassy-stm32/src/usart/ringbuffered.rs
+++ b/embassy-stm32/src/usart/ringbuffered.rs
@@ -7,19 +7,19 @@ use embassy_embedded_hal::SetConfig;
7use embassy_hal_internal::PeripheralRef; 7use embassy_hal_internal::PeripheralRef;
8use futures::future::{select, Either}; 8use futures::future::{select, Either};
9 9
10use super::{clear_interrupt_flags, rdr, reconfigure, sr, BasicInstance, Config, ConfigError, Error, RxDma, UartRx}; 10use super::{clear_interrupt_flags, rdr, reconfigure, sr, BasicInstance, Config, ConfigError, Error, UartRx};
11use crate::dma::ReadableRingBuffer; 11use crate::dma::ReadableRingBuffer;
12use crate::usart::{Regs, Sr}; 12use crate::usart::{Regs, Sr};
13 13
14/// Rx-only Ring-buffered UART Driver 14/// Rx-only Ring-buffered UART Driver
15/// 15///
16/// Created with [UartRx::into_ring_buffered] 16/// Created with [UartRx::into_ring_buffered]
17pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> { 17pub struct RingBufferedUartRx<'d, T: BasicInstance> {
18 _peri: PeripheralRef<'d, T>, 18 _peri: PeripheralRef<'d, T>,
19 ring_buf: ReadableRingBuffer<'d, RxDma, u8>, 19 ring_buf: ReadableRingBuffer<'d, u8>,
20} 20}
21 21
22impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> SetConfig for RingBufferedUartRx<'d, T, RxDma> { 22impl<'d, T: BasicInstance> SetConfig for RingBufferedUartRx<'d, T> {
23 type Config = Config; 23 type Config = Config;
24 type ConfigError = ConfigError; 24 type ConfigError = ConfigError;
25 25
@@ -32,7 +32,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
32 /// Turn the `UartRx` into a buffered uart which can continously receive in the background 32 /// Turn the `UartRx` into a buffered uart which can continously receive in the background
33 /// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the 33 /// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the
34 /// DMA controller, and must be large enough to prevent overflows. 34 /// DMA controller, and must be large enough to prevent overflows.
35 pub fn into_ring_buffered(self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d, T, RxDma> { 35 pub fn into_ring_buffered(self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d, T> {
36 assert!(!dma_buf.is_empty() && dma_buf.len() <= 0xFFFF); 36 assert!(!dma_buf.is_empty() && dma_buf.len() <= 0xFFFF);
37 37
38 let request = self.rx_dma.request(); 38 let request = self.rx_dma.request();
@@ -51,7 +51,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
51 } 51 }
52} 52}
53 53
54impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> { 54impl<'d, T: BasicInstance> RingBufferedUartRx<'d, T> {
55 /// Clear the ring buffer and start receiving in the background 55 /// Clear the ring buffer and start receiving in the background
56 pub fn start(&mut self) -> Result<(), Error> { 56 pub fn start(&mut self) -> Result<(), Error> {
57 // Clear the ring buffer so that it is ready to receive data 57 // Clear the ring buffer so that it is ready to receive data
@@ -208,7 +208,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
208 } 208 }
209} 209}
210 210
211impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T, RxDma> { 211impl<T: BasicInstance> Drop for RingBufferedUartRx<'_, T> {
212 fn drop(&mut self) { 212 fn drop(&mut self) {
213 self.teardown_uart(); 213 self.teardown_uart();
214 214
@@ -245,18 +245,16 @@ fn clear_idle_flag(r: Regs) -> Sr {
245 sr 245 sr
246} 246}
247 247
248impl<T, Rx> embedded_io_async::ErrorType for RingBufferedUartRx<'_, T, Rx> 248impl<T> embedded_io_async::ErrorType for RingBufferedUartRx<'_, T>
249where 249where
250 T: BasicInstance, 250 T: BasicInstance,
251 Rx: RxDma<T>,
252{ 251{
253 type Error = Error; 252 type Error = Error;
254} 253}
255 254
256impl<T, Rx> embedded_io_async::Read for RingBufferedUartRx<'_, T, Rx> 255impl<T> embedded_io_async::Read for RingBufferedUartRx<'_, T>
257where 256where
258 T: BasicInstance, 257 T: BasicInstance,
259 Rx: RxDma<T>,
260{ 258{
261 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> { 259 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
262 self.read(buf).await 260 self.read(buf).await