aboutsummaryrefslogtreecommitdiff
path: root/embassy-mspm0/src/dma.rs
diff options
context:
space:
mode:
Diffstat (limited to 'embassy-mspm0/src/dma.rs')
-rw-r--r--embassy-mspm0/src/dma.rs626
1 files changed, 626 insertions, 0 deletions
diff --git a/embassy-mspm0/src/dma.rs b/embassy-mspm0/src/dma.rs
new file mode 100644
index 000000000..66b79709c
--- /dev/null
+++ b/embassy-mspm0/src/dma.rs
@@ -0,0 +1,626 @@
1//! Direct Memory Access (DMA)
2
3#![macro_use]
4
5use core::future::Future;
6use core::mem;
7use core::pin::Pin;
8use core::sync::atomic::{compiler_fence, Ordering};
9use core::task::{Context, Poll};
10
11use critical_section::CriticalSection;
12use embassy_hal_internal::interrupt::InterruptExt;
13use embassy_hal_internal::{impl_peripheral, PeripheralType};
14use embassy_sync::waitqueue::AtomicWaker;
15use mspm0_metapac::common::{Reg, RW};
16use mspm0_metapac::dma::regs;
17use mspm0_metapac::dma::vals::{self, Autoen, Em, Incr, Preirq, Wdth};
18
19use crate::{interrupt, pac, Peri};
20
/// The burst size of a DMA transfer.
///
/// After a burst completes, the block transfer is interrupted and channel priority is
/// reevaluated before the remaining transfers continue.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BurstSize {
    /// The whole block transfer is completed in one transfer without interruption.
    Complete,

    /// The burst size is 8, after 9 transfers the block transfer is interrupted and the priority
    /// is reevaluated.
    _8,

    /// The burst size is 16, after 17 transfers the block transfer is interrupted and the priority
    /// is reevaluated.
    _16,

    /// The burst size is 32, after 32 transfers the block transfer is interrupted and the priority
    /// is reevaluated.
    // NOTE(review): the `_8`/`_16` docs follow an "N + 1 transfers" pattern, so "32" here may be
    // a typo for 33 — confirm against the TRM (SLAU846) before changing.
    _32,
}
39
/// DMA channel.
///
/// Implemented by every basic DMA channel peripheral. The `Into<AnyChannel>` bound allows
/// type-erasing a concrete channel into [`AnyChannel`].
#[allow(private_bounds)]
pub trait Channel: Into<AnyChannel> + PeripheralType {}
43
/// Full DMA channel.
///
/// A full channel supports everything a basic [`Channel`] does and can additionally be
/// type-erased into [`AnyFullChannel`].
#[allow(private_bounds)]
pub trait FullChannel: Channel + Into<AnyFullChannel> {}
47
/// Type-erased DMA channel.
pub struct AnyChannel {
    // Hardware channel index, used to address the per-channel register blocks.
    pub(crate) id: u8,
}
impl_peripheral!(AnyChannel);

impl SealedChannel for AnyChannel {
    fn id(&self) -> u8 {
        self.id
    }
}
impl Channel for AnyChannel {}
60
/// Type-erased full DMA channel.
pub struct AnyFullChannel {
    // Hardware channel index, used to address the per-channel register blocks.
    pub(crate) id: u8,
}
impl_peripheral!(AnyFullChannel);

impl SealedChannel for AnyFullChannel {
    fn id(&self) -> u8 {
        self.id
    }
}
impl Channel for AnyFullChannel {}
impl FullChannel for AnyFullChannel {}

// A full channel can always be downgraded to a basic channel.
impl From<AnyFullChannel> for AnyChannel {
    fn from(value: AnyFullChannel) -> Self {
        Self { id: value.id }
    }
}
80
/// A word type that the DMA hardware can transfer.
///
/// Sealed: only the integer widths supported by the hardware implement this.
#[allow(private_bounds)]
pub trait Word: SealedWord {
    /// Size in bytes for the width.
    // NOTE(review): returns `isize` rather than the conventional `usize` — presumably for
    // pointer-offset arithmetic; signature kept as-is.
    fn size() -> isize;
}
86
87impl SealedWord for u8 {
88 fn width() -> vals::Wdth {
89 vals::Wdth::BYTE
90 }
91}
92impl Word for u8 {
93 fn size() -> isize {
94 1
95 }
96}
97
98impl SealedWord for u16 {
99 fn width() -> vals::Wdth {
100 vals::Wdth::HALF
101 }
102}
103impl Word for u16 {
104 fn size() -> isize {
105 2
106 }
107}
108
109impl SealedWord for u32 {
110 fn width() -> vals::Wdth {
111 vals::Wdth::WORD
112 }
113}
114impl Word for u32 {
115 fn size() -> isize {
116 4
117 }
118}
119
120impl SealedWord for u64 {
121 fn width() -> vals::Wdth {
122 vals::Wdth::LONG
123 }
124}
125impl Word for u64 {
126 fn size() -> isize {
127 8
128 }
129}
130
131// TODO: u128 (LONGLONG) support. G350x does support it, but other parts do not such as C110x. More metadata is
132// needed to properly enable this.
133// impl SealedWord for u128 {
134// fn width() -> vals::Wdth {
135// vals::Wdth::LONGLONG
136// }
137// }
138// impl Word for u128 {
139// fn size() -> isize {
140// 16
141// }
142// }
143
/// DMA transfer errors.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA transfer is too large.
    ///
    /// The hardware limits the DMA to 16384 transfers per channel at a time. This means that transferring
    /// 16384 `u8` and 16384 `u64` are equivalent, since the DMA must copy 16384 values.
    // NOTE(review): `verify_transfer` actually rejects lengths above `u16::MAX` (65535), not
    // 16384 — reconcile one of the two against the part's SZ register width.
    TooManyTransfers,
}
153
/// DMA transfer mode for basic channels.
///
/// In both modes the channel disables itself once the programmed number of transfers has
/// completed (the hardware clears the enable bit).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TransferMode {
    /// Each DMA trigger will transfer a single value.
    Single,

    /// Each DMA trigger will transfer the complete block with one trigger.
    Block,
}
164
/// DMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// DMA transfer mode.
    ///
    /// Defaults to [`TransferMode::Single`].
    pub mode: TransferMode,
    // TODO: Read and write stride.
}
174
175impl Default for TransferOptions {
176 fn default() -> Self {
177 Self {
178 mode: TransferMode::Single,
179 }
180 }
181}
182
/// DMA transfer.
///
/// Completes (as a [`Future`]) when the channel's interrupt fires. Dropping the transfer
/// requests the channel to pause and busy-waits until the channel reports it is stopped.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    // Type-erased channel driving this transfer.
    channel: Peri<'a, AnyChannel>,
}
188
impl<'a> Transfer<'a> {
    /// Software trigger source.
    ///
    /// Using this trigger source means that a transfer will start immediately rather than waiting for
    /// a hardware event. This can be useful if you want to do a DMA accelerated memcpy.
    pub const SOFTWARE_TRIGGER: u8 = 0;

    /// Create a new read DMA transfer (fixed source address into a memory buffer).
    ///
    /// The source address is not incremented; the destination advances one element per
    /// transfer.
    ///
    /// # Errors
    ///
    /// Returns [`Error::TooManyTransfers`] if `dst` has more elements than the hardware
    /// transfer counter can hold.
    ///
    /// # Safety
    ///
    /// `src` must be valid for reads for the entire lifetime of the transfer.
    pub unsafe fn new_read<SW: Word, DW: Word>(
        channel: Peri<'a, impl Channel>,
        trigger_source: u8,
        src: *mut SW,
        dst: &'a mut [DW],
        options: TransferOptions,
    ) -> Result<Self, Error> {
        Self::new_read_raw(channel, trigger_source, src, dst, options)
    }

    /// Create a new read DMA transfer, using raw pointers.
    ///
    /// # Safety
    ///
    /// Both `src` and `dst` must be valid for the entire lifetime of the transfer.
    pub unsafe fn new_read_raw<SW: Word, DW: Word>(
        channel: Peri<'a, impl Channel>,
        trigger_source: u8,
        src: *mut SW,
        dst: *mut [DW],
        options: TransferOptions,
    ) -> Result<Self, Error> {
        // Reject lengths that do not fit the transfer counter.
        verify_transfer::<DW>(dst)?;

        let channel = channel.into();
        channel.configure(
            trigger_source,
            src.cast(),
            SW::width(),
            dst.cast(),
            DW::width(),
            dst.len() as u16,
            false, // do not increment src (fixed source, e.g. a peripheral register)
            true,  // increment dst (memory buffer)
            options,
        );
        channel.start();

        Ok(Self { channel })
    }

    /// Create a new write DMA transfer (memory buffer to a fixed destination address).
    ///
    /// The source advances one element per transfer; the destination address is not
    /// incremented.
    ///
    /// # Errors
    ///
    /// Returns [`Error::TooManyTransfers`] if `src` has more elements than the hardware
    /// transfer counter can hold.
    ///
    /// # Safety
    ///
    /// `dst` must be valid for writes for the entire lifetime of the transfer.
    pub unsafe fn new_write<SW: Word, DW: Word>(
        channel: Peri<'a, impl Channel>,
        trigger_source: u8,
        src: &'a [SW],
        dst: *mut DW,
        options: TransferOptions,
    ) -> Result<Self, Error> {
        Self::new_write_raw(channel, trigger_source, src, dst, options)
    }

    /// Create a new write DMA transfer, using raw pointers.
    ///
    /// # Safety
    ///
    /// Both `src` and `dst` must be valid for the entire lifetime of the transfer.
    pub unsafe fn new_write_raw<SW: Word, DW: Word>(
        channel: Peri<'a, impl Channel>,
        trigger_source: u8,
        src: *const [SW],
        dst: *mut DW,
        options: TransferOptions,
    ) -> Result<Self, Error> {
        // Reject lengths that do not fit the transfer counter.
        verify_transfer::<SW>(src)?;

        let channel = channel.into();
        channel.configure(
            trigger_source,
            src.cast(),
            SW::width(),
            dst.cast(),
            DW::width(),
            src.len() as u16,
            true,  // increment src (memory buffer)
            false, // do not increment dst (fixed destination, e.g. a peripheral register)
            options,
        );
        channel.start();

        Ok(Self { channel })
    }

    // TODO: Copy between slices.

    /// Request the transfer to resume.
    pub fn resume(&mut self) {
        self.channel.resume();
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`resume`](Self::resume).
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause();
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns [`false`], it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_pause`](Self::request_pause).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);

        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);

        // Prevent drop from being called since we ran to completion (drop will try to pause).
        mem::forget(self);
    }
}
309
// `Transfer` holds no self-references, so it is trivially `Unpin`.
impl<'a> Unpin for Transfer<'a> {}
impl<'a> Future for Transfer<'a> {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state: &ChannelState = &STATE[self.channel.id as usize];

        // Register the waker before checking the channel state, so a completion
        // interrupt firing in between still wakes this task.
        state.waker.register(cx.waker());

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);

        if self.channel.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}
329
impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        // Ask the channel to stop, then busy-wait until the hardware confirms: the
        // buffers borrowed by this transfer must not be touched after `drop` returns.
        self.channel.request_pause();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
    }
}
339
340// impl details
341
342fn verify_transfer<W: Word>(ptr: *const [W]) -> Result<(), Error> {
343 if ptr.len() > (u16::MAX as usize) {
344 return Err(Error::TooManyTransfers);
345 }
346
347 // TODO: Stride checks
348
349 Ok(())
350}
351
352fn convert_burst_size(value: BurstSize) -> vals::Burstsz {
353 match value {
354 BurstSize::Complete => vals::Burstsz::INFINITI,
355 BurstSize::_8 => vals::Burstsz::BURST_8,
356 BurstSize::_16 => vals::Burstsz::BURST_16,
357 BurstSize::_32 => vals::Burstsz::BURST_32,
358 }
359}
360
361fn convert_mode(mode: TransferMode) -> vals::Tm {
362 match mode {
363 TransferMode::Single => vals::Tm::SINGLE,
364 TransferMode::Block => vals::Tm::BLOCK,
365 }
366}
367
// Number of DMA channels on this chip, taken from the generated metadata.
const CHANNEL_COUNT: usize = crate::_generated::DMA_CHANNELS;
// Per-channel waker state shared between `Transfer` futures and the DMA interrupt.
static STATE: [ChannelState; CHANNEL_COUNT] = [const { ChannelState::new() }; CHANNEL_COUNT];
370
/// Per-channel interrupt state.
struct ChannelState {
    // Woken by the DMA interrupt when the channel's transfer completes.
    waker: AtomicWaker,
}

impl ChannelState {
    /// Empty state with no registered waker; `const` so it can initialize a `static`.
    const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
        }
    }
}
382
/// Initialize the DMA controller: program the global burst-size/round-robin priority
/// options, unmask the data/address error interrupts, and enable the DMA interrupt.
///
/// # Safety
///
/// Must only be called once.
///
/// Changing the burst size mid transfer may have some odd behavior.
pub(crate) unsafe fn init(_cs: CriticalSection, burst_size: BurstSize, round_robin: bool) {
    pac::DMA.prio().modify(|prio| {
        prio.set_burstsz(convert_burst_size(burst_size));
        prio.set_roundrobin(round_robin);
    });
    // Always report data/address errors; the interrupt handler panics on them.
    pac::DMA.int_event(0).imask().modify(|w| {
        w.set_dataerr(true);
        w.set_addrerr(true);
    });

    interrupt::DMA.enable();
}
398
/// Sealed half of [`Word`]: maps a Rust integer type to the hardware width encoding.
pub(crate) trait SealedWord {
    /// The `WDTH` register-field value for this word type.
    fn width() -> vals::Wdth;
}
402
/// Sealed channel operations, all expressed in terms of the channel's hardware `id`.
pub(crate) trait SealedChannel {
    /// Hardware channel index.
    fn id(&self) -> u8;

    /// Trigger control register for this channel.
    #[inline]
    fn tctl(&self) -> Reg<regs::Tctl, RW> {
        pac::DMA.trig(self.id() as usize).tctl()
    }

    /// Channel control register.
    #[inline]
    fn ctl(&self) -> Reg<regs::Ctl, RW> {
        pac::DMA.chan(self.id() as usize).ctl()
    }

    /// Source address register.
    #[inline]
    fn sa(&self) -> Reg<u32, RW> {
        pac::DMA.chan(self.id() as usize).sa()
    }

    /// Destination address register.
    #[inline]
    fn da(&self) -> Reg<u32, RW> {
        pac::DMA.chan(self.id() as usize).da()
    }

    /// Transfer size (count) register.
    #[inline]
    fn sz(&self) -> Reg<regs::Sz, RW> {
        pac::DMA.chan(self.id() as usize).sz()
    }

    /// Enable or disable this channel's bit in the shared interrupt mask.
    #[inline]
    fn mask_interrupt(&self, enable: bool) {
        // Enabling interrupts is an RMW operation.
        critical_section::with(|_cs| {
            pac::DMA.int_event(0).imask().modify(|w| {
                w.set_ch(self.id() as usize, enable);
            });
        })
    }

    /// Program the channel (trigger, widths, addresses, count, increments, mode) and
    /// enable it. The channel is first disabled so the trigger select can be changed
    /// safely, then fully re-enabled at the end.
    ///
    /// # Safety
    ///
    /// - `src` must be valid for the lifetime of the transfer.
    /// - `dst` must be valid for the lifetime of the transfer.
    unsafe fn configure(
        &self,
        trigger_sel: u8,
        src: *const u32,
        src_wdth: Wdth,
        dst: *const u32,
        dst_wdth: Wdth,
        transfer_count: u16,
        increment_src: bool,
        increment_dst: bool,
        options: TransferOptions,
    ) {
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);

        self.ctl().modify(|w| {
            // SLAU 5.2.5:
            // "The DMATSEL bits should be modified only when the DMACTLx.DMAEN bit is
            // 0; otherwise, unpredictable DMA triggers can occur."
            //
            // We also want to stop any transfers before setup.
            w.set_en(false);
            w.set_req(false);

            // Not every part supports auto enable, so force its value to 0.
            w.set_autoen(Autoen::NONE);
            w.set_preirq(Preirq::PREIRQ_DISABLE);
            w.set_srcwdth(src_wdth);
            w.set_dstwdth(dst_wdth);
            w.set_srcincr(if increment_src {
                Incr::INCREMENT
            } else {
                Incr::UNCHANGED
            });
            w.set_dstincr(if increment_dst {
                Incr::INCREMENT
            } else {
                Incr::UNCHANGED
            });

            w.set_em(Em::NORMAL);
            // Single and block will clear the enable bit when the transfers finish.
            w.set_tm(convert_mode(options.mode));
        });

        self.tctl().write(|w| {
            w.set_tsel(trigger_sel);
            // Basic channels do not implement cross triggering.
            w.set_tint(vals::Tint::EXTERNAL);
        });

        self.sz().write(|w| {
            w.set_size(transfer_count);
        });

        self.sa().write_value(src as u32);
        self.da().write_value(dst as u32);

        // Enable the channel.
        self.ctl().modify(|w| {
            // FIXME: Why did putting set_req later fix some transfers
            w.set_en(true);
            w.set_req(true);
        });
    }

    /// Unmask this channel's interrupt and request the transfer to start.
    fn start(&self) {
        self.mask_interrupt(true);

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);

        // Request the DMA transfer to start.
        // NOTE(review): `configure` already raised REQ at the end of setup, so this is a
        // second request — presumably harmless, but confirm it is intentional.
        self.ctl().modify(|w| {
            w.set_req(true);
        });
    }

    /// Re-request a paused transfer; the channel keeps its existing configuration.
    fn resume(&self) {
        self.mask_interrupt(true);

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);

        self.ctl().modify(|w| {
            // EN is deliberately left untouched; only the request bit is raised again.
            // w.set_en(true);
            w.set_req(true);
        });
    }

    /// Ask the hardware to stop transferring. Takes effect asynchronously; poll
    /// `is_running` to observe the stop.
    fn request_pause(&self) {
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);

        // Stop the transfer.
        //
        // SLAU846 5.2.6:
        // "A DMA block transfer in progress can be stopped by clearing the DMAEN bit"
        self.ctl().modify(|w| {
            // EN is deliberately left untouched so the channel configuration survives the
            // pause; only the request bit is dropped.
            // w.set_en(false);
            w.set_req(false);
        });
    }

    /// Whether the channel is enabled and has an active/pending request.
    fn is_running(&self) -> bool {
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);

        let ctl = self.ctl().read();

        // Is the transfer requested?
        ctl.req()
            // Is the channel enabled?
            && ctl.en()
    }
}
561
// Implements the basic-channel traits for a concrete DMA channel peripheral whose
// hardware index is `$num`.
macro_rules! impl_dma_channel {
    ($instance: ident, $num: expr) => {
        impl crate::dma::SealedChannel for crate::peripherals::$instance {
            fn id(&self) -> u8 {
                $num
            }
        }

        impl From<crate::peripherals::$instance> for crate::dma::AnyChannel {
            fn from(value: crate::peripherals::$instance) -> Self {
                use crate::dma::SealedChannel;

                Self { id: value.id() }
            }
        }

        impl crate::dma::Channel for crate::peripherals::$instance {}
    };
}
581
// Implements both the basic- and full-channel traits for a concrete DMA channel
// peripheral whose hardware index is `$num`.
//
// C1104 has no full DMA channels.
#[allow(unused_macros)]
macro_rules! impl_full_dma_channel {
    ($instance: ident, $num: expr) => {
        impl_dma_channel!($instance, $num);

        impl From<crate::peripherals::$instance> for crate::dma::AnyFullChannel {
            fn from(value: crate::peripherals::$instance) -> Self {
                use crate::dma::SealedChannel;

                Self { id: value.id() }
            }
        }

        impl crate::dma::FullChannel for crate::peripherals::$instance {}
    };
}
599
// DMA interrupt handler: panics on data/address errors, wakes the future of every
// channel whose completion flag is set, and masks that channel's interrupt again.
#[cfg(feature = "rt")]
#[interrupt]
fn DMA() {
    use crate::BitIter;

    let events = pac::DMA.int_event(0);
    let mis = events.mis().read();

    // TODO: Handle DATAERR and ADDRERR? However we do not know which channel causes an error.
    if mis.dataerr() {
        panic!("DMA data error");
    } else if mis.addrerr() {
        panic!("DMA address error")
    }

    // Only bits 0..=15 are per-channel completion flags; higher bits (preirq events)
    // are masked off and ignored here.
    for i in BitIter(mis.0 & 0x0000_FFFF) {
        if let Some(state) = STATE.get(i as usize) {
            // Wake the future waiting on this channel.
            state.waker.wake();

            // Mask this channel's interrupt until the next `start`/`resume` re-enables it;
            // the woken future observes completion through `is_running`.
            events.imask().modify(|w| {
                w.set_ch(i as usize, false);
            });
        }
    }
}