aboutsummaryrefslogtreecommitdiff
path: root/embassy-mcxa/src/dma.rs
diff options
context:
space:
mode:
authorRaul Alimbekov <[email protected]>2025-12-16 09:05:22 +0300
committerGitHub <[email protected]>2025-12-16 09:05:22 +0300
commitc9a04b4b732b7a3b696eb8223664c1a7942b1875 (patch)
tree6dbe5c02e66eed8d8762f13f95afd24f8db2b38c /embassy-mcxa/src/dma.rs
parentcde24a3ef1117653ba5ed4184102b33f745782fb (diff)
parent5ae6e060ec1c90561719aabdc29d5b6e7b8b0a82 (diff)
Merge branch 'main' into main
Diffstat (limited to 'embassy-mcxa/src/dma.rs')
-rw-r--r--embassy-mcxa/src/dma.rs2602
1 files changed, 2602 insertions, 0 deletions
diff --git a/embassy-mcxa/src/dma.rs b/embassy-mcxa/src/dma.rs
new file mode 100644
index 000000000..8d519d99b
--- /dev/null
+++ b/embassy-mcxa/src/dma.rs
@@ -0,0 +1,2602 @@
1//! DMA driver for MCXA276.
2//!
3//! This module provides a typed channel abstraction over the EDMA_0_TCD0 array
4//! and helpers for configuring the channel MUX. The driver supports both
5//! low-level TCD configuration and higher-level async transfer APIs.
6//!
7//! # Architecture
8//!
9//! The MCXA276 has 8 DMA channels (0-7), each with its own interrupt vector.
10//! Each channel has a Transfer Control Descriptor (TCD) that defines the
11//! transfer parameters.
12//!
13//! # Choosing the Right API
14//!
15//! This module provides several API levels to match different use cases:
16//!
17//! ## High-Level Async API (Recommended for Most Users)
18//!
19//! Use the async methods when you want simple, safe DMA transfers:
20//!
21//! | Method | Description |
22//! |--------|-------------|
23//! | [`DmaChannel::mem_to_mem()`] | Memory-to-memory copy |
24//! | [`DmaChannel::memset()`] | Fill memory with a pattern |
25//! | [`DmaChannel::write()`] | Memory-to-peripheral (TX) |
26//! | [`DmaChannel::read()`] | Peripheral-to-memory (RX) |
27//!
28//! These return a [`Transfer`] future that can be `.await`ed:
29//!
30//! ```no_run
31//! # use embassy_mcxa::dma::{DmaChannel, TransferOptions};
32//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
33//! # let src = [0u32; 4];
34//! # let mut dst = [0u32; 4];
//! // Simple memory-to-memory transfer. `mem_to_mem` is a safe method that
//! // returns `Result<Transfer, Error>`, so unwrap before awaiting.
//! dma_ch.mem_to_mem(&src, &mut dst, TransferOptions::default()).unwrap().await;
39//! ```
40//!
41//! ## Setup Methods (For Peripheral Drivers)
42//!
43//! Use setup methods when you need manual lifecycle control:
44//!
45//! | Method | Description |
46//! |--------|-------------|
47//! | [`DmaChannel::setup_write()`] | Configure TX without starting |
48//! | [`DmaChannel::setup_read()`] | Configure RX without starting |
49//!
50//! These configure the TCD but don't start the transfer. You control:
51//! 1. When to call [`DmaChannel::enable_request()`]
52//! 2. How to detect completion (polling or interrupts)
53//! 3. When to clean up with [`DmaChannel::clear_done()`]
54//!
55//! ## Circular/Ring Buffer API (For Continuous Reception)
56//!
57//! Use [`DmaChannel::setup_circular_read()`] for continuous data reception:
58//!
59//! ```no_run
60//! # use embassy_mcxa::dma::DmaChannel;
61//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
62//! # let uart_rx_addr = 0x4000_0000 as *const u8;
63//! static mut RX_BUF: [u8; 64] = [0; 64];
64//!
//! let ring_buf = unsafe {
//!     // Go through a raw pointer: taking `&mut` of a `static mut` directly
//!     // is rejected by newer Rust editions (static_mut_refs).
//!     dma_ch.setup_circular_read(uart_rx_addr, &mut *core::ptr::addr_of_mut!(RX_BUF))
//! };
68//!
69//! // Read data as it arrives
70//! let mut buf = [0u8; 16];
71//! let n = ring_buf.read(&mut buf).await.unwrap();
72//! ```
73//!
74//! ## Scatter-Gather Builder (For Chained Transfers)
75//!
76//! Use [`ScatterGatherBuilder`] for complex multi-segment transfers:
77//!
78//! ```no_run
79//! # use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
80//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
81//! let mut builder = ScatterGatherBuilder::<u32>::new();
82//! builder.add_transfer(&src1, &mut dst1);
83//! builder.add_transfer(&src2, &mut dst2);
84//!
85//! let transfer = unsafe { builder.build(&dma_ch).unwrap() };
86//! transfer.await;
87//! ```
88//!
89//! ## Direct TCD Access (For Advanced Use Cases)
90//!
91//! For full control, use the channel's `tcd()` method to access TCD registers directly.
92//! See the `dma_*` examples for patterns.
93//!
94//! # Example
95//!
96//! ```no_run
97//! use embassy_mcxa::dma::{DmaChannel, TransferOptions, Direction};
98//!
99//! let dma_ch = DmaChannel::new(p.DMA_CH0);
100//! // Configure and trigger a transfer...
101//! ```
102
103use core::future::Future;
104use core::marker::PhantomData;
105use core::pin::Pin;
106use core::ptr::NonNull;
107use core::sync::atomic::{AtomicUsize, Ordering, fence};
108use core::task::{Context, Poll};
109
110use embassy_hal_internal::PeripheralType;
111use embassy_sync::waitqueue::AtomicWaker;
112
113use crate::clocks::Gate;
114use crate::pac;
115use crate::pac::Interrupt;
116use crate::peripherals::DMA0;
117
/// Initialize DMA controller (clock enabled, reset released, controller configured).
///
/// This function is intended to be called ONCE during HAL initialization (`hal::init()`).
///
/// The function enables the DMA0 clock, releases reset, and configures the controller
/// for normal operation with round-robin arbitration.
pub(crate) fn init() {
    // SAFETY: called once from `hal::init()` before any channel handle is
    // handed out, so there is no concurrent access to the eDMA registers.
    unsafe {
        // Enable DMA0 clock and release reset
        DMA0::enable_clock();
        DMA0::release_reset();

        // Configure DMA controller
        let dma = &(*pac::Dma0::ptr());
        dma.mp_csr().modify(|_, w| {
            w.edbg()
                .enable() // EDBG: debug-mode behavior enabled (per PAC field name)
                .erca()
                .enable() // ERCA: round-robin channel arbitration
                .halt()
                .normal_operation() // ensure the controller is not halted
                .gclc()
                .available() // GCLC/GMRC left "available" — channel linking /
                .gmrc()
                .available() // master replication config, per PAC field names
        });
    }
}
146
147// ============================================================================
148// Phase 1: Foundation Types (Embassy-aligned)
149// ============================================================================
150
/// DMA transfer direction.
///
/// Describes the two endpoints of a transfer; see the [`DmaChannel`] transfer
/// methods for how each direction configures address increments.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Direction {
    /// Transfer from memory to memory.
    MemoryToMemory,
    /// Transfer from memory to a peripheral register.
    MemoryToPeripheral,
    /// Transfer from a peripheral register to memory.
    PeripheralToMemory,
}
162
/// DMA transfer priority.
///
/// Note that the hardware encoding is inverted relative to this enum's
/// ordering: 0 is the *highest* hardware priority and 7 the lowest
/// (see [`Priority::to_hw_priority`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
    /// Low priority (channel priority 7).
    Low,
    /// Medium priority (channel priority 4).
    Medium,
    /// High priority (channel priority 1). This is the default.
    #[default]
    High,
    /// Highest priority (channel priority 0).
    Highest,
}

impl Priority {
    /// Convert to hardware priority value (0 = highest, 7 = lowest).
    ///
    /// `const` so it can be used in constant contexts, matching the other
    /// `const` conversion helpers in this module (e.g. [`WordSize`]).
    pub const fn to_hw_priority(self) -> u8 {
        match self {
            Priority::Low => 7,
            Priority::Medium => 4,
            Priority::High => 1,
            Priority::Highest => 0,
        }
    }
}
189
/// DMA transfer data width.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum WordSize {
    /// 8-bit (1 byte) transfers.
    OneByte,
    /// 16-bit (2 byte) transfers.
    TwoBytes,
    /// 32-bit (4 byte) transfers.
    #[default]
    FourBytes,
}

impl WordSize {
    /// Width of a single transfer element, in bytes.
    pub const fn bytes(self) -> usize {
        match self {
            Self::OneByte => 1,
            Self::TwoBytes => 2,
            Self::FourBytes => 4,
        }
    }

    /// Encoding used by the TCD `SSIZE`/`DSIZE` attribute fields.
    pub const fn to_hw_size(self) -> u8 {
        match self {
            Self::OneByte => 0,
            Self::TwoBytes => 1,
            Self::FourBytes => 2,
        }
    }

    /// Build a `WordSize` from a width in bytes; only 1, 2 and 4 are valid,
    /// anything else yields `None`.
    pub const fn from_bytes(bytes: u8) -> Option<Self> {
        match bytes {
            1 => Some(Self::OneByte),
            2 => Some(Self::TwoBytes),
            4 => Some(Self::FourBytes),
            _ => None,
        }
    }
}
232
/// Trait for types that can be transferred via DMA.
///
/// This provides compile-time type safety for DMA transfers: the element
/// width is derived from the Rust element type instead of being passed
/// manually.
pub trait Word: Copy + 'static {
    /// The word size for this type.
    fn size() -> WordSize;
}

// 1-byte elements.
impl Word for u8 {
    fn size() -> WordSize {
        WordSize::OneByte
    }
}

// 2-byte elements.
impl Word for u16 {
    fn size() -> WordSize {
        WordSize::TwoBytes
    }
}

// 4-byte elements.
impl Word for u32 {
    fn size() -> WordSize {
        WordSize::FourBytes
    }
}
258
/// DMA transfer options.
///
/// This struct configures various aspects of a DMA transfer.
///
/// NOTE(review): not every transfer method applies every field — e.g.
/// `mem_to_mem()` honors the two interrupt flags but leaves `priority` to the
/// controller-wide round-robin arbitration configured in `init()`. Confirm
/// per method which fields take effect.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Transfer priority.
    pub priority: Priority,
    /// Enable circular (continuous) mode.
    ///
    /// When enabled, the transfer repeats automatically after completing.
    pub circular: bool,
    /// Enable interrupt on half transfer complete.
    pub half_transfer_interrupt: bool,
    /// Enable interrupt on transfer complete.
    pub complete_transfer_interrupt: bool,
}

impl Default for TransferOptions {
    /// Defaults: high priority, one-shot (non-circular), completion interrupt
    /// enabled, half-transfer interrupt disabled.
    fn default() -> Self {
        Self {
            priority: Priority::High,
            circular: false,
            half_transfer_interrupt: false,
            complete_transfer_interrupt: true,
        }
    }
}
288
/// DMA error types.
///
/// Returned by the fallible configuration APIs (e.g. [`DmaChannel::mem_to_mem`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA controller reported a bus error.
    BusError,
    /// The transfer was aborted.
    Aborted,
    /// Configuration error (e.g., invalid parameters such as an empty or
    /// oversized buffer).
    Configuration,
    /// Buffer overrun (for ring buffers).
    Overrun,
}
302
/// Whether to enable the major loop completion interrupt.
///
/// This enum provides better readability than a boolean parameter
/// for functions that configure DMA interrupt behavior (e.g. the
/// `setup_*` methods on [`DmaChannel`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum EnableInterrupt {
    /// Enable the interrupt on major loop completion.
    Yes,
    /// Do not enable the interrupt.
    No,
}
315
316// ============================================================================
317// DMA Constants
318// ============================================================================
319
/// Maximum bytes per DMA transfer (eDMA4 CITER/BITER are 15-bit fields).
///
/// This is a hardware limitation of the eDMA4 controller. Transfers larger
/// than this must be split into multiple DMA operations.
// 2^15 - 1 = 32767.
pub const DMA_MAX_TRANSFER_SIZE: usize = 0x7FFF;
325
326// ============================================================================
327// DMA Request Source Types (Type-Safe API)
328// ============================================================================
329
/// Trait for type-safe DMA request sources.
///
/// Each peripheral that can trigger DMA requests implements this trait
/// with marker types that encode the correct request source number at
/// compile time. This prevents using the wrong request source for a
/// peripheral. See the `Lpuart*Request` marker types below.
///
/// # Example
///
/// ```ignore
/// // The LPUART2 RX request source is automatically derived from the type:
/// channel.set_request_source::<Lpuart2RxRequest>();
/// ```
///
/// This trait is sealed and cannot be implemented outside this crate.
#[allow(private_bounds)]
pub trait DmaRequest: sealed::SealedDmaRequest {
    /// The hardware request source number for the DMA mux.
    const REQUEST_NUMBER: u8;
}
350
/// Macro to define a DMA request type.
///
/// Creates a zero-sized marker type that implements `DmaRequest` with
/// the specified request number. The `SealedDmaRequest` impl keeps
/// downstream crates from adding their own (possibly bogus) sources.
macro_rules! define_dma_request {
    ($(#[$meta:meta])* $name:ident = $num:expr) => {
        $(#[$meta])*
        #[derive(Debug, Copy, Clone)]
        pub struct $name;

        impl sealed::SealedDmaRequest for $name {}

        impl DmaRequest for $name {
            const REQUEST_NUMBER: u8 = $num;
        }
    };
}
368
// LPUART DMA request sources (from MCXA276 reference manual Table 4-8)
// Pattern: RX of LPUARTn is 21 + 2*n, TX of LPUARTn is 22 + 2*n.
define_dma_request!(
    /// DMA request source for LPUART0 RX.
    Lpuart0RxRequest = 21
);
define_dma_request!(
    /// DMA request source for LPUART0 TX.
    Lpuart0TxRequest = 22
);
define_dma_request!(
    /// DMA request source for LPUART1 RX.
    Lpuart1RxRequest = 23
);
define_dma_request!(
    /// DMA request source for LPUART1 TX.
    Lpuart1TxRequest = 24
);
define_dma_request!(
    /// DMA request source for LPUART2 RX.
    Lpuart2RxRequest = 25
);
define_dma_request!(
    /// DMA request source for LPUART2 TX.
    Lpuart2TxRequest = 26
);
define_dma_request!(
    /// DMA request source for LPUART3 RX.
    Lpuart3RxRequest = 27
);
define_dma_request!(
    /// DMA request source for LPUART3 TX.
    Lpuart3TxRequest = 28
);
define_dma_request!(
    /// DMA request source for LPUART4 RX.
    Lpuart4RxRequest = 29
);
define_dma_request!(
    /// DMA request source for LPUART4 TX.
    Lpuart4TxRequest = 30
);
define_dma_request!(
    /// DMA request source for LPUART5 RX.
    Lpuart5RxRequest = 31
);
define_dma_request!(
    /// DMA request source for LPUART5 TX.
    Lpuart5TxRequest = 32
);
418
419// ============================================================================
420// Channel Trait (Sealed Pattern)
421// ============================================================================
422
mod sealed {
    use crate::pac::Interrupt;

    /// Sealed trait for DMA channels.
    ///
    /// Lives in a private module so only this crate can implement it.
    pub trait SealedChannel {
        /// Zero-based channel index into the TCD array.
        fn index(&self) -> usize;
        /// Interrupt vector for this channel.
        fn interrupt(&self) -> Interrupt;
    }

    /// Sealed trait for DMA request sources.
    pub trait SealedDmaRequest {}
}
437
/// Marker trait implemented by HAL peripheral tokens that map to a DMA0
/// channel backed by one EDMA_0_TCD0 TCD slot.
///
/// The associated consts mirror the sealed accessors so the values are
/// available at compile time (e.g. for NVIC unmasking in [`DmaChannel::new`]).
///
/// This trait is sealed and cannot be implemented outside this crate.
#[allow(private_bounds)]
pub trait Channel: sealed::SealedChannel + PeripheralType + Into<AnyChannel> + 'static {
    /// Zero-based channel index into the TCD array.
    const INDEX: usize;
    /// Interrupt vector for this channel.
    const INTERRUPT: Interrupt;
}
449
/// Type-erased DMA channel.
///
/// This allows storing DMA channels in a uniform way regardless of their
/// concrete type, useful for async transfer futures and runtime channel selection.
///
/// Holds only the channel's index and interrupt vector; it is `Copy` and
/// does not by itself confer exclusive ownership of the hardware channel.
#[derive(Debug, Clone, Copy)]
pub struct AnyChannel {
    // Zero-based index into the EDMA_0_TCD0 TCD array.
    index: usize,
    // Interrupt vector for this channel.
    interrupt: Interrupt,
}
459
460impl AnyChannel {
461 /// Get the channel index.
462 #[inline]
463 pub const fn index(&self) -> usize {
464 self.index
465 }
466
467 /// Get the channel interrupt.
468 #[inline]
469 pub const fn interrupt(&self) -> Interrupt {
470 self.interrupt
471 }
472
473 /// Get a reference to the TCD register block for this channel.
474 ///
475 /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
476 #[inline]
477 fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
478 // Safety: MCXA276 has a single eDMA instance, and we're only accessing
479 // the TCD for this specific channel
480 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
481 edma.tcd(self.index)
482 }
483
484 /// Check if the channel's DONE flag is set.
485 pub fn is_done(&self) -> bool {
486 self.tcd().ch_csr().read().done().bit_is_set()
487 }
488
489 /// Get the waker for this channel.
490 pub fn waker(&self) -> &'static AtomicWaker {
491 &STATES[self.index].waker
492 }
493}
494
// AnyChannel carries its index/interrupt at runtime, so the sealed accessors
// simply read the stored fields.
impl sealed::SealedChannel for AnyChannel {
    fn index(&self) -> usize {
        self.index
    }

    fn interrupt(&self) -> Interrupt {
        self.interrupt
    }
}
504
/// Macro to implement Channel trait for a peripheral.
///
/// For a given peripheral token it wires up: the sealed runtime accessors,
/// the compile-time `Channel` consts, and conversion into [`AnyChannel`].
macro_rules! impl_channel {
    ($peri:ident, $index:expr, $irq:ident) => {
        impl sealed::SealedChannel for crate::peripherals::$peri {
            fn index(&self) -> usize {
                $index
            }

            fn interrupt(&self) -> Interrupt {
                Interrupt::$irq
            }
        }

        impl Channel for crate::peripherals::$peri {
            const INDEX: usize = $index;
            const INTERRUPT: Interrupt = Interrupt::$irq;
        }

        impl From<crate::peripherals::$peri> for AnyChannel {
            fn from(_: crate::peripherals::$peri) -> Self {
                AnyChannel {
                    index: $index,
                    interrupt: Interrupt::$irq,
                }
            }
        }
    };
}
533
// One HAL token per hardware channel (0-7); each channel has a dedicated
// interrupt vector with a matching name.
impl_channel!(DMA_CH0, 0, DMA_CH0);
impl_channel!(DMA_CH1, 1, DMA_CH1);
impl_channel!(DMA_CH2, 2, DMA_CH2);
impl_channel!(DMA_CH3, 3, DMA_CH3);
impl_channel!(DMA_CH4, 4, DMA_CH4);
impl_channel!(DMA_CH5, 5, DMA_CH5);
impl_channel!(DMA_CH6, 6, DMA_CH6);
impl_channel!(DMA_CH7, 7, DMA_CH7);
542
/// Strongly-typed handle to a DMA0 channel.
///
/// Constructing this consumes the unique peripheral token supplied by
/// `embassy_hal_internal::peripherals!`, so safe code cannot create two
/// `DmaChannel` instances for the same hardware channel.
///
/// NOTE(review): the handle itself carries no lifetime parameter, so it can
/// outlive a borrowed token — confirm this matches the intended ownership
/// model.
pub struct DmaChannel<C: Channel> {
    // Zero-sized: the channel identity lives entirely in the type parameter.
    _ch: core::marker::PhantomData<C>,
}
551
552// ============================================================================
553// DMA Transfer Methods - API Overview
554// ============================================================================
555//
556// The DMA API provides two categories of methods for configuring transfers:
557//
558// ## 1. Async Methods (Return `Transfer` Future)
559//
560// These methods return a [`Transfer`] Future that must be `.await`ed:
561//
562// - [`write()`](DmaChannel::write) - Memory-to-peripheral using default eDMA TCD block
563// - [`read()`](DmaChannel::read) - Peripheral-to-memory using default eDMA TCD block
564// - [`write_to_peripheral()`](DmaChannel::write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
565// - [`read_from_peripheral()`](DmaChannel::read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
566// - [`mem_to_mem()`](DmaChannel::mem_to_mem) - Memory-to-memory using default eDMA TCD block
567//
568// The `Transfer` manages the DMA lifecycle automatically:
569// - Enables channel request
570// - Waits for completion via async/await
571// - Cleans up on completion
572//
573// **Important:** `Transfer::Drop` aborts the transfer if dropped before completion.
574// This means you MUST `.await` the Transfer or it will be aborted when it goes out of scope.
575//
576// **Use case:** When you want to use async/await and let the Transfer handle lifecycle management.
577//
578// ## 2. Setup Methods (Configure TCD Only)
579//
580// These methods configure the TCD but do NOT return a `Transfer`:
581//
582// - [`setup_write()`](DmaChannel::setup_write) - Memory-to-peripheral using default eDMA TCD block
583// - [`setup_read()`](DmaChannel::setup_read) - Peripheral-to-memory using default eDMA TCD block
584// - [`setup_write_to_peripheral()`](DmaChannel::setup_write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
585// - [`setup_read_from_peripheral()`](DmaChannel::setup_read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
586//
587// The caller is responsible for the complete DMA lifecycle:
588// 1. Call [`enable_request()`](DmaChannel::enable_request) to start the transfer
589// 2. Poll [`is_done()`](DmaChannel::is_done) or use interrupts to detect completion
590// 3. Call [`disable_request()`](DmaChannel::disable_request), [`clear_done()`](DmaChannel::clear_done),
591// [`clear_interrupt()`](DmaChannel::clear_interrupt) for cleanup
592//
593// **Use case:** Peripheral drivers (like LPUART) that need fine-grained control over
594// DMA setup before starting a `Transfer`.
595//
596// ============================================================================
597
598impl<C: Channel> DmaChannel<C> {
    /// Wrap a DMA channel token (takes ownership of the Peri wrapper).
    ///
    /// Unmasks this channel's NVIC interrupt so completion interrupts can be
    /// delivered. Controller-level setup happens once during `hal::init()`
    /// via `dma::init()`.
    #[inline]
    pub fn new(_ch: embassy_hal_internal::Peri<'_, C>) -> Self {
        // NOTE(review): assumes the `DMA_CHx` interrupt handlers are defined
        // elsewhere in this crate — unmasking here is otherwise benign.
        unsafe {
            cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
        }
        Self {
            _ch: core::marker::PhantomData,
        }
    }
611
    /// Channel index in the EDMA_0_TCD0 array.
    ///
    /// Mirrors [`Channel::INDEX`] as an instance method.
    #[inline]
    pub const fn index(&self) -> usize {
        C::INDEX
    }
617
618 /// Convert this typed channel into a type-erased `AnyChannel`.
619 #[inline]
620 pub fn into_any(self) -> AnyChannel {
621 AnyChannel {
622 index: C::INDEX,
623 interrupt: C::INTERRUPT,
624 }
625 }
626
    /// Get the type-erased channel info without consuming the typed handle.
    ///
    /// Note: the returned [`AnyChannel`] is `Copy` and is not tied to this
    /// handle's lifetime.
    #[inline]
    pub fn as_any(&self) -> AnyChannel {
        AnyChannel {
            index: C::INDEX,
            interrupt: C::INTERRUPT,
        }
    }
635
    /// Return a reference to the underlying TCD register block.
    ///
    /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
    ///
    /// # Note
    ///
    /// This is exposed for advanced use cases that need direct TCD access.
    /// For most use cases, prefer the higher-level transfer methods.
    #[inline]
    pub fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
        // SAFETY: MCXA276 has a single eDMA instance; the returned reference
        // covers only this channel's TCD slot (`C::INDEX`).
        let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
        edma.tcd(C::INDEX)
    }
650
    /// Zero every TCD register for this channel.
    ///
    /// Follows the NXP SDK pattern (`EDMA_TcdResetExt`): reset ALL TCD
    /// registers to 0 to clear any stale configuration from previous
    /// transfers. This is critical when reusing a channel.
    fn clear_tcd(t: &'static pac::edma_0_tcd0::Tcd) {
        t.tcd_saddr().write(|w| unsafe { w.saddr().bits(0) });
        t.tcd_soff().write(|w| unsafe { w.soff().bits(0) });
        t.tcd_attr().write(|w| unsafe { w.bits(0) });
        t.tcd_nbytes_mloffno().write(|w| unsafe { w.nbytes().bits(0) });
        t.tcd_slast_sda().write(|w| unsafe { w.slast_sda().bits(0) });
        t.tcd_daddr().write(|w| unsafe { w.daddr().bits(0) });
        t.tcd_doff().write(|w| unsafe { w.doff().bits(0) });
        t.tcd_citer_elinkno().write(|w| unsafe { w.bits(0) });
        t.tcd_dlast_sga().write(|w| unsafe { w.dlast_sga().bits(0) });
        t.tcd_csr().write(|w| unsafe { w.bits(0) }); // Clear CSR completely
        t.tcd_biter_elinkno().write(|w| unsafe { w.bits(0) });
    }
667
    /// Program the major-loop count into BITER and CITER (no channel linking).
    /// BITER is written first; CITER must match BITER when a transfer starts.
    #[inline]
    fn set_major_loop_ct_elinkno(t: &'static pac::edma_0_tcd0::Tcd, count: u16) {
        t.tcd_biter_elinkno().write(|w| unsafe { w.biter().bits(count) });
        t.tcd_citer_elinkno().write(|w| unsafe { w.citer().bits(count) });
    }

    /// Set the minor-loop byte count (NBYTES), minor-loop offsets disabled.
    #[inline]
    fn set_minor_loop_ct_no_offsets(t: &'static pac::edma_0_tcd0::Tcd, count: u32) {
        t.tcd_nbytes_mloffno().write(|w| unsafe { w.nbytes().bits(count) });
    }

    /// Clear SLAST/DLAST so neither address is adjusted after the major loop.
    #[inline]
    fn set_no_final_adjustments(t: &'static pac::edma_0_tcd0::Tcd) {
        // No source/dest adjustment after major loop
        t.tcd_slast_sda().write(|w| unsafe { w.slast_sda().bits(0) });
        t.tcd_dlast_sga().write(|w| unsafe { w.dlast_sga().bits(0) });
    }

    /// Set the source address (SADDR).
    #[inline]
    fn set_source_ptr<T>(t: &'static pac::edma_0_tcd0::Tcd, p: *const T) {
        t.tcd_saddr().write(|w| unsafe { w.saddr().bits(p as u32) });
    }

    /// Advance the source address by one element per read (SOFF = word size).
    #[inline]
    fn set_source_increment(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) {
        t.tcd_soff().write(|w| unsafe { w.soff().bits(sz.bytes() as u16) });
    }

    /// Keep the source address fixed (SOFF = 0), e.g. a pattern word or a
    /// peripheral register.
    #[inline]
    fn set_source_fixed(t: &'static pac::edma_0_tcd0::Tcd) {
        t.tcd_soff().write(|w| unsafe { w.soff().bits(0) });
    }

    /// Set the destination address (DADDR).
    #[inline]
    fn set_dest_ptr<T>(t: &'static pac::edma_0_tcd0::Tcd, p: *mut T) {
        t.tcd_daddr().write(|w| unsafe { w.daddr().bits(p as u32) });
    }

    /// Advance the destination address by one element per write (DOFF = word size).
    #[inline]
    fn set_dest_increment(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) {
        t.tcd_doff().write(|w| unsafe { w.doff().bits(sz.bytes() as u16) });
    }

    /// Keep the destination address fixed (DOFF = 0), e.g. a peripheral register.
    #[inline]
    fn set_dest_fixed(t: &'static pac::edma_0_tcd0::Tcd) {
        t.tcd_doff().write(|w| unsafe { w.doff().bits(0) });
    }

    /// Use the same element size for source and destination (SSIZE == DSIZE).
    #[inline]
    fn set_even_transfer_size(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) {
        let hw_size = sz.to_hw_size();
        t.tcd_attr()
            .write(|w| unsafe { w.ssize().bits(hw_size).dsize().bits(hw_size) });
    }

    /// Clear the channel's DONE, error and interrupt flags (all W1C) and
    /// leave all other control bits at their reset (disabled) values.
    #[inline]
    fn reset_channel_state(t: &'static pac::edma_0_tcd0::Tcd) {
        // CSR: Resets to all zeroes (disabled), "done" is cleared by writing 1
        t.ch_csr().write(|w| w.done().clear_bit_by_one());
        // ES: Resets to all zeroes (disabled), "err" is cleared by writing 1
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        // INT: Resets to all zeroes (disabled), "int" is cleared by writing 1
        t.ch_int().write(|w| w.int().clear_bit_by_one());
    }
732
    /// Start an async transfer.
    ///
    /// The channel must already be configured. This enables the channel
    /// request and returns a `Transfer` future that resolves when the
    /// DMA transfer completes.
    ///
    /// # Safety
    ///
    /// The caller must ensure the DMA channel has been properly configured
    /// and that source/destination buffers remain valid for the duration
    /// of the transfer.
    pub unsafe fn start_transfer(&self) -> Transfer<'_> {
        // Clear any previous DONE/INT flags (both are write-1-to-clear)
        let t = self.tcd();
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Enable the channel request (ERQ); the returned future then waits
        // for completion.
        t.ch_csr().modify(|_, w| w.erq().enable());

        Transfer::new(self.as_any())
    }
755
756 // ========================================================================
757 // Type-Safe Transfer Methods (Embassy-style API)
758 // ========================================================================
759
760 /// Perform a memory-to-memory DMA transfer (simplified API).
761 ///
762 /// This is a type-safe wrapper that uses the `Word` trait to determine
763 /// the correct transfer width automatically. Uses the global eDMA TCD
764 /// register accessor internally.
765 ///
766 /// # Arguments
767 ///
768 /// * `src` - Source buffer
769 /// * `dst` - Destination buffer (must be at least as large as src)
770 /// * `options` - Transfer configuration options
771 ///
772 /// # Safety
773 ///
774 /// The source and destination buffers must remain valid for the
775 /// duration of the transfer.
776 pub fn mem_to_mem<W: Word>(
777 &self,
778 src: &[W],
779 dst: &mut [W],
780 options: TransferOptions,
781 ) -> Result<Transfer<'_>, Error> {
782 let mut invalid = false;
783 invalid |= src.is_empty();
784 invalid |= src.len() > dst.len();
785 invalid |= src.len() > 0x7fff;
786 if invalid {
787 return Err(Error::Configuration);
788 }
789
790 let size = W::size();
791 let byte_count = (src.len() * size.bytes()) as u32;
792
793 let t = self.tcd();
794
795 // Reset channel state - clear DONE, disable requests, clear errors
796 Self::reset_channel_state(t);
797
798 // Memory barrier to ensure channel state is fully reset before touching TCD
799 cortex_m::asm::dsb();
800
801 Self::clear_tcd(t);
802
803 // Memory barrier after TCD reset
804 cortex_m::asm::dsb();
805
806 // Note: Priority is managed by round-robin arbitration (set in init())
807 // Per-channel priority can be configured via ch_pri() if needed
808
809 // Now configure the new transfer
810
811 // Source address and increment
812 Self::set_source_ptr(t, src.as_ptr());
813 Self::set_source_increment(t, size);
814
815 // Destination address and increment
816 Self::set_dest_ptr(t, dst.as_mut_ptr());
817 Self::set_dest_increment(t, size);
818
819 // Transfer attributes (size)
820 Self::set_even_transfer_size(t, size);
821
822 // Minor loop: transfer all bytes in one minor loop
823 Self::set_minor_loop_ct_no_offsets(t, byte_count);
824
825 // No source/dest adjustment after major loop
826 Self::set_no_final_adjustments(t);
827
828 // Major loop count = 1 (single major loop)
829 // Write BITER first, then CITER (CITER must match BITER at start)
830 Self::set_major_loop_ct_elinkno(t, 1);
831
832 // Memory barrier before setting START
833 cortex_m::asm::dsb();
834
835 // Control/status: interrupt on major complete, start
836 // Write this last after all other TCD registers are configured
837 let int_major = options.complete_transfer_interrupt;
838 t.tcd_csr().write(|w| {
839 w.intmajor()
840 .bit(int_major)
841 .inthalf()
842 .bit(options.half_transfer_interrupt)
843 .dreq()
844 .set_bit() // Auto-disable request after major loop
845 .start()
846 .set_bit() // Start the channel
847 });
848
849 Ok(Transfer::new(self.as_any()))
850 }
851
852 /// Fill a memory buffer with a pattern value (memset).
853 ///
854 /// This performs a DMA transfer where the source address remains fixed
855 /// (pattern value) while the destination address increments through the buffer.
856 /// It's useful for quickly filling large memory regions with a constant value.
857 ///
858 /// # Arguments
859 ///
860 /// * `pattern` - Reference to the pattern value (will be read repeatedly)
861 /// * `dst` - Destination buffer to fill
862 /// * `options` - Transfer configuration options
863 ///
864 /// # Example
865 ///
866 /// ```no_run
867 /// use embassy_mcxa::dma::{DmaChannel, TransferOptions};
868 ///
869 /// let dma_ch = DmaChannel::new(p.DMA_CH0);
870 /// let pattern: u32 = 0xDEADBEEF;
871 /// let mut buffer = [0u32; 256];
872 ///
873 /// unsafe {
874 /// dma_ch.memset(&pattern, &mut buffer, TransferOptions::default()).await;
875 /// }
876 /// // buffer is now filled with 0xDEADBEEF
877 /// ```
878 ///
879 pub fn memset<W: Word>(&self, pattern: &W, dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
880 assert!(!dst.is_empty());
881 assert!(dst.len() <= 0x7fff);
882
883 let size = W::size();
884 let byte_size = size.bytes();
885 // Total bytes to transfer - all in one minor loop for software-triggered transfers
886 let total_bytes = (dst.len() * byte_size) as u32;
887
888 let t = self.tcd();
889
890 // Reset channel state - clear DONE, disable requests, clear errors
891 Self::reset_channel_state(t);
892
893 // Memory barrier to ensure channel state is fully reset before touching TCD
894 cortex_m::asm::dsb();
895
896 Self::clear_tcd(t);
897
898 // Memory barrier after TCD reset
899 cortex_m::asm::dsb();
900
901 // Now configure the new transfer
902 //
903 // For software-triggered memset, we use a SINGLE minor loop that transfers
904 // all bytes at once. The source address stays fixed (SOFF=0) while the
905 // destination increments (DOFF=byte_size). The eDMA will read from the
906 // same source address for each destination word.
907 //
908 // This is necessary because the START bit only triggers ONE minor loop
909 // iteration. Using CITER>1 with software trigger would require multiple
910 // START triggers.
911
912 // Source: pattern address, fixed (soff=0)
913 Self::set_source_ptr(t, pattern);
914 Self::set_source_fixed(t);
915
916 // Destination: memory buffer, incrementing by word size
917 Self::set_dest_ptr(t, dst.as_mut_ptr());
918 Self::set_dest_increment(t, size);
919
920 // Transfer attributes - source and dest are same word size
921 Self::set_even_transfer_size(t, size);
922
923 // Minor loop: transfer ALL bytes in one minor loop (like mem_to_mem)
924 // This allows the entire transfer to complete with a single START trigger
925 Self::set_minor_loop_ct_no_offsets(t, total_bytes);
926
927 // No address adjustment after major loop
928 Self::set_no_final_adjustments(t);
929
930 // Major loop count = 1 (single major loop, all data in minor loop)
931 // Write BITER first, then CITER (CITER must match BITER at start)
932 Self::set_major_loop_ct_elinkno(t, 1);
933
934 // Memory barrier before setting START
935 cortex_m::asm::dsb();
936
937 // Control/status: interrupt on major complete, start immediately
938 // Write this last after all other TCD registers are configured
939 let int_major = options.complete_transfer_interrupt;
940 t.tcd_csr().write(|w| {
941 w.intmajor()
942 .bit(int_major)
943 .inthalf()
944 .bit(options.half_transfer_interrupt)
945 .dreq()
946 .set_bit() // Auto-disable request after major loop
947 .start()
948 .set_bit() // Start the channel
949 });
950
951 Transfer::new(self.as_any())
952 }
953
    /// Write data from memory to a peripheral register.
    ///
    /// Convenience alias for [`write_to_peripheral()`](Self::write_to_peripheral).
    ///
    /// The destination address remains fixed (peripheral register) while
    /// the source address increments through the buffer.
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `options` - Transfer configuration options
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn write<W: Word>(&self, buf: &[W], peri_addr: *mut W, options: TransferOptions) -> Transfer<'_> {
        self.write_to_peripheral(buf, peri_addr, options)
    }
972
    /// Configure a memory-to-peripheral DMA transfer without starting it.
    ///
    /// This is a convenience wrapper around [`setup_write_to_peripheral()`](Self::setup_write_to_peripheral)
    /// that uses the default eDMA TCD register block. It programs the TCD only;
    /// neither START nor ERQ is set here.
    ///
    /// This method configures the TCD but does NOT return a `Transfer`. The caller
    /// is responsible for the complete DMA lifecycle:
    /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
    /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
    /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
    ///    [`clear_interrupt()`](Self::clear_interrupt) for cleanup
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use embassy_mcxa::dma::DmaChannel;
    /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// # let uart_tx_addr = 0x4000_0000 as *mut u8;
    /// let data = [0x48, 0x65, 0x6c, 0x6c, 0x6f]; // "Hello"
    ///
    /// unsafe {
    ///     // Configure the transfer
    ///     dma_ch.setup_write(&data, uart_tx_addr, EnableInterrupt::Yes);
    ///
    ///     // Start when peripheral is ready
    ///     dma_ch.enable_request();
    ///
    ///     // Wait for completion (or use interrupt)
    ///     while !dma_ch.is_done() {}
    ///
    ///     // Clean up
    ///     dma_ch.clear_done();
    ///     dma_ch.clear_interrupt();
    /// }
    /// ```
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn setup_write<W: Word>(&self, buf: &[W], peri_addr: *mut W, enable_interrupt: EnableInterrupt) {
        self.setup_write_to_peripheral(buf, peri_addr, enable_interrupt)
    }
1022
1023 /// Write data from memory to a peripheral register.
1024 ///
1025 /// The destination address remains fixed (peripheral register) while
1026 /// the source address increments through the buffer.
1027 ///
1028 /// # Arguments
1029 ///
1030 /// * `buf` - Source buffer to write from
1031 /// * `peri_addr` - Peripheral register address
1032 /// * `options` - Transfer configuration options
1033 ///
1034 /// # Safety
1035 ///
1036 /// - The buffer must remain valid for the duration of the transfer.
1037 /// - The peripheral address must be valid for writes.
1038 pub unsafe fn write_to_peripheral<W: Word>(
1039 &self,
1040 buf: &[W],
1041 peri_addr: *mut W,
1042 options: TransferOptions,
1043 ) -> Transfer<'_> {
1044 assert!(!buf.is_empty());
1045 assert!(buf.len() <= 0x7fff);
1046
1047 let size = W::size();
1048 let byte_size = size.bytes();
1049
1050 let t = self.tcd();
1051
1052 // Reset channel state
1053 Self::reset_channel_state(t);
1054
1055 // Addresses
1056 Self::set_source_ptr(t, buf.as_ptr());
1057 Self::set_dest_ptr(t, peri_addr);
1058
1059 // Offsets: Source increments, Dest fixed
1060 Self::set_source_increment(t, size);
1061 Self::set_dest_fixed(t);
1062
1063 // Attributes: set size and explicitly disable modulo
1064 Self::set_even_transfer_size(t, size);
1065
1066 // Minor loop: transfer one word per request (match old: only set nbytes)
1067 Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);
1068
1069 // No final adjustments
1070 Self::set_no_final_adjustments(t);
1071
1072 // Major loop count = number of words
1073 let count = buf.len() as u16;
1074 Self::set_major_loop_ct_elinkno(t, count);
1075
1076 // CSR: interrupt on major loop complete and auto-clear ERQ
1077 t.tcd_csr().write(|w| {
1078 let w = if options.complete_transfer_interrupt {
1079 w.intmajor().enable()
1080 } else {
1081 w.intmajor().disable()
1082 };
1083 w.inthalf()
1084 .disable()
1085 .dreq()
1086 .erq_field_clear() // Disable request when done
1087 .esg()
1088 .normal_format()
1089 .majorelink()
1090 .disable()
1091 .eeop()
1092 .disable()
1093 .esda()
1094 .disable()
1095 .bwc()
1096 .no_stall()
1097 });
1098
1099 // Ensure all TCD writes have completed before DMA engine reads them
1100 cortex_m::asm::dsb();
1101
1102 Transfer::new(self.as_any())
1103 }
1104
    /// Read data from a peripheral register to memory.
    ///
    /// Convenience alias for [`read_from_peripheral()`](Self::read_from_peripheral).
    ///
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer.
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], options: TransferOptions) -> Transfer<'_> {
        self.read_from_peripheral(peri_addr, buf, options)
    }
1123
    /// Configure a peripheral-to-memory DMA transfer without starting it.
    ///
    /// This is a convenience wrapper around [`setup_read_from_peripheral()`](Self::setup_read_from_peripheral)
    /// that uses the default eDMA TCD register block. It programs the TCD only;
    /// neither START nor ERQ is set here.
    ///
    /// This method configures the TCD but does NOT return a `Transfer`. The caller
    /// is responsible for the complete DMA lifecycle:
    /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
    /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
    /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
    ///    [`clear_interrupt()`](Self::clear_interrupt) for cleanup
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use embassy_mcxa::dma::DmaChannel;
    /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// # let uart_rx_addr = 0x4000_0000 as *const u8;
    /// let mut buf = [0u8; 32];
    ///
    /// unsafe {
    ///     // Configure the transfer
    ///     dma_ch.setup_read(uart_rx_addr, &mut buf, EnableInterrupt::Yes);
    ///
    ///     // Start when peripheral is ready
    ///     dma_ch.enable_request();
    ///
    ///     // Wait for completion (or use interrupt)
    ///     while !dma_ch.is_done() {}
    ///
    ///     // Clean up
    ///     dma_ch.clear_done();
    ///     dma_ch.clear_interrupt();
    /// }
    /// // buf now contains received data
    /// ```
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], enable_interrupt: EnableInterrupt) {
        self.setup_read_from_peripheral(peri_addr, buf, enable_interrupt)
    }
1174
1175 /// Read data from a peripheral register to memory.
1176 ///
1177 /// The source address remains fixed (peripheral register) while
1178 /// the destination address increments through the buffer.
1179 ///
1180 /// # Arguments
1181 ///
1182 /// * `peri_addr` - Peripheral register address
1183 /// * `buf` - Destination buffer to read into
1184 /// * `options` - Transfer configuration options
1185 ///
1186 /// # Safety
1187 ///
1188 /// - The buffer must remain valid for the duration of the transfer.
1189 /// - The peripheral address must be valid for reads.
1190 pub unsafe fn read_from_peripheral<W: Word>(
1191 &self,
1192 peri_addr: *const W,
1193 buf: &mut [W],
1194 options: TransferOptions,
1195 ) -> Transfer<'_> {
1196 assert!(!buf.is_empty());
1197 assert!(buf.len() <= 0x7fff);
1198
1199 let size = W::size();
1200 let byte_size = size.bytes();
1201
1202 let t = self.tcd();
1203
1204 // Reset channel control/error/interrupt state
1205 Self::reset_channel_state(t);
1206
1207 // Source: peripheral register, fixed
1208 Self::set_source_ptr(t, peri_addr);
1209 Self::set_source_fixed(t);
1210
1211 // Destination: memory buffer, incrementing
1212 Self::set_dest_ptr(t, buf.as_mut_ptr());
1213 Self::set_dest_increment(t, size);
1214
1215 // Transfer attributes: set size and explicitly disable modulo
1216 Self::set_even_transfer_size(t, size);
1217
1218 // Minor loop: transfer one word per request, no offsets
1219 Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);
1220
1221 // Major loop count = number of words
1222 let count = buf.len() as u16;
1223 Self::set_major_loop_ct_elinkno(t, count);
1224
1225 // No address adjustment after major loop
1226 Self::set_no_final_adjustments(t);
1227
1228 // Control/status: interrupt on major complete, auto-clear ERQ when done
1229 t.tcd_csr().write(|w| {
1230 let w = if options.complete_transfer_interrupt {
1231 w.intmajor().enable()
1232 } else {
1233 w.intmajor().disable()
1234 };
1235 let w = if options.half_transfer_interrupt {
1236 w.inthalf().enable()
1237 } else {
1238 w.inthalf().disable()
1239 };
1240 w.dreq()
1241 .erq_field_clear() // Disable request when done (important for peripheral DMA)
1242 .esg()
1243 .normal_format()
1244 .majorelink()
1245 .disable()
1246 .eeop()
1247 .disable()
1248 .esda()
1249 .disable()
1250 .bwc()
1251 .no_stall()
1252 });
1253
1254 // Ensure all TCD writes have completed before DMA engine reads them
1255 cortex_m::asm::dsb();
1256
1257 Transfer::new(self.as_any())
1258 }
1259
1260 /// Configure a memory-to-peripheral DMA transfer without starting it.
1261 ///
1262 /// This configures the TCD for a memory-to-peripheral transfer but does NOT
1263 /// return a Transfer object. The caller is responsible for:
1264 /// 1. Enabling the peripheral's DMA request
1265 /// 2. Calling `enable_request()` to start the transfer
1266 /// 3. Polling `is_done()` or using interrupts to detect completion
1267 /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
1268 ///
1269 /// Use this when you need manual control over the DMA lifecycle (e.g., in
1270 /// peripheral drivers that have their own completion polling).
1271 ///
1272 /// # Arguments
1273 ///
1274 /// * `buf` - Source buffer to write from
1275 /// * `peri_addr` - Peripheral register address
1276 /// * `enable_interrupt` - Whether to enable interrupt on completion
1277 ///
1278 /// # Safety
1279 ///
1280 /// - The buffer must remain valid for the duration of the transfer.
1281 /// - The peripheral address must be valid for writes.
1282 pub unsafe fn setup_write_to_peripheral<W: Word>(
1283 &self,
1284 buf: &[W],
1285 peri_addr: *mut W,
1286 enable_interrupt: EnableInterrupt,
1287 ) {
1288 assert!(!buf.is_empty());
1289 assert!(buf.len() <= 0x7fff);
1290
1291 let size = W::size();
1292 let byte_size = size.bytes();
1293
1294 let t = self.tcd();
1295
1296 // Reset channel state
1297 Self::reset_channel_state(t);
1298
1299 // Addresses
1300 Self::set_source_ptr(t, buf.as_ptr());
1301 Self::set_dest_ptr(t, peri_addr);
1302
1303 // Offsets: Source increments, Dest fixed
1304 Self::set_source_increment(t, size);
1305 Self::set_dest_fixed(t);
1306
1307 // Attributes: set size and explicitly disable modulo
1308 Self::set_even_transfer_size(t, size);
1309
1310 // Minor loop: transfer one word per request
1311 Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);
1312
1313 // No final adjustments
1314 Self::set_no_final_adjustments(t);
1315
1316 // Major loop count = number of words
1317 let count = buf.len() as u16;
1318 Self::set_major_loop_ct_elinkno(t, count);
1319
1320 // CSR: optional interrupt on major loop complete and auto-clear ERQ
1321 t.tcd_csr().write(|w| {
1322 let w = match enable_interrupt {
1323 EnableInterrupt::Yes => w.intmajor().enable(),
1324 EnableInterrupt::No => w.intmajor().disable(),
1325 };
1326 w.inthalf()
1327 .disable()
1328 .dreq()
1329 .erq_field_clear()
1330 .esg()
1331 .normal_format()
1332 .majorelink()
1333 .disable()
1334 .eeop()
1335 .disable()
1336 .esda()
1337 .disable()
1338 .bwc()
1339 .no_stall()
1340 });
1341
1342 // Ensure all TCD writes have completed before DMA engine reads them
1343 cortex_m::asm::dsb();
1344 }
1345
1346 /// Configure a peripheral-to-memory DMA transfer without starting it.
1347 ///
1348 /// This configures the TCD for a peripheral-to-memory transfer but does NOT
1349 /// return a Transfer object. The caller is responsible for:
1350 /// 1. Enabling the peripheral's DMA request
1351 /// 2. Calling `enable_request()` to start the transfer
1352 /// 3. Polling `is_done()` or using interrupts to detect completion
1353 /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
1354 ///
1355 /// Use this when you need manual control over the DMA lifecycle (e.g., in
1356 /// peripheral drivers that have their own completion polling).
1357 ///
1358 /// # Arguments
1359 ///
1360 /// * `peri_addr` - Peripheral register address
1361 /// * `buf` - Destination buffer to read into
1362 /// * `enable_interrupt` - Whether to enable interrupt on completion
1363 ///
1364 /// # Safety
1365 ///
1366 /// - The buffer must remain valid for the duration of the transfer.
1367 /// - The peripheral address must be valid for reads.
1368 pub unsafe fn setup_read_from_peripheral<W: Word>(
1369 &self,
1370 peri_addr: *const W,
1371 buf: &mut [W],
1372 enable_interrupt: EnableInterrupt,
1373 ) {
1374 assert!(!buf.is_empty());
1375 assert!(buf.len() <= 0x7fff);
1376
1377 let size = W::size();
1378 let byte_size = size.bytes();
1379
1380 let t = self.tcd();
1381
1382 // Reset channel control/error/interrupt state
1383 Self::reset_channel_state(t);
1384
1385 // Source: peripheral register, fixed
1386 Self::set_source_ptr(t, peri_addr);
1387 Self::set_source_fixed(t);
1388
1389 // Destination: memory buffer, incrementing
1390 Self::set_dest_ptr(t, buf.as_mut_ptr());
1391 Self::set_dest_increment(t, size);
1392
1393 // Attributes: set size and explicitly disable modulo
1394 Self::set_even_transfer_size(t, size);
1395
1396 // Minor loop: transfer one word per request
1397 Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);
1398
1399 // No final adjustments
1400 Self::set_no_final_adjustments(t);
1401
1402 // Major loop count = number of words
1403 let count = buf.len() as u16;
1404 Self::set_major_loop_ct_elinkno(t, count);
1405
1406 // CSR: optional interrupt on major loop complete and auto-clear ERQ
1407 t.tcd_csr().write(|w| {
1408 let w = match enable_interrupt {
1409 EnableInterrupt::Yes => w.intmajor().enable(),
1410 EnableInterrupt::No => w.intmajor().disable(),
1411 };
1412 w.inthalf()
1413 .disable()
1414 .dreq()
1415 .erq_field_clear()
1416 .esg()
1417 .normal_format()
1418 .majorelink()
1419 .disable()
1420 .eeop()
1421 .disable()
1422 .esda()
1423 .disable()
1424 .bwc()
1425 .no_stall()
1426 });
1427
1428 // Ensure all TCD writes have completed before DMA engine reads them
1429 cortex_m::asm::dsb();
1430 }
1431
    /// Configure the integrated channel MUX to use the given typed
    /// DMA request source (e.g., [`Lpuart2TxRequest`] or [`Lpuart2RxRequest`]).
    ///
    /// This is the type-safe version that uses marker types to ensure
    /// compile-time verification of request source validity.
    ///
    /// # Safety
    ///
    /// The channel must be properly configured before enabling requests.
    /// The caller must ensure the DMA request source matches the peripheral
    /// that will drive this channel.
    ///
    /// # Note
    ///
    /// The NXP SDK requires a two-step write sequence: first clear
    /// the mux to 0, then set the actual source. This is a hardware
    /// requirement on eDMA4 for the mux to properly latch.
    ///
    /// # Example
    ///
    /// ```ignore
    /// use embassy_mcxa::dma::{DmaChannel, Lpuart2RxRequest};
    ///
    /// // Type-safe: compiler verifies this is a valid DMA request type
    /// unsafe {
    ///     channel.set_request_source::<Lpuart2RxRequest>();
    /// }
    /// ```
    #[inline]
    pub unsafe fn set_request_source<R: DmaRequest>(&self) {
        // Two-step write per NXP SDK: clear to 0, then set actual source.
        // The write order is intentional; do not merge these into one write.
        self.tcd().ch_mux().write(|w| w.src().bits(0));
        cortex_m::asm::dsb(); // Ensure the clear completes before setting new source
        self.tcd().ch_mux().write(|w| w.src().bits(R::REQUEST_NUMBER));
    }
1467
1468 /// Enable hardware requests for this channel (ERQ=1).
1469 ///
1470 /// # Safety
1471 ///
1472 /// The channel must be properly configured before enabling requests.
1473 pub unsafe fn enable_request(&self) {
1474 let t = self.tcd();
1475 t.ch_csr().modify(|_, w| w.erq().enable());
1476 }
1477
1478 /// Disable hardware requests for this channel (ERQ=0).
1479 ///
1480 /// # Safety
1481 ///
1482 /// Disabling requests on an active transfer may leave the transfer incomplete.
1483 pub unsafe fn disable_request(&self) {
1484 let t = self.tcd();
1485 t.ch_csr().modify(|_, w| w.erq().disable());
1486 }
1487
1488 /// Return true if the channel's DONE flag is set.
1489 pub fn is_done(&self) -> bool {
1490 let t = self.tcd();
1491 t.ch_csr().read().done().bit_is_set()
1492 }
1493
1494 /// Clear the DONE flag for this channel.
1495 ///
1496 /// Uses modify to preserve other bits (especially ERQ) unlike write
1497 /// which would clear ERQ and halt an active transfer.
1498 ///
1499 /// # Safety
1500 ///
1501 /// Clearing DONE while a transfer is in progress may cause undefined behavior.
1502 pub unsafe fn clear_done(&self) {
1503 let t = self.tcd();
1504 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
1505 }
1506
1507 /// Clear the channel interrupt flag (CH_INT.INT).
1508 ///
1509 /// # Safety
1510 ///
1511 /// Must be called from the correct interrupt context or with interrupts disabled.
1512 pub unsafe fn clear_interrupt(&self) {
1513 let t = self.tcd();
1514 t.ch_int().write(|w| w.int().clear_bit_by_one());
1515 }
1516
1517 /// Trigger a software start for this channel.
1518 ///
1519 /// # Safety
1520 ///
1521 /// The channel must be properly configured with a valid TCD before triggering.
1522 pub unsafe fn trigger_start(&self) {
1523 let t = self.tcd();
1524 t.tcd_csr().modify(|_, w| w.start().channel_started());
1525 }
1526
    /// Get the transfer-complete `AtomicWaker` for this channel.
    ///
    /// The waker is registered by the async [`Transfer`] future and woken
    /// from the channel's interrupt handler.
    pub fn waker(&self) -> &'static AtomicWaker {
        &STATES[C::INDEX].waker
    }
1531
    /// Enable the interrupt for this channel in the NVIC.
    ///
    /// Each DMA channel has its own interrupt vector; unmasking it lets the
    /// completion/half-transfer handlers wake pending futures.
    pub fn enable_interrupt(&self) {
        // SAFETY: NVIC::unmask is unsound only if a mask-based critical
        // section relies on this vector staying disabled.
        // NOTE(review): assumed not to be the case in this crate — confirm.
        unsafe {
            cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
        }
    }
1538
1539 /// Enable Major Loop Linking.
1540 ///
1541 /// When the major loop completes, the hardware will trigger a service request
1542 /// on `link_ch`.
1543 ///
1544 /// # Arguments
1545 ///
1546 /// * `link_ch` - Target channel index (0-7) to link to
1547 ///
1548 /// # Safety
1549 ///
1550 /// The channel must be properly configured before setting up linking.
1551 pub unsafe fn set_major_link(&self, link_ch: usize) {
1552 let t = self.tcd();
1553 t.tcd_csr()
1554 .modify(|_, w| w.majorelink().enable().majorlinkch().bits(link_ch as u8));
1555 }
1556
1557 /// Disable Major Loop Linking.
1558 ///
1559 /// Removes any major loop channel linking previously configured.
1560 ///
1561 /// # Safety
1562 ///
1563 /// The caller must ensure this doesn't disrupt an active transfer that
1564 /// depends on the linking.
1565 pub unsafe fn clear_major_link(&self) {
1566 let t = self.tcd();
1567 t.tcd_csr().modify(|_, w| w.majorelink().disable());
1568 }
1569
1570 /// Enable Minor Loop Linking.
1571 ///
1572 /// After each minor loop, the hardware will trigger a service request
1573 /// on `link_ch`.
1574 ///
1575 /// # Arguments
1576 ///
1577 /// * `link_ch` - Target channel index (0-7) to link to
1578 ///
1579 /// # Note
1580 ///
1581 /// This rewrites CITER and BITER registers to the ELINKYES format.
1582 /// It preserves the current loop count.
1583 ///
1584 /// # Safety
1585 ///
1586 /// The channel must be properly configured before setting up linking.
1587 pub unsafe fn set_minor_link(&self, link_ch: usize) {
1588 let t = self.tcd();
1589
1590 // Read current CITER (assuming ELINKNO format initially)
1591 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1592 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1593
1594 // Write back using ELINKYES format
1595 t.tcd_citer_elinkyes().write(|w| {
1596 w.citer()
1597 .bits(current_citer)
1598 .elink()
1599 .enable()
1600 .linkch()
1601 .bits(link_ch as u8)
1602 });
1603
1604 t.tcd_biter_elinkyes().write(|w| {
1605 w.biter()
1606 .bits(current_biter)
1607 .elink()
1608 .enable()
1609 .linkch()
1610 .bits(link_ch as u8)
1611 });
1612 }
1613
1614 /// Disable Minor Loop Linking.
1615 ///
1616 /// Removes any minor loop channel linking previously configured.
1617 /// This rewrites CITER and BITER registers to the ELINKNO format,
1618 /// preserving the current loop count.
1619 ///
1620 /// # Safety
1621 ///
1622 /// The caller must ensure this doesn't disrupt an active transfer that
1623 /// depends on the linking.
1624 pub unsafe fn clear_minor_link(&self) {
1625 let t = self.tcd();
1626
1627 // Read current CITER (could be in either format, but we only need the count)
1628 // Note: In ELINKYES format, citer is 9 bits; in ELINKNO, it's 15 bits.
1629 // We read from ELINKNO which will give us the combined value.
1630 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1631 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1632
1633 // Write back using ELINKNO format (disabling link)
1634 t.tcd_citer_elinkno()
1635 .write(|w| w.citer().bits(current_citer).elink().disable());
1636
1637 t.tcd_biter_elinkno()
1638 .write(|w| w.biter().bits(current_biter).elink().disable());
1639 }
1640
1641 /// Load a TCD from memory into the hardware channel registers.
1642 ///
1643 /// This is useful for scatter/gather and ping-pong transfers where
1644 /// TCDs are prepared in RAM and then loaded into the hardware.
1645 ///
1646 /// # Safety
1647 ///
1648 /// - The TCD must be properly initialized.
1649 /// - The caller must ensure no concurrent access to the same channel.
1650 pub unsafe fn load_tcd(&self, tcd: &Tcd) {
1651 let t = self.tcd();
1652 t.tcd_saddr().write(|w| w.saddr().bits(tcd.saddr));
1653 t.tcd_soff().write(|w| w.soff().bits(tcd.soff as u16));
1654 t.tcd_attr().write(|w| w.bits(tcd.attr));
1655 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(tcd.nbytes));
1656 t.tcd_slast_sda().write(|w| w.slast_sda().bits(tcd.slast as u32));
1657 t.tcd_daddr().write(|w| w.daddr().bits(tcd.daddr));
1658 t.tcd_doff().write(|w| w.doff().bits(tcd.doff as u16));
1659 t.tcd_citer_elinkno().write(|w| w.citer().bits(tcd.citer));
1660 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(tcd.dlast_sga as u32));
1661 t.tcd_csr().write(|w| w.bits(tcd.csr));
1662 t.tcd_biter_elinkno().write(|w| w.biter().bits(tcd.biter));
1663 }
1664}
1665
/// In-memory representation of a Transfer Control Descriptor (TCD).
///
/// This matches the hardware layout (32 bytes). The 32-byte alignment is
/// required so a descriptor can be the target of a scatter/gather load
/// (`TCDn_DLAST_SGA` must lie on a 32-byte boundary).
#[repr(C, align(32))]
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Tcd {
    /// Source address (TCD_SADDR).
    pub saddr: u32,
    /// Signed source address offset applied after each read (TCD_SOFF).
    pub soff: i16,
    /// Transfer attributes: source/destination size, modulo (TCD_ATTR).
    pub attr: u16,
    /// Byte count per minor loop (TCD_NBYTES).
    pub nbytes: u32,
    /// Signed source adjustment after major loop completion (TCD_SLAST).
    pub slast: i32,
    /// Destination address (TCD_DADDR).
    pub daddr: u32,
    /// Signed destination address offset applied after each write (TCD_DOFF).
    pub doff: i16,
    /// Current major loop iteration count (TCD_CITER).
    pub citer: u16,
    /// Signed destination adjustment or scatter/gather address (TCD_DLAST_SGA).
    pub dlast_sga: i32,
    /// Control/status bits (TCD_CSR).
    pub csr: u16,
    /// Beginning major loop iteration count (TCD_BITER).
    pub biter: u16,
}
1685
/// Per-channel wakers shared between the interrupt handlers and futures.
struct State {
    /// Waker for transfer complete interrupt
    waker: AtomicWaker,
    /// Waker for half-transfer interrupt
    half_waker: AtomicWaker,
}
1692
impl State {
    /// Create an empty state with no wakers registered.
    /// `const` so it can initialize the `STATES` static.
    const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
            half_waker: AtomicWaker::new(),
        }
    }
}
1701
1702static STATES: [State; 8] = [
1703 State::new(),
1704 State::new(),
1705 State::new(),
1706 State::new(),
1707 State::new(),
1708 State::new(),
1709 State::new(),
1710 State::new(),
1711];
1712
/// Look up the transfer-complete waker for channel `idx` (used by the IRQ handlers).
pub(crate) fn waker(idx: usize) -> &'static AtomicWaker {
    &STATES[idx].waker
}
1716
/// Look up the half-transfer waker for channel `idx` (used by the IRQ handlers).
pub(crate) fn half_waker(idx: usize) -> &'static AtomicWaker {
    &STATES[idx].half_waker
}
1720
1721// ============================================================================
1722// Async Transfer Future
1723// ============================================================================
1724
/// An in-progress DMA transfer.
///
/// This type implements `Future` and can be `.await`ed to wait for the
/// transfer to complete. Dropping the transfer will abort it.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    /// Type-erased handle to the channel this transfer runs on.
    channel: AnyChannel,
    /// Ties the transfer to the borrow of the buffers/channel it was
    /// created from, so they cannot be reused while the DMA is active.
    _phantom: core::marker::PhantomData<&'a ()>,
}
1734
impl<'a> Transfer<'a> {
    /// Create a new transfer for the given channel.
    ///
    /// The caller must have already configured and started the DMA channel.
    pub(crate) fn new(channel: AnyChannel) -> Self {
        Self {
            channel,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Check if the transfer is still running (DONE flag not set).
    pub fn is_running(&self) -> bool {
        !self.channel.is_done()
    }

    /// Get the remaining major-loop transfer count (CITER, ELINKNO view).
    // NOTE(review): if minor-loop linking is active the register is in
    // ELINKYES format and this read folds the link bits into the count —
    // confirm this is only used on non-linked transfers.
    pub fn remaining(&self) -> u16 {
        let t = self.channel.tcd();
        t.tcd_citer_elinkno().read().citer().bits()
    }

    /// Block until the transfer completes.
    pub fn blocking_wait(self) {
        while self.is_running() {
            core::hint::spin_loop();
        }

        // Ensure all DMA writes are visible
        fence(Ordering::SeqCst);

        // Don't run drop (which would abort)
        core::mem::forget(self);
    }

    /// Wait for the half-transfer interrupt asynchronously.
    ///
    /// This is useful for double-buffering scenarios where you want to process
    /// the first half of the buffer while the second half is being filled.
    ///
    /// Returns `true` if the half-transfer occurred, `false` if the transfer
    /// completed before the half-transfer interrupt.
    ///
    /// # Note
    ///
    /// The transfer must be configured with `TransferOptions::half_transfer_interrupt = true`
    /// for this method to work correctly.
    pub async fn wait_half(&mut self) -> Result<bool, TransferErrorRaw> {
        use core::future::poll_fn;

        poll_fn(|cx| {
            let state = &STATES[self.channel.index];

            // Register the half-transfer waker
            // (before reading any status, so an interrupt firing between the
            // status check and returning Pending still wakes this task)
            state.half_waker.register(cx.waker());

            // Check if there's an error
            let t = self.channel.tcd();
            let es = t.ch_es().read();
            if es.err().is_error() {
                // Currently, all error fields are in the lowest 8 bits, as-casting truncates
                let errs = es.bits() as u8;
                return Poll::Ready(Err(TransferErrorRaw(errs)));
            }

            // Check if we're past the half-way point
            // (CITER counts down from BITER, so "past half" means citer <= biter/2)
            let biter = t.tcd_biter_elinkno().read().biter().bits();
            let citer = t.tcd_citer_elinkno().read().citer().bits();
            let half_point = biter / 2;

            if self.channel.is_done() {
                // Transfer completed before half-transfer
                Poll::Ready(Ok(false))
            } else if citer <= half_point {
                // We're past the half-way point
                fence(Ordering::SeqCst);
                Poll::Ready(Ok(true))
            } else {
                Poll::Pending
            }
        })
        .await
    }

    /// Abort the transfer: stop further service requests and clear
    /// interrupt/DONE state so the channel can be reprogrammed.
    // NOTE(review): clearing ERQ stops new service requests; whether an
    // in-flight minor loop still completes is hardware behavior — confirm
    // against the eDMA chapter if a hard stop is required.
    fn abort(&mut self) {
        let t = self.channel.tcd();

        // Disable channel requests
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear any pending interrupt
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Clear DONE flag
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);
    }
}
1835
/// Raw transfer error bits (snapshot of the low byte of CH_ES).
///
/// Individual errors can be queried with the `has_*` methods, or all
/// contained errors can be iterated via [`err_iter()`](Self::err_iter).
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[derive(Copy, Clone, Debug)]
pub struct TransferErrorRaw(u8);

/// Iterator over the [`TransferError`]s contained in a [`TransferErrorRaw`].
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[derive(Copy, Clone, Debug)]
pub struct TransferErrorRawIter(u8);

impl TransferErrorRaw {
    /// Bit-position to error-variant mapping (matches the `has_*` methods).
    const MAP: &[(u8, TransferError)] = &[
        (1 << 0, TransferError::DestinationBus),
        (1 << 1, TransferError::SourceBus),
        (1 << 2, TransferError::ScatterGatherConfiguration),
        (1 << 3, TransferError::NbytesCiterConfiguration),
        (1 << 4, TransferError::DestinationOffset),
        (1 << 5, TransferError::DestinationAddress),
        (1 << 6, TransferError::SourceOffset),
        (1 << 7, TransferError::SourceAddress),
    ];

    /// Convert to an iterator of contained errors
    pub fn err_iter(self) -> TransferErrorRawIter {
        TransferErrorRawIter(self.0)
    }

    /// Destination Bus Error
    #[inline]
    pub fn has_destination_bus_err(&self) -> bool {
        (self.0 & (1 << 0)) != 0
    }

    /// Source Bus Error
    #[inline]
    pub fn has_source_bus_err(&self) -> bool {
        (self.0 & (1 << 1)) != 0
    }

    /// Indicates that `TCDn_DLAST_SGA` is not on a 32-byte boundary. This field is
    /// checked at the beginning of a scatter/gather operation after major loop completion
    /// if `TCDn_CSR[ESG]` is enabled.
    #[inline]
    pub fn has_scatter_gather_configuration_err(&self) -> bool {
        (self.0 & (1 << 2)) != 0
    }

    /// This error indicates that one of the following has occurred:
    ///
    /// * `TCDn_NBYTES` is not a multiple of `TCDn_ATTR[SSIZE]` and `TCDn_ATTR[DSIZE]`
    /// * `TCDn_CITER[CITER]` is equal to zero
    /// * `TCDn_CITER[ELINK]` is not equal to `TCDn_BITER[ELINK]`
    #[inline]
    pub fn has_nbytes_citer_configuration_err(&self) -> bool {
        (self.0 & (1 << 3)) != 0
    }

    /// `TCDn_DOFF` is inconsistent with `TCDn_ATTR[DSIZE]`.
    #[inline]
    pub fn has_destination_offset_err(&self) -> bool {
        (self.0 & (1 << 4)) != 0
    }

    /// `TCDn_DADDR` is inconsistent with `TCDn_ATTR[DSIZE]`.
    #[inline]
    pub fn has_destination_address_err(&self) -> bool {
        (self.0 & (1 << 5)) != 0
    }

    /// `TCDn_SOFF` is inconsistent with `TCDn_ATTR[SSIZE]`.
    #[inline]
    pub fn has_source_offset_err(&self) -> bool {
        (self.0 & (1 << 6)) != 0
    }

    /// `TCDn_SADDR` is inconsistent with `TCDn_ATTR[SSIZE]`
    #[inline]
    pub fn has_source_address_err(&self) -> bool {
        (self.0 & (1 << 7)) != 0
    }
}

impl Iterator for TransferErrorRawIter {
    type Item = TransferError;

    fn next(&mut self) -> Option<Self::Item> {
        if self.0 == 0 {
            return None;
        }

        for (mask, var) in TransferErrorRaw::MAP {
            // Test the bit with `&`. (The previous `self.0 | mask != 0` was
            // always true, so the iterator yielded `DestinationBus` forever
            // and never terminated.)
            if self.0 & mask != 0 {
                // clear the bit
                self.0 &= !mask;
                // and return the answer
                return Some(*var);
            }
        }

        // Unreachable: `self.0 != 0` implies at least one mapped bit is set.
        None
    }
}

/// A decoded eDMA channel error, one per CH_ES error bit.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TransferError {
    /// `TCDn_SADDR` is inconsistent with `TCDn_ATTR[SSIZE]`
    SourceAddress,
    /// `TCDn_SOFF` is inconsistent with `TCDn_ATTR[SSIZE]`.
    SourceOffset,
    /// `TCDn_DADDR` is inconsistent with `TCDn_ATTR[DSIZE]`.
    DestinationAddress,
    /// `TCDn_DOFF` is inconsistent with `TCDn_ATTR[DSIZE]`.
    DestinationOffset,
    /// This error indicates that one of the following has occurred:
    ///
    /// * `TCDn_NBYTES` is not a multiple of `TCDn_ATTR[SSIZE]` and `TCDn_ATTR[DSIZE]`
    /// * `TCDn_CITER[CITER]` is equal to zero
    /// * `TCDn_CITER[ELINK]` is not equal to `TCDn_BITER[ELINK]`
    NbytesCiterConfiguration,
    /// Indicates that `TCDn_DLAST_SGA` is not on a 32-byte boundary. This field is
    /// checked at the beginning of a scatter/gather operation after major loop completion
    /// if `TCDn_CSR[ESG]` is enabled.
    ScatterGatherConfiguration,
    /// Source Bus Error
    SourceBus,
    /// Destination Bus Error
    DestinationBus,
}
1966
1967impl<'a> Unpin for Transfer<'a> {}
1968
1969impl<'a> Future for Transfer<'a> {
1970 type Output = Result<(), TransferErrorRaw>;
1971
1972 fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
1973 let state = &STATES[self.channel.index];
1974
1975 // Register waker first
1976 state.waker.register(cx.waker());
1977
1978 let done = self.channel.is_done();
1979
1980 if done {
1981 // Ensure all DMA writes are visible before returning
1982 fence(Ordering::SeqCst);
1983
1984 let es = self.channel.tcd().ch_es().read();
1985 if es.err().is_error() {
1986 // Currently, all error fields are in the lowest 8 bits, as-casting truncates
1987 let errs = es.bits() as u8;
1988 Poll::Ready(Err(TransferErrorRaw(errs)))
1989 } else {
1990 Poll::Ready(Ok(()))
1991 }
1992 } else {
1993 Poll::Pending
1994 }
1995 }
1996}
1997
1998impl<'a> Drop for Transfer<'a> {
1999 fn drop(&mut self) {
2000 // Only abort if the transfer is still running
2001 // If already complete, no need to abort
2002 if self.is_running() {
2003 self.abort();
2004
2005 // Wait for abort to complete
2006 while self.is_running() {
2007 core::hint::spin_loop();
2008 }
2009 }
2010
2011 fence(Ordering::SeqCst);
2012 }
2013}
2014
2015// ============================================================================
2016// Ring Buffer for Circular DMA
2017// ============================================================================
2018
2019/// A ring buffer for continuous DMA reception.
2020///
2021/// This structure manages a circular DMA transfer, allowing continuous
2022/// reception of data without losing bytes between reads. It uses both
2023/// half-transfer and complete-transfer interrupts to track available data.
2024///
2025/// # Example
2026///
2027/// ```no_run
2028/// use embassy_mcxa::dma::{DmaChannel, RingBuffer, TransferOptions};
2029///
2030/// static mut RX_BUF: [u8; 64] = [0; 64];
2031///
2032/// let dma_ch = DmaChannel::new(p.DMA_CH0);
2033/// let ring_buf = unsafe {
2034/// dma_ch.setup_circular_read(
2035/// uart_rx_addr,
2036/// &mut RX_BUF,
2037/// )
2038/// };
2039///
2040/// // Read data as it arrives
2041/// let mut buf = [0u8; 16];
2042/// let n = ring_buf.read(&mut buf).await?;
2043/// ```
pub struct RingBuffer<'a, W: Word> {
    /// Handle to the DMA channel that fills this buffer.
    channel: AnyChannel,
    /// Buffer pointer. We use NonNull instead of &mut because DMA acts like
    /// a separate thread writing to this buffer, and &mut claims exclusive
    /// access which the compiler could optimize incorrectly.
    buf: NonNull<[W]>,
    /// Buffer length cached for convenience
    buf_len: usize,
    /// Read position in the buffer (consumer side). Stored with `Release`
    /// and loaded with `Acquire` by the reader methods.
    read_pos: AtomicUsize,
    /// Phantom data to tie the lifetime to the original buffer
    _lt: PhantomData<&'a mut [W]>,
}
2057
2058impl<'a, W: Word> RingBuffer<'a, W> {
2059 /// Create a new ring buffer for the given channel and buffer.
2060 ///
2061 /// # Safety
2062 ///
2063 /// The caller must ensure:
2064 /// - The DMA channel has been configured for circular transfer
2065 /// - The buffer remains valid for the lifetime of the ring buffer
2066 /// - Only one RingBuffer exists per DMA channel at a time
2067 pub(crate) unsafe fn new(channel: AnyChannel, buf: &'a mut [W]) -> Self {
2068 let buf_len = buf.len();
2069 Self {
2070 channel,
2071 buf: NonNull::from(buf),
2072 buf_len,
2073 read_pos: AtomicUsize::new(0),
2074 _lt: PhantomData,
2075 }
2076 }
2077
2078 /// Get a slice reference to the buffer.
2079 ///
2080 /// # Safety
2081 ///
2082 /// The caller must ensure that DMA is not actively writing to the
2083 /// portion of the buffer being accessed, or that the access is
2084 /// appropriately synchronized.
2085 #[inline]
2086 unsafe fn buf_slice(&self) -> &[W] {
2087 self.buf.as_ref()
2088 }
2089
2090 /// Get the current DMA write position in the buffer.
2091 ///
2092 /// This reads the current destination address from the DMA controller
2093 /// and calculates the buffer offset.
2094 fn dma_write_pos(&self) -> usize {
2095 let t = self.channel.tcd();
2096 let daddr = t.tcd_daddr().read().daddr().bits() as usize;
2097 let buf_start = self.buf.as_ptr() as *const W as usize;
2098
2099 // Calculate offset from buffer start
2100 let offset = daddr.wrapping_sub(buf_start) / core::mem::size_of::<W>();
2101
2102 // Ensure we're within bounds (DMA wraps around)
2103 offset % self.buf_len
2104 }
2105
2106 /// Returns the number of bytes available to read.
2107 pub fn available(&self) -> usize {
2108 let write_pos = self.dma_write_pos();
2109 let read_pos = self.read_pos.load(Ordering::Acquire);
2110
2111 if write_pos >= read_pos {
2112 write_pos - read_pos
2113 } else {
2114 self.buf_len - read_pos + write_pos
2115 }
2116 }
2117
2118 /// Check if the buffer has overrun (data was lost).
2119 ///
2120 /// This happens when DMA writes faster than the application reads.
2121 pub fn is_overrun(&self) -> bool {
2122 // In a true overrun, the DMA would have wrapped around and caught up
2123 // to our read position. We can detect this by checking if available()
2124 // equals the full buffer size (minus 1 to distinguish from empty).
2125 self.available() >= self.buf_len - 1
2126 }
2127
2128 /// Read data from the ring buffer into the provided slice.
2129 ///
2130 /// Returns the number of elements read, which may be less than
2131 /// `dst.len()` if not enough data is available.
2132 ///
2133 /// This method does not block; use `read_async()` for async waiting.
2134 pub fn read_immediate(&self, dst: &mut [W]) -> usize {
2135 let write_pos = self.dma_write_pos();
2136 let read_pos = self.read_pos.load(Ordering::Acquire);
2137
2138 // Calculate available bytes
2139 let available = if write_pos >= read_pos {
2140 write_pos - read_pos
2141 } else {
2142 self.buf_len - read_pos + write_pos
2143 };
2144
2145 let to_read = dst.len().min(available);
2146 if to_read == 0 {
2147 return 0;
2148 }
2149
2150 // Safety: We only read from portions of the buffer that DMA has
2151 // already written to (between read_pos and write_pos).
2152 let buf = unsafe { self.buf_slice() };
2153
2154 // Read data, handling wrap-around
2155 let first_chunk = (self.buf_len - read_pos).min(to_read);
2156 dst[..first_chunk].copy_from_slice(&buf[read_pos..read_pos + first_chunk]);
2157
2158 if to_read > first_chunk {
2159 let second_chunk = to_read - first_chunk;
2160 dst[first_chunk..to_read].copy_from_slice(&buf[..second_chunk]);
2161 }
2162
2163 // Update read position
2164 let new_read_pos = (read_pos + to_read) % self.buf_len;
2165 self.read_pos.store(new_read_pos, Ordering::Release);
2166
2167 to_read
2168 }
2169
2170 /// Read data from the ring buffer asynchronously.
2171 ///
2172 /// This waits until at least one byte is available, then reads as much
2173 /// as possible into the destination buffer.
2174 ///
2175 /// Returns the number of elements read.
2176 pub async fn read(&self, dst: &mut [W]) -> Result<usize, Error> {
2177 use core::future::poll_fn;
2178
2179 if dst.is_empty() {
2180 return Ok(0);
2181 }
2182
2183 poll_fn(|cx| {
2184 // Check for overrun
2185 if self.is_overrun() {
2186 return Poll::Ready(Err(Error::Overrun));
2187 }
2188
2189 // Try to read immediately
2190 let n = self.read_immediate(dst);
2191 if n > 0 {
2192 return Poll::Ready(Ok(n));
2193 }
2194
2195 // Register wakers for both half and complete interrupts
2196 let state = &STATES[self.channel.index()];
2197 state.waker.register(cx.waker());
2198 state.half_waker.register(cx.waker());
2199
2200 // Check again after registering waker (avoid race)
2201 let n = self.read_immediate(dst);
2202 if n > 0 {
2203 return Poll::Ready(Ok(n));
2204 }
2205
2206 Poll::Pending
2207 })
2208 .await
2209 }
2210
2211 /// Clear the ring buffer, discarding all unread data.
2212 pub fn clear(&self) {
2213 let write_pos = self.dma_write_pos();
2214 self.read_pos.store(write_pos, Ordering::Release);
2215 }
2216
2217 /// Stop the DMA transfer and consume the ring buffer.
2218 ///
2219 /// Returns any remaining unread data count.
2220 pub fn stop(mut self) -> usize {
2221 let res = self.teardown();
2222 drop(self);
2223 res
2224 }
2225
2226 /// Stop the DMA transfer. Intended to be called by `stop()` or `Drop`.
2227 fn teardown(&mut self) -> usize {
2228 let available = self.available();
2229
2230 // Disable the channel
2231 let t = self.channel.tcd();
2232 t.ch_csr().modify(|_, w| w.erq().disable());
2233
2234 // Clear flags
2235 t.ch_int().write(|w| w.int().clear_bit_by_one());
2236 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
2237
2238 fence(Ordering::SeqCst);
2239
2240 available
2241 }
2242}
2243
impl<'a, W: Word> Drop for RingBuffer<'a, W> {
    /// Stop the circular DMA transfer when the ring buffer goes out of scope.
    fn drop(&mut self) {
        // Disables the channel request and clears its flags; the unread-count
        // return value is irrelevant here.
        self.teardown();
    }
}
2249
impl<C: Channel> DmaChannel<C> {
    /// Set up a circular DMA transfer for continuous peripheral-to-memory reception.
    ///
    /// This configures the DMA channel for circular operation with both half-transfer
    /// and complete-transfer interrupts enabled. The transfer runs continuously until
    /// stopped via [`RingBuffer::stop()`].
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address to read from
    /// * `buf` - Destination buffer (should be power-of-2 size for best efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to read received data.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - The peripheral address must be valid for reads.
    /// - The peripheral's DMA request must be configured to trigger this channel.
    pub unsafe fn setup_circular_read<'a, W: Word>(&self, peri_addr: *const W, buf: &'a mut [W]) -> RingBuffer<'a, W> {
        assert!(!buf.is_empty());
        // Length cap presumably matches the 15-bit CITER/BITER field in the
        // ELINKNO layout — confirm against the eDMA register description.
        assert!(buf.len() <= 0x7fff);
        // For circular mode, buffer size should ideally be power of 2
        // but we don't enforce it

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        Self::reset_channel_state(t);

        // Source: peripheral register, fixed
        Self::set_source_ptr(t, peri_addr);
        Self::set_source_fixed(t);

        // Destination: memory buffer, incrementing
        Self::set_dest_ptr(t, buf.as_mut_ptr());
        Self::set_dest_increment(t, size);

        // Transfer attributes
        Self::set_even_transfer_size(t, size);

        // Minor loop: transfer one word per request
        Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);

        // Major loop count = buffer size
        let count = buf.len() as u16;
        Self::set_major_loop_ct_elinkno(t, count);

        // After major loop: reset destination to buffer start (circular)
        // The negative adjustment winds DADDR back to the start of `buf`.
        let buf_bytes = (buf.len() * byte_size) as i32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); // Source doesn't change
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits((-buf_bytes) as u32));

        // Control/status: enable both half and complete interrupts, NO DREQ (continuous)
        t.tcd_csr().write(|w| {
            w.intmajor()
                .enable()
                .inthalf()
                .enable()
                .dreq()
                .channel_not_affected() // Don't clear ERQ on complete (circular)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure the TCD writes above complete before the request is enabled.
        cortex_m::asm::dsb();

        // Enable the channel request
        t.ch_csr().modify(|_, w| w.erq().enable());

        // Enable NVIC interrupt for this channel so async wakeups work
        self.enable_interrupt();

        // SAFETY (RingBuffer::new): the channel was configured for circular
        // transfer above; `buf` outlives the RingBuffer via `'a`; channel
        // exclusivity is the caller's obligation per this function's contract.
        RingBuffer::new(self.as_any(), buf)
    }
}
2339
2340// ============================================================================
2341// Scatter-Gather Builder
2342// ============================================================================
2343
/// Maximum number of TCDs in a scatter-gather chain.
///
/// Bounds the fixed TCD pool embedded in [`ScatterGatherBuilder`].
pub const MAX_SCATTER_GATHER_TCDS: usize = 16;
2346
2347/// A builder for constructing scatter-gather DMA transfer chains.
2348///
2349/// This provides a type-safe way to build TCD chains for scatter-gather
2350/// transfers without manual TCD manipulation.
2351///
2352/// # Example
2353///
2354/// ```no_run
2355/// use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
2356///
2357/// let mut builder = ScatterGatherBuilder::<u32>::new();
2358///
2359/// // Add transfer segments
2360/// builder.add_transfer(&src1, &mut dst1);
2361/// builder.add_transfer(&src2, &mut dst2);
2362/// builder.add_transfer(&src3, &mut dst3);
2363///
2364/// // Build and execute
2365/// let transfer = unsafe { builder.build(&dma_ch).unwrap() };
2366/// transfer.await;
2367/// ```
pub struct ScatterGatherBuilder<'a, W: Word> {
    /// TCD pool (must be 32-byte aligned)
    ///
    /// NOTE(review): nothing in this struct enforces 32-byte alignment — it
    /// must come from `Tcd`'s own representation. Confirm `Tcd` carries
    /// `repr(align(32))`; a misaligned `DLAST_SGA` raises a scatter/gather
    /// configuration error at runtime.
    tcds: [Tcd; MAX_SCATTER_GATHER_TCDS],
    /// Number of TCDs configured
    count: usize,
    /// Phantom marker for word type
    _phantom: core::marker::PhantomData<W>,

    /// Ties `'a` to the source/destination buffers passed to `add_transfer`.
    _plt: core::marker::PhantomData<&'a mut W>,
}
2378
impl<'a, W: Word> ScatterGatherBuilder<'a, W> {
    /// Create a new scatter-gather builder with an empty TCD pool.
    pub fn new() -> Self {
        ScatterGatherBuilder {
            tcds: [Tcd::default(); MAX_SCATTER_GATHER_TCDS],
            count: 0,
            _phantom: core::marker::PhantomData,
            _plt: core::marker::PhantomData,
        }
    }

    /// Add a memory-to-memory transfer segment to the chain.
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer for this segment
    /// * `dst` - Destination buffer for this segment
    ///
    /// # Panics
    ///
    /// Panics if the maximum number of segments (16) is exceeded, if `src`
    /// is empty, or if `dst` is shorter than `src`.
    pub fn add_transfer<'b: 'a>(&mut self, src: &'b [W], dst: &'b mut [W]) -> &mut Self {
        assert!(self.count < MAX_SCATTER_GATHER_TCDS, "Too many scatter-gather segments");
        assert!(!src.is_empty());
        assert!(dst.len() >= src.len());

        let size = W::size();
        let byte_size = size.bytes();
        let hw_size = size.to_hw_size();
        // Whole segment moves in one major iteration (CITER = BITER = 1).
        let nbytes = (src.len() * byte_size) as u32;

        // Build the TCD for this segment
        self.tcds[self.count] = Tcd {
            saddr: src.as_ptr() as u32,
            soff: byte_size as i16,
            attr: ((hw_size as u16) << 8) | (hw_size as u16), // SSIZE | DSIZE
            nbytes,
            slast: 0,
            daddr: dst.as_mut_ptr() as u32,
            doff: byte_size as i16,
            citer: 1,
            dlast_sga: 0, // Will be filled in by build()
            csr: 0x0002, // INTMAJOR only (ESG will be set for non-last TCDs)
            biter: 1,
        };

        self.count += 1;
        self
    }

    /// Get the number of transfer segments added.
    pub fn segment_count(&self) -> usize {
        self.count
    }

    /// Build the scatter-gather chain and start the transfer.
    ///
    /// # Arguments
    ///
    /// * `channel` - The DMA channel to use for the transfer
    ///
    /// # Returns
    ///
    /// A `Transfer` future that completes when the entire chain has executed.
    ///
    /// # Errors
    ///
    /// Returns [`Error::Configuration`] if no segments were added.
    ///
    /// NOTE(review): the returned `Transfer<'a>` borrows the data buffers
    /// (via `'a`) but NOT this builder, even though the hardware keeps raw
    /// pointers into `self.tcds` for chained segments. Moving or dropping
    /// the builder while the chain is in flight would leave the engine
    /// following dangling TCD pointers — confirm callers keep the builder
    /// alive and in place, or consider tying the result to `&mut self`.
    pub fn build<C: Channel>(&mut self, channel: &DmaChannel<C>) -> Result<Transfer<'a>, Error> {
        if self.count == 0 {
            return Err(Error::Configuration);
        }

        // Link TCDs together
        //
        // CSR bit definitions:
        // - START = bit 0 = 0x0001 (triggers transfer when set)
        // - INTMAJOR = bit 1 = 0x0002 (interrupt on major loop complete)
        // - ESG = bit 4 = 0x0010 (enable scatter-gather, loads next TCD on complete)
        //
        // When hardware loads a TCD via scatter-gather (ESG), it copies the TCD's
        // CSR directly into the hardware register. If START is not set in that CSR,
        // the hardware will NOT auto-execute the loaded TCD.
        //
        // Strategy:
        // - First TCD: ESG | INTMAJOR (no START - we add it manually after loading)
        // - Middle TCDs: ESG | INTMAJOR | START (auto-execute when loaded via S/G)
        // - Last TCD: INTMAJOR | START (auto-execute, no further linking)
        for i in 0..self.count {
            let is_first = i == 0;
            let is_last = i == self.count - 1;

            if is_first {
                if is_last {
                    // Only one TCD - no ESG, no START (we add START manually)
                    self.tcds[i].dlast_sga = 0;
                    self.tcds[i].csr = 0x0002; // INTMAJOR only
                } else {
                    // First of multiple - ESG to link, no START (we add START manually)
                    // NOTE(review): pointer-to-i32 cast assumes a 32-bit
                    // address space (true on Cortex-M).
                    self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                    self.tcds[i].csr = 0x0012; // ESG | INTMAJOR
                }
            } else if is_last {
                // Last TCD (not first) - no ESG, but START so it auto-executes
                self.tcds[i].dlast_sga = 0;
                self.tcds[i].csr = 0x0003; // INTMAJOR | START
            } else {
                // Middle TCD - ESG to link, and START so it auto-executes
                self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                self.tcds[i].csr = 0x0013; // ESG | INTMAJOR | START
            }
        }

        let t = channel.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        // This ensures the channel is in a clean state before loading the TCD
        DmaChannel::<C>::reset_channel_state(t);

        // Memory barrier to ensure channel state is reset before loading TCD
        cortex_m::asm::dsb();

        // Load first TCD into hardware
        unsafe {
            channel.load_tcd(&self.tcds[0]);
        }

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Start the transfer
        t.tcd_csr().modify(|_, w| w.start().channel_started());

        Ok(Transfer::new(channel.as_any()))
    }

    /// Reset the builder for reuse.
    pub fn clear(&mut self) {
        self.count = 0;
    }
}
2516
2517impl<W: Word> Default for ScatterGatherBuilder<'_, W> {
2518 fn default() -> Self {
2519 Self::new()
2520 }
2521}
2522
2523/// A completed scatter-gather transfer result.
2524///
2525/// This type is returned after a scatter-gather transfer completes,
2526/// providing access to any error information.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// NOTE(review): no API in this part of the module constructs
// `ScatterGatherResult` — confirm it is produced elsewhere or consider
// removing it.
pub struct ScatterGatherResult {
    /// Number of segments successfully transferred
    pub segments_completed: usize,
    /// Error if any occurred
    pub error: Option<Error>,
}
2534
2535// ============================================================================
2536// Interrupt Handler
2537// ============================================================================
2538
/// Interrupt handler helper.
///
/// Call this from your interrupt handler to clear the interrupt flag and wake the waker.
/// This handles both half-transfer and complete-transfer interrupts.
///
/// # Safety
/// Must be called from the correct DMA channel interrupt context.
pub unsafe fn on_interrupt(ch_index: usize) {
    // SAFETY: only the per-channel registers for `ch_index` are touched,
    // and this function's contract requires being called from exactly that
    // channel's interrupt, so no other code races on them here.
    let p = pac::Peripherals::steal();
    let edma = &p.edma_0_tcd0;
    let t = edma.tcd(ch_index);

    // Read TCD CSR to determine interrupt source
    let csr = t.tcd_csr().read();

    // Check if this is a half-transfer interrupt
    // INTHALF is set and we're at or past the half-way point
    if csr.inthalf().bit_is_set() {
        let biter = t.tcd_biter_elinkno().read().biter().bits();
        let citer = t.tcd_citer_elinkno().read().citer().bits();
        let half_point = biter / 2;

        // CITER counts down toward zero as the major loop progresses, so
        // `citer <= biter / 2` means at least half the loop has completed.
        // The `citer > 0` guard skips the instant CITER reloads at wrap-around.
        // NOTE(review): with INTHALF enabled this heuristic distinguishes the
        // half interrupt from the complete interrupt — confirm against the
        // eDMA interrupt flag description, since CSR has no per-event flag.
        if citer <= half_point && citer > 0 {
            // Half-transfer interrupt - wake half_waker
            half_waker(ch_index).wake();
        }
    }

    // Clear INT flag
    t.ch_int().write(|w| w.int().clear_bit_by_one());

    // If DONE is set, this is a complete-transfer interrupt
    // Only wake the full-transfer waker when the transfer is actually complete
    if t.ch_csr().read().done().bit_is_set() {
        waker(ch_index).wake();
    }
}
2576
2577// ============================================================================
2578// Type-level Interrupt Handlers
2579// ============================================================================
2580
2581/// Macro to generate DMA channel interrupt handlers.
macro_rules! impl_dma_interrupt_handler {
    // `$irq`: interrupt vector name from the PAC; `$ch`: zero-based channel index.
    ($irq:ident, $ch:expr) => {
        #[interrupt]
        fn $irq() {
            // SAFETY: this handler is bound to the vector for channel `$ch`,
            // which satisfies `on_interrupt`'s required interrupt context.
            unsafe {
                on_interrupt($ch);
            }
        }
    };
}
2592
2593use crate::pac::interrupt;
2594
// Bind one handler to each of the 8 EDMA channel interrupt vectors.
impl_dma_interrupt_handler!(DMA_CH0, 0);
impl_dma_interrupt_handler!(DMA_CH1, 1);
impl_dma_interrupt_handler!(DMA_CH2, 2);
impl_dma_interrupt_handler!(DMA_CH3, 3);
impl_dma_interrupt_handler!(DMA_CH4, 4);
impl_dma_interrupt_handler!(DMA_CH5, 5);
impl_dma_interrupt_handler!(DMA_CH6, 6);
impl_dma_interrupt_handler!(DMA_CH7, 7);