aboutsummaryrefslogtreecommitdiff
path: root/embassy-mcxa/src/dma.rs
diff options
context:
space:
mode:
authorJames Munns <[email protected]>2025-12-05 14:28:47 +0100
committerJames Munns <[email protected]>2025-12-05 14:28:47 +0100
commitb252db845e19603faf528cf93fe0c44757a27430 (patch)
tree99e646d17bed747df244dd607a15f5a67baa530a /embassy-mcxa/src/dma.rs
parent6a1eed83b9df8ffa81b93860f530f5bb3252d996 (diff)
Move
Diffstat (limited to 'embassy-mcxa/src/dma.rs')
-rw-r--r--embassy-mcxa/src/dma.rs2594
1 files changed, 2594 insertions, 0 deletions
diff --git a/embassy-mcxa/src/dma.rs b/embassy-mcxa/src/dma.rs
new file mode 100644
index 000000000..7d1588516
--- /dev/null
+++ b/embassy-mcxa/src/dma.rs
@@ -0,0 +1,2594 @@
1//! DMA driver for MCXA276.
2//!
3//! This module provides a typed channel abstraction over the EDMA_0_TCD0 array
4//! and helpers for configuring the channel MUX. The driver supports both
5//! low-level TCD configuration and higher-level async transfer APIs.
6//!
7//! # Architecture
8//!
9//! The MCXA276 has 8 DMA channels (0-7), each with its own interrupt vector.
10//! Each channel has a Transfer Control Descriptor (TCD) that defines the
11//! transfer parameters.
12//!
13//! # Choosing the Right API
14//!
15//! This module provides several API levels to match different use cases:
16//!
17//! ## High-Level Async API (Recommended for Most Users)
18//!
19//! Use the async methods when you want simple, safe DMA transfers:
20//!
21//! | Method | Description |
22//! |--------|-------------|
23//! | [`DmaChannel::mem_to_mem()`] | Memory-to-memory copy |
24//! | [`DmaChannel::memset()`] | Fill memory with a pattern |
25//! | [`DmaChannel::write()`] | Memory-to-peripheral (TX) |
26//! | [`DmaChannel::read()`] | Peripheral-to-memory (RX) |
27//!
28//! These return a [`Transfer`] future that can be `.await`ed:
29//!
//! ```ignore
31//! # use embassy_mcxa::dma::{DmaChannel, TransferOptions};
32//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
33//! # let src = [0u32; 4];
34//! # let mut dst = [0u32; 4];
35//! // Simple memory-to-memory transfer
36//! unsafe {
37//! dma_ch.mem_to_mem(&src, &mut dst, TransferOptions::default()).await;
38//! }
39//! ```
40//!
41//! ## Setup Methods (For Peripheral Drivers)
42//!
43//! Use setup methods when you need manual lifecycle control:
44//!
45//! | Method | Description |
46//! |--------|-------------|
47//! | [`DmaChannel::setup_write()`] | Configure TX without starting |
48//! | [`DmaChannel::setup_read()`] | Configure RX without starting |
49//!
50//! These configure the TCD but don't start the transfer. You control:
51//! 1. When to call [`DmaChannel::enable_request()`]
52//! 2. How to detect completion (polling or interrupts)
53//! 3. When to clean up with [`DmaChannel::clear_done()`]
54//!
55//! ## Circular/Ring Buffer API (For Continuous Reception)
56//!
57//! Use [`DmaChannel::setup_circular_read()`] for continuous data reception:
58//!
//! ```ignore
60//! # use embassy_mcxa::dma::DmaChannel;
61//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
62//! # let uart_rx_addr = 0x4000_0000 as *const u8;
63//! static mut RX_BUF: [u8; 64] = [0; 64];
64//!
65//! let ring_buf = unsafe {
66//! dma_ch.setup_circular_read(uart_rx_addr, &mut RX_BUF)
67//! };
68//!
69//! // Read data as it arrives
70//! let mut buf = [0u8; 16];
71//! let n = ring_buf.read(&mut buf).await.unwrap();
72//! ```
73//!
74//! ## Scatter-Gather Builder (For Chained Transfers)
75//!
76//! Use [`ScatterGatherBuilder`] for complex multi-segment transfers:
77//!
//! ```ignore
79//! # use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
80//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
81//! let mut builder = ScatterGatherBuilder::<u32>::new();
82//! builder.add_transfer(&src1, &mut dst1);
83//! builder.add_transfer(&src2, &mut dst2);
84//!
85//! let transfer = unsafe { builder.build(&dma_ch).unwrap() };
86//! transfer.await;
87//! ```
88//!
89//! ## Direct TCD Access (For Advanced Use Cases)
90//!
91//! For full control, use the channel's `tcd()` method to access TCD registers directly.
92//! See the `dma_*` examples for patterns.
93//!
94//! # Example
95//!
//! ```ignore
97//! use embassy_mcxa::dma::{DmaChannel, TransferOptions, Direction};
98//!
99//! let dma_ch = DmaChannel::new(p.DMA_CH0);
100//! // Configure and trigger a transfer...
101//! ```
102
103use core::future::Future;
104use core::marker::PhantomData;
105use core::pin::Pin;
106use core::ptr::NonNull;
107use core::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
108use core::task::{Context, Poll};
109
110use embassy_hal_internal::PeripheralType;
111use embassy_sync::waitqueue::AtomicWaker;
112
113use crate::clocks::Gate;
114use crate::pac;
115use crate::pac::Interrupt;
116use crate::peripherals::DMA0;
117
/// Static flag to track whether DMA has been initialized.
///
/// `false` until the first caller of [`init()`] wins the
/// `compare_exchange` below; all later calls return immediately.
static DMA_INITIALIZED: AtomicBool = AtomicBool::new(false);

/// Initialize DMA controller (clock enabled, reset released, controller configured).
///
/// This function is intended to be called during HAL initialization (`hal::init()`).
/// It is idempotent - it will only initialize DMA once, even if called multiple times.
///
/// The function enables the DMA0 clock, releases reset, and configures the controller
/// for normal operation with round-robin arbitration.
///
/// NOTE(review): the flag is flipped to `true` *before* the hardware is
/// actually configured, so a concurrent caller that loses the race returns
/// while configuration may still be in progress. Harmless if all callers
/// run from the same context during `hal::init()` — confirm that this is
/// never called concurrently from interrupt context.
pub fn init() {
    // Fast path: already initialized
    if DMA_INITIALIZED.load(Ordering::Acquire) {
        return;
    }

    // Slow path: initialize DMA
    // Use compare_exchange to ensure only one caller initializes
    if DMA_INITIALIZED
        .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
        .is_ok()
    {
        // We won the race - initialize DMA
        unsafe {
            // Enable DMA0 clock and release reset
            DMA0::enable_clock();
            DMA0::release_reset();

            // Configure DMA controller:
            // - EDBG: stall DMA while the core is halted in debug
            // - ERCA: round-robin channel arbitration
            // - HALT cleared: normal operation
            let dma = &(*pac::Dma0::ptr());
            dma.mp_csr().modify(|_, w| {
                w.edbg()
                    .enable()
                    .erca()
                    .enable()
                    .halt()
                    .normal_operation()
                    .gclc()
                    .available()
                    .gmrc()
                    .available()
            });
        }
    }
}
163
164// ============================================================================
165// Phase 1: Foundation Types (Embassy-aligned)
166// ============================================================================
167
/// DMA transfer direction.
///
/// The direction determines which side of the transfer keeps a fixed
/// address (a peripheral register) and which side increments through a
/// memory buffer.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Direction {
    /// Transfer from memory to memory.
    MemoryToMemory,
    /// Transfer from memory to a peripheral register.
    MemoryToPeripheral,
    /// Transfer from a peripheral register to memory.
    PeripheralToMemory,
}
179
/// DMA transfer priority.
///
/// Variants map to eDMA channel priority values where 0 is the highest
/// hardware priority and 7 is the lowest; see
/// [`Priority::to_hw_priority`] for the exact mapping.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
    /// Low priority (channel priority 7).
    Low,
    /// Medium priority (channel priority 4).
    Medium,
    /// High priority (channel priority 1).
    #[default]
    High,
    /// Highest priority (channel priority 0).
    Highest,
}

impl Priority {
    /// Convert to hardware priority value (0 = highest, 7 = lowest).
    ///
    /// Declared `const` for consistency with [`WordSize::bytes`] and
    /// [`WordSize::to_hw_size`], so the mapping can also be evaluated in
    /// const contexts. This is a backward-compatible change.
    pub const fn to_hw_priority(self) -> u8 {
        match self {
            Priority::Low => 7,
            Priority::Medium => 4,
            Priority::High => 1,
            Priority::Highest => 0,
        }
    }
}
206
/// DMA transfer data width.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum WordSize {
    /// 8-bit (1 byte) transfers.
    OneByte,
    /// 16-bit (2 byte) transfers.
    TwoBytes,
    /// 32-bit (4 byte) transfers.
    #[default]
    FourBytes,
}

impl WordSize {
    /// Size in bytes.
    pub const fn bytes(self) -> usize {
        // The hardware encoding is log2 of the byte width, so the byte
        // count is simply 2^encoding (1, 2 or 4).
        1usize << self.to_hw_size()
    }

    /// Convert to hardware SSIZE/DSIZE field value.
    pub const fn to_hw_size(self) -> u8 {
        match self {
            Self::OneByte => 0,
            Self::TwoBytes => 1,
            Self::FourBytes => 2,
        }
    }

    /// Create from byte width (1, 2, or 4).
    ///
    /// Returns `None` for any width that is not a supported transfer size.
    pub const fn from_bytes(bytes: u8) -> Option<Self> {
        match bytes {
            1 => Some(Self::OneByte),
            2 => Some(Self::TwoBytes),
            4 => Some(Self::FourBytes),
            _ => None,
        }
    }
}
249
/// Trait for types that can be transferred via DMA.
///
/// This provides compile-time type safety for DMA transfers: the transfer
/// methods use `W::size()` to derive the correct SSIZE/DSIZE encoding and
/// address increments, so callers cannot mismatch buffer element type and
/// hardware transfer width.
pub trait Word: Copy + 'static {
    /// The word size for this type.
    fn size() -> WordSize;
}

// 8-bit transfers.
impl Word for u8 {
    fn size() -> WordSize {
        WordSize::OneByte
    }
}

// 16-bit transfers.
impl Word for u16 {
    fn size() -> WordSize {
        WordSize::TwoBytes
    }
}

// 32-bit transfers.
impl Word for u32 {
    fn size() -> WordSize {
        WordSize::FourBytes
    }
}
275
/// DMA transfer options.
///
/// This struct configures various aspects of a DMA transfer.
///
/// Marked `#[non_exhaustive]`, so construct it with
/// `TransferOptions::default()` and override individual fields.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Transfer priority.
    pub priority: Priority,
    /// Enable circular (continuous) mode.
    ///
    /// When enabled, the transfer repeats automatically after completing.
    pub circular: bool,
    /// Enable interrupt on half transfer complete.
    pub half_transfer_interrupt: bool,
    /// Enable interrupt on transfer complete.
    ///
    /// The transfer methods map this to the TCD `INTMAJOR` bit.
    pub complete_transfer_interrupt: bool,
}

impl Default for TransferOptions {
    // Defaults: high priority, one-shot transfer, completion interrupt
    // enabled so an awaited `Transfer` can be woken on major-loop complete.
    fn default() -> Self {
        Self {
            priority: Priority::High,
            circular: false,
            half_transfer_interrupt: false,
            complete_transfer_interrupt: true,
        }
    }
}
305
/// DMA error types.
///
/// Returned by the fallible DMA APIs (e.g. ring-buffer reads).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA controller reported a bus error.
    BusError,
    /// The transfer was aborted.
    Aborted,
    /// Configuration error (e.g., invalid parameters).
    Configuration,
    /// Buffer overrun (for ring buffers).
    Overrun,
}
319
/// Whether to enable the major loop completion interrupt.
///
/// This enum provides better readability than a boolean parameter
/// for functions that configure DMA interrupt behavior
/// (`f(EnableInterrupt::Yes)` reads better than `f(true)` at call sites).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum EnableInterrupt {
    /// Enable the interrupt on major loop completion.
    Yes,
    /// Do not enable the interrupt.
    No,
}
332
333// ============================================================================
334// DMA Constants
335// ============================================================================
336
/// Maximum bytes per DMA transfer (eDMA4 CITER/BITER are 15-bit fields).
///
/// This is a hardware limitation of the eDMA4 controller. Transfers larger
/// than this must be split into multiple DMA operations.
///
/// NOTE(review): the transfer methods in this module apply the `0x7FFF`
/// bound to *element* counts (`src.len()` / `dst.len()`), while this doc
/// says "bytes" — confirm the intended unit against the eDMA4 chapter of
/// the reference manual.
pub const DMA_MAX_TRANSFER_SIZE: usize = 0x7FFF;
342
343// ============================================================================
344// DMA Request Source Types (Type-Safe API)
345// ============================================================================
346
/// Trait for type-safe DMA request sources.
///
/// Each peripheral that can trigger DMA requests implements this trait
/// with marker types that encode the correct request source number at
/// compile time. This prevents using the wrong request source for a
/// peripheral.
///
/// # Example
///
/// ```ignore
/// // The LPUART2 RX request source is automatically derived from the type:
/// channel.set_request_source::<Lpuart2RxRequest>();
/// ```
///
/// This trait is sealed and cannot be implemented outside this crate.
// `private_bounds` is allowed because the sealing supertrait is
// intentionally private — that is the whole point of the pattern.
#[allow(private_bounds)]
pub trait DmaRequest: sealed::SealedDmaRequest {
    /// The hardware request source number for the DMA mux.
    const REQUEST_NUMBER: u8;
}
367
/// Macro to define a DMA request type.
///
/// Creates a zero-sized marker type that implements `DmaRequest` with
/// the specified request number. The sealed impl is emitted alongside so
/// the marker satisfies the `DmaRequest` supertrait bound.
macro_rules! define_dma_request {
    ($(#[$meta:meta])* $name:ident = $num:expr) => {
        $(#[$meta])*
        #[derive(Debug, Copy, Clone)]
        pub struct $name;

        impl sealed::SealedDmaRequest for $name {}

        impl DmaRequest for $name {
            const REQUEST_NUMBER: u8 = $num;
        }
    };
}
385
// LPUART DMA request sources (from MCXA276 reference manual Table 4-8).
// Request numbers 21..=32 form contiguous RX/TX pairs for LPUART0..LPUART5.
define_dma_request!(
    /// DMA request source for LPUART0 RX.
    Lpuart0RxRequest = 21
);
define_dma_request!(
    /// DMA request source for LPUART0 TX.
    Lpuart0TxRequest = 22
);
define_dma_request!(
    /// DMA request source for LPUART1 RX.
    Lpuart1RxRequest = 23
);
define_dma_request!(
    /// DMA request source for LPUART1 TX.
    Lpuart1TxRequest = 24
);
define_dma_request!(
    /// DMA request source for LPUART2 RX.
    Lpuart2RxRequest = 25
);
define_dma_request!(
    /// DMA request source for LPUART2 TX.
    Lpuart2TxRequest = 26
);
define_dma_request!(
    /// DMA request source for LPUART3 RX.
    Lpuart3RxRequest = 27
);
define_dma_request!(
    /// DMA request source for LPUART3 TX.
    Lpuart3TxRequest = 28
);
define_dma_request!(
    /// DMA request source for LPUART4 RX.
    Lpuart4RxRequest = 29
);
define_dma_request!(
    /// DMA request source for LPUART4 TX.
    Lpuart4TxRequest = 30
);
define_dma_request!(
    /// DMA request source for LPUART5 RX.
    Lpuart5RxRequest = 31
);
define_dma_request!(
    /// DMA request source for LPUART5 TX.
    Lpuart5TxRequest = 32
);
435
436// ============================================================================
437// Channel Trait (Sealed Pattern)
438// ============================================================================
439
// Private module holding the sealing supertraits. Downstream crates can
// name `Channel`/`DmaRequest` but can never implement them, because they
// cannot name these traits.
mod sealed {
    use crate::pac::Interrupt;

    /// Sealed trait for DMA channels.
    pub trait SealedChannel {
        /// Zero-based channel index into the TCD array.
        fn index(&self) -> usize;
        /// Interrupt vector for this channel.
        fn interrupt(&self) -> Interrupt;
    }

    /// Sealed trait for DMA request sources.
    pub trait SealedDmaRequest {}
}
454
/// Marker trait implemented by HAL peripheral tokens that map to a DMA0
/// channel backed by one EDMA_0_TCD0 TCD slot.
///
/// The associated constants mirror the sealed trait's methods so the
/// index/interrupt can also be resolved at compile time.
///
/// This trait is sealed and cannot be implemented outside this crate.
#[allow(private_bounds)]
pub trait Channel: sealed::SealedChannel + PeripheralType + Into<AnyChannel> + 'static {
    /// Zero-based channel index into the TCD array.
    const INDEX: usize;
    /// Interrupt vector for this channel.
    const INTERRUPT: Interrupt;
}
466
/// Type-erased DMA channel.
///
/// This allows storing DMA channels in a uniform way regardless of their
/// concrete type, useful for async transfer futures and runtime channel selection.
///
/// `Copy` is fine here: the struct only carries the channel's index and
/// interrupt number, not ownership of the hardware channel itself.
#[derive(Debug, Clone, Copy)]
pub struct AnyChannel {
    // Zero-based index into the EDMA_0_TCD0 array.
    index: usize,
    // Per-channel interrupt vector.
    interrupt: Interrupt,
}
476
impl AnyChannel {
    /// Get the channel index.
    #[inline]
    pub const fn index(&self) -> usize {
        self.index
    }

    /// Get the channel interrupt.
    #[inline]
    pub const fn interrupt(&self) -> Interrupt {
        self.interrupt
    }

    /// Get a reference to the TCD register block for this channel.
    ///
    /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
    #[inline]
    fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
        // Safety: MCXA276 has a single eDMA instance, and we're only accessing
        // the TCD for this specific channel
        let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
        edma.tcd(self.index)
    }

    /// Check if the channel's DONE flag is set.
    ///
    /// DONE is set by hardware on major-loop completion; elsewhere in this
    /// driver it is cleared with `clear_bit_by_one` before reuse.
    pub fn is_done(&self) -> bool {
        self.tcd().ch_csr().read().done().bit_is_set()
    }

    /// Get the waker for this channel.
    ///
    /// NOTE(review): `STATES` is defined elsewhere in this file; this
    /// assumes one waker slot per channel index — confirm it matches the
    /// interrupt handler's indexing.
    pub fn waker(&self) -> &'static AtomicWaker {
        &STATES[self.index].waker
    }
}
511
// Forward the sealed accessors to the stored fields so an `AnyChannel`
// can be used anywhere a sealed channel is required.
impl sealed::SealedChannel for AnyChannel {
    fn index(&self) -> usize {
        self.index
    }

    fn interrupt(&self) -> Interrupt {
        self.interrupt
    }
}
521
/// Macro to implement Channel trait for a peripheral.
///
/// For a given peripheral token it emits:
/// - the sealed `SealedChannel` impl (runtime index/interrupt accessors),
/// - the public `Channel` impl (compile-time `INDEX`/`INTERRUPT`), and
/// - a `From` conversion into the type-erased [`AnyChannel`].
macro_rules! impl_channel {
    ($peri:ident, $index:expr, $irq:ident) => {
        impl sealed::SealedChannel for crate::peripherals::$peri {
            fn index(&self) -> usize {
                $index
            }

            fn interrupt(&self) -> Interrupt {
                Interrupt::$irq
            }
        }

        impl Channel for crate::peripherals::$peri {
            const INDEX: usize = $index;
            const INTERRUPT: Interrupt = Interrupt::$irq;
        }

        impl From<crate::peripherals::$peri> for AnyChannel {
            fn from(_: crate::peripherals::$peri) -> Self {
                AnyChannel {
                    index: $index,
                    interrupt: Interrupt::$irq,
                }
            }
        }
    };
}
550
// Map each DMA channel peripheral token to its TCD index and dedicated
// interrupt vector (the MCXA276 has 8 DMA channels, DMA_CH0..DMA_CH7).
impl_channel!(DMA_CH0, 0, DMA_CH0);
impl_channel!(DMA_CH1, 1, DMA_CH1);
impl_channel!(DMA_CH2, 2, DMA_CH2);
impl_channel!(DMA_CH3, 3, DMA_CH3);
impl_channel!(DMA_CH4, 4, DMA_CH4);
impl_channel!(DMA_CH5, 5, DMA_CH5);
impl_channel!(DMA_CH6, 6, DMA_CH6);
impl_channel!(DMA_CH7, 7, DMA_CH7);
559
560/// Strongly-typed handle to a DMA0 channel.
561///
562/// The lifetime of this value is tied to the unique peripheral token
563/// supplied by `embassy_hal_internal::peripherals!`, so safe code cannot
564/// create two `DmaChannel` instances for the same hardware channel.
565pub struct DmaChannel<C: Channel> {
566 _ch: core::marker::PhantomData<C>,
567}
568
569// ============================================================================
570// DMA Transfer Methods - API Overview
571// ============================================================================
572//
573// The DMA API provides two categories of methods for configuring transfers:
574//
575// ## 1. Async Methods (Return `Transfer` Future)
576//
577// These methods return a [`Transfer`] Future that must be `.await`ed:
578//
579// - [`write()`](DmaChannel::write) - Memory-to-peripheral using default eDMA TCD block
580// - [`read()`](DmaChannel::read) - Peripheral-to-memory using default eDMA TCD block
581// - [`write_to_peripheral()`](DmaChannel::write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
582// - [`read_from_peripheral()`](DmaChannel::read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
583// - [`mem_to_mem()`](DmaChannel::mem_to_mem) - Memory-to-memory using default eDMA TCD block
584// - [`transfer_mem_to_mem()`](DmaChannel::transfer_mem_to_mem) - Memory-to-memory with custom eDMA TCD block
585//
586// The `Transfer` manages the DMA lifecycle automatically:
587// - Enables channel request
588// - Waits for completion via async/await
589// - Cleans up on completion
590//
591// **Important:** `Transfer::Drop` aborts the transfer if dropped before completion.
592// This means you MUST `.await` the Transfer or it will be aborted when it goes out of scope.
593//
594// **Use case:** When you want to use async/await and let the Transfer handle lifecycle management.
595//
596// ## 2. Setup Methods (Configure TCD Only)
597//
598// These methods configure the TCD but do NOT return a `Transfer`:
599//
600// - [`setup_write()`](DmaChannel::setup_write) - Memory-to-peripheral using default eDMA TCD block
601// - [`setup_read()`](DmaChannel::setup_read) - Peripheral-to-memory using default eDMA TCD block
602// - [`setup_write_to_peripheral()`](DmaChannel::setup_write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
603// - [`setup_read_from_peripheral()`](DmaChannel::setup_read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
604//
605// The caller is responsible for the complete DMA lifecycle:
606// 1. Call [`enable_request()`](DmaChannel::enable_request) to start the transfer
607// 2. Poll [`is_done()`](DmaChannel::is_done) or use interrupts to detect completion
608// 3. Call [`disable_request()`](DmaChannel::disable_request), [`clear_done()`](DmaChannel::clear_done),
609// [`clear_interrupt()`](DmaChannel::clear_interrupt) for cleanup
610//
611// **Use case:** Peripheral drivers (like LPUART) that need fine-grained control over
612// DMA setup before starting a `Transfer`.
613//
614// ============================================================================
615
616impl<C: Channel> DmaChannel<C> {
617 /// Wrap a DMA channel token (takes ownership of the Peri wrapper).
618 ///
619 /// Note: DMA is initialized during `hal::init()` via `dma::init()`.
620 #[inline]
621 pub fn new(_ch: embassy_hal_internal::Peri<'_, C>) -> Self {
622 Self {
623 _ch: core::marker::PhantomData,
624 }
625 }
626
627 /// Wrap a DMA channel token directly (for internal use).
628 ///
629 /// Note: DMA is initialized during `hal::init()` via `dma::init()`.
630 #[inline]
631 pub fn from_token(_ch: C) -> Self {
632 Self {
633 _ch: core::marker::PhantomData,
634 }
635 }
636
    /// Channel index in the EDMA_0_TCD0 array.
    ///
    /// Resolved at compile time from the channel type parameter.
    #[inline]
    pub const fn index(&self) -> usize {
        C::INDEX
    }
642
643 /// Convert this typed channel into a type-erased `AnyChannel`.
644 #[inline]
645 pub fn into_any(self) -> AnyChannel {
646 AnyChannel {
647 index: C::INDEX,
648 interrupt: C::INTERRUPT,
649 }
650 }
651
652 /// Get a reference to the type-erased channel info.
653 #[inline]
654 pub fn as_any(&self) -> AnyChannel {
655 AnyChannel {
656 index: C::INDEX,
657 interrupt: C::INTERRUPT,
658 }
659 }
660
    /// Return a reference to the underlying TCD register block.
    ///
    /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
    ///
    /// # Note
    ///
    /// This is exposed for advanced use cases that need direct TCD access.
    /// For most use cases, prefer the higher-level transfer methods.
    #[inline]
    pub fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
        // Safety: MCXA276 has a single eDMA instance
        let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
        // Select this channel's TCD slot by its compile-time index.
        edma.tcd(C::INDEX)
    }
675
    /// Start an async transfer.
    ///
    /// The channel must already be configured. This enables the channel
    /// request and returns a `Transfer` future that resolves when the
    /// DMA transfer completes.
    ///
    /// # Safety
    ///
    /// The caller must ensure the DMA channel has been properly configured
    /// and that source/destination buffers remain valid for the duration
    /// of the transfer.
    pub unsafe fn start_transfer(&self) -> Transfer<'_> {
        // Clear any previous DONE/INT flags.
        // Order matters: stale flags must be cleared *before* ERQ is set,
        // otherwise a leftover DONE/INT could be mistaken for completion
        // of the new transfer.
        let t = self.tcd();
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Enable the channel request
        t.ch_csr().modify(|_, w| w.erq().enable());

        Transfer::new(self.as_any())
    }
698
699 // ========================================================================
700 // Type-Safe Transfer Methods (Embassy-style API)
701 // ========================================================================
702
    /// Perform a memory-to-memory DMA transfer (simplified API).
    ///
    /// This is a type-safe wrapper that uses the `Word` trait to determine
    /// the correct transfer width automatically. Uses the global eDMA TCD
    /// register accessor internally.
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer
    /// * `dst` - Destination buffer (must be at least as large as src)
    /// * `options` - Transfer configuration options
    ///
    /// # Safety
    ///
    /// The source and destination buffers must remain valid for the
    /// duration of the transfer.
    // Thin convenience alias: all configuration lives in
    // `transfer_mem_to_mem`, which also documents the panic conditions.
    pub unsafe fn mem_to_mem<W: Word>(&self, src: &[W], dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
        self.transfer_mem_to_mem(src, dst, options)
    }
722
723 /// Perform a memory-to-memory DMA transfer.
724 ///
725 /// This is a type-safe wrapper that uses the `Word` trait to determine
726 /// the correct transfer width automatically.
727 ///
728 /// # Arguments
729 ///
730 /// * `edma` - Reference to the eDMA TCD register block
731 /// * `src` - Source buffer
732 /// * `dst` - Destination buffer (must be at least as large as src)
733 /// * `options` - Transfer configuration options
734 ///
735 /// # Safety
736 ///
737 /// The source and destination buffers must remain valid for the
738 /// duration of the transfer.
739 pub unsafe fn transfer_mem_to_mem<W: Word>(
740 &self,
741 src: &[W],
742 dst: &mut [W],
743 options: TransferOptions,
744 ) -> Transfer<'_> {
745 assert!(!src.is_empty());
746 assert!(dst.len() >= src.len());
747 assert!(src.len() <= 0x7fff);
748
749 let size = W::size();
750 let byte_count = (src.len() * size.bytes()) as u32;
751
752 let t = self.tcd();
753
754 // Reset channel state - clear DONE, disable requests, clear errors
755 t.ch_csr().write(|w| {
756 w.erq()
757 .disable()
758 .earq()
759 .disable()
760 .eei()
761 .no_error()
762 .done()
763 .clear_bit_by_one()
764 });
765 t.ch_es().write(|w| w.err().clear_bit_by_one());
766 t.ch_int().write(|w| w.int().clear_bit_by_one());
767
768 // Memory barrier to ensure channel state is fully reset before touching TCD
769 cortex_m::asm::dsb();
770
771 // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
772 // Reset ALL TCD registers to 0 to clear any stale configuration from
773 // previous transfers. This is critical when reusing a channel.
774 t.tcd_saddr().write(|w| w.saddr().bits(0));
775 t.tcd_soff().write(|w| w.soff().bits(0));
776 t.tcd_attr().write(|w| w.bits(0));
777 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
778 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
779 t.tcd_daddr().write(|w| w.daddr().bits(0));
780 t.tcd_doff().write(|w| w.doff().bits(0));
781 t.tcd_citer_elinkno().write(|w| w.bits(0));
782 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
783 t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
784 t.tcd_biter_elinkno().write(|w| w.bits(0));
785
786 // Memory barrier after TCD reset
787 cortex_m::asm::dsb();
788
789 // Note: Priority is managed by round-robin arbitration (set in init())
790 // Per-channel priority can be configured via ch_pri() if needed
791
792 // Now configure the new transfer
793
794 // Source address and increment
795 t.tcd_saddr().write(|w| w.saddr().bits(src.as_ptr() as u32));
796 t.tcd_soff().write(|w| w.soff().bits(size.bytes() as u16));
797
798 // Destination address and increment
799 t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
800 t.tcd_doff().write(|w| w.doff().bits(size.bytes() as u16));
801
802 // Transfer attributes (size)
803 let hw_size = size.to_hw_size();
804 t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));
805
806 // Minor loop: transfer all bytes in one minor loop
807 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_count));
808
809 // No source/dest adjustment after major loop
810 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
811 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
812
813 // Major loop count = 1 (single major loop)
814 // Write BITER first, then CITER (CITER must match BITER at start)
815 t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
816 t.tcd_citer_elinkno().write(|w| w.citer().bits(1));
817
818 // Memory barrier before setting START
819 cortex_m::asm::dsb();
820
821 // Control/status: interrupt on major complete, start
822 // Write this last after all other TCD registers are configured
823 let int_major = options.complete_transfer_interrupt;
824 t.tcd_csr().write(|w| {
825 w.intmajor()
826 .bit(int_major)
827 .inthalf()
828 .bit(options.half_transfer_interrupt)
829 .dreq()
830 .set_bit() // Auto-disable request after major loop
831 .start()
832 .set_bit() // Start the channel
833 });
834
835 Transfer::new(self.as_any())
836 }
837
838 /// Fill a memory buffer with a pattern value (memset).
839 ///
840 /// This performs a DMA transfer where the source address remains fixed
841 /// (pattern value) while the destination address increments through the buffer.
842 /// It's useful for quickly filling large memory regions with a constant value.
843 ///
844 /// # Arguments
845 ///
846 /// * `pattern` - Reference to the pattern value (will be read repeatedly)
847 /// * `dst` - Destination buffer to fill
848 /// * `options` - Transfer configuration options
849 ///
850 /// # Example
851 ///
852 /// ```no_run
853 /// use embassy_mcxa::dma::{DmaChannel, TransferOptions};
854 ///
855 /// let dma_ch = DmaChannel::new(p.DMA_CH0);
856 /// let pattern: u32 = 0xDEADBEEF;
857 /// let mut buffer = [0u32; 256];
858 ///
859 /// unsafe {
860 /// dma_ch.memset(&pattern, &mut buffer, TransferOptions::default()).await;
861 /// }
862 /// // buffer is now filled with 0xDEADBEEF
863 /// ```
864 ///
865 /// # Safety
866 ///
867 /// - The pattern and destination buffer must remain valid for the duration of the transfer.
868 pub unsafe fn memset<W: Word>(&self, pattern: &W, dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
869 assert!(!dst.is_empty());
870 assert!(dst.len() <= 0x7fff);
871
872 let size = W::size();
873 let byte_size = size.bytes();
874 // Total bytes to transfer - all in one minor loop for software-triggered transfers
875 let total_bytes = (dst.len() * byte_size) as u32;
876
877 let t = self.tcd();
878
879 // Reset channel state - clear DONE, disable requests, clear errors
880 t.ch_csr().write(|w| {
881 w.erq()
882 .disable()
883 .earq()
884 .disable()
885 .eei()
886 .no_error()
887 .done()
888 .clear_bit_by_one()
889 });
890 t.ch_es().write(|w| w.err().clear_bit_by_one());
891 t.ch_int().write(|w| w.int().clear_bit_by_one());
892
893 // Memory barrier to ensure channel state is fully reset before touching TCD
894 cortex_m::asm::dsb();
895
896 // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
897 // Reset ALL TCD registers to 0 to clear any stale configuration from
898 // previous transfers. This is critical when reusing a channel.
899 t.tcd_saddr().write(|w| w.saddr().bits(0));
900 t.tcd_soff().write(|w| w.soff().bits(0));
901 t.tcd_attr().write(|w| w.bits(0));
902 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
903 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
904 t.tcd_daddr().write(|w| w.daddr().bits(0));
905 t.tcd_doff().write(|w| w.doff().bits(0));
906 t.tcd_citer_elinkno().write(|w| w.bits(0));
907 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
908 t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
909 t.tcd_biter_elinkno().write(|w| w.bits(0));
910
911 // Memory barrier after TCD reset
912 cortex_m::asm::dsb();
913
914 // Now configure the new transfer
915 //
916 // For software-triggered memset, we use a SINGLE minor loop that transfers
917 // all bytes at once. The source address stays fixed (SOFF=0) while the
918 // destination increments (DOFF=byte_size). The eDMA will read from the
919 // same source address for each destination word.
920 //
921 // This is necessary because the START bit only triggers ONE minor loop
922 // iteration. Using CITER>1 with software trigger would require multiple
923 // START triggers.
924
925 // Source: pattern address, fixed (soff=0)
926 t.tcd_saddr().write(|w| w.saddr().bits(pattern as *const W as u32));
927 t.tcd_soff().write(|w| w.soff().bits(0)); // Fixed source - reads pattern repeatedly
928
929 // Destination: memory buffer, incrementing by word size
930 t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
931 t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));
932
933 // Transfer attributes - source and dest are same word size
934 let hw_size = size.to_hw_size();
935 t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));
936
937 // Minor loop: transfer ALL bytes in one minor loop (like mem_to_mem)
938 // This allows the entire transfer to complete with a single START trigger
939 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(total_bytes));
940
941 // No address adjustment after major loop
942 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
943 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
944
945 // Major loop count = 1 (single major loop, all data in minor loop)
946 // Write BITER first, then CITER (CITER must match BITER at start)
947 t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
948 t.tcd_citer_elinkno().write(|w| w.citer().bits(1));
949
950 // Memory barrier before setting START
951 cortex_m::asm::dsb();
952
953 // Control/status: interrupt on major complete, start immediately
954 // Write this last after all other TCD registers are configured
955 let int_major = options.complete_transfer_interrupt;
956 t.tcd_csr().write(|w| {
957 w.intmajor()
958 .bit(int_major)
959 .inthalf()
960 .bit(options.half_transfer_interrupt)
961 .dreq()
962 .set_bit() // Auto-disable request after major loop
963 .start()
964 .set_bit() // Start the channel
965 });
966
967 Transfer::new(self.as_any())
968 }
969
970 /// Write data from memory to a peripheral register.
971 ///
972 /// The destination address remains fixed (peripheral register) while
973 /// the source address increments through the buffer.
974 ///
975 /// # Arguments
976 ///
977 /// * `buf` - Source buffer to write from
978 /// * `peri_addr` - Peripheral register address
979 /// * `options` - Transfer configuration options
980 ///
981 /// # Safety
982 ///
983 /// - The buffer must remain valid for the duration of the transfer.
984 /// - The peripheral address must be valid for writes.
985 pub unsafe fn write<W: Word>(&self, buf: &[W], peri_addr: *mut W, options: TransferOptions) -> Transfer<'_> {
986 self.write_to_peripheral(buf, peri_addr, options)
987 }
988
989 /// Configure a memory-to-peripheral DMA transfer without starting it.
990 ///
991 /// This is a convenience wrapper around [`setup_write_to_peripheral()`](Self::setup_write_to_peripheral)
992 /// that uses the default eDMA TCD register block.
993 ///
994 /// This method configures the TCD but does NOT return a `Transfer`. The caller
995 /// is responsible for the complete DMA lifecycle:
996 /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
997 /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
998 /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
999 /// [`clear_interrupt()`](Self::clear_interrupt) for cleanup
1000 ///
1001 /// # Example
1002 ///
1003 /// ```no_run
1004 /// # use embassy_mcxa::dma::DmaChannel;
1005 /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
1006 /// # let uart_tx_addr = 0x4000_0000 as *mut u8;
1007 /// let data = [0x48, 0x65, 0x6c, 0x6c, 0x6f]; // "Hello"
1008 ///
1009 /// unsafe {
1010 /// // Configure the transfer
1011 /// dma_ch.setup_write(&data, uart_tx_addr, EnableInterrupt::Yes);
1012 ///
1013 /// // Start when peripheral is ready
1014 /// dma_ch.enable_request();
1015 ///
1016 /// // Wait for completion (or use interrupt)
1017 /// while !dma_ch.is_done() {}
1018 ///
1019 /// // Clean up
1020 /// dma_ch.clear_done();
1021 /// dma_ch.clear_interrupt();
1022 /// }
1023 /// ```
1024 ///
1025 /// # Arguments
1026 ///
1027 /// * `buf` - Source buffer to write from
1028 /// * `peri_addr` - Peripheral register address
1029 /// * `enable_interrupt` - Whether to enable interrupt on completion
1030 ///
1031 /// # Safety
1032 ///
1033 /// - The buffer must remain valid for the duration of the transfer.
1034 /// - The peripheral address must be valid for writes.
1035 pub unsafe fn setup_write<W: Word>(&self, buf: &[W], peri_addr: *mut W, enable_interrupt: EnableInterrupt) {
1036 self.setup_write_to_peripheral(buf, peri_addr, enable_interrupt)
1037 }
1038
1039 /// Write data from memory to a peripheral register.
1040 ///
1041 /// The destination address remains fixed (peripheral register) while
1042 /// the source address increments through the buffer.
1043 ///
1044 /// # Arguments
1045 ///
1046 /// * `buf` - Source buffer to write from
1047 /// * `peri_addr` - Peripheral register address
1048 /// * `options` - Transfer configuration options
1049 ///
1050 /// # Safety
1051 ///
1052 /// - The buffer must remain valid for the duration of the transfer.
1053 /// - The peripheral address must be valid for writes.
1054 pub unsafe fn write_to_peripheral<W: Word>(
1055 &self,
1056 buf: &[W],
1057 peri_addr: *mut W,
1058 options: TransferOptions,
1059 ) -> Transfer<'_> {
1060 assert!(!buf.is_empty());
1061 assert!(buf.len() <= 0x7fff);
1062
1063 let size = W::size();
1064 let byte_size = size.bytes();
1065
1066 let t = self.tcd();
1067
1068 // Reset channel state
1069 t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
1070 t.ch_es().write(|w| w.bits(0));
1071 t.ch_int().write(|w| w.int().clear_bit_by_one());
1072
1073 // Addresses
1074 t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
1075 t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));
1076
1077 // Offsets: Source increments, Dest fixed
1078 t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
1079 t.tcd_doff().write(|w| w.doff().bits(0));
1080
1081 // Attributes: set size and explicitly disable modulo
1082 let hw_size = size.to_hw_size();
1083 t.tcd_attr().write(|w| {
1084 w.ssize()
1085 .bits(hw_size)
1086 .dsize()
1087 .bits(hw_size)
1088 .smod()
1089 .disable()
1090 .dmod()
1091 .bits(0)
1092 });
1093
1094 // Minor loop: transfer one word per request (match old: only set nbytes)
1095 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));
1096
1097 // No final adjustments
1098 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
1099 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
1100
1101 // Major loop count = number of words
1102 let count = buf.len() as u16;
1103 t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
1104 t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());
1105
1106 // CSR: interrupt on major loop complete and auto-clear ERQ
1107 t.tcd_csr().write(|w| {
1108 let w = if options.complete_transfer_interrupt {
1109 w.intmajor().enable()
1110 } else {
1111 w.intmajor().disable()
1112 };
1113 w.inthalf()
1114 .disable()
1115 .dreq()
1116 .erq_field_clear() // Disable request when done
1117 .esg()
1118 .normal_format()
1119 .majorelink()
1120 .disable()
1121 .eeop()
1122 .disable()
1123 .esda()
1124 .disable()
1125 .bwc()
1126 .no_stall()
1127 });
1128
1129 // Ensure all TCD writes have completed before DMA engine reads them
1130 cortex_m::asm::dsb();
1131
1132 Transfer::new(self.as_any())
1133 }
1134
1135 /// Read data from a peripheral register to memory.
1136 ///
1137 /// The source address remains fixed (peripheral register) while
1138 /// the destination address increments through the buffer.
1139 ///
1140 /// # Arguments
1141 ///
1142 /// * `peri_addr` - Peripheral register address
1143 /// * `buf` - Destination buffer to read into
1144 /// * `options` - Transfer configuration options
1145 ///
1146 /// # Safety
1147 ///
1148 /// - The buffer must remain valid for the duration of the transfer.
1149 /// - The peripheral address must be valid for reads.
1150 pub unsafe fn read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], options: TransferOptions) -> Transfer<'_> {
1151 self.read_from_peripheral(peri_addr, buf, options)
1152 }
1153
1154 /// Configure a peripheral-to-memory DMA transfer without starting it.
1155 ///
1156 /// This is a convenience wrapper around [`setup_read_from_peripheral()`](Self::setup_read_from_peripheral)
1157 /// that uses the default eDMA TCD register block.
1158 ///
1159 /// This method configures the TCD but does NOT return a `Transfer`. The caller
1160 /// is responsible for the complete DMA lifecycle:
1161 /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
1162 /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
1163 /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
1164 /// [`clear_interrupt()`](Self::clear_interrupt) for cleanup
1165 ///
1166 /// # Example
1167 ///
1168 /// ```no_run
1169 /// # use embassy_mcxa::dma::DmaChannel;
1170 /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
1171 /// # let uart_rx_addr = 0x4000_0000 as *const u8;
1172 /// let mut buf = [0u8; 32];
1173 ///
1174 /// unsafe {
1175 /// // Configure the transfer
1176 /// dma_ch.setup_read(uart_rx_addr, &mut buf, EnableInterrupt::Yes);
1177 ///
1178 /// // Start when peripheral is ready
1179 /// dma_ch.enable_request();
1180 ///
1181 /// // Wait for completion (or use interrupt)
1182 /// while !dma_ch.is_done() {}
1183 ///
1184 /// // Clean up
1185 /// dma_ch.clear_done();
1186 /// dma_ch.clear_interrupt();
1187 /// }
1188 /// // buf now contains received data
1189 /// ```
1190 ///
1191 /// # Arguments
1192 ///
1193 /// * `peri_addr` - Peripheral register address
1194 /// * `buf` - Destination buffer to read into
1195 /// * `enable_interrupt` - Whether to enable interrupt on completion
1196 ///
1197 /// # Safety
1198 ///
1199 /// - The buffer must remain valid for the duration of the transfer.
1200 /// - The peripheral address must be valid for reads.
1201 pub unsafe fn setup_read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], enable_interrupt: EnableInterrupt) {
1202 self.setup_read_from_peripheral(peri_addr, buf, enable_interrupt)
1203 }
1204
    /// Read data from a peripheral register to memory.
    ///
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer. One word is
    /// moved per hardware request; the request is auto-cleared (DREQ) once
    /// the major loop completes.
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` words (the CITER/BITER
    /// field width in ELINKNO format).
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        options: TransferOptions,
    ) -> Transfer<'_> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request, no offsets
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // No address adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Control/status: interrupt on major complete, auto-clear ERQ when done
        t.tcd_csr().write(|w| {
            let w = if options.complete_transfer_interrupt {
                w.intmajor().enable()
            } else {
                w.intmajor().disable()
            };
            let w = if options.half_transfer_interrupt {
                w.inthalf().enable()
            } else {
                w.inthalf().disable()
            };
            w.dreq()
                .erq_field_clear() // Disable request when done (important for peripheral DMA)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();

        // Note: ERQ is still disabled at this point; the caller (or the
        // peripheral driver) enables it to actually start servicing requests.
        Transfer::new(self.as_any())
    }
1321
    /// Configure a memory-to-peripheral DMA transfer without starting it.
    ///
    /// This configures the TCD for a memory-to-peripheral transfer but does NOT
    /// return a Transfer object. The caller is responsible for:
    /// 1. Enabling the peripheral's DMA request
    /// 2. Calling `enable_request()` to start the transfer
    /// 3. Polling `is_done()` or using interrupts to detect completion
    /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
    ///
    /// Use this when you need manual control over the DMA lifecycle (e.g., in
    /// peripheral drivers that have their own completion polling).
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` words (the CITER/BITER
    /// field width in ELINKNO format).
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn setup_write_to_peripheral<W: Word>(
        &self,
        buf: &[W],
        peri_addr: *mut W,
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        // NOTE(review): this is a lighter reset than `setup_read_from_peripheral`
        // uses (EARQ/EEI/EBW are not explicitly cleared here) — confirm the
        // asymmetry is intentional.
        t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Addresses
        t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));

        // Offsets: Source increments, Dest fixed
        t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
        t.tcd_doff().write(|w| w.doff().bits(0));

        // Attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));

        // No final adjustments
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear()
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
1421
    /// Configure a peripheral-to-memory DMA transfer without starting it.
    ///
    /// This configures the TCD for a peripheral-to-memory transfer but does NOT
    /// return a Transfer object. The caller is responsible for:
    /// 1. Enabling the peripheral's DMA request
    /// 2. Calling `enable_request()` to start the transfer
    /// 3. Polling `is_done()` or using interrupts to detect completion
    /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
    ///
    /// Use this when you need manual control over the DMA lifecycle (e.g., in
    /// peripheral drivers that have their own completion polling).
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` words (the CITER/BITER
    /// field width in ELINKNO format).
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0));

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));

        // No final adjustments
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear()
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
1532
1533 /// Configure the integrated channel MUX to use the given typed
1534 /// DMA request source (e.g., [`Lpuart2TxRequest`] or [`Lpuart2RxRequest`]).
1535 ///
1536 /// This is the type-safe version that uses marker types to ensure
1537 /// compile-time verification of request source validity.
1538 ///
1539 /// # Safety
1540 ///
1541 /// The channel must be properly configured before enabling requests.
1542 /// The caller must ensure the DMA request source matches the peripheral
1543 /// that will drive this channel.
1544 ///
1545 /// # Note
1546 ///
1547 /// The NXP SDK requires a two-step write sequence: first clear
1548 /// the mux to 0, then set the actual source. This is a hardware
1549 /// requirement on eDMA4 for the mux to properly latch.
1550 ///
1551 /// # Example
1552 ///
1553 /// ```ignore
1554 /// use embassy_mcxa::dma::{DmaChannel, Lpuart2RxRequest};
1555 ///
1556 /// // Type-safe: compiler verifies this is a valid DMA request type
1557 /// unsafe {
1558 /// channel.set_request_source::<Lpuart2RxRequest>();
1559 /// }
1560 /// ```
1561 #[inline]
1562 pub unsafe fn set_request_source<R: DmaRequest>(&self) {
1563 // Two-step write per NXP SDK: clear to 0, then set actual source.
1564 self.tcd().ch_mux().write(|w| w.src().bits(0));
1565 cortex_m::asm::dsb(); // Ensure the clear completes before setting new source
1566 self.tcd().ch_mux().write(|w| w.src().bits(R::REQUEST_NUMBER));
1567 }
1568
1569 /// Enable hardware requests for this channel (ERQ=1).
1570 ///
1571 /// # Safety
1572 ///
1573 /// The channel must be properly configured before enabling requests.
1574 pub unsafe fn enable_request(&self) {
1575 let t = self.tcd();
1576 t.ch_csr().modify(|_, w| w.erq().enable());
1577 }
1578
1579 /// Disable hardware requests for this channel (ERQ=0).
1580 ///
1581 /// # Safety
1582 ///
1583 /// Disabling requests on an active transfer may leave the transfer incomplete.
1584 pub unsafe fn disable_request(&self) {
1585 let t = self.tcd();
1586 t.ch_csr().modify(|_, w| w.erq().disable());
1587 }
1588
1589 /// Return true if the channel's DONE flag is set.
1590 pub fn is_done(&self) -> bool {
1591 let t = self.tcd();
1592 t.ch_csr().read().done().bit_is_set()
1593 }
1594
1595 /// Clear the DONE flag for this channel.
1596 ///
1597 /// Uses modify to preserve other bits (especially ERQ) unlike write
1598 /// which would clear ERQ and halt an active transfer.
1599 ///
1600 /// # Safety
1601 ///
1602 /// Clearing DONE while a transfer is in progress may cause undefined behavior.
1603 pub unsafe fn clear_done(&self) {
1604 let t = self.tcd();
1605 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
1606 }
1607
1608 /// Clear the channel interrupt flag (CH_INT.INT).
1609 ///
1610 /// # Safety
1611 ///
1612 /// Must be called from the correct interrupt context or with interrupts disabled.
1613 pub unsafe fn clear_interrupt(&self) {
1614 let t = self.tcd();
1615 t.ch_int().write(|w| w.int().clear_bit_by_one());
1616 }
1617
1618 /// Trigger a software start for this channel.
1619 ///
1620 /// # Safety
1621 ///
1622 /// The channel must be properly configured with a valid TCD before triggering.
1623 pub unsafe fn trigger_start(&self) {
1624 let t = self.tcd();
1625 t.tcd_csr().modify(|_, w| w.start().channel_started());
1626 }
1627
    /// Get the waker for this channel
    ///
    /// Returns the transfer-complete waker slot from the per-channel `STATES`
    /// table, selected by this channel's compile-time index.
    pub fn waker(&self) -> &'static AtomicWaker {
        &STATES[C::INDEX].waker
    }
1632
    /// Enable the interrupt for this channel in the NVIC.
    ///
    /// NOTE(review): `NVIC::unmask` is `unsafe` because unmasking can defeat
    /// mask-based critical sections; this wrapper assumes the channel's
    /// interrupt is safe to enable at any time — confirm against this HAL's
    /// interrupt-handler setup.
    pub fn enable_interrupt(&self) {
        unsafe {
            cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
        }
    }
1639
1640 /// Enable Major Loop Linking.
1641 ///
1642 /// When the major loop completes, the hardware will trigger a service request
1643 /// on `link_ch`.
1644 ///
1645 /// # Arguments
1646 ///
1647 /// * `link_ch` - Target channel index (0-7) to link to
1648 ///
1649 /// # Safety
1650 ///
1651 /// The channel must be properly configured before setting up linking.
1652 pub unsafe fn set_major_link(&self, link_ch: usize) {
1653 let t = self.tcd();
1654 t.tcd_csr()
1655 .modify(|_, w| w.majorelink().enable().majorlinkch().bits(link_ch as u8));
1656 }
1657
1658 /// Disable Major Loop Linking.
1659 ///
1660 /// Removes any major loop channel linking previously configured.
1661 ///
1662 /// # Safety
1663 ///
1664 /// The caller must ensure this doesn't disrupt an active transfer that
1665 /// depends on the linking.
1666 pub unsafe fn clear_major_link(&self) {
1667 let t = self.tcd();
1668 t.tcd_csr().modify(|_, w| w.majorelink().disable());
1669 }
1670
1671 /// Enable Minor Loop Linking.
1672 ///
1673 /// After each minor loop, the hardware will trigger a service request
1674 /// on `link_ch`.
1675 ///
1676 /// # Arguments
1677 ///
1678 /// * `link_ch` - Target channel index (0-7) to link to
1679 ///
1680 /// # Note
1681 ///
1682 /// This rewrites CITER and BITER registers to the ELINKYES format.
1683 /// It preserves the current loop count.
1684 ///
1685 /// # Safety
1686 ///
1687 /// The channel must be properly configured before setting up linking.
1688 pub unsafe fn set_minor_link(&self, link_ch: usize) {
1689 let t = self.tcd();
1690
1691 // Read current CITER (assuming ELINKNO format initially)
1692 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1693 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1694
1695 // Write back using ELINKYES format
1696 t.tcd_citer_elinkyes().write(|w| {
1697 w.citer()
1698 .bits(current_citer)
1699 .elink()
1700 .enable()
1701 .linkch()
1702 .bits(link_ch as u8)
1703 });
1704
1705 t.tcd_biter_elinkyes().write(|w| {
1706 w.biter()
1707 .bits(current_biter)
1708 .elink()
1709 .enable()
1710 .linkch()
1711 .bits(link_ch as u8)
1712 });
1713 }
1714
1715 /// Disable Minor Loop Linking.
1716 ///
1717 /// Removes any minor loop channel linking previously configured.
1718 /// This rewrites CITER and BITER registers to the ELINKNO format,
1719 /// preserving the current loop count.
1720 ///
1721 /// # Safety
1722 ///
1723 /// The caller must ensure this doesn't disrupt an active transfer that
1724 /// depends on the linking.
1725 pub unsafe fn clear_minor_link(&self) {
1726 let t = self.tcd();
1727
1728 // Read current CITER (could be in either format, but we only need the count)
1729 // Note: In ELINKYES format, citer is 9 bits; in ELINKNO, it's 15 bits.
1730 // We read from ELINKNO which will give us the combined value.
1731 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1732 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1733
1734 // Write back using ELINKNO format (disabling link)
1735 t.tcd_citer_elinkno()
1736 .write(|w| w.citer().bits(current_citer).elink().disable());
1737
1738 t.tcd_biter_elinkno()
1739 .write(|w| w.biter().bits(current_biter).elink().disable());
1740 }
1741
    /// Load a TCD from memory into the hardware channel registers.
    ///
    /// This is useful for scatter/gather and ping-pong transfers where
    /// TCDs are prepared in RAM and then loaded into the hardware.
    ///
    /// # Safety
    ///
    /// - The TCD must be properly initialized.
    /// - The caller must ensure no concurrent access to the same channel.
    pub unsafe fn load_tcd(&self, tcd: &Tcd) {
        let t = self.tcd();
        t.tcd_saddr().write(|w| w.saddr().bits(tcd.saddr));
        // Signed offsets/adjustments stored in `Tcd` are reinterpreted as
        // raw register bits via `as` casts below.
        t.tcd_soff().write(|w| w.soff().bits(tcd.soff as u16));
        t.tcd_attr().write(|w| w.bits(tcd.attr));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(tcd.nbytes));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(tcd.slast as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(tcd.daddr));
        t.tcd_doff().write(|w| w.doff().bits(tcd.doff as u16));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(tcd.citer));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(tcd.dlast_sga as u32));
        // NOTE(review): CSR is written before BITER here; if `tcd.csr` has the
        // START bit set, the channel could begin with a stale BITER. Confirm
        // callers never pre-set START, or move the CSR write after BITER.
        t.tcd_csr().write(|w| w.bits(tcd.csr));
        t.tcd_biter_elinkno().write(|w| w.biter().bits(tcd.biter));
    }
1765}
1766
/// In-memory representation of a Transfer Control Descriptor (TCD).
///
/// This matches the hardware layout (32 bytes).
#[repr(C, align(32))]
#[derive(Clone, Copy, Debug, Default)]
pub struct Tcd {
    /// Source address (SADDR).
    pub saddr: u32,
    /// Signed source address offset applied after each read (SOFF).
    pub soff: i16,
    /// Transfer attributes, raw SSIZE/DSIZE/SMOD/DMOD bits (ATTR).
    pub attr: u16,
    /// Bytes to transfer per minor loop (NBYTES, MLOFFNO layout).
    pub nbytes: u32,
    /// Signed source adjustment applied after the major loop (SLAST).
    pub slast: i32,
    /// Destination address (DADDR).
    pub daddr: u32,
    /// Signed destination address offset applied after each write (DOFF).
    pub doff: i16,
    /// Current major loop count (CITER, ELINKNO layout).
    pub citer: u16,
    /// Signed destination adjustment, or scatter/gather address (DLAST_SGA).
    pub dlast_sga: i32,
    /// Control/status raw bits (CSR).
    pub csr: u16,
    /// Beginning major loop count (BITER, ELINKNO layout).
    pub biter: u16,
}
1785
/// Per-channel async driver state shared between the channel interrupt
/// handlers and `Transfer` futures.
struct State {
    /// Waker for transfer complete interrupt
    waker: AtomicWaker,
    /// Waker for half-transfer interrupt
    half_waker: AtomicWaker,
}
1792
1793impl State {
1794 const fn new() -> Self {
1795 Self {
1796 waker: AtomicWaker::new(),
1797 half_waker: AtomicWaker::new(),
1798 }
1799 }
1800}
1801
/// Per-channel driver state: one entry for each of the 8 DMA channels,
/// indexed by channel number.
static STATES: [State; 8] = [
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
];
1812
/// Transfer-complete waker for channel `idx` (panics if `idx >= 8`).
pub(crate) fn waker(idx: usize) -> &'static AtomicWaker {
    &STATES[idx].waker
}
1816
/// Half-transfer waker for channel `idx` (panics if `idx >= 8`).
pub(crate) fn half_waker(idx: usize) -> &'static AtomicWaker {
    &STATES[idx].half_waker
}
1820
1821// ============================================================================
1822// Async Transfer Future
1823// ============================================================================
1824
/// An in-progress DMA transfer.
///
/// This type implements `Future` and can be `.await`ed to wait for the
/// transfer to complete. Dropping the transfer will abort it.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    /// Type-erased handle to the channel this transfer runs on.
    channel: AnyChannel,
    /// Carries the `'a` lifetime so the transfer cannot outlive the
    /// borrows it was created from.
    _phantom: core::marker::PhantomData<&'a ()>,
}
1834
impl<'a> Transfer<'a> {
    /// Create a new transfer for the given channel.
    ///
    /// The caller must have already configured and started the DMA channel.
    pub(crate) fn new(channel: AnyChannel) -> Self {
        Self {
            channel,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Check if the transfer is still running.
    ///
    /// "Running" here means the channel's DONE flag is not yet set.
    pub fn is_running(&self) -> bool {
        !self.channel.is_done()
    }

    /// Get the remaining transfer count.
    ///
    /// Reads the channel's current major-loop count (CITER, ELINKNO view).
    pub fn remaining(&self) -> u16 {
        let t = self.channel.tcd();
        t.tcd_citer_elinkno().read().citer().bits()
    }

    /// Block until the transfer completes.
    ///
    /// Spins on the DONE flag, then forgets `self` so the abort logic in
    /// `Drop` does not run against an already-finished transfer.
    pub fn blocking_wait(self) {
        while self.is_running() {
            core::hint::spin_loop();
        }

        // Ensure all DMA writes are visible
        fence(Ordering::SeqCst);

        // Don't run drop (which would abort)
        core::mem::forget(self);
    }

    /// Wait for the half-transfer interrupt asynchronously.
    ///
    /// This is useful for double-buffering scenarios where you want to process
    /// the first half of the buffer while the second half is being filled.
    ///
    /// Returns `true` if the half-transfer occurred, `false` if the transfer
    /// completed before the half-transfer interrupt.
    ///
    /// # Note
    ///
    /// The transfer must be configured with `TransferOptions::half_transfer_interrupt = true`
    /// for this method to work correctly.
    ///
    /// NOTE(review): only `half_waker` is registered here — confirm the
    /// channel ISR also wakes `half_waker` on major-loop completion,
    /// otherwise a completion racing past the checks below may leave this
    /// future unpolled.
    pub async fn wait_half(&mut self) -> bool {
        use core::future::poll_fn;

        poll_fn(|cx| {
            let state = &STATES[self.channel.index];

            // Register the half-transfer waker
            state.half_waker.register(cx.waker());

            // Check if we're past the half-way point
            let t = self.channel.tcd();
            let biter = t.tcd_biter_elinkno().read().biter().bits();
            let citer = t.tcd_citer_elinkno().read().citer().bits();
            // This check assumes CITER counts down from BITER toward zero.
            let half_point = biter / 2;

            if self.channel.is_done() {
                // Transfer completed before half-transfer
                Poll::Ready(false)
            } else if citer <= half_point {
                // We're past the half-way point
                fence(Ordering::SeqCst);
                Poll::Ready(true)
            } else {
                Poll::Pending
            }
        })
        .await
    }

    /// Abort the transfer.
    ///
    /// Disables hardware requests and clears the interrupt and DONE flags.
    /// Note that DONE is cleared here, so callers must not rely on DONE to
    /// observe completion after an abort.
    fn abort(&mut self) {
        let t = self.channel.tcd();

        // Disable channel requests
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear any pending interrupt
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Clear DONE flag
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);
    }
}
1927
// `Transfer` holds no self-referential data (just a channel handle and a
// lifetime marker), so it is safe to move even after being polled.
impl<'a> Unpin for Transfer<'a> {}
1929
1930impl<'a> Future for Transfer<'a> {
1931 type Output = ();
1932
1933 fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
1934 let state = &STATES[self.channel.index];
1935
1936 // Register waker first
1937 state.waker.register(cx.waker());
1938
1939 let done = self.channel.is_done();
1940
1941 if done {
1942 // Ensure all DMA writes are visible before returning
1943 fence(Ordering::SeqCst);
1944 Poll::Ready(())
1945 } else {
1946 Poll::Pending
1947 }
1948 }
1949}
1950
impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        // Only abort if the transfer is still running
        // If already complete, no need to abort
        if self.is_running() {
            self.abort();

            // Wait for abort to complete: spin until the channel reports
            // idle, so the DMA cannot still be writing to buffers that go
            // out of scope right after this drop.
            while self.is_running() {
                core::hint::spin_loop();
            }
        }

        // Make any DMA memory traffic visible to subsequent CPU accesses.
        fence(Ordering::SeqCst);
    }
}
1967
1968// ============================================================================
1969// Ring Buffer for Circular DMA
1970// ============================================================================
1971
/// A ring buffer for continuous DMA reception.
///
/// This structure manages a circular DMA transfer, allowing continuous
/// reception of data without losing bytes between reads. It uses both
/// half-transfer and complete-transfer interrupts to track available data.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, RingBuffer, TransferOptions};
///
/// static mut RX_BUF: [u8; 64] = [0; 64];
///
/// let dma_ch = DmaChannel::new(p.DMA_CH0);
/// let ring_buf = unsafe {
///     dma_ch.setup_circular_read(
///         uart_rx_addr,
///         &mut RX_BUF,
///     )
/// };
///
/// // Read data as it arrives
/// let mut buf = [0u8; 16];
/// let n = ring_buf.read(&mut buf).await?;
/// ```
pub struct RingBuffer<'a, W: Word> {
    // The DMA channel driving the circular transfer.
    channel: AnyChannel,
    /// Buffer pointer. We use NonNull instead of &mut because DMA acts like
    /// a separate thread writing to this buffer, and &mut claims exclusive
    /// access which the compiler could optimize incorrectly.
    buf: NonNull<[W]>,
    /// Buffer length cached for convenience
    buf_len: usize,
    /// Read position in the buffer (consumer side)
    read_pos: AtomicUsize,
    /// Phantom data to tie the lifetime to the original buffer
    _lt: PhantomData<&'a mut [W]>,
}
2010
impl<'a, W: Word> RingBuffer<'a, W> {
    /// Create a new ring buffer for the given channel and buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure:
    /// - The DMA channel has been configured for circular transfer
    /// - The buffer remains valid for the lifetime of the ring buffer
    /// - Only one RingBuffer exists per DMA channel at a time
    pub(crate) unsafe fn new(channel: AnyChannel, buf: &'a mut [W]) -> Self {
        let buf_len = buf.len();
        Self {
            channel,
            buf: NonNull::from(buf),
            buf_len,
            read_pos: AtomicUsize::new(0),
            _lt: PhantomData,
        }
    }

    /// Get a slice reference to the buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that DMA is not actively writing to the
    /// portion of the buffer being accessed, or that the access is
    /// appropriately synchronized.
    #[inline]
    unsafe fn buf_slice(&self) -> &[W] {
        self.buf.as_ref()
    }

    /// Get the current DMA write position in the buffer (in elements).
    ///
    /// This reads the current destination address from the DMA controller
    /// and calculates the buffer offset.
    ///
    /// NOTE(review): an offset equal to `buf_len` wraps to 0 here, which is
    /// indistinguishable from "nothing written since the last wrap".
    fn dma_write_pos(&self) -> usize {
        let t = self.channel.tcd();
        let daddr = t.tcd_daddr().read().daddr().bits() as usize;
        let buf_start = self.buf.as_ptr() as *const W as usize;

        // Calculate offset from buffer start
        let offset = daddr.wrapping_sub(buf_start) / core::mem::size_of::<W>();

        // Ensure we're within bounds (DMA wraps around)
        offset % self.buf_len
    }

    /// Returns the number of elements (of type `W`) available to read.
    pub fn available(&self) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Classic ring-buffer distance: account for wrap-around when the
        // hardware write position is behind the consumer's read position.
        if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        }
    }

    /// Check if the buffer has overrun (data was lost).
    ///
    /// This happens when DMA writes faster than the application reads.
    pub fn is_overrun(&self) -> bool {
        // In a true overrun, the DMA would have wrapped around and caught up
        // to our read position. We can detect this by checking if available()
        // equals the full buffer size (minus 1 to distinguish from empty).
        // NOTE(review): this is a heuristic — a legitimately almost-full
        // buffer also trips it, and for `buf_len == 1` it is always true.
        self.available() >= self.buf_len - 1
    }

    /// Read data from the ring buffer into the provided slice.
    ///
    /// Returns the number of elements read, which may be less than
    /// `dst.len()` if not enough data is available.
    ///
    /// This method does not block; use `read_async()` for async waiting.
    pub fn read_immediate(&self, dst: &mut [W]) -> usize {
        // Sample the hardware write position exactly once so the whole read
        // works against a consistent snapshot (DMA keeps advancing).
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Calculate available elements (same formula as `available()`, but
        // inlined so it uses the snapshot taken above).
        let available = if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        };

        let to_read = dst.len().min(available);
        if to_read == 0 {
            return 0;
        }

        // Safety: We only read from portions of the buffer that DMA has
        // already written to (between read_pos and write_pos).
        let buf = unsafe { self.buf_slice() };

        // Read data, handling wrap-around: first the run up to the end of
        // the buffer, then (if needed) the remainder from the start.
        let first_chunk = (self.buf_len - read_pos).min(to_read);
        dst[..first_chunk].copy_from_slice(&buf[read_pos..read_pos + first_chunk]);

        if to_read > first_chunk {
            let second_chunk = to_read - first_chunk;
            dst[first_chunk..to_read].copy_from_slice(&buf[..second_chunk]);
        }

        // Update read position (Release pairs with the Acquire load above).
        let new_read_pos = (read_pos + to_read) % self.buf_len;
        self.read_pos.store(new_read_pos, Ordering::Release);

        to_read
    }

    /// Read data from the ring buffer asynchronously.
    ///
    /// This waits until at least one element is available, then reads as much
    /// as possible into the destination buffer.
    ///
    /// Returns the number of elements read.
    ///
    /// # Errors
    ///
    /// Returns [`Error::Overrun`] when the overrun heuristic trips (see
    /// [`Self::is_overrun`]).
    pub async fn read(&self, dst: &mut [W]) -> Result<usize, Error> {
        use core::future::poll_fn;

        if dst.is_empty() {
            return Ok(0);
        }

        poll_fn(|cx| {
            // Check for overrun
            if self.is_overrun() {
                return Poll::Ready(Err(Error::Overrun));
            }

            // Try to read immediately
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            // Register wakers for both half and complete interrupts
            let state = &STATES[self.channel.index()];
            state.waker.register(cx.waker());
            state.half_waker.register(cx.waker());

            // Check again after registering waker (avoid race): data may
            // have arrived between the first attempt and registration.
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            Poll::Pending
        })
        .await
    }

    /// Clear the ring buffer, discarding all unread data.
    pub fn clear(&self) {
        // Jumping the read position to the current write position makes the
        // buffer appear empty without touching the DMA transfer itself.
        let write_pos = self.dma_write_pos();
        self.read_pos.store(write_pos, Ordering::Release);
    }

    /// Stop the DMA transfer and consume the ring buffer.
    ///
    /// Returns any remaining unread data count (in elements).
    pub fn stop(self) -> usize {
        // Snapshot before disabling the channel, so the count reflects what
        // was actually received.
        let available = self.available();

        // Disable the channel
        let t = self.channel.tcd();
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear flags
        t.ch_int().write(|w| w.int().clear_bit_by_one());
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);

        available
    }
}
2189
impl<C: Channel> DmaChannel<C> {
    /// Set up a circular DMA transfer for continuous peripheral-to-memory reception.
    ///
    /// This configures the DMA channel for circular operation with both half-transfer
    /// and complete-transfer interrupts enabled. The transfer runs continuously until
    /// stopped via [`RingBuffer::stop()`].
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address to read from
    /// * `buf` - Destination buffer (should be power-of-2 size for best efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to read received data.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements (the limit
    /// of the CITER/BITER field used here).
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - The peripheral address must be valid for reads.
    /// - The peripheral's DMA request must be configured to trigger this channel.
    pub unsafe fn setup_circular_read<'a, W: Word>(&self, peri_addr: *const W, buf: &'a mut [W]) -> RingBuffer<'a, W> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);
        // For circular mode, buffer size should ideally be power of 2
        // but we don't enforce it

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes: same element size on both sides, no address
        // modulo on source or destination.
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = buffer size
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // After major loop: reset destination to buffer start (circular)
        let buf_bytes = (buf.len() * byte_size) as i32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); // Source doesn't change
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits((-buf_bytes) as u32));

        // Control/status: enable both half and complete interrupts, NO DREQ (continuous)
        t.tcd_csr().write(|w| {
            w.intmajor()
                .enable()
                .inthalf()
                .enable()
                .dreq()
                .channel_not_affected() // Don't clear ERQ on complete (circular)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure the whole TCD is written before enabling requests.
        cortex_m::asm::dsb();

        // Enable the channel request
        t.ch_csr().modify(|_, w| w.erq().enable());

        // Enable NVIC interrupt for this channel so async wakeups work
        self.enable_interrupt();

        RingBuffer::new(self.as_any(), buf)
    }
}
2310
2311// ============================================================================
2312// Scatter-Gather Builder
2313// ============================================================================
2314
/// Maximum number of TCDs in a scatter-gather chain.
///
/// This bounds the fixed-size TCD pool inside [`ScatterGatherBuilder`].
pub const MAX_SCATTER_GATHER_TCDS: usize = 16;
2317
/// A builder for constructing scatter-gather DMA transfer chains.
///
/// This provides a type-safe way to build TCD chains for scatter-gather
/// transfers without manual TCD manipulation.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
///
/// let mut builder = ScatterGatherBuilder::<u32>::new();
///
/// // Add transfer segments
/// builder.add_transfer(&src1, &mut dst1);
/// builder.add_transfer(&src2, &mut dst2);
/// builder.add_transfer(&src3, &mut dst3);
///
/// // Build and execute
/// let transfer = unsafe { builder.build(&dma_ch).unwrap() };
/// transfer.await;
/// ```
pub struct ScatterGatherBuilder<W: Word> {
    /// TCD pool (must be 32-byte aligned)
    // NOTE(review): the alignment of this array comes from `Tcd`'s own
    // declared alignment — confirm `Tcd` is `#[repr(align(32))]`, since the
    // hardware loads linked TCDs directly from these addresses.
    tcds: [Tcd; MAX_SCATTER_GATHER_TCDS],
    /// Number of TCDs configured
    count: usize,
    /// Phantom marker for word type
    _phantom: core::marker::PhantomData<W>,
}
2347
impl<W: Word> ScatterGatherBuilder<W> {
    /// Create a new scatter-gather builder with no segments configured.
    pub fn new() -> Self {
        Self {
            tcds: [Tcd::default(); MAX_SCATTER_GATHER_TCDS],
            count: 0,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Add a memory-to-memory transfer segment to the chain.
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer for this segment
    /// * `dst` - Destination buffer for this segment
    ///
    /// # Panics
    ///
    /// Panics if the maximum number of segments (16) is exceeded, if `src`
    /// is empty, or if `dst` is shorter than `src`.
    pub fn add_transfer(&mut self, src: &[W], dst: &mut [W]) -> &mut Self {
        assert!(self.count < MAX_SCATTER_GATHER_TCDS, "Too many scatter-gather segments");
        assert!(!src.is_empty());
        assert!(dst.len() >= src.len());

        let size = W::size();
        let byte_size = size.bytes();
        let hw_size = size.to_hw_size();
        // The whole segment is moved in one minor loop (citer/biter = 1
        // below), so NBYTES is the full byte length of `src`.
        let nbytes = (src.len() * byte_size) as u32;

        // Build the TCD for this segment
        self.tcds[self.count] = Tcd {
            saddr: src.as_ptr() as u32,
            soff: byte_size as i16,
            attr: ((hw_size as u16) << 8) | (hw_size as u16), // SSIZE | DSIZE
            nbytes,
            slast: 0,
            daddr: dst.as_mut_ptr() as u32,
            doff: byte_size as i16,
            citer: 1,
            dlast_sga: 0, // Will be filled in by build()
            csr: 0x0002, // INTMAJOR only (ESG will be set for non-last TCDs)
            biter: 1,
        };

        self.count += 1;
        self
    }

    /// Get the number of transfer segments added.
    pub fn segment_count(&self) -> usize {
        self.count
    }

    /// Build the scatter-gather chain and start the transfer.
    ///
    /// # Arguments
    ///
    /// * `channel` - The DMA channel to use for the transfer
    ///
    /// # Returns
    ///
    /// A `Transfer` future that completes when the entire chain has executed.
    ///
    /// # Errors
    ///
    /// Returns [`Error::Configuration`] if no segments were added.
    ///
    /// # Safety
    ///
    /// All source and destination buffers passed to `add_transfer()` must
    /// remain valid for the duration of the transfer.
    pub unsafe fn build<C: Channel>(&mut self, channel: &DmaChannel<C>) -> Result<Transfer<'_>, Error> {
        if self.count == 0 {
            return Err(Error::Configuration);
        }

        // Link TCDs together
        //
        // CSR bit definitions:
        // - START = bit 0 = 0x0001 (triggers transfer when set)
        // - INTMAJOR = bit 1 = 0x0002 (interrupt on major loop complete)
        // - ESG = bit 4 = 0x0010 (enable scatter-gather, loads next TCD on complete)
        //
        // When hardware loads a TCD via scatter-gather (ESG), it copies the TCD's
        // CSR directly into the hardware register. If START is not set in that CSR,
        // the hardware will NOT auto-execute the loaded TCD.
        //
        // Strategy:
        // - First TCD: ESG | INTMAJOR (no START - we add it manually after loading)
        // - Middle TCDs: ESG | INTMAJOR | START (auto-execute when loaded via S/G)
        // - Last TCD: INTMAJOR | START (auto-execute, no further linking)
        //
        // The `dlast_sga` links point into `self.tcds`; the returned
        // `Transfer<'_>` borrows `self`, so the pool cannot move or be
        // reused while the hardware is following those pointers.
        for i in 0..self.count {
            let is_first = i == 0;
            let is_last = i == self.count - 1;

            if is_first {
                if is_last {
                    // Only one TCD - no ESG, no START (we add START manually)
                    self.tcds[i].dlast_sga = 0;
                    self.tcds[i].csr = 0x0002; // INTMAJOR only
                } else {
                    // First of multiple - ESG to link, no START (we add START manually)
                    self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                    self.tcds[i].csr = 0x0012; // ESG | INTMAJOR
                }
            } else if is_last {
                // Last TCD (not first) - no ESG, but START so it auto-executes
                self.tcds[i].dlast_sga = 0;
                self.tcds[i].csr = 0x0003; // INTMAJOR | START
            } else {
                // Middle TCD - ESG to link, and START so it auto-executes
                self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                self.tcds[i].csr = 0x0013; // ESG | INTMAJOR | START
            }
        }

        let t = channel.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        // This ensures the channel is in a clean state before loading the TCD
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is reset before loading TCD
        cortex_m::asm::dsb();

        // Load first TCD into hardware
        channel.load_tcd(&self.tcds[0]);

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Start the transfer
        t.tcd_csr().modify(|_, w| w.start().channel_started());

        Ok(Transfer::new(channel.as_any()))
    }

    /// Reset the builder for reuse.
    ///
    /// Only the segment count is reset; stale TCD contents are overwritten
    /// by subsequent `add_transfer()` calls.
    pub fn clear(&mut self) {
        self.count = 0;
    }
}
2498
2499impl<W: Word> Default for ScatterGatherBuilder<W> {
2500 fn default() -> Self {
2501 Self::new()
2502 }
2503}
2504
/// A completed scatter-gather transfer result.
///
/// This type is returned after a scatter-gather transfer completes,
/// providing access to any error information.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ScatterGatherResult {
    /// Number of segments successfully transferred
    pub segments_completed: usize,
    /// Error if any occurred (`None` on full success)
    pub error: Option<Error>,
}
2516
2517// ============================================================================
2518// Interrupt Handler
2519// ============================================================================
2520
/// Interrupt handler helper.
///
/// Call this from your interrupt handler to clear the interrupt flag and wake the waker.
/// This handles both half-transfer and complete-transfer interrupts.
///
/// # Safety
/// Must be called from the correct DMA channel interrupt context.
pub unsafe fn on_interrupt(ch_index: usize) {
    let p = pac::Peripherals::steal();
    let edma = &p.edma_0_tcd0;
    let t = edma.tcd(ch_index);

    // Read TCD CSR to determine interrupt source
    let csr = t.tcd_csr().read();

    // Check if this is a half-transfer interrupt
    // INTHALF is set and we're at or past the half-way point
    if csr.inthalf().bit_is_set() {
        // CITER counts down from BITER; at/below the half-way mark means the
        // first half of the major loop has been transferred.
        let biter = t.tcd_biter_elinkno().read().biter().bits();
        let citer = t.tcd_citer_elinkno().read().citer().bits();
        let half_point = biter / 2;

        // NOTE(review): the `citer > 0` guard presumably filters out the
        // state around major-loop completion/reload — confirm against the
        // eDMA reference manual's CITER reload behavior.
        if citer <= half_point && citer > 0 {
            // Half-transfer interrupt - wake half_waker
            half_waker(ch_index).wake();
        }
    }

    // Clear INT flag
    t.ch_int().write(|w| w.int().clear_bit_by_one());

    // If DONE is set, this is a complete-transfer interrupt
    // Only wake the full-transfer waker when the transfer is actually complete
    if t.ch_csr().read().done().bit_is_set() {
        waker(ch_index).wake();
    }
}
2558
2559// ============================================================================
2560// Type-level Interrupt Handlers for bind_interrupts! macro
2561// ============================================================================
2562
/// Macro to generate DMA channel interrupt handlers.
///
/// This generates handler structs that implement the `Handler` trait for use
/// with the `bind_interrupts!` macro. Each generated handler simply forwards
/// to [`on_interrupt`] with its channel index.
macro_rules! impl_dma_interrupt_handler {
    ($name:ident, $irq:ident, $ch:expr) => {
        /// Interrupt handler for DMA channel.
        ///
        /// Use this with the `bind_interrupts!` macro:
        /// ```ignore
        /// bind_interrupts!(struct Irqs {
        #[doc = concat!("     ", stringify!($irq), " => dma::", stringify!($name), ";")]
        /// });
        /// ```
        pub struct $name;

        impl crate::interrupt::typelevel::Handler<crate::interrupt::typelevel::$irq> for $name {
            unsafe fn on_interrupt() {
                on_interrupt($ch);
            }
        }
    };
}
2586
// One handler type per DMA channel (the MCXA276 has 8, each with its own
// interrupt vector); bind each to its vector via `bind_interrupts!`.
impl_dma_interrupt_handler!(DmaCh0InterruptHandler, DMA_CH0, 0);
impl_dma_interrupt_handler!(DmaCh1InterruptHandler, DMA_CH1, 1);
impl_dma_interrupt_handler!(DmaCh2InterruptHandler, DMA_CH2, 2);
impl_dma_interrupt_handler!(DmaCh3InterruptHandler, DMA_CH3, 3);
impl_dma_interrupt_handler!(DmaCh4InterruptHandler, DMA_CH4, 4);
impl_dma_interrupt_handler!(DmaCh5InterruptHandler, DMA_CH5, 5);
impl_dma_interrupt_handler!(DmaCh6InterruptHandler, DMA_CH6, 6);
impl_dma_interrupt_handler!(DmaCh7InterruptHandler, DMA_CH7, 7);