aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--embassy-mcxa/src/clocks/mod.rs7
-rw-r--r--embassy-mcxa/src/dma.rs2594
-rw-r--r--embassy-mcxa/src/interrupt.rs8
-rw-r--r--embassy-mcxa/src/lib.rs12
-rw-r--r--embassy-mcxa/src/lpuart/mod.rs525
-rw-r--r--embassy-mcxa/src/pins.rs5
-rw-r--r--examples/mcxa/src/bin/dma_channel_link.rs372
-rw-r--r--examples/mcxa/src/bin/dma_interleave_transfer.rs215
-rw-r--r--examples/mcxa/src/bin/dma_mem_to_mem.rs229
-rw-r--r--examples/mcxa/src/bin/dma_memset.rs218
-rw-r--r--examples/mcxa/src/bin/dma_ping_pong_transfer.rs376
-rw-r--r--examples/mcxa/src/bin/dma_scatter_gather.rs262
-rw-r--r--examples/mcxa/src/bin/dma_scatter_gather_builder.rs231
-rw-r--r--examples/mcxa/src/bin/dma_wrap_transfer.rs222
-rw-r--r--examples/mcxa/src/bin/lpuart_dma.rs81
-rw-r--r--examples/mcxa/src/bin/lpuart_ring_buffer.rs130
16 files changed, 5445 insertions, 42 deletions
diff --git a/embassy-mcxa/src/clocks/mod.rs b/embassy-mcxa/src/clocks/mod.rs
index 9c9e6ef3d..ac30115f6 100644
--- a/embassy-mcxa/src/clocks/mod.rs
+++ b/embassy-mcxa/src/clocks/mod.rs
@@ -399,6 +399,10 @@ pub unsafe fn assert_reset<G: Gate>() {
399} 399}
400 400
401/// Check whether the peripheral is held in reset. 401/// Check whether the peripheral is held in reset.
402///
403/// # Safety
404///
405/// Must be called with a valid peripheral gate type.
402#[inline] 406#[inline]
403pub unsafe fn is_reset_released<G: Gate>() -> bool { 407pub unsafe fn is_reset_released<G: Gate>() -> bool {
404 G::is_reset_released() 408 G::is_reset_released()
@@ -940,4 +944,7 @@ pub(crate) mod gate {
940 impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig); 944 impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig);
941 impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig); 945 impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig);
942 impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig); 946 impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig);
947
948 // DMA0 peripheral - uses NoConfig since it has no selectable clock source
949 impl_cc_gate!(DMA0, mrcc_glb_cc0, mrcc_glb_rst0, dma0, NoConfig);
943} 950}
diff --git a/embassy-mcxa/src/dma.rs b/embassy-mcxa/src/dma.rs
new file mode 100644
index 000000000..7d1588516
--- /dev/null
+++ b/embassy-mcxa/src/dma.rs
@@ -0,0 +1,2594 @@
1//! DMA driver for MCXA276.
2//!
3//! This module provides a typed channel abstraction over the EDMA_0_TCD0 array
4//! and helpers for configuring the channel MUX. The driver supports both
5//! low-level TCD configuration and higher-level async transfer APIs.
6//!
7//! # Architecture
8//!
9//! The MCXA276 has 8 DMA channels (0-7), each with its own interrupt vector.
10//! Each channel has a Transfer Control Descriptor (TCD) that defines the
11//! transfer parameters.
12//!
13//! # Choosing the Right API
14//!
15//! This module provides several API levels to match different use cases:
16//!
17//! ## High-Level Async API (Recommended for Most Users)
18//!
19//! Use the async methods when you want simple, safe DMA transfers:
20//!
21//! | Method | Description |
22//! |--------|-------------|
23//! | [`DmaChannel::mem_to_mem()`] | Memory-to-memory copy |
24//! | [`DmaChannel::memset()`] | Fill memory with a pattern |
25//! | [`DmaChannel::write()`] | Memory-to-peripheral (TX) |
26//! | [`DmaChannel::read()`] | Peripheral-to-memory (RX) |
27//!
28//! These return a [`Transfer`] future that can be `.await`ed:
29//!
30//! ```ignore
31//! # use embassy_mcxa::dma::{DmaChannel, TransferOptions};
32//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
33//! # let src = [0u32; 4];
34//! # let mut dst = [0u32; 4];
35//! // Simple memory-to-memory transfer
36//! unsafe {
37//! dma_ch.mem_to_mem(&src, &mut dst, TransferOptions::default()).await;
38//! }
39//! ```
40//!
41//! ## Setup Methods (For Peripheral Drivers)
42//!
43//! Use setup methods when you need manual lifecycle control:
44//!
45//! | Method | Description |
46//! |--------|-------------|
47//! | [`DmaChannel::setup_write()`] | Configure TX without starting |
48//! | [`DmaChannel::setup_read()`] | Configure RX without starting |
49//!
50//! These configure the TCD but don't start the transfer. You control:
51//! 1. When to call [`DmaChannel::enable_request()`]
52//! 2. How to detect completion (polling or interrupts)
53//! 3. When to clean up with [`DmaChannel::clear_done()`]
54//!
55//! ## Circular/Ring Buffer API (For Continuous Reception)
56//!
57//! Use [`DmaChannel::setup_circular_read()`] for continuous data reception:
58//!
59//! ```ignore
60//! # use embassy_mcxa::dma::DmaChannel;
61//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
62//! # let uart_rx_addr = 0x4000_0000 as *const u8;
63//! static mut RX_BUF: [u8; 64] = [0; 64];
64//!
65//! let ring_buf = unsafe {
66//! dma_ch.setup_circular_read(uart_rx_addr, &mut *core::ptr::addr_of_mut!(RX_BUF))
67//! };
68//!
69//! // Read data as it arrives
70//! let mut buf = [0u8; 16];
71//! let n = ring_buf.read(&mut buf).await.unwrap();
72//! ```
73//!
74//! ## Scatter-Gather Builder (For Chained Transfers)
75//!
76//! Use [`ScatterGatherBuilder`] for complex multi-segment transfers:
77//!
78//! ```ignore
79//! # use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
80//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
81//! let mut builder = ScatterGatherBuilder::<u32>::new();
82//! builder.add_transfer(&src1, &mut dst1);
83//! builder.add_transfer(&src2, &mut dst2);
84//!
85//! let transfer = unsafe { builder.build(&dma_ch).unwrap() };
86//! transfer.await;
87//! ```
88//!
89//! ## Direct TCD Access (For Advanced Use Cases)
90//!
91//! For full control, use the channel's `tcd()` method to access TCD registers directly.
92//! See the `dma_*` examples for patterns.
93//!
94//! # Example
95//!
96//! ```ignore
97//! use embassy_mcxa::dma::{DmaChannel, TransferOptions, Direction};
98//!
99//! let dma_ch = DmaChannel::new(p.DMA_CH0);
100//! // Configure and trigger a transfer...
101//! ```
102
103use core::future::Future;
104use core::marker::PhantomData;
105use core::pin::Pin;
106use core::ptr::NonNull;
107use core::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
108use core::task::{Context, Poll};
109
110use embassy_hal_internal::PeripheralType;
111use embassy_sync::waitqueue::AtomicWaker;
112
113use crate::clocks::Gate;
114use crate::pac;
115use crate::pac::Interrupt;
116use crate::peripherals::DMA0;
117
118/// Static flag to track whether DMA has been initialized.
119static DMA_INITIALIZED: AtomicBool = AtomicBool::new(false);
120
121/// Initialize DMA controller (clock enabled, reset released, controller configured).
122///
123/// This function is intended to be called during HAL initialization (`hal::init()`).
124/// It is idempotent - it will only initialize DMA once, even if called multiple times.
125///
126/// The function enables the DMA0 clock, releases reset, and configures the controller
127/// for normal operation with round-robin arbitration.
128pub fn init() {
129 // Fast path: already initialized
130 if DMA_INITIALIZED.load(Ordering::Acquire) {
131 return;
132 }
133
134 // Slow path: initialize DMA
135 // Use compare_exchange to ensure only one caller initializes
136 if DMA_INITIALIZED
137 .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
138 .is_ok()
139 {
140 // We won the race - initialize DMA
141 unsafe {
142 // Enable DMA0 clock and release reset
143 DMA0::enable_clock();
144 DMA0::release_reset();
145
146 // Configure DMA controller
147 let dma = &(*pac::Dma0::ptr());
148 dma.mp_csr().modify(|_, w| {
149 w.edbg()
150 .enable()
151 .erca()
152 .enable()
153 .halt()
154 .normal_operation()
155 .gclc()
156 .available()
157 .gmrc()
158 .available()
159 });
160 }
161 }
162}
163
164// ============================================================================
165// Phase 1: Foundation Types (Embassy-aligned)
166// ============================================================================
167
168/// DMA transfer direction.
169#[derive(Debug, Copy, Clone, PartialEq, Eq)]
170#[cfg_attr(feature = "defmt", derive(defmt::Format))]
171pub enum Direction {
172 /// Transfer from memory to memory.
173 MemoryToMemory,
174 /// Transfer from memory to a peripheral register.
175 MemoryToPeripheral,
176 /// Transfer from a peripheral register to memory.
177 PeripheralToMemory,
178}
179
180/// DMA transfer priority.
181#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
182#[cfg_attr(feature = "defmt", derive(defmt::Format))]
183pub enum Priority {
184 /// Low priority (channel priority 7).
185 Low,
186 /// Medium priority (channel priority 4).
187 Medium,
188 /// High priority (channel priority 1).
189 #[default]
190 High,
191 /// Highest priority (channel priority 0).
192 Highest,
193}
194
195impl Priority {
196 /// Convert to hardware priority value (0 = highest, 7 = lowest).
197 pub fn to_hw_priority(self) -> u8 {
198 match self {
199 Priority::Low => 7,
200 Priority::Medium => 4,
201 Priority::High => 1,
202 Priority::Highest => 0,
203 }
204 }
205}
206
207/// DMA transfer data width.
208#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
209#[cfg_attr(feature = "defmt", derive(defmt::Format))]
210pub enum WordSize {
211 /// 8-bit (1 byte) transfers.
212 OneByte,
213 /// 16-bit (2 byte) transfers.
214 TwoBytes,
215 /// 32-bit (4 byte) transfers.
216 #[default]
217 FourBytes,
218}
219
220impl WordSize {
221 /// Size in bytes.
222 pub const fn bytes(self) -> usize {
223 match self {
224 WordSize::OneByte => 1,
225 WordSize::TwoBytes => 2,
226 WordSize::FourBytes => 4,
227 }
228 }
229
230 /// Convert to hardware SSIZE/DSIZE field value.
231 pub const fn to_hw_size(self) -> u8 {
232 match self {
233 WordSize::OneByte => 0,
234 WordSize::TwoBytes => 1,
235 WordSize::FourBytes => 2,
236 }
237 }
238
239 /// Create from byte width (1, 2, or 4).
240 pub const fn from_bytes(bytes: u8) -> Option<Self> {
241 match bytes {
242 1 => Some(WordSize::OneByte),
243 2 => Some(WordSize::TwoBytes),
244 4 => Some(WordSize::FourBytes),
245 _ => None,
246 }
247 }
248}
249
250/// Trait for types that can be transferred via DMA.
251///
252/// This provides compile-time type safety for DMA transfers.
253pub trait Word: Copy + 'static {
254 /// The word size for this type.
255 fn size() -> WordSize;
256}
257
258impl Word for u8 {
259 fn size() -> WordSize {
260 WordSize::OneByte
261 }
262}
263
264impl Word for u16 {
265 fn size() -> WordSize {
266 WordSize::TwoBytes
267 }
268}
269
270impl Word for u32 {
271 fn size() -> WordSize {
272 WordSize::FourBytes
273 }
274}
275
276/// DMA transfer options.
277///
278/// This struct configures various aspects of a DMA transfer.
279#[derive(Debug, Copy, Clone, PartialEq, Eq)]
280#[cfg_attr(feature = "defmt", derive(defmt::Format))]
281#[non_exhaustive]
282pub struct TransferOptions {
283 /// Transfer priority.
284 pub priority: Priority,
285 /// Enable circular (continuous) mode.
286 ///
287 /// When enabled, the transfer repeats automatically after completing.
288 pub circular: bool,
289 /// Enable interrupt on half transfer complete.
290 pub half_transfer_interrupt: bool,
291 /// Enable interrupt on transfer complete.
292 pub complete_transfer_interrupt: bool,
293}
294
295impl Default for TransferOptions {
296 fn default() -> Self {
297 Self {
298 priority: Priority::High,
299 circular: false,
300 half_transfer_interrupt: false,
301 complete_transfer_interrupt: true,
302 }
303 }
304}
305
306/// DMA error types.
307#[derive(Debug, Copy, Clone, PartialEq, Eq)]
308#[cfg_attr(feature = "defmt", derive(defmt::Format))]
309pub enum Error {
310 /// The DMA controller reported a bus error.
311 BusError,
312 /// The transfer was aborted.
313 Aborted,
314 /// Configuration error (e.g., invalid parameters).
315 Configuration,
316 /// Buffer overrun (for ring buffers).
317 Overrun,
318}
319
320/// Whether to enable the major loop completion interrupt.
321///
322/// This enum provides better readability than a boolean parameter
323/// for functions that configure DMA interrupt behavior.
324#[derive(Debug, Copy, Clone, PartialEq, Eq)]
325#[cfg_attr(feature = "defmt", derive(defmt::Format))]
326pub enum EnableInterrupt {
327 /// Enable the interrupt on major loop completion.
328 Yes,
329 /// Do not enable the interrupt.
330 No,
331}
332
333// ============================================================================
334// DMA Constants
335// ============================================================================
336
337/// Maximum number of elements (major-loop iterations) per DMA transfer — the eDMA4 CITER/BITER counters are 15-bit fields.
338///
339/// This is a hardware limitation of the eDMA4 controller. Transfers larger
340/// than this must be split into multiple DMA operations.
341pub const DMA_MAX_TRANSFER_SIZE: usize = 0x7FFF;
342
343// ============================================================================
344// DMA Request Source Types (Type-Safe API)
345// ============================================================================
346
347/// Trait for type-safe DMA request sources.
348///
349/// Each peripheral that can trigger DMA requests implements this trait
350/// with marker types that encode the correct request source number at
351/// compile time. This prevents using the wrong request source for a
352/// peripheral.
353///
354/// # Example
355///
356/// ```ignore
357/// // The LPUART2 RX request source is automatically derived from the type:
358/// channel.set_request_source::<Lpuart2RxRequest>();
359/// ```
360///
361/// This trait is sealed and cannot be implemented outside this crate.
362#[allow(private_bounds)]
363pub trait DmaRequest: sealed::SealedDmaRequest {
364 /// The hardware request source number for the DMA mux.
365 const REQUEST_NUMBER: u8;
366}
367
368/// Macro to define a DMA request type.
369///
370/// Creates a zero-sized marker type that implements `DmaRequest` with
371/// the specified request number.
372macro_rules! define_dma_request {
373 ($(#[$meta:meta])* $name:ident = $num:expr) => {
374 $(#[$meta])*
375 #[derive(Debug, Copy, Clone)]
376 pub struct $name;
377
378 impl sealed::SealedDmaRequest for $name {}
379
380 impl DmaRequest for $name {
381 const REQUEST_NUMBER: u8 = $num;
382 }
383 };
384}
385
386// LPUART DMA request sources (from MCXA276 reference manual Table 4-8)
387define_dma_request!(
388 /// DMA request source for LPUART0 RX.
389 Lpuart0RxRequest = 21
390);
391define_dma_request!(
392 /// DMA request source for LPUART0 TX.
393 Lpuart0TxRequest = 22
394);
395define_dma_request!(
396 /// DMA request source for LPUART1 RX.
397 Lpuart1RxRequest = 23
398);
399define_dma_request!(
400 /// DMA request source for LPUART1 TX.
401 Lpuart1TxRequest = 24
402);
403define_dma_request!(
404 /// DMA request source for LPUART2 RX.
405 Lpuart2RxRequest = 25
406);
407define_dma_request!(
408 /// DMA request source for LPUART2 TX.
409 Lpuart2TxRequest = 26
410);
411define_dma_request!(
412 /// DMA request source for LPUART3 RX.
413 Lpuart3RxRequest = 27
414);
415define_dma_request!(
416 /// DMA request source for LPUART3 TX.
417 Lpuart3TxRequest = 28
418);
419define_dma_request!(
420 /// DMA request source for LPUART4 RX.
421 Lpuart4RxRequest = 29
422);
423define_dma_request!(
424 /// DMA request source for LPUART4 TX.
425 Lpuart4TxRequest = 30
426);
427define_dma_request!(
428 /// DMA request source for LPUART5 RX.
429 Lpuart5RxRequest = 31
430);
431define_dma_request!(
432 /// DMA request source for LPUART5 TX.
433 Lpuart5TxRequest = 32
434);
435
436// ============================================================================
437// Channel Trait (Sealed Pattern)
438// ============================================================================
439
440mod sealed {
441 use crate::pac::Interrupt;
442
443 /// Sealed trait for DMA channels.
444 pub trait SealedChannel {
445 /// Zero-based channel index into the TCD array.
446 fn index(&self) -> usize;
447 /// Interrupt vector for this channel.
448 fn interrupt(&self) -> Interrupt;
449 }
450
451 /// Sealed trait for DMA request sources.
452 pub trait SealedDmaRequest {}
453}
454
455/// Marker trait implemented by HAL peripheral tokens that map to a DMA0
456/// channel backed by one EDMA_0_TCD0 TCD slot.
457///
458/// This trait is sealed and cannot be implemented outside this crate.
459#[allow(private_bounds)]
460pub trait Channel: sealed::SealedChannel + PeripheralType + Into<AnyChannel> + 'static {
461 /// Zero-based channel index into the TCD array.
462 const INDEX: usize;
463 /// Interrupt vector for this channel.
464 const INTERRUPT: Interrupt;
465}
466
467/// Type-erased DMA channel.
468///
469/// This allows storing DMA channels in a uniform way regardless of their
470/// concrete type, useful for async transfer futures and runtime channel selection.
471#[derive(Debug, Clone, Copy)]
472pub struct AnyChannel {
473 index: usize,
474 interrupt: Interrupt,
475}
476
477impl AnyChannel {
478 /// Get the channel index.
479 #[inline]
480 pub const fn index(&self) -> usize {
481 self.index
482 }
483
484 /// Get the channel interrupt.
485 #[inline]
486 pub const fn interrupt(&self) -> Interrupt {
487 self.interrupt
488 }
489
490 /// Get a reference to the TCD register block for this channel.
491 ///
492 /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
493 #[inline]
494 fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
495 // Safety: MCXA276 has a single eDMA instance, and we're only accessing
496 // the TCD for this specific channel
497 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
498 edma.tcd(self.index)
499 }
500
501 /// Check if the channel's DONE flag is set.
502 pub fn is_done(&self) -> bool {
503 self.tcd().ch_csr().read().done().bit_is_set()
504 }
505
506 /// Get the waker for this channel.
507 pub fn waker(&self) -> &'static AtomicWaker {
508 &STATES[self.index].waker
509 }
510}
511
512impl sealed::SealedChannel for AnyChannel {
513 fn index(&self) -> usize {
514 self.index
515 }
516
517 fn interrupt(&self) -> Interrupt {
518 self.interrupt
519 }
520}
521
522/// Macro to implement Channel trait for a peripheral.
523macro_rules! impl_channel {
524 ($peri:ident, $index:expr, $irq:ident) => {
525 impl sealed::SealedChannel for crate::peripherals::$peri {
526 fn index(&self) -> usize {
527 $index
528 }
529
530 fn interrupt(&self) -> Interrupt {
531 Interrupt::$irq
532 }
533 }
534
535 impl Channel for crate::peripherals::$peri {
536 const INDEX: usize = $index;
537 const INTERRUPT: Interrupt = Interrupt::$irq;
538 }
539
540 impl From<crate::peripherals::$peri> for AnyChannel {
541 fn from(_: crate::peripherals::$peri) -> Self {
542 AnyChannel {
543 index: $index,
544 interrupt: Interrupt::$irq,
545 }
546 }
547 }
548 };
549}
550
551impl_channel!(DMA_CH0, 0, DMA_CH0);
552impl_channel!(DMA_CH1, 1, DMA_CH1);
553impl_channel!(DMA_CH2, 2, DMA_CH2);
554impl_channel!(DMA_CH3, 3, DMA_CH3);
555impl_channel!(DMA_CH4, 4, DMA_CH4);
556impl_channel!(DMA_CH5, 5, DMA_CH5);
557impl_channel!(DMA_CH6, 6, DMA_CH6);
558impl_channel!(DMA_CH7, 7, DMA_CH7);
559
560/// Strongly-typed handle to a DMA0 channel.
561///
562/// The lifetime of this value is tied to the unique peripheral token
563/// supplied by `embassy_hal_internal::peripherals!`, so safe code cannot
564/// create two `DmaChannel` instances for the same hardware channel.
565pub struct DmaChannel<C: Channel> {
566 _ch: core::marker::PhantomData<C>,
567}
568
569// ============================================================================
570// DMA Transfer Methods - API Overview
571// ============================================================================
572//
573// The DMA API provides two categories of methods for configuring transfers:
574//
575// ## 1. Async Methods (Return `Transfer` Future)
576//
577// These methods return a [`Transfer`] Future that must be `.await`ed:
578//
579// - [`write()`](DmaChannel::write) - Memory-to-peripheral using default eDMA TCD block
580// - [`read()`](DmaChannel::read) - Peripheral-to-memory using default eDMA TCD block
581// - [`write_to_peripheral()`](DmaChannel::write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
582// - [`read_from_peripheral()`](DmaChannel::read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
583// - [`mem_to_mem()`](DmaChannel::mem_to_mem) - Memory-to-memory using default eDMA TCD block
584// - [`transfer_mem_to_mem()`](DmaChannel::transfer_mem_to_mem) - Memory-to-memory with custom eDMA TCD block
585//
586// The `Transfer` manages the DMA lifecycle automatically:
587// - Enables channel request
588// - Waits for completion via async/await
589// - Cleans up on completion
590//
591// **Important:** `Transfer::Drop` aborts the transfer if dropped before completion.
592// This means you MUST `.await` the Transfer or it will be aborted when it goes out of scope.
593//
594// **Use case:** When you want to use async/await and let the Transfer handle lifecycle management.
595//
596// ## 2. Setup Methods (Configure TCD Only)
597//
598// These methods configure the TCD but do NOT return a `Transfer`:
599//
600// - [`setup_write()`](DmaChannel::setup_write) - Memory-to-peripheral using default eDMA TCD block
601// - [`setup_read()`](DmaChannel::setup_read) - Peripheral-to-memory using default eDMA TCD block
602// - [`setup_write_to_peripheral()`](DmaChannel::setup_write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
603// - [`setup_read_from_peripheral()`](DmaChannel::setup_read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
604//
605// The caller is responsible for the complete DMA lifecycle:
606// 1. Call [`enable_request()`](DmaChannel::enable_request) to start the transfer
607// 2. Poll [`is_done()`](DmaChannel::is_done) or use interrupts to detect completion
608// 3. Call [`disable_request()`](DmaChannel::disable_request), [`clear_done()`](DmaChannel::clear_done),
609// [`clear_interrupt()`](DmaChannel::clear_interrupt) for cleanup
610//
611// **Use case:** Peripheral drivers (like LPUART) that need fine-grained control over
612// DMA setup before starting a `Transfer`.
613//
614// ============================================================================
615
616impl<C: Channel> DmaChannel<C> {
617 /// Wrap a DMA channel token (takes ownership of the Peri wrapper).
618 ///
619 /// Note: DMA is initialized during `hal::init()` via `dma::init()`.
620 #[inline]
621 pub fn new(_ch: embassy_hal_internal::Peri<'_, C>) -> Self {
622 Self {
623 _ch: core::marker::PhantomData,
624 }
625 }
626
627 /// Wrap a DMA channel token directly (for internal use).
628 ///
629 /// Note: DMA is initialized during `hal::init()` via `dma::init()`.
630 #[inline]
631 pub fn from_token(_ch: C) -> Self {
632 Self {
633 _ch: core::marker::PhantomData,
634 }
635 }
636
637 /// Channel index in the EDMA_0_TCD0 array.
638 #[inline]
639 pub const fn index(&self) -> usize {
640 C::INDEX
641 }
642
643 /// Convert this typed channel into a type-erased `AnyChannel`.
644 #[inline]
645 pub fn into_any(self) -> AnyChannel {
646 AnyChannel {
647 index: C::INDEX,
648 interrupt: C::INTERRUPT,
649 }
650 }
651
652 /// Get a reference to the type-erased channel info.
653 #[inline]
654 pub fn as_any(&self) -> AnyChannel {
655 AnyChannel {
656 index: C::INDEX,
657 interrupt: C::INTERRUPT,
658 }
659 }
660
661 /// Return a reference to the underlying TCD register block.
662 ///
663 /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
664 ///
665 /// # Note
666 ///
667 /// This is exposed for advanced use cases that need direct TCD access.
668 /// For most use cases, prefer the higher-level transfer methods.
669 #[inline]
670 pub fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
671 // Safety: MCXA276 has a single eDMA instance
672 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
673 edma.tcd(C::INDEX)
674 }
675
676 /// Start an async transfer.
677 ///
678 /// The channel must already be configured. This enables the channel
679 /// request and returns a `Transfer` future that resolves when the
680 /// DMA transfer completes.
681 ///
682 /// # Safety
683 ///
684 /// The caller must ensure the DMA channel has been properly configured
685 /// and that source/destination buffers remain valid for the duration
686 /// of the transfer.
687 pub unsafe fn start_transfer(&self) -> Transfer<'_> {
688 // Clear any previous DONE/INT flags
689 let t = self.tcd();
690 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
691 t.ch_int().write(|w| w.int().clear_bit_by_one());
692
693 // Enable the channel request
694 t.ch_csr().modify(|_, w| w.erq().enable());
695
696 Transfer::new(self.as_any())
697 }
698
699 // ========================================================================
700 // Type-Safe Transfer Methods (Embassy-style API)
701 // ========================================================================
702
703 /// Perform a memory-to-memory DMA transfer (simplified API).
704 ///
705 /// This is a type-safe wrapper that uses the `Word` trait to determine
706 /// the correct transfer width automatically. Uses the global eDMA TCD
707 /// register accessor internally.
708 ///
709 /// # Arguments
710 ///
711 /// * `src` - Source buffer
712 /// * `dst` - Destination buffer (must be at least as large as src)
713 /// * `options` - Transfer configuration options
714 ///
715 /// # Safety
716 ///
717 /// The source and destination buffers must remain valid for the
718 /// duration of the transfer.
719 pub unsafe fn mem_to_mem<W: Word>(&self, src: &[W], dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
720 self.transfer_mem_to_mem(src, dst, options)
721 }
722
723 /// Perform a memory-to-memory DMA transfer.
724 ///
725 /// This is a type-safe wrapper that uses the `Word` trait to determine
726 /// the correct transfer width automatically.
727 ///
728 /// # Arguments
729 ///
730 /// * `src` - Source buffer
732 /// * `dst` - Destination buffer (must be at least as large as src)
733 /// * `options` - Transfer configuration options
734 ///
735 /// # Safety
736 ///
737 /// The source and destination buffers must remain valid for the
738 /// duration of the transfer.
739 pub unsafe fn transfer_mem_to_mem<W: Word>(
740 &self,
741 src: &[W],
742 dst: &mut [W],
743 options: TransferOptions,
744 ) -> Transfer<'_> {
745 assert!(!src.is_empty());
746 assert!(dst.len() >= src.len());
747 assert!(src.len() <= 0x7fff);
748
749 let size = W::size();
750 let byte_count = (src.len() * size.bytes()) as u32;
751
752 let t = self.tcd();
753
754 // Reset channel state - clear DONE, disable requests, clear errors
755 t.ch_csr().write(|w| {
756 w.erq()
757 .disable()
758 .earq()
759 .disable()
760 .eei()
761 .no_error()
762 .done()
763 .clear_bit_by_one()
764 });
765 t.ch_es().write(|w| w.err().clear_bit_by_one());
766 t.ch_int().write(|w| w.int().clear_bit_by_one());
767
768 // Memory barrier to ensure channel state is fully reset before touching TCD
769 cortex_m::asm::dsb();
770
771 // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
772 // Reset ALL TCD registers to 0 to clear any stale configuration from
773 // previous transfers. This is critical when reusing a channel.
774 t.tcd_saddr().write(|w| w.saddr().bits(0));
775 t.tcd_soff().write(|w| w.soff().bits(0));
776 t.tcd_attr().write(|w| w.bits(0));
777 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
778 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
779 t.tcd_daddr().write(|w| w.daddr().bits(0));
780 t.tcd_doff().write(|w| w.doff().bits(0));
781 t.tcd_citer_elinkno().write(|w| w.bits(0));
782 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
783 t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
784 t.tcd_biter_elinkno().write(|w| w.bits(0));
785
786 // Memory barrier after TCD reset
787 cortex_m::asm::dsb();
788
789 // Note: Priority is managed by round-robin arbitration (set in init())
790 // Per-channel priority can be configured via ch_pri() if needed
791
792 // Now configure the new transfer
793
794 // Source address and increment
795 t.tcd_saddr().write(|w| w.saddr().bits(src.as_ptr() as u32));
796 t.tcd_soff().write(|w| w.soff().bits(size.bytes() as u16));
797
798 // Destination address and increment
799 t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
800 t.tcd_doff().write(|w| w.doff().bits(size.bytes() as u16));
801
802 // Transfer attributes (size)
803 let hw_size = size.to_hw_size();
804 t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));
805
806 // Minor loop: transfer all bytes in one minor loop
807 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_count));
808
809 // No source/dest adjustment after major loop
810 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
811 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
812
813 // Major loop count = 1 (single major loop)
814 // Write BITER first, then CITER (CITER must match BITER at start)
815 t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
816 t.tcd_citer_elinkno().write(|w| w.citer().bits(1));
817
818 // Memory barrier before setting START
819 cortex_m::asm::dsb();
820
821 // Control/status: interrupt on major complete, start
822 // Write this last after all other TCD registers are configured
823 let int_major = options.complete_transfer_interrupt;
824 t.tcd_csr().write(|w| {
825 w.intmajor()
826 .bit(int_major)
827 .inthalf()
828 .bit(options.half_transfer_interrupt)
829 .dreq()
830 .set_bit() // Auto-disable request after major loop
831 .start()
832 .set_bit() // Start the channel
833 });
834
835 Transfer::new(self.as_any())
836 }
837
838 /// Fill a memory buffer with a pattern value (memset).
839 ///
840 /// This performs a DMA transfer where the source address remains fixed
841 /// (pattern value) while the destination address increments through the buffer.
842 /// It's useful for quickly filling large memory regions with a constant value.
843 ///
844 /// # Arguments
845 ///
846 /// * `pattern` - Reference to the pattern value (will be read repeatedly)
847 /// * `dst` - Destination buffer to fill
848 /// * `options` - Transfer configuration options
849 ///
850 /// # Example
851 ///
852 /// ```ignore
853 /// use embassy_mcxa::dma::{DmaChannel, TransferOptions};
854 ///
855 /// let dma_ch = DmaChannel::new(p.DMA_CH0);
856 /// let pattern: u32 = 0xDEADBEEF;
857 /// let mut buffer = [0u32; 256];
858 ///
859 /// unsafe {
860 /// dma_ch.memset(&pattern, &mut buffer, TransferOptions::default()).await;
861 /// }
862 /// // buffer is now filled with 0xDEADBEEF
863 /// ```
864 ///
865 /// # Safety
866 ///
867 /// - The pattern and destination buffer must remain valid for the duration of the transfer.
    pub unsafe fn memset<W: Word>(&self, pattern: &W, dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
        // An empty destination would program NBYTES=0, which is not a valid transfer.
        assert!(!dst.is_empty());
        // Conservative element-count cap matching the 15-bit CITER/BITER limit used
        // elsewhere in this driver. Here CITER is fixed at 1, so the effective limit
        // is really the NBYTES field width — this cap keeps us safely inside it.
        assert!(dst.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();
        // Total bytes to transfer - all in one minor loop for software-triggered transfers
        let total_bytes = (dst.len() * byte_size) as u32;

        let t = self.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        // CH_ES.ERR is write-1-to-clear; clear any stale error from a prior transfer.
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is fully reset before touching TCD
        cortex_m::asm::dsb();

        // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
        // Reset ALL TCD registers to 0 to clear any stale configuration from
        // previous transfers. This is critical when reusing a channel.
        t.tcd_saddr().write(|w| w.saddr().bits(0));
        t.tcd_soff().write(|w| w.soff().bits(0));
        t.tcd_attr().write(|w| w.bits(0));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_daddr().write(|w| w.daddr().bits(0));
        t.tcd_doff().write(|w| w.doff().bits(0));
        t.tcd_citer_elinkno().write(|w| w.bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
        t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
        t.tcd_biter_elinkno().write(|w| w.bits(0));

        // Memory barrier after TCD reset
        cortex_m::asm::dsb();

        // Now configure the new transfer
        //
        // For software-triggered memset, we use a SINGLE minor loop that transfers
        // all bytes at once. The source address stays fixed (SOFF=0) while the
        // destination increments (DOFF=byte_size). The eDMA will read from the
        // same source address for each destination word.
        //
        // This is necessary because the START bit only triggers ONE minor loop
        // iteration. Using CITER>1 with software trigger would require multiple
        // START triggers.

        // Source: pattern address, fixed (soff=0)
        t.tcd_saddr().write(|w| w.saddr().bits(pattern as *const W as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // Fixed source - reads pattern repeatedly

        // Destination: memory buffer, incrementing by word size
        t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes - source and dest are same word size
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));

        // Minor loop: transfer ALL bytes in one minor loop (like mem_to_mem)
        // This allows the entire transfer to complete with a single START trigger
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(total_bytes));

        // No address adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = 1 (single major loop, all data in minor loop)
        // Write BITER first, then CITER (CITER must match BITER at start)
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Control/status: interrupt on major complete, start immediately
        // Write this last after all other TCD registers are configured
        let int_major = options.complete_transfer_interrupt;
        t.tcd_csr().write(|w| {
            w.intmajor()
                .bit(int_major)
                .inthalf()
                .bit(options.half_transfer_interrupt)
                .dreq()
                .set_bit() // Auto-disable request after major loop
                .start()
                .set_bit() // Start the channel
        });

        Transfer::new(self.as_any())
    }
969
    /// Write data from memory to a peripheral register.
    ///
    /// Convenience alias for [`write_to_peripheral()`](Self::write_to_peripheral).
    /// The destination address remains fixed (peripheral register) while
    /// the source address increments through the buffer.
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `options` - Transfer configuration options
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn write<W: Word>(&self, buf: &[W], peri_addr: *mut W, options: TransferOptions) -> Transfer<'_> {
        self.write_to_peripheral(buf, peri_addr, options)
    }
988
    /// Configure a memory-to-peripheral DMA transfer without starting it.
    ///
    /// This is a convenience wrapper around [`setup_write_to_peripheral()`](Self::setup_write_to_peripheral)
    /// that uses the default eDMA TCD register block.
    ///
    /// This method configures the TCD but does NOT return a `Transfer`. The caller
    /// is responsible for the complete DMA lifecycle:
    /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
    /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
    /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
    ///    [`clear_interrupt()`](Self::clear_interrupt) for cleanup
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use embassy_mcxa::dma::{DmaChannel, EnableInterrupt};
    /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// # let uart_tx_addr = 0x4000_0000 as *mut u8;
    /// let data = [0x48, 0x65, 0x6c, 0x6c, 0x6f]; // "Hello"
    ///
    /// unsafe {
    ///     // Configure the transfer
    ///     dma_ch.setup_write(&data, uart_tx_addr, EnableInterrupt::Yes);
    ///
    ///     // Start when peripheral is ready
    ///     dma_ch.enable_request();
    ///
    ///     // Wait for completion (or use interrupt)
    ///     while !dma_ch.is_done() {}
    ///
    ///     // Clean up
    ///     dma_ch.clear_done();
    ///     dma_ch.clear_interrupt();
    /// }
    /// ```
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn setup_write<W: Word>(&self, buf: &[W], peri_addr: *mut W, enable_interrupt: EnableInterrupt) {
        self.setup_write_to_peripheral(buf, peri_addr, enable_interrupt)
    }
1038
1039 /// Write data from memory to a peripheral register.
1040 ///
1041 /// The destination address remains fixed (peripheral register) while
1042 /// the source address increments through the buffer.
1043 ///
1044 /// # Arguments
1045 ///
1046 /// * `buf` - Source buffer to write from
1047 /// * `peri_addr` - Peripheral register address
1048 /// * `options` - Transfer configuration options
1049 ///
1050 /// # Safety
1051 ///
1052 /// - The buffer must remain valid for the duration of the transfer.
1053 /// - The peripheral address must be valid for writes.
1054 pub unsafe fn write_to_peripheral<W: Word>(
1055 &self,
1056 buf: &[W],
1057 peri_addr: *mut W,
1058 options: TransferOptions,
1059 ) -> Transfer<'_> {
1060 assert!(!buf.is_empty());
1061 assert!(buf.len() <= 0x7fff);
1062
1063 let size = W::size();
1064 let byte_size = size.bytes();
1065
1066 let t = self.tcd();
1067
1068 // Reset channel state
1069 t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
1070 t.ch_es().write(|w| w.bits(0));
1071 t.ch_int().write(|w| w.int().clear_bit_by_one());
1072
1073 // Addresses
1074 t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
1075 t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));
1076
1077 // Offsets: Source increments, Dest fixed
1078 t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
1079 t.tcd_doff().write(|w| w.doff().bits(0));
1080
1081 // Attributes: set size and explicitly disable modulo
1082 let hw_size = size.to_hw_size();
1083 t.tcd_attr().write(|w| {
1084 w.ssize()
1085 .bits(hw_size)
1086 .dsize()
1087 .bits(hw_size)
1088 .smod()
1089 .disable()
1090 .dmod()
1091 .bits(0)
1092 });
1093
1094 // Minor loop: transfer one word per request (match old: only set nbytes)
1095 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));
1096
1097 // No final adjustments
1098 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
1099 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
1100
1101 // Major loop count = number of words
1102 let count = buf.len() as u16;
1103 t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
1104 t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());
1105
1106 // CSR: interrupt on major loop complete and auto-clear ERQ
1107 t.tcd_csr().write(|w| {
1108 let w = if options.complete_transfer_interrupt {
1109 w.intmajor().enable()
1110 } else {
1111 w.intmajor().disable()
1112 };
1113 w.inthalf()
1114 .disable()
1115 .dreq()
1116 .erq_field_clear() // Disable request when done
1117 .esg()
1118 .normal_format()
1119 .majorelink()
1120 .disable()
1121 .eeop()
1122 .disable()
1123 .esda()
1124 .disable()
1125 .bwc()
1126 .no_stall()
1127 });
1128
1129 // Ensure all TCD writes have completed before DMA engine reads them
1130 cortex_m::asm::dsb();
1131
1132 Transfer::new(self.as_any())
1133 }
1134
    /// Read data from a peripheral register to memory.
    ///
    /// Convenience alias for [`read_from_peripheral()`](Self::read_from_peripheral).
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer.
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], options: TransferOptions) -> Transfer<'_> {
        self.read_from_peripheral(peri_addr, buf, options)
    }
1153
    /// Configure a peripheral-to-memory DMA transfer without starting it.
    ///
    /// This is a convenience wrapper around [`setup_read_from_peripheral()`](Self::setup_read_from_peripheral)
    /// that uses the default eDMA TCD register block.
    ///
    /// This method configures the TCD but does NOT return a `Transfer`. The caller
    /// is responsible for the complete DMA lifecycle:
    /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
    /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
    /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
    ///    [`clear_interrupt()`](Self::clear_interrupt) for cleanup
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use embassy_mcxa::dma::{DmaChannel, EnableInterrupt};
    /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// # let uart_rx_addr = 0x4000_0000 as *const u8;
    /// let mut buf = [0u8; 32];
    ///
    /// unsafe {
    ///     // Configure the transfer
    ///     dma_ch.setup_read(uart_rx_addr, &mut buf, EnableInterrupt::Yes);
    ///
    ///     // Start when peripheral is ready
    ///     dma_ch.enable_request();
    ///
    ///     // Wait for completion (or use interrupt)
    ///     while !dma_ch.is_done() {}
    ///
    ///     // Clean up
    ///     dma_ch.clear_done();
    ///     dma_ch.clear_interrupt();
    /// }
    /// // buf now contains received data
    /// ```
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], enable_interrupt: EnableInterrupt) {
        self.setup_read_from_peripheral(peri_addr, buf, enable_interrupt)
    }
1204
1205 /// Read data from a peripheral register to memory.
1206 ///
1207 /// The source address remains fixed (peripheral register) while
1208 /// the destination address increments through the buffer.
1209 ///
1210 /// # Arguments
1211 ///
1212 /// * `peri_addr` - Peripheral register address
1213 /// * `buf` - Destination buffer to read into
1214 /// * `options` - Transfer configuration options
1215 ///
1216 /// # Safety
1217 ///
1218 /// - The buffer must remain valid for the duration of the transfer.
1219 /// - The peripheral address must be valid for reads.
1220 pub unsafe fn read_from_peripheral<W: Word>(
1221 &self,
1222 peri_addr: *const W,
1223 buf: &mut [W],
1224 options: TransferOptions,
1225 ) -> Transfer<'_> {
1226 assert!(!buf.is_empty());
1227 assert!(buf.len() <= 0x7fff);
1228
1229 let size = W::size();
1230 let byte_size = size.bytes();
1231
1232 let t = self.tcd();
1233
1234 // Reset channel control/error/interrupt state
1235 t.ch_csr().write(|w| {
1236 w.erq()
1237 .disable()
1238 .earq()
1239 .disable()
1240 .eei()
1241 .no_error()
1242 .ebw()
1243 .disable()
1244 .done()
1245 .clear_bit_by_one()
1246 });
1247 t.ch_es().write(|w| w.bits(0));
1248 t.ch_int().write(|w| w.int().clear_bit_by_one());
1249
1250 // Source: peripheral register, fixed
1251 t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
1252 t.tcd_soff().write(|w| w.soff().bits(0)); // No increment
1253
1254 // Destination: memory buffer, incrementing
1255 t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
1256 t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));
1257
1258 // Transfer attributes: set size and explicitly disable modulo
1259 let hw_size = size.to_hw_size();
1260 t.tcd_attr().write(|w| {
1261 w.ssize()
1262 .bits(hw_size)
1263 .dsize()
1264 .bits(hw_size)
1265 .smod()
1266 .disable()
1267 .dmod()
1268 .bits(0)
1269 });
1270
1271 // Minor loop: transfer one word per request, no offsets
1272 t.tcd_nbytes_mloffno().write(|w| {
1273 w.nbytes()
1274 .bits(byte_size as u32)
1275 .dmloe()
1276 .offset_not_applied()
1277 .smloe()
1278 .offset_not_applied()
1279 });
1280
1281 // Major loop count = number of words
1282 let count = buf.len() as u16;
1283 t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
1284 t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());
1285
1286 // No address adjustment after major loop
1287 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
1288 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
1289
1290 // Control/status: interrupt on major complete, auto-clear ERQ when done
1291 t.tcd_csr().write(|w| {
1292 let w = if options.complete_transfer_interrupt {
1293 w.intmajor().enable()
1294 } else {
1295 w.intmajor().disable()
1296 };
1297 let w = if options.half_transfer_interrupt {
1298 w.inthalf().enable()
1299 } else {
1300 w.inthalf().disable()
1301 };
1302 w.dreq()
1303 .erq_field_clear() // Disable request when done (important for peripheral DMA)
1304 .esg()
1305 .normal_format()
1306 .majorelink()
1307 .disable()
1308 .eeop()
1309 .disable()
1310 .esda()
1311 .disable()
1312 .bwc()
1313 .no_stall()
1314 });
1315
1316 // Ensure all TCD writes have completed before DMA engine reads them
1317 cortex_m::asm::dsb();
1318
1319 Transfer::new(self.as_any())
1320 }
1321
1322 /// Configure a memory-to-peripheral DMA transfer without starting it.
1323 ///
1324 /// This configures the TCD for a memory-to-peripheral transfer but does NOT
1325 /// return a Transfer object. The caller is responsible for:
1326 /// 1. Enabling the peripheral's DMA request
1327 /// 2. Calling `enable_request()` to start the transfer
1328 /// 3. Polling `is_done()` or using interrupts to detect completion
1329 /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
1330 ///
1331 /// Use this when you need manual control over the DMA lifecycle (e.g., in
1332 /// peripheral drivers that have their own completion polling).
1333 ///
1334 /// # Arguments
1335 ///
1336 /// * `buf` - Source buffer to write from
1337 /// * `peri_addr` - Peripheral register address
1338 /// * `enable_interrupt` - Whether to enable interrupt on completion
1339 ///
1340 /// # Safety
1341 ///
1342 /// - The buffer must remain valid for the duration of the transfer.
1343 /// - The peripheral address must be valid for writes.
1344 pub unsafe fn setup_write_to_peripheral<W: Word>(
1345 &self,
1346 buf: &[W],
1347 peri_addr: *mut W,
1348 enable_interrupt: EnableInterrupt,
1349 ) {
1350 assert!(!buf.is_empty());
1351 assert!(buf.len() <= 0x7fff);
1352
1353 let size = W::size();
1354 let byte_size = size.bytes();
1355
1356 let t = self.tcd();
1357
1358 // Reset channel state
1359 t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
1360 t.ch_es().write(|w| w.bits(0));
1361 t.ch_int().write(|w| w.int().clear_bit_by_one());
1362
1363 // Addresses
1364 t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
1365 t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));
1366
1367 // Offsets: Source increments, Dest fixed
1368 t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
1369 t.tcd_doff().write(|w| w.doff().bits(0));
1370
1371 // Attributes: set size and explicitly disable modulo
1372 let hw_size = size.to_hw_size();
1373 t.tcd_attr().write(|w| {
1374 w.ssize()
1375 .bits(hw_size)
1376 .dsize()
1377 .bits(hw_size)
1378 .smod()
1379 .disable()
1380 .dmod()
1381 .bits(0)
1382 });
1383
1384 // Minor loop: transfer one word per request
1385 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));
1386
1387 // No final adjustments
1388 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
1389 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
1390
1391 // Major loop count = number of words
1392 let count = buf.len() as u16;
1393 t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
1394 t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());
1395
1396 // CSR: optional interrupt on major loop complete and auto-clear ERQ
1397 t.tcd_csr().write(|w| {
1398 let w = match enable_interrupt {
1399 EnableInterrupt::Yes => w.intmajor().enable(),
1400 EnableInterrupt::No => w.intmajor().disable(),
1401 };
1402 w.inthalf()
1403 .disable()
1404 .dreq()
1405 .erq_field_clear()
1406 .esg()
1407 .normal_format()
1408 .majorelink()
1409 .disable()
1410 .eeop()
1411 .disable()
1412 .esda()
1413 .disable()
1414 .bwc()
1415 .no_stall()
1416 });
1417
1418 // Ensure all TCD writes have completed before DMA engine reads them
1419 cortex_m::asm::dsb();
1420 }
1421
1422 /// Configure a peripheral-to-memory DMA transfer without starting it.
1423 ///
1424 /// This configures the TCD for a peripheral-to-memory transfer but does NOT
1425 /// return a Transfer object. The caller is responsible for:
1426 /// 1. Enabling the peripheral's DMA request
1427 /// 2. Calling `enable_request()` to start the transfer
1428 /// 3. Polling `is_done()` or using interrupts to detect completion
1429 /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
1430 ///
1431 /// Use this when you need manual control over the DMA lifecycle (e.g., in
1432 /// peripheral drivers that have their own completion polling).
1433 ///
1434 /// # Arguments
1435 ///
1436 /// * `peri_addr` - Peripheral register address
1437 /// * `buf` - Destination buffer to read into
1438 /// * `enable_interrupt` - Whether to enable interrupt on completion
1439 ///
1440 /// # Safety
1441 ///
1442 /// - The buffer must remain valid for the duration of the transfer.
1443 /// - The peripheral address must be valid for reads.
1444 pub unsafe fn setup_read_from_peripheral<W: Word>(
1445 &self,
1446 peri_addr: *const W,
1447 buf: &mut [W],
1448 enable_interrupt: EnableInterrupt,
1449 ) {
1450 assert!(!buf.is_empty());
1451 assert!(buf.len() <= 0x7fff);
1452
1453 let size = W::size();
1454 let byte_size = size.bytes();
1455
1456 let t = self.tcd();
1457
1458 // Reset channel control/error/interrupt state
1459 t.ch_csr().write(|w| {
1460 w.erq()
1461 .disable()
1462 .earq()
1463 .disable()
1464 .eei()
1465 .no_error()
1466 .ebw()
1467 .disable()
1468 .done()
1469 .clear_bit_by_one()
1470 });
1471 t.ch_es().write(|w| w.bits(0));
1472 t.ch_int().write(|w| w.int().clear_bit_by_one());
1473
1474 // Source: peripheral register, fixed
1475 t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
1476 t.tcd_soff().write(|w| w.soff().bits(0));
1477
1478 // Destination: memory buffer, incrementing
1479 t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
1480 t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));
1481
1482 // Attributes: set size and explicitly disable modulo
1483 let hw_size = size.to_hw_size();
1484 t.tcd_attr().write(|w| {
1485 w.ssize()
1486 .bits(hw_size)
1487 .dsize()
1488 .bits(hw_size)
1489 .smod()
1490 .disable()
1491 .dmod()
1492 .bits(0)
1493 });
1494
1495 // Minor loop: transfer one word per request
1496 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));
1497
1498 // No final adjustments
1499 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
1500 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
1501
1502 // Major loop count = number of words
1503 let count = buf.len() as u16;
1504 t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
1505 t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());
1506
1507 // CSR: optional interrupt on major loop complete and auto-clear ERQ
1508 t.tcd_csr().write(|w| {
1509 let w = match enable_interrupt {
1510 EnableInterrupt::Yes => w.intmajor().enable(),
1511 EnableInterrupt::No => w.intmajor().disable(),
1512 };
1513 w.inthalf()
1514 .disable()
1515 .dreq()
1516 .erq_field_clear()
1517 .esg()
1518 .normal_format()
1519 .majorelink()
1520 .disable()
1521 .eeop()
1522 .disable()
1523 .esda()
1524 .disable()
1525 .bwc()
1526 .no_stall()
1527 });
1528
1529 // Ensure all TCD writes have completed before DMA engine reads them
1530 cortex_m::asm::dsb();
1531 }
1532
    /// Configure the integrated channel MUX to use the given typed
    /// DMA request source (e.g., [`Lpuart2TxRequest`] or [`Lpuart2RxRequest`]).
    ///
    /// This is the type-safe version that uses marker types to ensure
    /// compile-time verification of request source validity.
    ///
    /// # Safety
    ///
    /// The channel must be properly configured before enabling requests.
    /// The caller must ensure the DMA request source matches the peripheral
    /// that will drive this channel.
    ///
    /// # Note
    ///
    /// The NXP SDK requires a two-step write sequence: first clear
    /// the mux to 0, then set the actual source. This is a hardware
    /// requirement on eDMA4 for the mux to properly latch.
    ///
    /// NOTE(review): the SDK also changes the mux only while the channel's
    /// hardware request is disabled (ERQ=0) — confirm callers guarantee this.
    ///
    /// # Example
    ///
    /// ```ignore
    /// use embassy_mcxa::dma::{DmaChannel, Lpuart2RxRequest};
    ///
    /// // Type-safe: compiler verifies this is a valid DMA request type
    /// unsafe {
    ///     channel.set_request_source::<Lpuart2RxRequest>();
    /// }
    /// ```
    #[inline]
    pub unsafe fn set_request_source<R: DmaRequest>(&self) {
        // Two-step write per NXP SDK: clear to 0, then set actual source.
        self.tcd().ch_mux().write(|w| w.src().bits(0));
        cortex_m::asm::dsb(); // Ensure the clear completes before setting new source
        self.tcd().ch_mux().write(|w| w.src().bits(R::REQUEST_NUMBER));
    }
1568
1569 /// Enable hardware requests for this channel (ERQ=1).
1570 ///
1571 /// # Safety
1572 ///
1573 /// The channel must be properly configured before enabling requests.
1574 pub unsafe fn enable_request(&self) {
1575 let t = self.tcd();
1576 t.ch_csr().modify(|_, w| w.erq().enable());
1577 }
1578
1579 /// Disable hardware requests for this channel (ERQ=0).
1580 ///
1581 /// # Safety
1582 ///
1583 /// Disabling requests on an active transfer may leave the transfer incomplete.
1584 pub unsafe fn disable_request(&self) {
1585 let t = self.tcd();
1586 t.ch_csr().modify(|_, w| w.erq().disable());
1587 }
1588
1589 /// Return true if the channel's DONE flag is set.
1590 pub fn is_done(&self) -> bool {
1591 let t = self.tcd();
1592 t.ch_csr().read().done().bit_is_set()
1593 }
1594
1595 /// Clear the DONE flag for this channel.
1596 ///
1597 /// Uses modify to preserve other bits (especially ERQ) unlike write
1598 /// which would clear ERQ and halt an active transfer.
1599 ///
1600 /// # Safety
1601 ///
1602 /// Clearing DONE while a transfer is in progress may cause undefined behavior.
1603 pub unsafe fn clear_done(&self) {
1604 let t = self.tcd();
1605 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
1606 }
1607
1608 /// Clear the channel interrupt flag (CH_INT.INT).
1609 ///
1610 /// # Safety
1611 ///
1612 /// Must be called from the correct interrupt context or with interrupts disabled.
1613 pub unsafe fn clear_interrupt(&self) {
1614 let t = self.tcd();
1615 t.ch_int().write(|w| w.int().clear_bit_by_one());
1616 }
1617
1618 /// Trigger a software start for this channel.
1619 ///
1620 /// # Safety
1621 ///
1622 /// The channel must be properly configured with a valid TCD before triggering.
1623 pub unsafe fn trigger_start(&self) {
1624 let t = self.tcd();
1625 t.tcd_csr().modify(|_, w| w.start().channel_started());
1626 }
1627
    /// Get the waker for this channel
    ///
    /// Returns the transfer-complete [`AtomicWaker`] slot for this channel.
    pub fn waker(&self) -> &'static AtomicWaker {
        &STATES[C::INDEX].waker
    }
1632
    /// Enable the interrupt for this channel in the NVIC.
    pub fn enable_interrupt(&self) {
        unsafe {
            // SAFETY: `NVIC::unmask` is unsafe because it can break mask-based
            // critical sections; NOTE(review): this driver does not appear to
            // rely on this line staying masked — confirm.
            cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
        }
    }
1639
    /// Enable Major Loop Linking.
    ///
    /// When the major loop completes, the hardware will trigger a service request
    /// on `link_ch`.
    ///
    /// # Arguments
    ///
    /// * `link_ch` - Target channel index (0-7) to link to
    ///
    /// # Safety
    ///
    /// The channel must be properly configured before setting up linking.
    pub unsafe fn set_major_link(&self, link_ch: usize) {
        let t = self.tcd();
        // modify() preserves the other TCD_CSR bits (START/INTMAJOR/DREQ etc.).
        t.tcd_csr()
            .modify(|_, w| w.majorelink().enable().majorlinkch().bits(link_ch as u8));
    }
1657
1658 /// Disable Major Loop Linking.
1659 ///
1660 /// Removes any major loop channel linking previously configured.
1661 ///
1662 /// # Safety
1663 ///
1664 /// The caller must ensure this doesn't disrupt an active transfer that
1665 /// depends on the linking.
1666 pub unsafe fn clear_major_link(&self) {
1667 let t = self.tcd();
1668 t.tcd_csr().modify(|_, w| w.majorelink().disable());
1669 }
1670
1671 /// Enable Minor Loop Linking.
1672 ///
1673 /// After each minor loop, the hardware will trigger a service request
1674 /// on `link_ch`.
1675 ///
1676 /// # Arguments
1677 ///
1678 /// * `link_ch` - Target channel index (0-7) to link to
1679 ///
1680 /// # Note
1681 ///
1682 /// This rewrites CITER and BITER registers to the ELINKYES format.
1683 /// It preserves the current loop count.
1684 ///
1685 /// # Safety
1686 ///
1687 /// The channel must be properly configured before setting up linking.
1688 pub unsafe fn set_minor_link(&self, link_ch: usize) {
1689 let t = self.tcd();
1690
1691 // Read current CITER (assuming ELINKNO format initially)
1692 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1693 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1694
1695 // Write back using ELINKYES format
1696 t.tcd_citer_elinkyes().write(|w| {
1697 w.citer()
1698 .bits(current_citer)
1699 .elink()
1700 .enable()
1701 .linkch()
1702 .bits(link_ch as u8)
1703 });
1704
1705 t.tcd_biter_elinkyes().write(|w| {
1706 w.biter()
1707 .bits(current_biter)
1708 .elink()
1709 .enable()
1710 .linkch()
1711 .bits(link_ch as u8)
1712 });
1713 }
1714
1715 /// Disable Minor Loop Linking.
1716 ///
1717 /// Removes any minor loop channel linking previously configured.
1718 /// This rewrites CITER and BITER registers to the ELINKNO format,
1719 /// preserving the current loop count.
1720 ///
1721 /// # Safety
1722 ///
1723 /// The caller must ensure this doesn't disrupt an active transfer that
1724 /// depends on the linking.
1725 pub unsafe fn clear_minor_link(&self) {
1726 let t = self.tcd();
1727
1728 // Read current CITER (could be in either format, but we only need the count)
1729 // Note: In ELINKYES format, citer is 9 bits; in ELINKNO, it's 15 bits.
1730 // We read from ELINKNO which will give us the combined value.
1731 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1732 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1733
1734 // Write back using ELINKNO format (disabling link)
1735 t.tcd_citer_elinkno()
1736 .write(|w| w.citer().bits(current_citer).elink().disable());
1737
1738 t.tcd_biter_elinkno()
1739 .write(|w| w.biter().bits(current_biter).elink().disable());
1740 }
1741
1742 /// Load a TCD from memory into the hardware channel registers.
1743 ///
1744 /// This is useful for scatter/gather and ping-pong transfers where
1745 /// TCDs are prepared in RAM and then loaded into the hardware.
1746 ///
1747 /// # Safety
1748 ///
1749 /// - The TCD must be properly initialized.
1750 /// - The caller must ensure no concurrent access to the same channel.
1751 pub unsafe fn load_tcd(&self, tcd: &Tcd) {
1752 let t = self.tcd();
1753 t.tcd_saddr().write(|w| w.saddr().bits(tcd.saddr));
1754 t.tcd_soff().write(|w| w.soff().bits(tcd.soff as u16));
1755 t.tcd_attr().write(|w| w.bits(tcd.attr));
1756 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(tcd.nbytes));
1757 t.tcd_slast_sda().write(|w| w.slast_sda().bits(tcd.slast as u32));
1758 t.tcd_daddr().write(|w| w.daddr().bits(tcd.daddr));
1759 t.tcd_doff().write(|w| w.doff().bits(tcd.doff as u16));
1760 t.tcd_citer_elinkno().write(|w| w.citer().bits(tcd.citer));
1761 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(tcd.dlast_sga as u32));
1762 t.tcd_csr().write(|w| w.bits(tcd.csr));
1763 t.tcd_biter_elinkno().write(|w| w.biter().bits(tcd.biter));
1764 }
1765}
1766
/// In-memory representation of a Transfer Control Descriptor (TCD).
///
/// This matches the hardware layout (32 bytes).
///
/// The 32-byte alignment presumably lets a `Tcd` serve as a scatter/gather
/// target pointed to by `dlast_sga` — TODO confirm against the eDMA manual.
#[repr(C, align(32))]
#[derive(Clone, Copy, Debug, Default)]
pub struct Tcd {
    /// Source address (SADDR).
    pub saddr: u32,
    /// Signed source address offset applied after each read (SOFF).
    pub soff: i16,
    /// Transfer attributes: SSIZE/DSIZE/SMOD/DMOD fields (ATTR).
    pub attr: u16,
    /// Bytes per minor loop (NBYTES; interpretation depends on MLOFF mode).
    pub nbytes: u32,
    /// Signed source address adjustment after the major loop (SLAST_SDA).
    pub slast: i32,
    /// Destination address (DADDR).
    pub daddr: u32,
    /// Signed destination address offset applied after each write (DOFF).
    pub doff: i16,
    /// Current major loop count (CITER, ELINKNO layout).
    pub citer: u16,
    /// Destination last adjustment, or next-TCD address in scatter/gather mode (DLAST_SGA).
    pub dlast_sga: i32,
    /// Control/status bits: START, INTMAJOR, DREQ, ESG, ... (CSR).
    pub csr: u16,
    /// Beginning major loop count (BITER, ELINKNO layout).
    pub biter: u16,
}
1785
/// Per-channel wake state shared between transfer futures and interrupt paths.
struct State {
    /// Waker for transfer complete interrupt
    waker: AtomicWaker,
    /// Waker for half-transfer interrupt
    half_waker: AtomicWaker,
}
1792
impl State {
    /// Create an empty state with no registered wakers.
    ///
    /// `const` so it can initialize the `STATES` static.
    const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
            half_waker: AtomicWaker::new(),
        }
    }
}
1801
// One wake-state slot per DMA channel handled by this driver (8 channels,
// indexed by the channel's `INDEX`).
static STATES: [State; 8] = [
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
];
1812
/// Transfer-complete waker slot for channel `idx`. Panics if `idx >= 8`.
pub(crate) fn waker(idx: usize) -> &'static AtomicWaker {
    &STATES[idx].waker
}
1816
/// Half-transfer waker slot for channel `idx`. Panics if `idx >= 8`.
pub(crate) fn half_waker(idx: usize) -> &'static AtomicWaker {
    &STATES[idx].half_waker
}
1820
1821// ============================================================================
1822// Async Transfer Future
1823// ============================================================================
1824
/// An in-progress DMA transfer.
///
/// This type implements `Future` and can be `.await`ed to wait for the
/// transfer to complete. Dropping the transfer will abort it.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    /// Type-erased handle to the channel driving this transfer.
    channel: AnyChannel,
    /// Ties the transfer to the borrow of the buffers/channel it was created from.
    _phantom: core::marker::PhantomData<&'a ()>,
}
1834
impl<'a> Transfer<'a> {
    /// Create a new transfer for the given channel.
    ///
    /// The caller must have already configured and started the DMA channel.
    pub(crate) fn new(channel: AnyChannel) -> Self {
        Self {
            channel,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Check if the transfer is still running.
    ///
    /// Returns `true` until the channel's DONE flag is set.
    pub fn is_running(&self) -> bool {
        !self.channel.is_done()
    }

    /// Get the remaining transfer count.
    ///
    /// This is the channel's CITER value, i.e. the number of major-loop
    /// iterations still outstanding (not bytes). Read through the ELINKNO
    /// view — NOTE(review): if minor linking is enabled, this view folds the
    /// LINKCH bits into the count; confirm callers only use it without linking.
    pub fn remaining(&self) -> u16 {
        let t = self.channel.tcd();
        t.tcd_citer_elinkno().read().citer().bits()
    }

    /// Block until the transfer completes.
    pub fn blocking_wait(self) {
        while self.is_running() {
            core::hint::spin_loop();
        }

        // Ensure all DMA writes are visible
        fence(Ordering::SeqCst);

        // Don't run drop (which would abort)
        core::mem::forget(self);
    }

    /// Wait for the half-transfer interrupt asynchronously.
    ///
    /// This is useful for double-buffering scenarios where you want to process
    /// the first half of the buffer while the second half is being filled.
    ///
    /// Returns `true` if the half-transfer occurred, `false` if the transfer
    /// completed before the half-transfer interrupt.
    ///
    /// # Note
    ///
    /// The transfer must be configured with `TransferOptions::half_transfer_interrupt = true`
    /// for this method to work correctly.
    pub async fn wait_half(&mut self) -> bool {
        use core::future::poll_fn;

        poll_fn(|cx| {
            let state = &STATES[self.channel.index];

            // Register the half-transfer waker
            // NOTE(review): only the half-transfer interrupt wakes this future.
            // If the transfer completes without the half interrupt firing, this
            // future is only resolved on its next poll — confirm the completion
            // interrupt also wakes `half_waker`, or that callers tolerate this.
            state.half_waker.register(cx.waker());

            // Check if we're past the half-way point
            let t = self.channel.tcd();
            let biter = t.tcd_biter_elinkno().read().biter().bits();
            let citer = t.tcd_citer_elinkno().read().citer().bits();
            let half_point = biter / 2;

            if self.channel.is_done() {
                // Transfer completed before half-transfer
                Poll::Ready(false)
            } else if citer <= half_point {
                // We're past the half-way point
                fence(Ordering::SeqCst);
                Poll::Ready(true)
            } else {
                Poll::Pending
            }
        })
        .await
    }

    /// Abort the transfer.
    ///
    /// Disables further hardware requests and clears the channel's pending
    /// interrupt and DONE state; the TCD itself is left as-is.
    fn abort(&mut self) {
        let t = self.channel.tcd();

        // Disable channel requests
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear any pending interrupt
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Clear DONE flag
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);
    }
}
1927
1928impl<'a> Unpin for Transfer<'a> {}
1929
impl<'a> Future for Transfer<'a> {
    type Output = ();

    /// Resolves once the DMA channel reports completion.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state = &STATES[self.channel.index];

        // Register waker first: doing this before checking DONE closes the
        // race where the completion interrupt fires between the check and
        // registration (the wake would otherwise be lost).
        state.waker.register(cx.waker());

        let done = self.channel.is_done();

        if done {
            // Ensure all DMA writes are visible before returning
            fence(Ordering::SeqCst);
            Poll::Ready(())
        } else {
            Poll::Pending
        }
    }
}
1950
impl<'a> Drop for Transfer<'a> {
    /// Aborts the transfer if it is still in flight, so the DMA engine can
    /// no longer write into buffers whose borrow ends with this drop.
    fn drop(&mut self) {
        // Only abort if the transfer is still running
        // If already complete, no need to abort
        if self.is_running() {
            self.abort();

            // Wait for abort to complete.
            //
            // NOTE(review): `abort()` clears the DONE flag and disables
            // requests; if `AnyChannel::is_done()` only reflects the DONE
            // bit, this loop could spin until a major loop completes (or
            // indefinitely once requests are disabled). Confirm `is_done()`
            // reports the channel idle after an abort.
            while self.is_running() {
                core::hint::spin_loop();
            }
        }

        fence(Ordering::SeqCst);
    }
}
1967
1968// ============================================================================
1969// Ring Buffer for Circular DMA
1970// ============================================================================
1971
/// A ring buffer for continuous DMA reception.
///
/// This structure manages a circular DMA transfer, allowing continuous
/// reception of data without losing bytes between reads. It uses both
/// half-transfer and complete-transfer interrupts to track available data.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, RingBuffer, TransferOptions};
///
/// static mut RX_BUF: [u8; 64] = [0; 64];
///
/// let dma_ch = DmaChannel::new(p.DMA_CH0);
/// let ring_buf = unsafe {
///     dma_ch.setup_circular_read(
///         uart_rx_addr,
///         &mut RX_BUF,
///     )
/// };
///
/// // Read data as it arrives
/// let mut buf = [0u8; 16];
/// let n = ring_buf.read(&mut buf).await?;
/// ```
pub struct RingBuffer<'a, W: Word> {
    /// Type-erased handle to the DMA channel running the circular transfer.
    channel: AnyChannel,
    /// Buffer pointer. We use NonNull instead of &mut because DMA acts like
    /// a separate thread writing to this buffer, and &mut claims exclusive
    /// access which the compiler could optimize incorrectly.
    buf: NonNull<[W]>,
    /// Buffer length cached for convenience
    buf_len: usize,
    /// Read position in the buffer (consumer side)
    read_pos: AtomicUsize,
    /// Phantom data to tie the lifetime to the original buffer
    _lt: PhantomData<&'a mut [W]>,
}
2010
impl<'a, W: Word> RingBuffer<'a, W> {
    /// Create a new ring buffer for the given channel and buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure:
    /// - The DMA channel has been configured for circular transfer
    /// - The buffer remains valid for the lifetime of the ring buffer
    /// - Only one RingBuffer exists per DMA channel at a time
    pub(crate) unsafe fn new(channel: AnyChannel, buf: &'a mut [W]) -> Self {
        let buf_len = buf.len();
        Self {
            channel,
            buf: NonNull::from(buf),
            buf_len,
            read_pos: AtomicUsize::new(0),
            _lt: PhantomData,
        }
    }

    /// Get a slice reference to the buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that DMA is not actively writing to the
    /// portion of the buffer being accessed, or that the access is
    /// appropriately synchronized.
    #[inline]
    unsafe fn buf_slice(&self) -> &[W] {
        self.buf.as_ref()
    }

    /// Get the current DMA write position in the buffer.
    ///
    /// This reads the current destination address from the DMA controller
    /// and calculates the buffer offset (in elements, not bytes).
    fn dma_write_pos(&self) -> usize {
        let t = self.channel.tcd();
        let daddr = t.tcd_daddr().read().daddr().bits() as usize;
        let buf_start = self.buf.as_ptr() as *const W as usize;

        // Calculate offset from buffer start
        let offset = daddr.wrapping_sub(buf_start) / core::mem::size_of::<W>();

        // Ensure we're within bounds (DMA wraps around)
        offset % self.buf_len
    }

    /// Returns the number of elements available to read.
    ///
    /// This is a snapshot: the DMA engine may deposit more data immediately
    /// after the destination address is sampled.
    pub fn available(&self) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        }
    }

    /// Check if the buffer has overrun (data was lost).
    ///
    /// This happens when DMA writes faster than the application reads.
    ///
    /// NOTE(review): for a 1-element buffer `buf_len - 1 == 0`, so this
    /// reports an overrun even when no data was lost — confirm callers
    /// always use buffers of at least 2 elements.
    pub fn is_overrun(&self) -> bool {
        // In a true overrun, the DMA would have wrapped around and caught up
        // to our read position. We can detect this by checking if available()
        // equals the full buffer size (minus 1 to distinguish from empty).
        self.available() >= self.buf_len - 1
    }

    /// Read data from the ring buffer into the provided slice.
    ///
    /// Returns the number of elements read, which may be less than
    /// `dst.len()` if not enough data is available.
    ///
    /// This method does not block; use `read()` for async waiting.
    pub fn read_immediate(&self, dst: &mut [W]) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Calculate available elements (same computation as `available()`,
        // inlined so both positions come from a single snapshot)
        let available = if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        };

        let to_read = dst.len().min(available);
        if to_read == 0 {
            return 0;
        }

        // Safety: We only read from portions of the buffer that DMA has
        // already written to (between read_pos and write_pos).
        let buf = unsafe { self.buf_slice() };

        // Read data, handling wrap-around
        let first_chunk = (self.buf_len - read_pos).min(to_read);
        dst[..first_chunk].copy_from_slice(&buf[read_pos..read_pos + first_chunk]);

        if to_read > first_chunk {
            let second_chunk = to_read - first_chunk;
            dst[first_chunk..to_read].copy_from_slice(&buf[..second_chunk]);
        }

        // Update read position
        let new_read_pos = (read_pos + to_read) % self.buf_len;
        self.read_pos.store(new_read_pos, Ordering::Release);

        to_read
    }

    /// Read data from the ring buffer asynchronously.
    ///
    /// This waits until at least one element is available, then reads as much
    /// as possible into the destination buffer.
    ///
    /// Returns the number of elements read.
    ///
    /// # Errors
    ///
    /// Returns [`Error::Overrun`] if the DMA has lapped the read position.
    pub async fn read(&self, dst: &mut [W]) -> Result<usize, Error> {
        use core::future::poll_fn;

        if dst.is_empty() {
            return Ok(0);
        }

        poll_fn(|cx| {
            // Check for overrun
            if self.is_overrun() {
                return Poll::Ready(Err(Error::Overrun));
            }

            // Try to read immediately
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            // Register wakers for both half and complete interrupts
            let state = &STATES[self.channel.index()];
            state.waker.register(cx.waker());
            state.half_waker.register(cx.waker());

            // Check again after registering waker (avoid race)
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            Poll::Pending
        })
        .await
    }

    /// Clear the ring buffer, discarding all unread data.
    pub fn clear(&self) {
        // Jump the read cursor to the DMA's current write position.
        let write_pos = self.dma_write_pos();
        self.read_pos.store(write_pos, Ordering::Release);
    }

    /// Stop the DMA transfer and consume the ring buffer.
    ///
    /// Returns any remaining unread data count.
    pub fn stop(self) -> usize {
        // Snapshot the unread count before tearing the channel down.
        let available = self.available();

        // Disable the channel
        let t = self.channel.tcd();
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear flags
        t.ch_int().write(|w| w.int().clear_bit_by_one());
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);

        available
    }
}
2189
impl<C: Channel> DmaChannel<C> {
    /// Set up a circular DMA transfer for continuous peripheral-to-memory reception.
    ///
    /// This configures the DMA channel for circular operation with both half-transfer
    /// and complete-transfer interrupts enabled. The transfer runs continuously until
    /// stopped via [`RingBuffer::stop()`].
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address to read from
    /// * `buf` - Destination buffer (should be power-of-2 size for best efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to read received data.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements (the major
    /// loop counter is limited when channel linking is disabled).
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - The peripheral address must be valid for reads.
    /// - The peripheral's DMA request must be configured to trigger this channel.
    pub unsafe fn setup_circular_read<'a, W: Word>(&self, peri_addr: *const W, buf: &'a mut [W]) -> RingBuffer<'a, W> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);
        // For circular mode, buffer size should ideally be power of 2
        // but we don't enforce it

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state: disable requests, clear error enables and the
        // DONE flag so the TCD can be reprogrammed from a known-clean state.
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes: same element size on both sides, no address
        // modulo wrapping.
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = buffer size
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // After major loop: reset destination to buffer start (circular)
        let buf_bytes = (buf.len() * byte_size) as i32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); // Source doesn't change
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits((-buf_bytes) as u32));

        // Control/status: enable both half and complete interrupts, NO DREQ (continuous)
        t.tcd_csr().write(|w| {
            w.intmajor()
                .enable()
                .inthalf()
                .enable()
                .dreq()
                .channel_not_affected() // Don't clear ERQ on complete (circular)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure the whole TCD is committed before enabling requests.
        cortex_m::asm::dsb();

        // Enable the channel request
        t.ch_csr().modify(|_, w| w.erq().enable());

        // Enable NVIC interrupt for this channel so async wakeups work
        self.enable_interrupt();

        RingBuffer::new(self.as_any(), buf)
    }
}
2310
2311// ============================================================================
2312// Scatter-Gather Builder
2313// ============================================================================
2314
/// Maximum number of TCDs in a scatter-gather chain.
///
/// Bounds the statically-sized TCD pool inside [`ScatterGatherBuilder`];
/// each additional slot costs one TCD of storage per builder instance.
pub const MAX_SCATTER_GATHER_TCDS: usize = 16;
2317
/// A builder for constructing scatter-gather DMA transfer chains.
///
/// This provides a type-safe way to build TCD chains for scatter-gather
/// transfers without manual TCD manipulation.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
///
/// let mut builder = ScatterGatherBuilder::<u32>::new();
///
/// // Add transfer segments
/// builder.add_transfer(&src1, &mut dst1);
/// builder.add_transfer(&src2, &mut dst2);
/// builder.add_transfer(&src3, &mut dst3);
///
/// // Build and execute
/// let transfer = unsafe { builder.build(&dma_ch).unwrap() };
/// transfer.await;
/// ```
pub struct ScatterGatherBuilder<W: Word> {
    /// TCD pool (must be 32-byte aligned)
    ///
    /// NOTE(review): the alignment requirement is assumed to be carried by
    /// the `Tcd` type itself (e.g. `#[repr(align(32))]`) — confirm at its
    /// definition, since `build()` hands hardware raw pointers into this array.
    tcds: [Tcd; MAX_SCATTER_GATHER_TCDS],
    /// Number of TCDs configured
    count: usize,
    /// Phantom marker for word type
    _phantom: core::marker::PhantomData<W>,
}
2347
impl<W: Word> ScatterGatherBuilder<W> {
    /// Create a new scatter-gather builder.
    pub fn new() -> Self {
        Self {
            tcds: [Tcd::default(); MAX_SCATTER_GATHER_TCDS],
            count: 0,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Add a memory-to-memory transfer segment to the chain.
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer for this segment
    /// * `dst` - Destination buffer for this segment
    ///
    /// # Panics
    ///
    /// Panics if the maximum number of segments (16) is exceeded, if `src`
    /// is empty, or if `dst` is shorter than `src`.
    pub fn add_transfer(&mut self, src: &[W], dst: &mut [W]) -> &mut Self {
        assert!(self.count < MAX_SCATTER_GATHER_TCDS, "Too many scatter-gather segments");
        assert!(!src.is_empty());
        assert!(dst.len() >= src.len());

        let size = W::size();
        let byte_size = size.bytes();
        let hw_size = size.to_hw_size();
        let nbytes = (src.len() * byte_size) as u32;

        // Build the TCD for this segment: the whole slice is moved in a
        // single major-loop iteration (CITER = BITER = 1) with NBYTES
        // covering the entire source.
        self.tcds[self.count] = Tcd {
            saddr: src.as_ptr() as u32,
            soff: byte_size as i16,
            attr: ((hw_size as u16) << 8) | (hw_size as u16), // SSIZE | DSIZE
            nbytes,
            slast: 0,
            daddr: dst.as_mut_ptr() as u32,
            doff: byte_size as i16,
            citer: 1,
            dlast_sga: 0, // Will be filled in by build()
            csr: 0x0002, // INTMAJOR only (ESG will be set for non-last TCDs)
            biter: 1,
        };

        self.count += 1;
        self
    }

    /// Get the number of transfer segments added.
    pub fn segment_count(&self) -> usize {
        self.count
    }

    /// Build the scatter-gather chain and start the transfer.
    ///
    /// # Arguments
    ///
    /// * `channel` - The DMA channel to use for the transfer
    ///
    /// # Returns
    ///
    /// A `Transfer` future that completes when the entire chain has executed.
    /// The returned future borrows this builder, which also prevents the TCD
    /// pool (referenced by `dlast_sga` pointers) from moving while hardware
    /// may still load from it.
    ///
    /// # Errors
    ///
    /// Returns [`Error::Configuration`] if no segments have been added.
    ///
    /// # Safety
    ///
    /// All source and destination buffers passed to `add_transfer()` must
    /// remain valid for the duration of the transfer.
    pub unsafe fn build<C: Channel>(&mut self, channel: &DmaChannel<C>) -> Result<Transfer<'_>, Error> {
        if self.count == 0 {
            return Err(Error::Configuration);
        }

        // Link TCDs together
        //
        // CSR bit definitions:
        // - START = bit 0 = 0x0001 (triggers transfer when set)
        // - INTMAJOR = bit 1 = 0x0002 (interrupt on major loop complete)
        // - ESG = bit 4 = 0x0010 (enable scatter-gather, loads next TCD on complete)
        //
        // When hardware loads a TCD via scatter-gather (ESG), it copies the TCD's
        // CSR directly into the hardware register. If START is not set in that CSR,
        // the hardware will NOT auto-execute the loaded TCD.
        //
        // Strategy:
        // - First TCD: ESG | INTMAJOR (no START - we add it manually after loading)
        // - Middle TCDs: ESG | INTMAJOR | START (auto-execute when loaded via S/G)
        // - Last TCD: INTMAJOR | START (auto-execute, no further linking)
        for i in 0..self.count {
            let is_first = i == 0;
            let is_last = i == self.count - 1;

            if is_first {
                if is_last {
                    // Only one TCD - no ESG, no START (we add START manually)
                    self.tcds[i].dlast_sga = 0;
                    self.tcds[i].csr = 0x0002; // INTMAJOR only
                } else {
                    // First of multiple - ESG to link, no START (we add START manually)
                    self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                    self.tcds[i].csr = 0x0012; // ESG | INTMAJOR
                }
            } else if is_last {
                // Last TCD (not first) - no ESG, but START so it auto-executes
                self.tcds[i].dlast_sga = 0;
                self.tcds[i].csr = 0x0003; // INTMAJOR | START
            } else {
                // Middle TCD - ESG to link, and START so it auto-executes
                self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                self.tcds[i].csr = 0x0013; // ESG | INTMAJOR | START
            }
        }

        let t = channel.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        // This ensures the channel is in a clean state before loading the TCD
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is reset before loading TCD
        cortex_m::asm::dsb();

        // Load first TCD into hardware
        channel.load_tcd(&self.tcds[0]);

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Start the transfer
        t.tcd_csr().modify(|_, w| w.start().channel_started());

        Ok(Transfer::new(channel.as_any()))
    }

    /// Reset the builder for reuse.
    pub fn clear(&mut self) {
        self.count = 0;
    }
}
2498
2499impl<W: Word> Default for ScatterGatherBuilder<W> {
2500 fn default() -> Self {
2501 Self::new()
2502 }
2503}
2504
/// A completed scatter-gather transfer result.
///
/// This type is returned after a scatter-gather transfer completes,
/// providing access to any error information.
///
/// NOTE(review): nothing in this module chunk constructs this type —
/// confirm it is produced by an API elsewhere or is planned surface area.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ScatterGatherResult {
    /// Number of segments successfully transferred
    pub segments_completed: usize,
    /// Error if any occurred
    pub error: Option<Error>,
}
2516
2517// ============================================================================
2518// Interrupt Handler
2519// ============================================================================
2520
/// Interrupt handler helper.
///
/// Call this from your interrupt handler to clear the interrupt flag and wake the waker.
/// This handles both half-transfer and complete-transfer interrupts.
///
/// # Safety
/// Must be called from the correct DMA channel interrupt context.
pub unsafe fn on_interrupt(ch_index: usize) {
    // Safety: called from the channel's ISR; we only touch this channel's
    // TCD registers.
    let p = pac::Peripherals::steal();
    let edma = &p.edma_0_tcd0;
    let t = edma.tcd(ch_index);

    // Read TCD CSR to determine interrupt source
    let csr = t.tcd_csr().read();

    // Check if this is a half-transfer interrupt
    // INTHALF is set and we're at or past the half-way point
    if csr.inthalf().bit_is_set() {
        let biter = t.tcd_biter_elinkno().read().biter().bits();
        let citer = t.tcd_citer_elinkno().read().citer().bits();
        let half_point = biter / 2;

        // `citer > 0` excludes the final reload point so a completion
        // interrupt is not mistaken for a half-transfer.
        if citer <= half_point && citer > 0 {
            // Half-transfer interrupt - wake half_waker
            half_waker(ch_index).wake();
        }
    }

    // Clear INT flag
    t.ch_int().write(|w| w.int().clear_bit_by_one());

    // If DONE is set, this is a complete-transfer interrupt
    // Only wake the full-transfer waker when the transfer is actually complete
    if t.ch_csr().read().done().bit_is_set() {
        waker(ch_index).wake();
    }
}
2558
2559// ============================================================================
2560// Type-level Interrupt Handlers for bind_interrupts! macro
2561// ============================================================================
2562
/// Macro to generate DMA channel interrupt handlers.
///
/// This generates handler structs that implement the `Handler` trait for use
/// with the `bind_interrupts!` macro.
///
/// `$name` is the generated handler type, `$irq` the type-level interrupt it
/// binds to, and `$ch` the channel index forwarded to [`on_interrupt`].
macro_rules! impl_dma_interrupt_handler {
    ($name:ident, $irq:ident, $ch:expr) => {
        /// Interrupt handler for DMA channel.
        ///
        /// Use this with the `bind_interrupts!` macro:
        /// ```ignore
        /// bind_interrupts!(struct Irqs {
        #[doc = concat!(" ", stringify!($irq), " => dma::", stringify!($name), ";")]
        /// });
        /// ```
        pub struct $name;

        impl crate::interrupt::typelevel::Handler<crate::interrupt::typelevel::$irq> for $name {
            unsafe fn on_interrupt() {
                on_interrupt($ch);
            }
        }
    };
}
2586
// One handler type per eDMA channel; the numeric argument is the channel
// index passed through to `on_interrupt` (and used to index STATES).
impl_dma_interrupt_handler!(DmaCh0InterruptHandler, DMA_CH0, 0);
impl_dma_interrupt_handler!(DmaCh1InterruptHandler, DMA_CH1, 1);
impl_dma_interrupt_handler!(DmaCh2InterruptHandler, DMA_CH2, 2);
impl_dma_interrupt_handler!(DmaCh3InterruptHandler, DMA_CH3, 3);
impl_dma_interrupt_handler!(DmaCh4InterruptHandler, DMA_CH4, 4);
impl_dma_interrupt_handler!(DmaCh5InterruptHandler, DMA_CH5, 5);
impl_dma_interrupt_handler!(DmaCh6InterruptHandler, DMA_CH6, 6);
impl_dma_interrupt_handler!(DmaCh7InterruptHandler, DMA_CH7, 7);
diff --git a/embassy-mcxa/src/interrupt.rs b/embassy-mcxa/src/interrupt.rs
index c1f7e55a0..c960af7a2 100644
--- a/embassy-mcxa/src/interrupt.rs
+++ b/embassy-mcxa/src/interrupt.rs
@@ -10,6 +10,14 @@ mod generated {
10 #[rustfmt::skip] 10 #[rustfmt::skip]
11 embassy_hal_internal::interrupt_mod!( 11 embassy_hal_internal::interrupt_mod!(
12 ADC1, 12 ADC1,
13 DMA_CH0,
14 DMA_CH1,
15 DMA_CH2,
16 DMA_CH3,
17 DMA_CH4,
18 DMA_CH5,
19 DMA_CH6,
20 DMA_CH7,
13 GPIO0, 21 GPIO0,
14 GPIO1, 22 GPIO1,
15 GPIO2, 23 GPIO2,
diff --git a/embassy-mcxa/src/lib.rs b/embassy-mcxa/src/lib.rs
index c6d8adc8f..23cda7511 100644
--- a/embassy-mcxa/src/lib.rs
+++ b/embassy-mcxa/src/lib.rs
@@ -6,6 +6,7 @@
6// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)] 6// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
7 7
8pub mod clocks; // still provide clock helpers 8pub mod clocks; // still provide clock helpers
9pub mod dma;
9pub mod gpio; 10pub mod gpio;
10pub mod pins; // pin mux helpers 11pub mod pins; // pin mux helpers
11 12
@@ -51,6 +52,14 @@ embassy_hal_internal::peripherals!(
51 52
52 DBGMAILBOX, 53 DBGMAILBOX,
53 DMA0, 54 DMA0,
55 DMA_CH0,
56 DMA_CH1,
57 DMA_CH2,
58 DMA_CH3,
59 DMA_CH4,
60 DMA_CH5,
61 DMA_CH6,
62 DMA_CH7,
54 EDMA0_TCD0, 63 EDMA0_TCD0,
55 EIM0, 64 EIM0,
56 EQDC0, 65 EQDC0,
@@ -363,6 +372,9 @@ pub fn init(cfg: crate::config::Config) -> Peripherals {
363 crate::gpio::init(); 372 crate::gpio::init();
364 } 373 }
365 374
375 // Initialize DMA controller (clock, reset, configuration)
376 crate::dma::init();
377
366 // Initialize embassy-time global driver backed by OSTIMER0 378 // Initialize embassy-time global driver backed by OSTIMER0
367 #[cfg(feature = "time")] 379 #[cfg(feature = "time")]
368 crate::ostimer::time_driver::init(crate::config::Config::default().time_interrupt_priority, 1_000_000); 380 crate::ostimer::time_driver::init(crate::config::Config::default().time_interrupt_priority, 1_000_000);
diff --git a/embassy-mcxa/src/lpuart/mod.rs b/embassy-mcxa/src/lpuart/mod.rs
index b8a2d5172..6e60164e6 100644
--- a/embassy-mcxa/src/lpuart/mod.rs
+++ b/embassy-mcxa/src/lpuart/mod.rs
@@ -15,22 +15,12 @@ use crate::{AnyPin, interrupt, pac};
15pub mod buffered; 15pub mod buffered;
16 16
17// ============================================================================ 17// ============================================================================
18// STUB IMPLEMENTATION 18// DMA INTEGRATION
19// ============================================================================ 19// ============================================================================
20 20
21// Stub implementation for LIB (Peripherals), GPIO, DMA and CLOCK until stable API 21use crate::dma::{
22// Pin and Clock initialization is currently done at the examples level. 22 Channel as DmaChannelTrait, DmaChannel, DmaRequest, EnableInterrupt, RingBuffer, DMA_MAX_TRANSFER_SIZE,
23 23};
24// --- START DMA ---
25mod dma {
26 pub struct Channel<'d> {
27 pub(super) _lifetime: core::marker::PhantomData<&'d ()>,
28 }
29}
30
31use dma::Channel;
32
33// --- END DMA ---
34 24
35// ============================================================================ 25// ============================================================================
36// MISC 26// MISC
@@ -62,10 +52,14 @@ pub struct Info {
62pub trait Instance: SealedInstance + PeripheralType + 'static + Send + Gate<MrccPeriphConfig = LpuartConfig> { 52pub trait Instance: SealedInstance + PeripheralType + 'static + Send + Gate<MrccPeriphConfig = LpuartConfig> {
63 const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance; 53 const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance;
64 type Interrupt: interrupt::typelevel::Interrupt; 54 type Interrupt: interrupt::typelevel::Interrupt;
55 /// Type-safe DMA request source for TX
56 type TxDmaRequest: DmaRequest;
57 /// Type-safe DMA request source for RX
58 type RxDmaRequest: DmaRequest;
65} 59}
66 60
67macro_rules! impl_instance { 61macro_rules! impl_instance {
68 ($($n:expr),*) => { 62 ($($n:expr);* $(;)?) => {
69 $( 63 $(
70 paste!{ 64 paste!{
71 impl SealedInstance for crate::peripherals::[<LPUART $n>] { 65 impl SealedInstance for crate::peripherals::[<LPUART $n>] {
@@ -90,13 +84,23 @@ macro_rules! impl_instance {
90 const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance 84 const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance
91 = crate::clocks::periph_helpers::LpuartInstance::[<Lpuart $n>]; 85 = crate::clocks::periph_helpers::LpuartInstance::[<Lpuart $n>];
92 type Interrupt = crate::interrupt::typelevel::[<LPUART $n>]; 86 type Interrupt = crate::interrupt::typelevel::[<LPUART $n>];
87 type TxDmaRequest = crate::dma::[<Lpuart $n TxRequest>];
88 type RxDmaRequest = crate::dma::[<Lpuart $n RxRequest>];
93 } 89 }
94 } 90 }
95 )* 91 )*
96 }; 92 };
97} 93}
98 94
99impl_instance!(0, 1, 2, 3, 4, 5); 95// DMA request sources are now type-safe via associated types.
96// The request source numbers are defined in src/dma.rs:
97// LPUART0: RX=21, TX=22 -> Lpuart0RxRequest, Lpuart0TxRequest
98// LPUART1: RX=23, TX=24 -> Lpuart1RxRequest, Lpuart1TxRequest
99// LPUART2: RX=25, TX=26 -> Lpuart2RxRequest, Lpuart2TxRequest
100// LPUART3: RX=27, TX=28 -> Lpuart3RxRequest, Lpuart3TxRequest
101// LPUART4: RX=29, TX=30 -> Lpuart4RxRequest, Lpuart4TxRequest
102// LPUART5: RX=31, TX=32 -> Lpuart5RxRequest, Lpuart5TxRequest
103impl_instance!(0; 1; 2; 3; 4; 5);
100 104
101// ============================================================================ 105// ============================================================================
102// INSTANCE HELPER FUNCTIONS 106// INSTANCE HELPER FUNCTIONS
@@ -683,7 +687,6 @@ pub struct LpuartTx<'a, M: Mode> {
683 info: Info, 687 info: Info,
684 _tx_pin: Peri<'a, AnyPin>, 688 _tx_pin: Peri<'a, AnyPin>,
685 _cts_pin: Option<Peri<'a, AnyPin>>, 689 _cts_pin: Option<Peri<'a, AnyPin>>,
686 _tx_dma: Option<Channel<'a>>,
687 mode: PhantomData<(&'a (), M)>, 690 mode: PhantomData<(&'a (), M)>,
688} 691}
689 692
@@ -692,10 +695,31 @@ pub struct LpuartRx<'a, M: Mode> {
692 info: Info, 695 info: Info,
693 _rx_pin: Peri<'a, AnyPin>, 696 _rx_pin: Peri<'a, AnyPin>,
694 _rts_pin: Option<Peri<'a, AnyPin>>, 697 _rts_pin: Option<Peri<'a, AnyPin>>,
695 _rx_dma: Option<Channel<'a>>,
696 mode: PhantomData<(&'a (), M)>, 698 mode: PhantomData<(&'a (), M)>,
697} 699}
698 700
701/// Lpuart TX driver with DMA support.
702pub struct LpuartTxDma<'a, T: Instance, C: DmaChannelTrait> {
703 info: Info,
704 _tx_pin: Peri<'a, AnyPin>,
705 tx_dma: DmaChannel<C>,
706 _instance: core::marker::PhantomData<T>,
707}
708
709/// Lpuart RX driver with DMA support.
710pub struct LpuartRxDma<'a, T: Instance, C: DmaChannelTrait> {
711 info: Info,
712 _rx_pin: Peri<'a, AnyPin>,
713 rx_dma: DmaChannel<C>,
714 _instance: core::marker::PhantomData<T>,
715}
716
717/// Lpuart driver with DMA support for both TX and RX.
718pub struct LpuartDma<'a, T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> {
719 tx: LpuartTxDma<'a, T, TxC>,
720 rx: LpuartRxDma<'a, T, RxC>,
721}
722
699// ============================================================================ 723// ============================================================================
700// LPUART CORE IMPLEMENTATION 724// LPUART CORE IMPLEMENTATION
701// ============================================================================ 725// ============================================================================
@@ -782,8 +806,8 @@ impl<'a> Lpuart<'a, Blocking> {
782 806
783 Ok(Self { 807 Ok(Self {
784 info: T::info(), 808 info: T::info(),
785 tx: LpuartTx::new_inner(T::info(), tx_pin.into(), None, None), 809 tx: LpuartTx::new_inner(T::info(), tx_pin.into(), None),
786 rx: LpuartRx::new_inner(T::info(), rx_pin.into(), None, None), 810 rx: LpuartRx::new_inner(T::info(), rx_pin.into(), None),
787 }) 811 })
788 } 812 }
789 813
@@ -807,8 +831,8 @@ impl<'a> Lpuart<'a, Blocking> {
807 831
808 Ok(Self { 832 Ok(Self {
809 info: T::info(), 833 info: T::info(),
810 rx: LpuartRx::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into()), None), 834 rx: LpuartRx::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into())),
811 tx: LpuartTx::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into()), None), 835 tx: LpuartTx::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into())),
812 }) 836 })
813 } 837 }
814} 838}
@@ -818,17 +842,11 @@ impl<'a> Lpuart<'a, Blocking> {
818// ---------------------------------------------------------------------------- 842// ----------------------------------------------------------------------------
819 843
820impl<'a, M: Mode> LpuartTx<'a, M> { 844impl<'a, M: Mode> LpuartTx<'a, M> {
821 fn new_inner( 845 fn new_inner(info: Info, tx_pin: Peri<'a, AnyPin>, cts_pin: Option<Peri<'a, AnyPin>>) -> Self {
822 info: Info,
823 tx_pin: Peri<'a, AnyPin>,
824 cts_pin: Option<Peri<'a, AnyPin>>,
825 tx_dma: Option<Channel<'a>>,
826 ) -> Self {
827 Self { 846 Self {
828 info, 847 info,
829 _tx_pin: tx_pin, 848 _tx_pin: tx_pin,
830 _cts_pin: cts_pin, 849 _cts_pin: cts_pin,
831 _tx_dma: tx_dma,
832 mode: PhantomData, 850 mode: PhantomData,
833 } 851 }
834 } 852 }
@@ -847,7 +865,7 @@ impl<'a> LpuartTx<'a, Blocking> {
847 // Initialize the peripheral 865 // Initialize the peripheral
848 Lpuart::<Blocking>::init::<T>(true, false, false, false, config)?; 866 Lpuart::<Blocking>::init::<T>(true, false, false, false, config)?;
849 867
850 Ok(Self::new_inner(T::info(), tx_pin.into(), None, None)) 868 Ok(Self::new_inner(T::info(), tx_pin.into(), None))
851 } 869 }
852 870
853 /// Create a new blocking LPUART transmitter instance with CTS flow control 871 /// Create a new blocking LPUART transmitter instance with CTS flow control
@@ -862,7 +880,7 @@ impl<'a> LpuartTx<'a, Blocking> {
862 880
863 Lpuart::<Blocking>::init::<T>(true, false, true, false, config)?; 881 Lpuart::<Blocking>::init::<T>(true, false, true, false, config)?;
864 882
865 Ok(Self::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into()), None)) 883 Ok(Self::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into())))
866 } 884 }
867 885
868 fn write_byte_internal(&mut self, byte: u8) -> Result<()> { 886 fn write_byte_internal(&mut self, byte: u8) -> Result<()> {
@@ -941,17 +959,11 @@ impl<'a> LpuartTx<'a, Blocking> {
941// ---------------------------------------------------------------------------- 959// ----------------------------------------------------------------------------
942 960
943impl<'a, M: Mode> LpuartRx<'a, M> { 961impl<'a, M: Mode> LpuartRx<'a, M> {
944 fn new_inner( 962 fn new_inner(info: Info, rx_pin: Peri<'a, AnyPin>, rts_pin: Option<Peri<'a, AnyPin>>) -> Self {
945 info: Info,
946 rx_pin: Peri<'a, AnyPin>,
947 rts_pin: Option<Peri<'a, AnyPin>>,
948 rx_dma: Option<Channel<'a>>,
949 ) -> Self {
950 Self { 963 Self {
951 info, 964 info,
952 _rx_pin: rx_pin, 965 _rx_pin: rx_pin,
953 _rts_pin: rts_pin, 966 _rts_pin: rts_pin,
954 _rx_dma: rx_dma,
955 mode: PhantomData, 967 mode: PhantomData,
956 } 968 }
957 } 969 }
@@ -968,7 +980,7 @@ impl<'a> LpuartRx<'a, Blocking> {
968 980
969 Lpuart::<Blocking>::init::<T>(false, true, false, false, config)?; 981 Lpuart::<Blocking>::init::<T>(false, true, false, false, config)?;
970 982
971 Ok(Self::new_inner(T::info(), rx_pin.into(), None, None)) 983 Ok(Self::new_inner(T::info(), rx_pin.into(), None))
972 } 984 }
973 985
974 /// Create a new blocking LPUART Receiver instance with RTS flow control 986 /// Create a new blocking LPUART Receiver instance with RTS flow control
@@ -983,7 +995,7 @@ impl<'a> LpuartRx<'a, Blocking> {
983 995
984 Lpuart::<Blocking>::init::<T>(false, true, false, true, config)?; 996 Lpuart::<Blocking>::init::<T>(false, true, false, true, config)?;
985 997
986 Ok(Self::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into()), None)) 998 Ok(Self::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into())))
987 } 999 }
988 1000
989 fn read_byte_internal(&mut self) -> Result<u8> { 1001 fn read_byte_internal(&mut self) -> Result<u8> {
@@ -1078,10 +1090,439 @@ impl<'a> Lpuart<'a, Blocking> {
1078} 1090}
1079 1091
1080// ============================================================================ 1092// ============================================================================
1081// ASYNC MODE IMPLEMENTATIONS 1093// ASYNC MODE IMPLEMENTATIONS (DMA-based)
1094// ============================================================================
1095
1096/// Guard struct that ensures DMA is stopped if the async future is cancelled.
1097///
1098/// This implements the RAII pattern: if the future is dropped before completion
1099/// (e.g., due to a timeout), the DMA transfer is automatically aborted to prevent
1100/// use-after-free when the buffer goes out of scope.
1101struct TxDmaGuard<'a, C: DmaChannelTrait> {
1102 dma: &'a DmaChannel<C>,
1103 regs: Regs,
1104}
1105
1106impl<'a, C: DmaChannelTrait> TxDmaGuard<'a, C> {
1107 fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self {
1108 Self { dma, regs }
1109 }
1110
1111 /// Complete the transfer normally (don't abort on drop).
1112 fn complete(self) {
1113 // Cleanup
1114 self.regs.baud().modify(|_, w| w.tdmae().disabled());
1115 unsafe {
1116 self.dma.disable_request();
1117 self.dma.clear_done();
1118 }
1119 // Don't run drop since we've cleaned up
1120 core::mem::forget(self);
1121 }
1122}
1123
1124impl<C: DmaChannelTrait> Drop for TxDmaGuard<'_, C> {
1125 fn drop(&mut self) {
1126 // Abort the DMA transfer if still running
1127 unsafe {
1128 self.dma.disable_request();
1129 self.dma.clear_done();
1130 self.dma.clear_interrupt();
1131 }
1132 // Disable UART TX DMA request
1133 self.regs.baud().modify(|_, w| w.tdmae().disabled());
1134 }
1135}
1136
1137/// Guard struct for RX DMA transfers.
1138struct RxDmaGuard<'a, C: DmaChannelTrait> {
1139 dma: &'a DmaChannel<C>,
1140 regs: Regs,
1141}
1142
1143impl<'a, C: DmaChannelTrait> RxDmaGuard<'a, C> {
1144 fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self {
1145 Self { dma, regs }
1146 }
1147
1148 /// Complete the transfer normally (don't abort on drop).
1149 fn complete(self) {
1150 // Ensure DMA writes are visible to CPU
1151 cortex_m::asm::dsb();
1152 // Cleanup
1153 self.regs.baud().modify(|_, w| w.rdmae().disabled());
1154 unsafe {
1155 self.dma.disable_request();
1156 self.dma.clear_done();
1157 }
1158 // Don't run drop since we've cleaned up
1159 core::mem::forget(self);
1160 }
1161}
1162
1163impl<C: DmaChannelTrait> Drop for RxDmaGuard<'_, C> {
1164 fn drop(&mut self) {
1165 // Abort the DMA transfer if still running
1166 unsafe {
1167 self.dma.disable_request();
1168 self.dma.clear_done();
1169 self.dma.clear_interrupt();
1170 }
1171 // Disable UART RX DMA request
1172 self.regs.baud().modify(|_, w| w.rdmae().disabled());
1173 }
1174}
1175
1176impl<'a, T: Instance, C: DmaChannelTrait> LpuartTxDma<'a, T, C> {
1177 /// Create a new LPUART TX driver with DMA support.
1178 pub fn new(
1179 _inner: Peri<'a, T>,
1180 tx_pin: Peri<'a, impl TxPin<T>>,
1181 tx_dma_ch: Peri<'a, C>,
1182 config: Config,
1183 ) -> Result<Self> {
1184 tx_pin.as_tx();
1185 let tx_pin: Peri<'a, AnyPin> = tx_pin.into();
1186
1187 // Initialize LPUART with TX enabled, RX disabled, no flow control
1188 Lpuart::<Async>::init::<T>(true, false, false, false, config)?;
1189
1190 Ok(Self {
1191 info: T::info(),
1192 _tx_pin: tx_pin,
1193 tx_dma: DmaChannel::new(tx_dma_ch),
1194 _instance: core::marker::PhantomData,
1195 })
1196 }
1197
1198 /// Write data using DMA.
1199 ///
1200 /// This configures the DMA channel for a memory-to-peripheral transfer
1201 /// and waits for completion asynchronously. Large buffers are automatically
1202 /// split into chunks that fit within the DMA transfer limit.
1203 ///
1204 /// The DMA request source is automatically derived from the LPUART instance type.
1205 ///
1206 /// # Safety
1207 ///
1208 /// If the returned future is dropped before completion (e.g., due to a timeout),
1209 /// the DMA transfer is automatically aborted to prevent use-after-free.
1210 ///
1211 /// # Arguments
1212 /// * `buf` - Data buffer to transmit
1213 pub async fn write_dma(&mut self, buf: &[u8]) -> Result<usize> {
1214 if buf.is_empty() {
1215 return Ok(0);
1216 }
1217
1218 let mut total = 0;
1219 for chunk in buf.chunks(DMA_MAX_TRANSFER_SIZE) {
1220 total += self.write_dma_inner(chunk).await?;
1221 }
1222
1223 Ok(total)
1224 }
1225
1226 /// Internal helper to write a single chunk (max 0x7FFF bytes) using DMA.
1227 async fn write_dma_inner(&mut self, buf: &[u8]) -> Result<usize> {
1228 let len = buf.len();
1229 let peri_addr = self.info.regs.data().as_ptr() as *mut u8;
1230
1231 unsafe {
1232 // Clean up channel state
1233 self.tx_dma.disable_request();
1234 self.tx_dma.clear_done();
1235 self.tx_dma.clear_interrupt();
1236
1237 // Set DMA request source from instance type (type-safe)
1238 self.tx_dma.set_request_source::<T::TxDmaRequest>();
1239
1240 // Configure TCD for memory-to-peripheral transfer
1241 self.tx_dma
1242 .setup_write_to_peripheral(buf, peri_addr, EnableInterrupt::Yes);
1243
1244 // Enable UART TX DMA request
1245 self.info.regs.baud().modify(|_, w| w.tdmae().enabled());
1246
1247 // Enable DMA channel request
1248 self.tx_dma.enable_request();
1249 }
1250
1251 // Create guard that will abort DMA if this future is dropped
1252 let guard = TxDmaGuard::new(&self.tx_dma, self.info.regs);
1253
1254 // Wait for completion asynchronously
1255 core::future::poll_fn(|cx| {
1256 self.tx_dma.waker().register(cx.waker());
1257 if self.tx_dma.is_done() {
1258 core::task::Poll::Ready(())
1259 } else {
1260 core::task::Poll::Pending
1261 }
1262 })
1263 .await;
1264
1265 // Transfer completed successfully - clean up without aborting
1266 guard.complete();
1267
1268 Ok(len)
1269 }
1270
1271 /// Blocking write (fallback when DMA is not needed)
1272 pub fn blocking_write(&mut self, buf: &[u8]) -> Result<()> {
1273 for &byte in buf {
1274 while self.info.regs.stat().read().tdre().is_txdata() {}
1275 self.info.regs.data().modify(|_, w| unsafe { w.bits(u32::from(byte)) });
1276 }
1277 Ok(())
1278 }
1279
1280 /// Flush TX blocking
1281 pub fn blocking_flush(&mut self) -> Result<()> {
1282 while self.info.regs.water().read().txcount().bits() != 0 {}
1283 while self.info.regs.stat().read().tc().is_active() {}
1284 Ok(())
1285 }
1286}
1287
1288impl<'a, T: Instance, C: DmaChannelTrait> LpuartRxDma<'a, T, C> {
1289 /// Create a new LPUART RX driver with DMA support.
1290 pub fn new(
1291 _inner: Peri<'a, T>,
1292 rx_pin: Peri<'a, impl RxPin<T>>,
1293 rx_dma_ch: Peri<'a, C>,
1294 config: Config,
1295 ) -> Result<Self> {
1296 rx_pin.as_rx();
1297 let rx_pin: Peri<'a, AnyPin> = rx_pin.into();
1298
1299 // Initialize LPUART with TX disabled, RX enabled, no flow control
1300 Lpuart::<Async>::init::<T>(false, true, false, false, config)?;
1301
1302 Ok(Self {
1303 info: T::info(),
1304 _rx_pin: rx_pin,
1305 rx_dma: DmaChannel::new(rx_dma_ch),
1306 _instance: core::marker::PhantomData,
1307 })
1308 }
1309
1310 /// Read data using DMA.
1311 ///
1312 /// This configures the DMA channel for a peripheral-to-memory transfer
1313 /// and waits for completion asynchronously. Large buffers are automatically
1314 /// split into chunks that fit within the DMA transfer limit.
1315 ///
1316 /// The DMA request source is automatically derived from the LPUART instance type.
1317 ///
1318 /// # Safety
1319 ///
1320 /// If the returned future is dropped before completion (e.g., due to a timeout),
1321 /// the DMA transfer is automatically aborted to prevent use-after-free.
1322 ///
1323 /// # Arguments
1324 /// * `buf` - Buffer to receive data into
1325 pub async fn read_dma(&mut self, buf: &mut [u8]) -> Result<usize> {
1326 if buf.is_empty() {
1327 return Ok(0);
1328 }
1329
1330 let mut total = 0;
1331 for chunk in buf.chunks_mut(DMA_MAX_TRANSFER_SIZE) {
1332 total += self.read_dma_inner(chunk).await?;
1333 }
1334
1335 Ok(total)
1336 }
1337
1338 /// Internal helper to read a single chunk (max 0x7FFF bytes) using DMA.
1339 async fn read_dma_inner(&mut self, buf: &mut [u8]) -> Result<usize> {
1340 let len = buf.len();
1341 let peri_addr = self.info.regs.data().as_ptr() as *const u8;
1342
1343 unsafe {
1344 // Clean up channel state
1345 self.rx_dma.disable_request();
1346 self.rx_dma.clear_done();
1347 self.rx_dma.clear_interrupt();
1348
1349 // Set DMA request source from instance type (type-safe)
1350 self.rx_dma.set_request_source::<T::RxDmaRequest>();
1351
1352 // Configure TCD for peripheral-to-memory transfer
1353 self.rx_dma
1354 .setup_read_from_peripheral(peri_addr, buf, EnableInterrupt::Yes);
1355
1356 // Enable UART RX DMA request
1357 self.info.regs.baud().modify(|_, w| w.rdmae().enabled());
1358
1359 // Enable DMA channel request
1360 self.rx_dma.enable_request();
1361 }
1362
1363 // Create guard that will abort DMA if this future is dropped
1364 let guard = RxDmaGuard::new(&self.rx_dma, self.info.regs);
1365
1366 // Wait for completion asynchronously
1367 core::future::poll_fn(|cx| {
1368 self.rx_dma.waker().register(cx.waker());
1369 if self.rx_dma.is_done() {
1370 core::task::Poll::Ready(())
1371 } else {
1372 core::task::Poll::Pending
1373 }
1374 })
1375 .await;
1376
1377 // Transfer completed successfully - clean up without aborting
1378 guard.complete();
1379
1380 Ok(len)
1381 }
1382
1383 /// Blocking read (fallback when DMA is not needed)
1384 pub fn blocking_read(&mut self, buf: &mut [u8]) -> Result<()> {
1385 for byte in buf.iter_mut() {
1386 loop {
1387 if has_data(self.info.regs) {
1388 *byte = (self.info.regs.data().read().bits() & 0xFF) as u8;
1389 break;
1390 }
1391 check_and_clear_rx_errors(self.info.regs)?;
1392 }
1393 }
1394 Ok(())
1395 }
1396
1397 /// Set up a ring buffer for continuous DMA reception.
1398 ///
1399 /// This configures the DMA channel for circular operation, enabling continuous
1400 /// reception of data without gaps. The DMA will continuously write received
1401 /// bytes into the buffer, wrapping around when it reaches the end.
1402 ///
1403 /// This method encapsulates all the low-level setup:
1404 /// - Configures the DMA request source for this LPUART instance
1405 /// - Enables the RX DMA request in the LPUART peripheral
1406 /// - Sets up the circular DMA transfer
1407 /// - Enables the NVIC interrupt for async wakeups
1408 ///
1409 /// # Arguments
1410 ///
1411 /// * `buf` - Destination buffer for received data (power-of-2 size is ideal for efficiency)
1412 ///
1413 /// # Returns
1414 ///
1415 /// A [`RingBuffer`] that can be used to asynchronously read received data.
1416 ///
1417 /// # Example
1418 ///
1419 /// ```no_run
1420 /// static mut RX_BUF: [u8; 64] = [0; 64];
1421 ///
1422 /// let rx = LpuartRxDma::new(p.LPUART2, p.P2_3, p.DMA_CH0, config).unwrap();
1423 /// let ring_buf = unsafe { rx.setup_ring_buffer(&mut RX_BUF) };
1424 ///
1425 /// // Read data as it arrives
1426 /// let mut buf = [0u8; 16];
1427 /// let n = ring_buf.read(&mut buf).await.unwrap();
1428 /// ```
1429 ///
1430 /// # Safety
1431 ///
1432 /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
1433 /// - Only one RingBuffer should exist per LPUART RX channel at a time.
1434 /// - The caller must ensure the static buffer is not accessed elsewhere while
1435 /// the ring buffer is active.
1436 pub unsafe fn setup_ring_buffer<'b>(&self, buf: &'b mut [u8]) -> RingBuffer<'b, u8> {
1437 // Get the peripheral data register address
1438 let peri_addr = self.info.regs.data().as_ptr() as *const u8;
1439
1440 // Configure DMA request source for this LPUART instance (type-safe)
1441 self.rx_dma.set_request_source::<T::RxDmaRequest>();
1442
1443 // Enable RX DMA request in the LPUART peripheral
1444 self.info.regs.baud().modify(|_, w| w.rdmae().enabled());
1445
1446 // Set up circular DMA transfer (this also enables NVIC interrupt)
1447 self.rx_dma.setup_circular_read(peri_addr, buf)
1448 }
1449
1450 /// Enable the DMA channel request.
1451 ///
1452 /// Call this after `setup_ring_buffer()` to start continuous reception.
1453 /// This is separated from setup to allow for any additional configuration
1454 /// before starting the transfer.
1455 pub unsafe fn enable_dma_request(&self) {
1456 self.rx_dma.enable_request();
1457 }
1458}
1459
1460impl<'a, T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> LpuartDma<'a, T, TxC, RxC> {
1461 /// Create a new LPUART driver with DMA support for both TX and RX.
1462 pub fn new(
1463 _inner: Peri<'a, T>,
1464 tx_pin: Peri<'a, impl TxPin<T>>,
1465 rx_pin: Peri<'a, impl RxPin<T>>,
1466 tx_dma_ch: Peri<'a, TxC>,
1467 rx_dma_ch: Peri<'a, RxC>,
1468 config: Config,
1469 ) -> Result<Self> {
1470 tx_pin.as_tx();
1471 rx_pin.as_rx();
1472
1473 let tx_pin: Peri<'a, AnyPin> = tx_pin.into();
1474 let rx_pin: Peri<'a, AnyPin> = rx_pin.into();
1475
1476 // Initialize LPUART with both TX and RX enabled, no flow control
1477 Lpuart::<Async>::init::<T>(true, true, false, false, config)?;
1478
1479 Ok(Self {
1480 tx: LpuartTxDma {
1481 info: T::info(),
1482 _tx_pin: tx_pin,
1483 tx_dma: DmaChannel::new(tx_dma_ch),
1484 _instance: core::marker::PhantomData,
1485 },
1486 rx: LpuartRxDma {
1487 info: T::info(),
1488 _rx_pin: rx_pin,
1489 rx_dma: DmaChannel::new(rx_dma_ch),
1490 _instance: core::marker::PhantomData,
1491 },
1492 })
1493 }
1494
1495 /// Split into separate TX and RX drivers
1496 pub fn split(self) -> (LpuartTxDma<'a, T, TxC>, LpuartRxDma<'a, T, RxC>) {
1497 (self.tx, self.rx)
1498 }
1499
1500 /// Write data using DMA
1501 pub async fn write_dma(&mut self, buf: &[u8]) -> Result<usize> {
1502 self.tx.write_dma(buf).await
1503 }
1504
1505 /// Read data using DMA
1506 pub async fn read_dma(&mut self, buf: &mut [u8]) -> Result<usize> {
1507 self.rx.read_dma(buf).await
1508 }
1509}
1510
1511// ============================================================================
1512// EMBEDDED-IO-ASYNC TRAIT IMPLEMENTATIONS
1082// ============================================================================ 1513// ============================================================================
1083 1514
1084// TODO: Implement async mode for LPUART 1515impl<T: Instance, C: DmaChannelTrait> embedded_io::ErrorType for LpuartTxDma<'_, T, C> {
1516 type Error = Error;
1517}
1518
1519impl<T: Instance, C: DmaChannelTrait> embedded_io::ErrorType for LpuartRxDma<'_, T, C> {
1520 type Error = Error;
1521}
1522
1523impl<T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> embedded_io::ErrorType for LpuartDma<'_, T, TxC, RxC> {
1524 type Error = Error;
1525}
1085 1526
1086// ============================================================================ 1527// ============================================================================
1087// EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS 1528// EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS
diff --git a/embassy-mcxa/src/pins.rs b/embassy-mcxa/src/pins.rs
index fdf1b0a86..9adbe64c8 100644
--- a/embassy-mcxa/src/pins.rs
+++ b/embassy-mcxa/src/pins.rs
@@ -1,6 +1,11 @@
1//! Pin configuration helpers (separate from peripheral drivers). 1//! Pin configuration helpers (separate from peripheral drivers).
2use crate::pac; 2use crate::pac;
3 3
4/// Configure pins for ADC usage.
5///
6/// # Safety
7///
8/// Must be called after PORT clocks are enabled.
4pub unsafe fn configure_adc_pins() { 9pub unsafe fn configure_adc_pins() {
5 // P1_10 = ADC1_A8 10 // P1_10 = ADC1_A8
6 let port1 = &*pac::Port1::ptr(); 11 let port1 = &*pac::Port1::ptr();
diff --git a/examples/mcxa/src/bin/dma_channel_link.rs b/examples/mcxa/src/bin/dma_channel_link.rs
new file mode 100644
index 000000000..92c7a9681
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_channel_link.rs
@@ -0,0 +1,372 @@
1//! DMA channel linking example for MCXA276.
2//!
3//! This example demonstrates DMA channel linking (minor and major loop linking):
4//! - Channel 0: Transfers SRC_BUFFER to DEST_BUFFER0, with:
5//! - Minor Link to Channel 1 (triggers CH1 after each minor loop)
6//! - Major Link to Channel 2 (triggers CH2 after major loop completes)
7//! - Channel 1: Transfers SRC_BUFFER to DEST_BUFFER1 (triggered by CH0 minor link)
8//! - Channel 2: Transfers SRC_BUFFER to DEST_BUFFER2 (triggered by CH0 major link)
9//!
10//! # Embassy-style features demonstrated:
11//! - `DmaChannel::new()` for channel creation
12//! - `DmaChannel::is_done()` and `clear_done()` helper methods
13//! - Channel linking with `set_minor_link()` and `set_major_link()`
14//! - Standard `DmaCh*InterruptHandler` with `bind_interrupts!` macro
15
16#![no_std]
17#![no_main]
18
19use embassy_executor::Spawner;
20use embassy_mcxa::clocks::config::Div8;
21use embassy_mcxa::dma::{DmaCh0InterruptHandler, DmaCh1InterruptHandler, DmaCh2InterruptHandler, DmaChannel};
22use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
23use embassy_mcxa::{bind_interrupts, pac};
24use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
25
26// Buffers
27static mut SRC_BUFFER: [u32; 4] = [1, 2, 3, 4];
28static mut DEST_BUFFER0: [u32; 4] = [0; 4];
29static mut DEST_BUFFER1: [u32; 4] = [0; 4];
30static mut DEST_BUFFER2: [u32; 4] = [0; 4];
31
32// Bind DMA channel interrupts using Embassy-style macro
33// The standard handlers call on_interrupt() which wakes wakers and clears flags
34bind_interrupts!(struct Irqs {
35 DMA_CH0 => DmaCh0InterruptHandler;
36 DMA_CH1 => DmaCh1InterruptHandler;
37 DMA_CH2 => DmaCh2InterruptHandler;
38});
39
40/// Helper to write a u32 as decimal ASCII to UART
41fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
42 let mut buf = [0u8; 10];
43 let mut n = val;
44 let mut i = buf.len();
45
46 if n == 0 {
47 tx.blocking_write(b"0").ok();
48 return;
49 }
50
51 while n > 0 {
52 i -= 1;
53 buf[i] = b'0' + (n % 10) as u8;
54 n /= 10;
55 }
56
57 tx.blocking_write(&buf[i..]).ok();
58}
59
60/// Helper to print a buffer to UART
61fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
62 tx.blocking_write(b"[").ok();
63 unsafe {
64 for i in 0..len {
65 write_u32(tx, *buf_ptr.add(i));
66 if i < len - 1 {
67 tx.blocking_write(b", ").ok();
68 }
69 }
70 }
71 tx.blocking_write(b"]").ok();
72}
73
74#[embassy_executor::main]
75async fn main(_spawner: Spawner) {
76 // Small delay to allow probe-rs to attach after reset
77 for _ in 0..100_000 {
78 cortex_m::asm::nop();
79 }
80
81 let mut cfg = hal::config::Config::default();
82 cfg.clock_cfg.sirc.fro_12m_enabled = true;
83 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
84 let p = hal::init(cfg);
85
86 defmt::info!("DMA channel link example starting...");
87
88 // DMA is initialized during hal::init() - no need to call ensure_init()
89
90 let pac_periphs = unsafe { pac::Peripherals::steal() };
91 let dma0 = &pac_periphs.dma0;
92 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
93
94 // Clear any residual state
95 for i in 0..3 {
96 let t = edma.tcd(i);
97 t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
98 t.ch_int().write(|w| w.int().clear_bit_by_one());
99 t.ch_es().write(|w| w.err().clear_bit_by_one());
100 t.ch_mux().write(|w| unsafe { w.bits(0) });
101 }
102
103 // Clear Global Halt/Error state
104 dma0.mp_csr().modify(|_, w| {
105 w.halt()
106 .normal_operation()
107 .hae()
108 .normal_operation()
109 .ecx()
110 .normal_operation()
111 .cx()
112 .normal_operation()
113 });
114
115 unsafe {
116 cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
117 cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH1);
118 cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH2);
119 }
120
121 let config = Config {
122 baudrate_bps: 115_200,
123 ..Default::default()
124 };
125
126 let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
127 let (mut tx, _rx) = lpuart.split();
128
129 tx.blocking_write(b"EDMA channel link example begin.\r\n\r\n").unwrap();
130
131 // Initialize buffers
132 unsafe {
133 SRC_BUFFER = [1, 2, 3, 4];
134 DEST_BUFFER0 = [0; 4];
135 DEST_BUFFER1 = [0; 4];
136 DEST_BUFFER2 = [0; 4];
137 }
138
139 tx.blocking_write(b"Source Buffer: ").unwrap();
140 print_buffer(&mut tx, core::ptr::addr_of!(SRC_BUFFER) as *const u32, 4);
141 tx.blocking_write(b"\r\n").unwrap();
142
143 tx.blocking_write(b"DEST0 (before): ").unwrap();
144 print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER0) as *const u32, 4);
145 tx.blocking_write(b"\r\n").unwrap();
146
147 tx.blocking_write(b"DEST1 (before): ").unwrap();
148 print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER1) as *const u32, 4);
149 tx.blocking_write(b"\r\n").unwrap();
150
151 tx.blocking_write(b"DEST2 (before): ").unwrap();
152 print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER2) as *const u32, 4);
153 tx.blocking_write(b"\r\n\r\n").unwrap();
154
155 tx.blocking_write(b"Configuring DMA channels with Embassy-style API...\r\n")
156 .unwrap();
157
158 let ch0 = DmaChannel::new(p.DMA_CH0);
159 let ch1 = DmaChannel::new(p.DMA_CH1);
160 let ch2 = DmaChannel::new(p.DMA_CH2);
161
162 // Configure channels using direct TCD access (advanced feature demo)
163 // This example demonstrates channel linking which requires direct TCD manipulation
164
165 // Helper to configure TCD for memory-to-memory transfer
166 // Parameters: channel, src, dst, width, nbytes (minor loop), count (major loop), interrupt
167 #[allow(clippy::too_many_arguments)]
168 unsafe fn configure_tcd(
169 edma: &embassy_mcxa::pac::edma_0_tcd0::RegisterBlock,
170 ch: usize,
171 src: u32,
172 dst: u32,
173 width: u8,
174 nbytes: u32,
175 count: u16,
176 enable_int: bool,
177 ) {
178 let t = edma.tcd(ch);
179
180 // Reset channel state
181 t.ch_csr().write(|w| {
182 w.erq()
183 .disable()
184 .earq()
185 .disable()
186 .eei()
187 .no_error()
188 .ebw()
189 .disable()
190 .done()
191 .clear_bit_by_one()
192 });
193 t.ch_es().write(|w| w.bits(0));
194 t.ch_int().write(|w| w.int().clear_bit_by_one());
195
196 // Source/destination addresses
197 t.tcd_saddr().write(|w| w.saddr().bits(src));
198 t.tcd_daddr().write(|w| w.daddr().bits(dst));
199
200 // Offsets: increment by width
201 t.tcd_soff().write(|w| w.soff().bits(width as u16));
202 t.tcd_doff().write(|w| w.doff().bits(width as u16));
203
204 // Attributes: size = log2(width)
205 let size = match width {
206 1 => 0,
207 2 => 1,
208 4 => 2,
209 _ => 0,
210 };
211 t.tcd_attr().write(|w| w.ssize().bits(size).dsize().bits(size));
212
213 // Number of bytes per minor loop
214 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));
215
216 // Major loop: reset source address after major loop
217 let total_bytes = nbytes * count as u32;
218 t.tcd_slast_sda()
219 .write(|w| w.slast_sda().bits(-(total_bytes as i32) as u32));
220 t.tcd_dlast_sga()
221 .write(|w| w.dlast_sga().bits(-(total_bytes as i32) as u32));
222
223 // Major loop count
224 t.tcd_biter_elinkno().write(|w| w.biter().bits(count));
225 t.tcd_citer_elinkno().write(|w| w.citer().bits(count));
226
227 // Control/status: enable interrupt if requested
228 if enable_int {
229 t.tcd_csr().write(|w| w.intmajor().set_bit());
230 } else {
231 t.tcd_csr().write(|w| w.intmajor().clear_bit());
232 }
233
234 cortex_m::asm::dsb();
235 }
236
237 unsafe {
238 // Channel 0: Transfer 16 bytes total (8 bytes per minor loop, 2 major iterations)
239 // Minor Link -> Channel 1
240 // Major Link -> Channel 2
241 configure_tcd(
242 edma,
243 0,
244 core::ptr::addr_of!(SRC_BUFFER) as u32,
245 core::ptr::addr_of_mut!(DEST_BUFFER0) as u32,
246 4, // src width
247 8, // nbytes (minor loop = 2 words)
248 2, // count (major loop = 2 iterations)
249 false, // no interrupt
250 );
251 ch0.set_minor_link(1); // Link to CH1 after each minor loop
252 ch0.set_major_link(2); // Link to CH2 after major loop
253
254 // Channel 1: Transfer 16 bytes (triggered by CH0 minor link)
255 configure_tcd(
256 edma,
257 1,
258 core::ptr::addr_of!(SRC_BUFFER) as u32,
259 core::ptr::addr_of_mut!(DEST_BUFFER1) as u32,
260 4,
261 16, // full buffer in one minor loop
262 1, // 1 major iteration
263 false,
264 );
265
266 // Channel 2: Transfer 16 bytes (triggered by CH0 major link)
267 configure_tcd(
268 edma,
269 2,
270 core::ptr::addr_of!(SRC_BUFFER) as u32,
271 core::ptr::addr_of_mut!(DEST_BUFFER2) as u32,
272 4,
273 16, // full buffer in one minor loop
274 1, // 1 major iteration
275 true, // enable interrupt
276 );
277 }
278
279 tx.blocking_write(b"Triggering Channel 0 (1st minor loop)...\r\n")
280 .unwrap();
281
282 // Trigger first minor loop of CH0
283 unsafe {
284 ch0.trigger_start();
285 }
286
287 // Wait for CH1 to complete (triggered by CH0 minor link)
288 while !ch1.is_done() {
289 cortex_m::asm::nop();
290 }
291 unsafe {
292 ch1.clear_done();
293 }
294
295 tx.blocking_write(b"CH1 done (via minor link).\r\n").unwrap();
296 tx.blocking_write(b"Triggering Channel 0 (2nd minor loop)...\r\n")
297 .unwrap();
298
299 // Trigger second minor loop of CH0
300 unsafe {
301 ch0.trigger_start();
302 }
303
304 // Wait for CH0 major loop to complete
305 while !ch0.is_done() {
306 cortex_m::asm::nop();
307 }
308 unsafe {
309 ch0.clear_done();
310 }
311
312 tx.blocking_write(b"CH0 major loop done.\r\n").unwrap();
313
314 // Wait for CH2 to complete (triggered by CH0 major link)
315 // Using is_done() instead of AtomicBool - the standard interrupt handler
316 // clears the interrupt flag and wakes wakers, but DONE bit remains set
317 while !ch2.is_done() {
318 cortex_m::asm::nop();
319 }
320 unsafe {
321 ch2.clear_done();
322 }
323
324 tx.blocking_write(b"CH2 done (via major link).\r\n\r\n").unwrap();
325
326 tx.blocking_write(b"EDMA channel link example finish.\r\n\r\n").unwrap();
327
328 tx.blocking_write(b"DEST0 (after): ").unwrap();
329 print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER0) as *const u32, 4);
330 tx.blocking_write(b"\r\n").unwrap();
331
332 tx.blocking_write(b"DEST1 (after): ").unwrap();
333 print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER1) as *const u32, 4);
334 tx.blocking_write(b"\r\n").unwrap();
335
336 tx.blocking_write(b"DEST2 (after): ").unwrap();
337 print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER2) as *const u32, 4);
338 tx.blocking_write(b"\r\n\r\n").unwrap();
339
340 // Verify all buffers match source
341 let mut success = true;
342 unsafe {
343 let src_ptr = core::ptr::addr_of!(SRC_BUFFER) as *const u32;
344 let dst0_ptr = core::ptr::addr_of!(DEST_BUFFER0) as *const u32;
345 let dst1_ptr = core::ptr::addr_of!(DEST_BUFFER1) as *const u32;
346 let dst2_ptr = core::ptr::addr_of!(DEST_BUFFER2) as *const u32;
347
348 for i in 0..4 {
349 if *dst0_ptr.add(i) != *src_ptr.add(i) {
350 success = false;
351 }
352 if *dst1_ptr.add(i) != *src_ptr.add(i) {
353 success = false;
354 }
355 if *dst2_ptr.add(i) != *src_ptr.add(i) {
356 success = false;
357 }
358 }
359 }
360
361 if success {
362 tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
363 defmt::info!("PASS: Data verified.");
364 } else {
365 tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
366 defmt::error!("FAIL: Mismatch detected!");
367 }
368
369 loop {
370 cortex_m::asm::wfe();
371 }
372}
diff --git a/examples/mcxa/src/bin/dma_interleave_transfer.rs b/examples/mcxa/src/bin/dma_interleave_transfer.rs
new file mode 100644
index 000000000..7876e8978
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_interleave_transfer.rs
@@ -0,0 +1,215 @@
1//! DMA interleaved transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA with custom source/destination offsets
4//! to interleave data during transfer.
5//!
6//! # Embassy-style features demonstrated:
7//! - `TransferOptions::default()` for configuration (used internally)
8//! - DMA channel with `DmaChannel::new()`
9
10#![no_std]
11#![no_main]
12
13use embassy_executor::Spawner;
14use embassy_mcxa::clocks::config::Div8;
15use embassy_mcxa::dma::{DmaCh0InterruptHandler, DmaChannel};
16use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
17use embassy_mcxa::{bind_interrupts, pac};
18use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
19
// Bind DMA channel 0 interrupt using Embassy-style macro.
// The generated `Irqs` type proves at compile time that the handler is bound.
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
});

// Destination holds twice as many words as the source: the TCD in `main` uses
// a 8-byte destination offset, spreading source words across every other slot.
const BUFFER_LENGTH: usize = 16;
const HALF_BUFF_LENGTH: usize = BUFFER_LENGTH / 2;

// Buffers in RAM. `static mut` gives them fixed addresses, which `main`
// programs into the DMA TCD source/destination registers.
static mut SRC_BUFFER: [u32; HALF_BUFF_LENGTH] = [0; HALF_BUFF_LENGTH];
static mut DEST_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];
31
32/// Helper to write a u32 as decimal ASCII to UART
33fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
34 let mut buf = [0u8; 10];
35 let mut n = val;
36 let mut i = buf.len();
37
38 if n == 0 {
39 tx.blocking_write(b"0").ok();
40 return;
41 }
42
43 while n > 0 {
44 i -= 1;
45 buf[i] = b'0' + (n % 10) as u8;
46 n /= 10;
47 }
48
49 tx.blocking_write(&buf[i..]).ok();
50}
51
52/// Helper to print a buffer to UART
53fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
54 tx.blocking_write(b"[").ok();
55 unsafe {
56 for i in 0..len {
57 write_u32(tx, *buf_ptr.add(i));
58 if i < len - 1 {
59 tx.blocking_write(b", ").ok();
60 }
61 }
62 }
63 tx.blocking_write(b"]").ok();
64}
65
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Enable the 12 MHz FRO (undivided) before bringing up the HAL.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA interleave transfer example starting...");

    // Enable DMA interrupt (DMA clock/reset/init is handled automatically by HAL)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA interleave transfer example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        SRC_BUFFER = [1, 2, 3, 4, 5, 6, 7, 8];
        DEST_BUFFER = [0; BUFFER_LENGTH];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC_BUFFER) as *const u32, HALF_BUFF_LENGTH);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER) as *const u32, BUFFER_LENGTH);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
        .unwrap();

    // Create DMA channel using Embassy-style API
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure interleaved transfer using direct TCD access:
    // - src_offset = 4: advance source by 4 bytes after each read
    // - dst_offset = 8: advance dest by 8 bytes after each write
    // This spreads source data across every other word in destination
    //
    // NOTE(review): the register writes below follow a fixed order
    // (channel reset, addresses, offsets, attrs, counts, CSR); keep that
    // order when editing.
    unsafe {
        let t = dma_ch0.tcd();

        // Reset channel state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source/destination addresses
        t.tcd_saddr()
            .write(|w| w.saddr().bits(core::ptr::addr_of_mut!(SRC_BUFFER) as u32));
        t.tcd_daddr()
            .write(|w| w.daddr().bits(core::ptr::addr_of_mut!(DEST_BUFFER) as u32));

        // Custom offsets for interleaving
        t.tcd_soff().write(|w| w.soff().bits(4)); // src: +4 bytes per read
        t.tcd_doff().write(|w| w.doff().bits(8)); // dst: +8 bytes per write

        // Attributes: 32-bit transfers (size = 2)
        t.tcd_attr().write(|w| w.ssize().bits(2).dsize().bits(2));

        // Transfer entire source buffer in one minor loop
        let nbytes = (HALF_BUFF_LENGTH * 4) as u32;
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));

        // Reset source address after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(-(nbytes as i32) as u32));
        // Destination uses 2x offset, so adjust accordingly
        let dst_total = (HALF_BUFF_LENGTH * 8) as u32;
        t.tcd_dlast_sga()
            .write(|w| w.dlast_sga().bits(-(dst_total as i32) as u32));

        // Major loop count = 1
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Enable interrupt on major loop completion
        t.tcd_csr().write(|w| w.intmajor().set_bit());

        // Barrier: make sure all TCD writes complete before the start trigger.
        cortex_m::asm::dsb();

        tx.blocking_write(b"Triggering transfer...\r\n").unwrap();
        dma_ch0.trigger_start();
    }

    // Wait for completion using channel helper method
    while !dma_ch0.is_done() {
        cortex_m::asm::nop();
    }
    unsafe {
        dma_ch0.clear_done();
    }

    tx.blocking_write(b"\r\nEDMA interleave transfer example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER) as *const u32, BUFFER_LENGTH);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: Even indices should match SRC_BUFFER[i/2], odd indices should be 0
    let mut mismatch = false;
    unsafe {
        for i in 0..BUFFER_LENGTH {
            if i % 2 == 0 {
                if DEST_BUFFER[i] != SRC_BUFFER[i / 2] {
                    mismatch = true;
                }
            } else if DEST_BUFFER[i] != 0 {
                mismatch = true;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    }

    loop {
        cortex_m::asm::wfe();
    }
}
diff --git a/examples/mcxa/src/bin/dma_mem_to_mem.rs b/examples/mcxa/src/bin/dma_mem_to_mem.rs
new file mode 100644
index 000000000..68f70e742
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_mem_to_mem.rs
@@ -0,0 +1,229 @@
1//! DMA memory-to-memory transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA to copy data between memory buffers
4//! using the Embassy-style async API with type-safe transfers.
5//!
6//! # Embassy-style features demonstrated:
7//! - `TransferOptions` for configuration
8//! - Type-safe `mem_to_mem<u32>()` method with async `.await`
9//! - `Transfer` Future that can be `.await`ed
10//! - `Word` trait for automatic transfer width detection
11//! - `memset()` method for filling memory with a pattern
12
13#![no_std]
14#![no_main]
15
16use embassy_executor::Spawner;
17use embassy_mcxa::clocks::config::Div8;
18use embassy_mcxa::dma::{DmaCh0InterruptHandler, DmaChannel, TransferOptions};
19use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
20use embassy_mcxa::{bind_interrupts, pac};
21use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
22
// Bind DMA channel 0 interrupt using Embassy-style macro.
// The generated `Irqs` type proves at compile time that the handler is bound.
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
});

const BUFFER_LENGTH: usize = 4;

// Buffers in RAM (static mut is automatically placed in .bss/.data).
// Fixed addresses are required because the DMA engine reads/writes them;
// `main` only touches them via raw-pointer helpers.
static mut SRC_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];
static mut DEST_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];
static mut MEMSET_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];
34
35/// Helper to write a u32 as decimal ASCII to UART
36fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
37 let mut buf = [0u8; 10]; // u32 max is 4294967295 (10 digits)
38 let mut n = val;
39 let mut i = buf.len();
40
41 if n == 0 {
42 tx.blocking_write(b"0").ok();
43 return;
44 }
45
46 while n > 0 {
47 i -= 1;
48 buf[i] = b'0' + (n % 10) as u8;
49 n /= 10;
50 }
51
52 tx.blocking_write(&buf[i..]).ok();
53}
54
55/// Helper to print a buffer as [v1, v2, v3, v4] to UART
56/// Takes a raw pointer to avoid warnings about shared references to mutable statics
57fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const [u32; BUFFER_LENGTH]) {
58 tx.blocking_write(b"[").ok();
59 unsafe {
60 let buf = &*buf_ptr;
61 for (i, val) in buf.iter().enumerate() {
62 write_u32(tx, *val);
63 if i < buf.len() - 1 {
64 tx.blocking_write(b", ").ok();
65 }
66 }
67 }
68 tx.blocking_write(b"]").ok();
69}
70
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Enable the 12 MHz FRO (undivided) before bringing up the HAL.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA memory-to-memory example starting...");

    // Enable DMA interrupt (DMA clock/reset/init is handled automatically by HAL)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    // Create UART for debug output
    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA memory to memory example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        SRC_BUFFER = [1, 2, 3, 4];
        DEST_BUFFER = [0; BUFFER_LENGTH];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, &raw const SRC_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, &raw const DEST_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
        .unwrap();

    // Create DMA channel
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure transfer options (Embassy-style)
    // TransferOptions defaults to: complete_transfer_interrupt = true
    let options = TransferOptions::default();

    // =========================================================================
    // Part 1: Embassy-style async API demonstration (mem_to_mem)
    // =========================================================================
    //
    // Use the new type-safe `mem_to_mem<u32>()` method:
    // - Automatically determines transfer width from buffer element type (u32)
    // - Returns a `Transfer` future that can be `.await`ed
    // - Uses TransferOptions for consistent configuration
    //
    // Using async `.await` - the executor can run other tasks while waiting!

    // Perform type-safe memory-to-memory transfer using Embassy-style async API
    unsafe {
        // Raw-pointer reborrows sidestep direct references to `static mut`.
        let src = &*core::ptr::addr_of!(SRC_BUFFER);
        let dst = &mut *core::ptr::addr_of_mut!(DEST_BUFFER);

        // Using async `.await` - the executor can run other tasks while waiting!
        let transfer = dma_ch0.mem_to_mem(src, dst, options);
        transfer.await;
    }

    tx.blocking_write(b"DMA mem-to-mem transfer complete!\r\n\r\n").unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, &raw const DEST_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    // Verify data
    let mut mismatch = false;
    unsafe {
        for i in 0..BUFFER_LENGTH {
            if SRC_BUFFER[i] != DEST_BUFFER[i] {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: mem_to_mem mismatch!\r\n").unwrap();
        defmt::error!("FAIL: mem_to_mem mismatch!");
    } else {
        tx.blocking_write(b"PASS: mem_to_mem verified.\r\n\r\n").unwrap();
        defmt::info!("PASS: mem_to_mem verified.");
    }

    // =========================================================================
    // Part 2: memset() demonstration
    // =========================================================================
    //
    // The `memset()` method fills a buffer with a pattern value:
    // - Fixed source address (pattern is read repeatedly)
    // - Incrementing destination address
    // - Uses the same Transfer future pattern

    tx.blocking_write(b"--- Demonstrating memset() feature ---\r\n\r\n")
        .unwrap();

    tx.blocking_write(b"Memset Buffer (before): ").unwrap();
    print_buffer(&mut tx, &raw const MEMSET_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    // Fill buffer with a pattern value using DMA memset
    let pattern: u32 = 0xDEADBEEF;
    tx.blocking_write(b"Filling with pattern 0xDEADBEEF...\r\n").unwrap();

    unsafe {
        let dst = &mut *core::ptr::addr_of_mut!(MEMSET_BUFFER);

        // Using blocking_wait() for demonstration - also shows non-async usage
        let transfer = dma_ch0.memset(&pattern, dst, options);
        transfer.blocking_wait();
    }

    tx.blocking_write(b"DMA memset complete!\r\n\r\n").unwrap();
    tx.blocking_write(b"Memset Buffer (after): ").unwrap();
    print_buffer(&mut tx, &raw const MEMSET_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    // Verify memset result
    let mut memset_ok = true;
    unsafe {
        #[allow(clippy::needless_range_loop)]
        for i in 0..BUFFER_LENGTH {
            if MEMSET_BUFFER[i] != pattern {
                memset_ok = false;
                break;
            }
        }
    }

    if !memset_ok {
        tx.blocking_write(b"FAIL: memset mismatch!\r\n").unwrap();
        defmt::error!("FAIL: memset mismatch!");
    } else {
        tx.blocking_write(b"PASS: memset verified.\r\n\r\n").unwrap();
        defmt::info!("PASS: memset verified.");
    }

    tx.blocking_write(b"=== All DMA tests complete ===\r\n").unwrap();

    loop {
        cortex_m::asm::wfe();
    }
}
diff --git a/examples/mcxa/src/bin/dma_memset.rs b/examples/mcxa/src/bin/dma_memset.rs
new file mode 100644
index 000000000..95e365e47
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_memset.rs
@@ -0,0 +1,218 @@
1//! DMA memset example for MCXA276.
2//!
3//! This example demonstrates using DMA to fill a buffer with a repeated pattern.
4//! The source address stays fixed while the destination increments.
5//!
6//! # Embassy-style features demonstrated:
7//! - `DmaChannel::is_done()` and `clear_done()` helper methods
8//! - No need to pass register block around
9
10#![no_std]
11#![no_main]
12
13use embassy_executor::Spawner;
14use embassy_mcxa::clocks::config::Div8;
15use embassy_mcxa::dma::{DmaCh0InterruptHandler, DmaChannel};
16use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
17use embassy_mcxa::{bind_interrupts, pac};
18use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
19
// Bind DMA channel 0 interrupt using Embassy-style macro.
// The generated `Irqs` type proves at compile time that the handler is bound.
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
});

const BUFFER_LENGTH: usize = 4;

// Buffers in RAM. `PATTERN` is the fixed-address source word the DMA reads
// repeatedly (soff = 0 in the TCD); `DEST_BUFFER` is filled with it.
static mut PATTERN: u32 = 0;
static mut DEST_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];
30
31/// Helper to write a u32 as decimal ASCII to UART
32fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
33 let mut buf = [0u8; 10];
34 let mut n = val;
35 let mut i = buf.len();
36
37 if n == 0 {
38 tx.blocking_write(b"0").ok();
39 return;
40 }
41
42 while n > 0 {
43 i -= 1;
44 buf[i] = b'0' + (n % 10) as u8;
45 n /= 10;
46 }
47
48 tx.blocking_write(&buf[i..]).ok();
49}
50
51/// Helper to print a buffer to UART
52fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
53 tx.blocking_write(b"[").ok();
54 unsafe {
55 for i in 0..len {
56 write_u32(tx, *buf_ptr.add(i));
57 if i < len - 1 {
58 tx.blocking_write(b", ").ok();
59 }
60 }
61 }
62 tx.blocking_write(b"]").ok();
63}
64
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Enable the 12 MHz FRO (undivided) before bringing up the HAL.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA memset example starting...");

    // Enable DMA interrupt (DMA clock/reset/init is handled automatically by HAL)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA memset example begin.\r\n\r\n").unwrap();

    // Initialize buffers
    unsafe {
        PATTERN = 0xDEADBEEF;
        DEST_BUFFER = [0; BUFFER_LENGTH];
    }

    tx.blocking_write(b"Pattern value: 0x").unwrap();
    // Print pattern in hex (8 nibbles, most-significant first)
    unsafe {
        let hex_chars = b"0123456789ABCDEF";
        let mut hex_buf = [0u8; 8];
        let mut val = PATTERN;
        for i in (0..8).rev() {
            hex_buf[i] = hex_chars[(val & 0xF) as usize];
            val >>= 4;
        }
        tx.blocking_write(&hex_buf).ok();
    }
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER) as *const u32, BUFFER_LENGTH);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
        .unwrap();

    // Create DMA channel using Embassy-style API
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure memset transfer using direct TCD access:
    // Source stays fixed (soff = 0, reads same pattern repeatedly)
    // Destination increments (doff = 4)
    //
    // NOTE(review): the register writes below follow a fixed order
    // (channel reset, addresses, offsets, attrs, counts, CSR); keep that
    // order when editing.
    unsafe {
        let t = dma_ch0.tcd();

        // Reset channel state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source address (pattern) - fixed
        t.tcd_saddr()
            .write(|w| w.saddr().bits(core::ptr::addr_of_mut!(PATTERN) as u32));
        // Destination address - increments
        t.tcd_daddr()
            .write(|w| w.daddr().bits(core::ptr::addr_of_mut!(DEST_BUFFER) as u32));

        // Source offset = 0 (stays fixed), Dest offset = 4 (increments)
        t.tcd_soff().write(|w| w.soff().bits(0));
        t.tcd_doff().write(|w| w.doff().bits(4));

        // Attributes: 32-bit transfers (size = 2)
        t.tcd_attr().write(|w| w.ssize().bits(2).dsize().bits(2));

        // Transfer entire buffer in one minor loop
        let nbytes = (BUFFER_LENGTH * 4) as u32;
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));

        // Source doesn't need adjustment (stays fixed)
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        // Reset dest address after major loop
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(-(nbytes as i32) as u32));

        // Major loop count = 1
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Enable interrupt on major loop completion
        t.tcd_csr().write(|w| w.intmajor().set_bit());

        // Barrier: make sure all TCD writes complete before the start trigger.
        cortex_m::asm::dsb();

        tx.blocking_write(b"Triggering transfer...\r\n").unwrap();
        dma_ch0.trigger_start();
    }

    // Wait for completion using channel helper method
    while !dma_ch0.is_done() {
        cortex_m::asm::nop();
    }
    unsafe {
        dma_ch0.clear_done();
    }

    tx.blocking_write(b"\r\nEDMA memset example finish.\r\n\r\n").unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER) as *const u32, BUFFER_LENGTH);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: All elements should equal PATTERN
    let mut mismatch = false;
    unsafe {
        #[allow(clippy::needless_range_loop)]
        for i in 0..BUFFER_LENGTH {
            if DEST_BUFFER[i] != PATTERN {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    }

    loop {
        cortex_m::asm::wfe();
    }
}
diff --git a/examples/mcxa/src/bin/dma_ping_pong_transfer.rs b/examples/mcxa/src/bin/dma_ping_pong_transfer.rs
new file mode 100644
index 000000000..f8f543382
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_ping_pong_transfer.rs
@@ -0,0 +1,376 @@
1//! DMA ping-pong/double-buffer transfer example for MCXA276.
2//!
3//! This example demonstrates two approaches for ping-pong/double-buffering:
4//!
5//! ## Approach 1: Scatter/Gather with linked TCDs (manual)
6//! - Two TCDs link to each other for alternating transfers
7//! - Uses custom handler that delegates to on_interrupt() then signals completion
8//! - Note: With ESG=1, DONE bit is cleared by hardware when next TCD loads,
9//! so we need an AtomicBool to track completion
10//!
11//! ## Approach 2: Half-transfer interrupt with wait_half() (NEW!)
12//! - Single continuous transfer over entire buffer
13//! - Uses half-transfer interrupt to know when first half is ready
14//! - Application can process first half while second half is being filled
15//!
16//! # Embassy-style features demonstrated:
17//! - `DmaChannel::new()` for channel creation
18//! - Scatter/gather with linked TCDs
19//! - Custom handler that delegates to HAL's `on_interrupt()` (best practice)
20//! - Standard `DmaCh1InterruptHandler` with `bind_interrupts!` macro
21//! - NEW: `wait_half()` for half-transfer interrupt handling
22
23#![no_std]
24#![no_main]
25
26use core::sync::atomic::{AtomicBool, Ordering};
27
28use embassy_executor::Spawner;
29use embassy_mcxa::clocks::config::Div8;
30use embassy_mcxa::dma::{self, DmaCh1InterruptHandler, DmaChannel, Tcd, TransferOptions};
31use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
32use embassy_mcxa::{bind_interrupts, pac};
33use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
34
// Source and destination buffers for Approach 1 (scatter/gather)
static mut SRC: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
static mut DST: [u32; 8] = [0; 8];

// Source and destination buffers for Approach 2 (wait_half)
static mut SRC2: [u32; 8] = [0xA1, 0xA2, 0xA3, 0xA4, 0xB1, 0xB2, 0xB3, 0xB4];
static mut DST2: [u32; 8] = [0; 8];

// TCD pool for scatter/gather - must be 32-byte aligned
#[repr(C, align(32))]
struct TcdPool([Tcd; 2]);

// Zeroed placeholder descriptors; `main` fills in the real fields and links
// the two TCDs to each other before loading TCD0 into the channel.
static mut TCD_POOL: TcdPool = TcdPool(
    [Tcd {
        saddr: 0,
        soff: 0,
        attr: 0,
        nbytes: 0,
        slast: 0,
        daddr: 0,
        doff: 0,
        citer: 0,
        dlast_sga: 0,
        csr: 0,
        biter: 0,
    }; 2],
);

// AtomicBool to track scatter/gather completion
// Note: With ESG=1, DONE bit is cleared by hardware when next TCD loads,
// so we need this flag to detect when each transfer completes
static TRANSFER_DONE: AtomicBool = AtomicBool::new(false);
67
// Custom handler for scatter/gather that delegates to HAL's on_interrupt()
// This follows the "interrupts as threads" pattern - the handler does minimal work
// (delegates to HAL + sets a flag) and the main task does the actual processing
pub struct PingPongDmaHandler;

impl embassy_mcxa::interrupt::typelevel::Handler<embassy_mcxa::interrupt::typelevel::DMA_CH0> for PingPongDmaHandler {
    // Runs in interrupt context; keep it short.
    unsafe fn on_interrupt() {
        // Delegate to HAL's on_interrupt() which clears INT flag and wakes wakers
        dma::on_interrupt(0);
        // Signal completion for polling (needed because ESG clears DONE bit).
        // Release pairs with the Acquire loads in `main`'s wait loops.
        TRANSFER_DONE.store(true, Ordering::Release);
    }
}
81
// Bind DMA channel interrupts
// CH0: Custom handler for scatter/gather (delegates to on_interrupt + sets flag)
// CH1: Standard handler for wait_half() demo
bind_interrupts!(struct Irqs {
    DMA_CH0 => PingPongDmaHandler;
    DMA_CH1 => DmaCh1InterruptHandler;
});
89
90/// Helper to write a u32 as decimal ASCII to UART
91fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
92 let mut buf = [0u8; 10];
93 let mut n = val;
94 let mut i = buf.len();
95
96 if n == 0 {
97 tx.blocking_write(b"0").ok();
98 return;
99 }
100
101 while n > 0 {
102 i -= 1;
103 buf[i] = b'0' + (n % 10) as u8;
104 n /= 10;
105 }
106
107 tx.blocking_write(&buf[i..]).ok();
108}
109
110/// Helper to print a buffer to UART
111fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
112 tx.blocking_write(b"[").ok();
113 unsafe {
114 for i in 0..len {
115 write_u32(tx, *buf_ptr.add(i));
116 if i < len - 1 {
117 tx.blocking_write(b", ").ok();
118 }
119 }
120 }
121 tx.blocking_write(b"]").ok();
122}
123
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Enable the 12 MHz FRO (undivided) before bringing up the HAL.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA ping-pong transfer example starting...");

    // Enable DMA interrupt (DMA clock/reset/init is handled automatically by HAL)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA ping-pong transfer example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        SRC = [1, 2, 3, 4, 5, 6, 7, 8];
        DST = [0; 8];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring ping-pong DMA with Embassy-style API...\r\n")
        .unwrap();

    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure ping-pong transfer using direct TCD access:
    // This sets up TCD0 and TCD1 in RAM, and loads TCD0 into the channel.
    // TCD0 transfers first half (SRC[0..4] -> DST[0..4]), links to TCD1.
    // TCD1 transfers second half (SRC[4..8] -> DST[4..8]), links to TCD0.
    unsafe {
        let tcds = &mut *core::ptr::addr_of_mut!(TCD_POOL.0);
        let src_ptr = core::ptr::addr_of!(SRC) as *const u32;
        let dst_ptr = core::ptr::addr_of_mut!(DST) as *mut u32;

        let half_len = 4usize;
        let half_bytes = (half_len * 4) as u32;

        // Each TCD's dlast_sga holds the address of the *other* TCD so the
        // engine reloads it when a major loop completes (ESG set in csr).
        let tcd0_addr = &tcds[0] as *const _ as u32;
        let tcd1_addr = &tcds[1] as *const _ as u32;

        // TCD0: First half -> Links to TCD1
        tcds[0] = Tcd {
            saddr: src_ptr as u32,
            soff: 4,
            attr: 0x0202, // 32-bit src/dst
            nbytes: half_bytes,
            slast: 0,
            daddr: dst_ptr as u32,
            doff: 4,
            citer: 1,
            dlast_sga: tcd1_addr as i32,
            csr: 0x0012, // ESG | INTMAJOR
            biter: 1,
        };

        // TCD1: Second half -> Links to TCD0
        tcds[1] = Tcd {
            saddr: src_ptr.add(half_len) as u32,
            soff: 4,
            attr: 0x0202,
            nbytes: half_bytes,
            slast: 0,
            daddr: dst_ptr.add(half_len) as u32,
            doff: 4,
            citer: 1,
            dlast_sga: tcd0_addr as i32,
            csr: 0x0012,
            biter: 1,
        };

        // Load TCD0 into hardware registers
        dma_ch0.load_tcd(&tcds[0]);
    }

    tx.blocking_write(b"Triggering first half transfer...\r\n").unwrap();

    // Trigger first transfer (first half: SRC[0..4] -> DST[0..4])
    unsafe {
        dma_ch0.trigger_start();
    }

    // Wait for first half (flag is set by PingPongDmaHandler)
    while !TRANSFER_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }
    TRANSFER_DONE.store(false, Ordering::Release);

    tx.blocking_write(b"First half transferred.\r\n").unwrap();
    tx.blocking_write(b"Triggering second half transfer...\r\n").unwrap();

    // Trigger second transfer (second half: SRC[4..8] -> DST[4..8])
    unsafe {
        dma_ch0.trigger_start();
    }

    // Wait for second half
    while !TRANSFER_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }
    TRANSFER_DONE.store(false, Ordering::Release);

    tx.blocking_write(b"Second half transferred.\r\n\r\n").unwrap();

    tx.blocking_write(b"EDMA ping-pong transfer example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: DST should match SRC
    let mut mismatch = false;
    unsafe {
        let src_ptr = core::ptr::addr_of!(SRC) as *const u32;
        let dst_ptr = core::ptr::addr_of!(DST) as *const u32;
        for i in 0..8 {
            if *src_ptr.add(i) != *dst_ptr.add(i) {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Approach 1 mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Approach 1 mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Approach 1 data verified.\r\n\r\n").unwrap();
        defmt::info!("PASS: Approach 1 data verified.");
    }

    // =========================================================================
    // Approach 2: Half-Transfer Interrupt with wait_half() (NEW!)
    // =========================================================================
    //
    // This approach uses a single continuous DMA transfer with half-transfer
    // interrupt enabled. The wait_half() method allows you to be notified
    // when the first half of the buffer is complete, so you can process it
    // while the second half is still being filled.
    //
    // Benefits:
    // - Simpler setup (no TCD pool needed)
    // - True async/await support
    // - Good for streaming data processing

    tx.blocking_write(b"--- Approach 2: wait_half() demo ---\r\n\r\n")
        .unwrap();

    // Enable DMA CH1 interrupt
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH1);
    }

    // Initialize approach 2 buffers
    unsafe {
        SRC2 = [0xA1, 0xA2, 0xA3, 0xA4, 0xB1, 0xB2, 0xB3, 0xB4];
        DST2 = [0; 8];
    }

    tx.blocking_write(b"SRC2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC2) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    let dma_ch1 = DmaChannel::new(p.DMA_CH1);

    // Configure transfer with half-transfer interrupt enabled
    let mut options = TransferOptions::default();
    options.half_transfer_interrupt = true; // Enable half-transfer interrupt
    options.complete_transfer_interrupt = true;

    tx.blocking_write(b"Starting transfer with half_transfer_interrupt...\r\n")
        .unwrap();

    unsafe {
        // Raw-pointer reborrows sidestep direct references to `static mut`.
        let src = &*core::ptr::addr_of!(SRC2);
        let dst = &mut *core::ptr::addr_of_mut!(DST2);

        // Create the transfer
        let mut transfer = dma_ch1.mem_to_mem(src, dst, options);

        // Wait for half-transfer (first 4 elements)
        tx.blocking_write(b"Waiting for first half...\r\n").unwrap();
        let half_ok = transfer.wait_half().await;

        if half_ok {
            tx.blocking_write(b"Half-transfer complete! First half of DST2: ")
                .unwrap();
            print_buffer(&mut tx, core::ptr::addr_of!(DST2) as *const u32, 4);
            tx.blocking_write(b"\r\n").unwrap();
            tx.blocking_write(b"(Processing first half while second half transfers...)\r\n")
                .unwrap();
        }

        // Wait for complete transfer
        tx.blocking_write(b"Waiting for second half...\r\n").unwrap();
        transfer.await;
    }

    tx.blocking_write(b"Transfer complete! Full DST2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST2) as *const u32, 8);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify approach 2
    let mut mismatch2 = false;
    unsafe {
        let src_ptr = core::ptr::addr_of!(SRC2) as *const u32;
        let dst_ptr = core::ptr::addr_of!(DST2) as *const u32;
        for i in 0..8 {
            if *src_ptr.add(i) != *dst_ptr.add(i) {
                mismatch2 = true;
                break;
            }
        }
    }

    if mismatch2 {
        tx.blocking_write(b"FAIL: Approach 2 mismatch!\r\n").unwrap();
        defmt::error!("FAIL: Approach 2 mismatch!");
    } else {
        tx.blocking_write(b"PASS: Approach 2 verified.\r\n").unwrap();
        defmt::info!("PASS: Approach 2 verified.");
    }

    tx.blocking_write(b"\r\n=== All ping-pong demos complete ===\r\n")
        .unwrap();

    loop {
        cortex_m::asm::wfe();
    }
}
diff --git a/examples/mcxa/src/bin/dma_scatter_gather.rs b/examples/mcxa/src/bin/dma_scatter_gather.rs
new file mode 100644
index 000000000..4b26bc2ed
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_scatter_gather.rs
@@ -0,0 +1,262 @@
1//! DMA scatter-gather transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA with scatter/gather to chain multiple
4//! transfer descriptors. The first TCD transfers the first half of the buffer,
5//! then automatically loads the second TCD to transfer the second half.
6//!
7//! # Embassy-style features demonstrated:
8//! - `DmaChannel::new()` for channel creation
9//! - Scatter/gather with chained TCDs
10//! - Custom handler that delegates to HAL's `on_interrupt()` (best practice)
11
12#![no_std]
13#![no_main]
14
15use core::sync::atomic::{AtomicBool, Ordering};
16
17use embassy_executor::Spawner;
18use embassy_mcxa::clocks::config::Div8;
19use embassy_mcxa::dma::{self, DmaChannel, Tcd};
20use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
21use embassy_mcxa::{bind_interrupts, pac};
22use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
23
// Source and destination buffers for the memory-to-memory transfer.
// `static mut` keeps them at fixed RAM addresses the DMA engine can reach;
// all access from `main` goes through raw pointers (addr_of!/addr_of_mut!).
static mut SRC: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
static mut DST: [u32; 8] = [0; 8];

// TCD pool for scatter/gather - must be 32-byte aligned
// NOTE(review): alignment presumably required because the eDMA engine loads
// each 32-byte TCD directly from this pool — confirm in the reference manual.
#[repr(C, align(32))]
struct TcdPool([Tcd; 2]);

// Two descriptors, one per half of the buffer. Zero-initialized here and
// filled in at runtime before the transfer is started.
static mut TCD_POOL: TcdPool = TcdPool(
    [Tcd {
        saddr: 0,
        soff: 0,
        attr: 0,
        nbytes: 0,
        slast: 0,
        daddr: 0,
        doff: 0,
        citer: 0,
        dlast_sga: 0,
        csr: 0,
        biter: 0,
    }; 2],
);

// AtomicBool to track scatter/gather completion.
// Note: With ESG=1, DONE bit is cleared by hardware when next TCD loads,
// so we need this flag to detect when each transfer completes.
static TRANSFER_DONE: AtomicBool = AtomicBool::new(false);
52
// Custom handler for scatter/gather that delegates to HAL's on_interrupt().
// This follows the "interrupts as threads" pattern - the handler does minimal work
// (delegates to HAL + sets a flag) and the main task does the actual processing.
pub struct ScatterGatherDmaHandler;

impl embassy_mcxa::interrupt::typelevel::Handler<embassy_mcxa::interrupt::typelevel::DMA_CH0>
    for ScatterGatherDmaHandler
{
    /// Runs on every DMA channel 0 interrupt (once per completed major loop,
    /// since the TCDs below set INTMAJOR).
    unsafe fn on_interrupt() {
        // Delegate to HAL's on_interrupt() which clears INT flag and wakes wakers
        dma::on_interrupt(0);
        // Signal completion for polling (needed because ESG clears DONE bit).
        // Release pairs with the Acquire load in main's busy-wait loop.
        TRANSFER_DONE.store(true, Ordering::Release);
    }
}
68
69// Bind DMA channel interrupt
70// Custom handler for scatter/gather (delegates to on_interrupt + sets flag)
71bind_interrupts!(struct Irqs {
72 DMA_CH0 => ScatterGatherDmaHandler;
73});
74
75/// Helper to write a u32 as decimal ASCII to UART
76fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
77 let mut buf = [0u8; 10];
78 let mut n = val;
79 let mut i = buf.len();
80
81 if n == 0 {
82 tx.blocking_write(b"0").ok();
83 return;
84 }
85
86 while n > 0 {
87 i -= 1;
88 buf[i] = b'0' + (n % 10) as u8;
89 n /= 10;
90 }
91
92 tx.blocking_write(&buf[i..]).ok();
93}
94
95/// Helper to print a buffer to UART
96fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
97 tx.blocking_write(b"[").ok();
98 unsafe {
99 for i in 0..len {
100 write_u32(tx, *buf_ptr.add(i));
101 if i < len - 1 {
102 tx.blocking_write(b", ").ok();
103 }
104 }
105 }
106 tx.blocking_write(b"]").ok();
107}
108
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Clock setup: enable the 12 MHz FRO, undivided low-frequency output.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA scatter-gather transfer example starting...");

    // DMA is initialized during hal::init() - no need to call ensure_init()

    // Enable DMA interrupt in the NVIC so ScatterGatherDmaHandler runs.
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA scatter-gather transfer example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers (re-initialized at runtime so a warm reset starts clean).
    unsafe {
        SRC = [1, 2, 3, 4, 5, 6, 7, 8];
        DST = [0; 8];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring scatter-gather DMA with Embassy-style API...\r\n")
        .unwrap();

    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure scatter-gather transfer using direct TCD access:
    // This sets up TCD0 and TCD1 in RAM, and loads TCD0 into the channel.
    // TCD0 transfers first half (SRC[0..4] -> DST[0..4]), then loads TCD1.
    // TCD1 transfers second half (SRC[4..8] -> DST[4..8]), last TCD.
    unsafe {
        let tcds = core::slice::from_raw_parts_mut(core::ptr::addr_of_mut!(TCD_POOL.0) as *mut Tcd, 2);
        let src_ptr = core::ptr::addr_of!(SRC) as *const u32;
        let dst_ptr = core::ptr::addr_of_mut!(DST) as *mut u32;

        let num_tcds = 2usize;
        let chunk_len = 4usize; // 8 / 2
        let chunk_bytes = (chunk_len * 4) as u32;

        for i in 0..num_tcds {
            let is_last = i == num_tcds - 1;
            // dlast_sga holds the address of the next TCD when ESG is set.
            let next_tcd_addr = if is_last {
                0 // No next TCD
            } else {
                &tcds[i + 1] as *const _ as u32
            };

            tcds[i] = Tcd {
                saddr: src_ptr.add(i * chunk_len) as u32,
                soff: 4,
                attr: 0x0202, // 32-bit src/dst (SSIZE=2, DSIZE=2)
                nbytes: chunk_bytes,
                slast: 0,
                daddr: dst_ptr.add(i * chunk_len) as u32,
                doff: 4,
                citer: 1,
                dlast_sga: next_tcd_addr as i32,
                // ESG (scatter/gather) for non-last, INTMAJOR for all
                csr: if is_last { 0x0002 } else { 0x0012 },
                biter: 1,
            };
        }

        // Load TCD0 into hardware registers
        dma_ch0.load_tcd(&tcds[0]);
    }

    tx.blocking_write(b"Triggering first half transfer...\r\n").unwrap();

    // Trigger first transfer (first half: SRC[0..4] -> DST[0..4])
    // TCD0 is currently loaded.
    unsafe {
        dma_ch0.trigger_start();
    }

    // Wait for first half. The ISR sets TRANSFER_DONE (Release) which pairs
    // with this Acquire load; busy-wait is fine for a demo.
    while !TRANSFER_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }
    TRANSFER_DONE.store(false, Ordering::Release);

    tx.blocking_write(b"First half transferred.\r\n").unwrap();
    tx.blocking_write(b"Triggering second half transfer...\r\n").unwrap();

    // Trigger second transfer (second half: SRC[4..8] -> DST[4..8])
    // TCD1 should have been loaded by the scatter/gather engine.
    unsafe {
        dma_ch0.trigger_start();
    }

    // Wait for second half
    while !TRANSFER_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }
    TRANSFER_DONE.store(false, Ordering::Release);

    tx.blocking_write(b"Second half transferred.\r\n\r\n").unwrap();

    tx.blocking_write(b"EDMA scatter-gather transfer example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: DST should match SRC word-for-word.
    let mut mismatch = false;
    unsafe {
        let src_ptr = core::ptr::addr_of!(SRC) as *const u32;
        let dst_ptr = core::ptr::addr_of!(DST) as *const u32;
        for i in 0..8 {
            if *src_ptr.add(i) != *dst_ptr.add(i) {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    }

    // Park the core; nothing further to do.
    loop {
        cortex_m::asm::wfe();
    }
}
diff --git a/examples/mcxa/src/bin/dma_scatter_gather_builder.rs b/examples/mcxa/src/bin/dma_scatter_gather_builder.rs
new file mode 100644
index 000000000..e483bb81f
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_scatter_gather_builder.rs
@@ -0,0 +1,231 @@
1//! DMA Scatter-Gather Builder example for MCXA276.
2//!
3//! This example demonstrates using the new `ScatterGatherBuilder` API for
4//! chaining multiple DMA transfers with a type-safe builder pattern.
5//!
6//! # Features demonstrated:
7//! - `ScatterGatherBuilder::new()` for creating a builder
8//! - `add_transfer()` for adding memory-to-memory segments
9//! - `build()` to start the chained transfer
10//! - Automatic TCD linking and ESG bit management
11//!
12//! # Comparison with manual scatter-gather:
13//! The manual approach (see `dma_scatter_gather.rs`) requires:
14//! - Manual TCD pool allocation and alignment
15//! - Manual CSR/ESG/INTMAJOR bit manipulation
16//! - Manual dlast_sga address calculations
17//!
18//! The builder approach handles all of this automatically!
19
20#![no_std]
21#![no_main]
22
23use embassy_executor::Spawner;
24use embassy_mcxa::clocks::config::Div8;
25use embassy_mcxa::dma::{DmaCh0InterruptHandler, DmaChannel, ScatterGatherBuilder};
26use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
27use embassy_mcxa::{bind_interrupts, pac};
28use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
29
// Bind DMA channel 0 interrupt to the HAL's stock handler (clears the INT
// flag and wakes the Transfer future).
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
});

// Source buffers (multiple segments). `static mut` keeps them at fixed RAM
// addresses for the DMA engine; access goes through addr_of!/addr_of_mut!.
static mut SRC1: [u32; 4] = [0x11111111, 0x22222222, 0x33333333, 0x44444444];
static mut SRC2: [u32; 4] = [0xAAAAAAAA, 0xBBBBBBBB, 0xCCCCCCCC, 0xDDDDDDDD];
static mut SRC3: [u32; 4] = [0x12345678, 0x9ABCDEF0, 0xFEDCBA98, 0x76543210];

// Destination buffers (one per segment)
static mut DST1: [u32; 4] = [0; 4];
static mut DST2: [u32; 4] = [0; 4];
static mut DST3: [u32; 4] = [0; 4];
44
45/// Helper to write a u32 as hex to UART
46fn write_hex(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
47 const HEX: &[u8; 16] = b"0123456789ABCDEF";
48 for i in (0..8).rev() {
49 let nibble = ((val >> (i * 4)) & 0xF) as usize;
50 tx.blocking_write(&[HEX[nibble]]).ok();
51 }
52}
53
54/// Helper to print a buffer to UART
55fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
56 tx.blocking_write(b"[").ok();
57 unsafe {
58 for i in 0..len {
59 write_hex(tx, *buf_ptr.add(i));
60 if i < len - 1 {
61 tx.blocking_write(b", ").ok();
62 }
63 }
64 }
65 tx.blocking_write(b"]").ok();
66}
67
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Clock setup: enable the 12 MHz FRO, undivided low-frequency output.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA Scatter-Gather Builder example starting...");

    // Enable DMA interrupt (DMA clock/reset/init is handled automatically by HAL)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    // Create UART for debug output
    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"DMA Scatter-Gather Builder Example\r\n").unwrap();
    tx.blocking_write(b"===================================\r\n\r\n")
        .unwrap();

    // Show source buffers
    tx.blocking_write(b"Source buffers:\r\n").unwrap();
    tx.blocking_write(b" SRC1: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC1) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" SRC2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC2) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" SRC3: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC3) as *const u32, 4);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    tx.blocking_write(b"Destination buffers (before):\r\n").unwrap();
    tx.blocking_write(b" DST1: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST1) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" DST2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST2) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" DST3: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST3) as *const u32, 4);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Create DMA channel
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    tx.blocking_write(b"Building scatter-gather chain with builder API...\r\n")
        .unwrap();

    // =========================================================================
    // ScatterGatherBuilder API demonstration
    // =========================================================================
    //
    // The builder pattern makes scatter-gather transfers much easier:
    // 1. Create a builder
    // 2. Add transfer segments with add_transfer()
    // 3. Call build() to start the entire chain
    // No manual TCD manipulation required!

    let mut builder = ScatterGatherBuilder::<u32>::new();

    // Add three transfer segments - the builder handles TCD linking automatically.
    // The unsafe blocks only create references into the `static mut` buffers.
    unsafe {
        let src1 = &*core::ptr::addr_of!(SRC1);
        let dst1 = &mut *core::ptr::addr_of_mut!(DST1);
        builder.add_transfer(src1, dst1);
    }

    unsafe {
        let src2 = &*core::ptr::addr_of!(SRC2);
        let dst2 = &mut *core::ptr::addr_of_mut!(DST2);
        builder.add_transfer(src2, dst2);
    }

    unsafe {
        let src3 = &*core::ptr::addr_of!(SRC3);
        let dst3 = &mut *core::ptr::addr_of_mut!(DST3);
        builder.add_transfer(src3, dst3);
    }

    tx.blocking_write(b"Added 3 transfer segments to chain.\r\n").unwrap();
    tx.blocking_write(b"Starting scatter-gather transfer with .await...\r\n\r\n")
        .unwrap();

    // Build and execute the scatter-gather chain
    // The build() method:
    // - Links all TCDs together with ESG bit
    // - Sets INTMAJOR on all TCDs
    // - Loads the first TCD into hardware
    // - Returns a Transfer future
    // NOTE(review): the banner above says ".await" but the code below uses
    // blocking_wait(); consider aligning the message with the actual behavior.
    unsafe {
        let transfer = builder.build(&dma_ch0).expect("Failed to build scatter-gather");
        transfer.blocking_wait();
    }

    tx.blocking_write(b"Scatter-gather transfer complete!\r\n\r\n").unwrap();

    // Show results
    tx.blocking_write(b"Destination buffers (after):\r\n").unwrap();
    tx.blocking_write(b" DST1: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST1) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" DST2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST2) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" DST3: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST3) as *const u32, 4);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify all three segments word-for-word against their sources.
    let mut all_ok = true;
    unsafe {
        let src1 = core::ptr::addr_of!(SRC1) as *const u32;
        let dst1 = core::ptr::addr_of!(DST1) as *const u32;
        for i in 0..4 {
            if *src1.add(i) != *dst1.add(i) {
                all_ok = false;
            }
        }

        let src2 = core::ptr::addr_of!(SRC2) as *const u32;
        let dst2 = core::ptr::addr_of!(DST2) as *const u32;
        for i in 0..4 {
            if *src2.add(i) != *dst2.add(i) {
                all_ok = false;
            }
        }

        let src3 = core::ptr::addr_of!(SRC3) as *const u32;
        let dst3 = core::ptr::addr_of!(DST3) as *const u32;
        for i in 0..4 {
            if *src3.add(i) != *dst3.add(i) {
                all_ok = false;
            }
        }
    }

    if all_ok {
        tx.blocking_write(b"PASS: All segments verified!\r\n").unwrap();
        defmt::info!("PASS: All segments verified!");
    } else {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    }

    tx.blocking_write(b"\r\n=== Scatter-Gather Builder example complete ===\r\n")
        .unwrap();

    // Park the core; nothing further to do.
    loop {
        cortex_m::asm::wfe();
    }
}
diff --git a/examples/mcxa/src/bin/dma_wrap_transfer.rs b/examples/mcxa/src/bin/dma_wrap_transfer.rs
new file mode 100644
index 000000000..82936d9d0
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_wrap_transfer.rs
@@ -0,0 +1,222 @@
1//! DMA wrap transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA with modulo addressing to wrap around
4//! a source buffer, effectively repeating the source data in the destination.
5//!
6//! # Embassy-style features demonstrated:
7//! - `DmaChannel::is_done()` and `clear_done()` helper methods
8//! - No need to pass register block around
9
10#![no_std]
11#![no_main]
12
13use embassy_executor::Spawner;
14use embassy_mcxa::clocks::config::Div8;
15use embassy_mcxa::dma::{DmaCh0InterruptHandler, DmaChannel};
16use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
17use embassy_mcxa::{bind_interrupts, pac};
18use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
19
// Bind DMA channel 0 interrupt using Embassy-style macro (HAL stock handler
// clears the INT flag; completion is polled via is_done() below).
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
});

// Source buffer: 4 words (16 bytes), aligned to 16 bytes for modulo.
// The eDMA SMOD field requires the wrapped region to be naturally aligned
// to its size, hence the explicit align(16) wrapper.
#[repr(align(16))]
struct AlignedSrc([u32; 4]);

static mut SRC: AlignedSrc = AlignedSrc([0; 4]);
static mut DST: [u32; 8] = [0; 8];
31
32/// Helper to write a u32 as decimal ASCII to UART
33fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
34 let mut buf = [0u8; 10];
35 let mut n = val;
36 let mut i = buf.len();
37
38 if n == 0 {
39 tx.blocking_write(b"0").ok();
40 return;
41 }
42
43 while n > 0 {
44 i -= 1;
45 buf[i] = b'0' + (n % 10) as u8;
46 n /= 10;
47 }
48
49 tx.blocking_write(&buf[i..]).ok();
50}
51
52/// Helper to print a buffer to UART
53fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
54 tx.blocking_write(b"[").ok();
55 unsafe {
56 for i in 0..len {
57 write_u32(tx, *buf_ptr.add(i));
58 if i < len - 1 {
59 tx.blocking_write(b", ").ok();
60 }
61 }
62 }
63 tx.blocking_write(b"]").ok();
64}
65
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Clock setup: enable the 12 MHz FRO, undivided low-frequency output.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA wrap transfer example starting...");

    // Enable DMA interrupt (DMA clock/reset/init is handled automatically by HAL)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA wrap transfer example begin.\r\n\r\n").unwrap();

    // Initialize buffers
    unsafe {
        SRC.0 = [1, 2, 3, 4];
        DST = [0; 8];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, unsafe { core::ptr::addr_of!(SRC.0) } as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
        .unwrap();

    // Create DMA channel using Embassy-style API
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure wrap transfer using direct TCD access:
    // SRC is 16 bytes (4 * u32). We want to transfer 32 bytes (8 * u32).
    // SRC modulo is 16 bytes (2^4 = 16) - wraps source address.
    // DST modulo is 0 (disabled).
    // This causes the source address to wrap around after 16 bytes,
    // effectively repeating the source data.
    unsafe {
        let t = dma_ch0.tcd();

        // Reset channel state: clear request/arbitration/error enables and
        // any stale DONE flag before reprogramming the TCD.
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source/destination addresses
        t.tcd_saddr()
            .write(|w| w.saddr().bits(core::ptr::addr_of!(SRC.0) as u32));
        t.tcd_daddr()
            .write(|w| w.daddr().bits(core::ptr::addr_of_mut!(DST) as u32));

        // Offsets: both increment by 4 bytes
        t.tcd_soff().write(|w| w.soff().bits(4));
        t.tcd_doff().write(|w| w.doff().bits(4));

        // Attributes: 32-bit transfers (size = 2)
        // SMOD = 4 (2^4 = 16 byte modulo for source), DMOD = 0 (disabled)
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(2)
                .dsize()
                .bits(2)
                .smod()
                .bits(4) // Source modulo: 2^4 = 16 bytes
                .dmod()
                .bits(0) // Dest modulo: disabled
        });

        // Transfer 32 bytes total in one minor loop
        let nbytes = 32u32;
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));

        // Source wraps via modulo, no adjustment needed
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        // Reset dest address after major loop
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(-(nbytes as i32) as u32));

        // Major loop count = 1
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Enable interrupt on major loop completion
        t.tcd_csr().write(|w| w.intmajor().set_bit());

        // Ensure all TCD writes are visible to the DMA engine before start.
        cortex_m::asm::dsb();

        tx.blocking_write(b"Triggering transfer...\r\n").unwrap();
        dma_ch0.trigger_start();
    }

    // Wait for completion using channel helper method (busy-poll the DONE bit)
    while !dma_ch0.is_done() {
        cortex_m::asm::nop();
    }
    unsafe {
        dma_ch0.clear_done();
    }

    tx.blocking_write(b"\r\nEDMA wrap transfer example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: DST should be [1, 2, 3, 4, 1, 2, 3, 4]
    let expected = [1u32, 2, 3, 4, 1, 2, 3, 4];
    let mut mismatch = false;
    unsafe {
        for i in 0..8 {
            if DST[i] != expected[i] {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    }

    // Park the core; nothing further to do.
    loop {
        cortex_m::asm::wfe();
    }
}
diff --git a/examples/mcxa/src/bin/lpuart_dma.rs b/examples/mcxa/src/bin/lpuart_dma.rs
new file mode 100644
index 000000000..5497f8646
--- /dev/null
+++ b/examples/mcxa/src/bin/lpuart_dma.rs
@@ -0,0 +1,81 @@
1//! LPUART DMA example for MCXA276.
2//!
3//! This example demonstrates using DMA for UART TX and RX operations.
4//! It sends a message using DMA, then waits for 16 characters to be received
5//! via DMA and echoes them back.
6//!
7//! The DMA request sources are automatically derived from the LPUART instance type.
8//! DMA clock/reset/init is handled automatically by the HAL.
9
10#![no_std]
11#![no_main]
12
13use embassy_executor::Spawner;
14use embassy_mcxa::clocks::config::Div8;
15use embassy_mcxa::dma::{DmaCh0InterruptHandler, DmaCh1InterruptHandler};
16use embassy_mcxa::lpuart::{Config, LpuartDma};
17use embassy_mcxa::{bind_interrupts, pac};
18use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
19
// Bind DMA channel interrupts using Embassy-style macro.
// CH0 services TX DMA and CH1 services RX DMA (matching LpuartDma::new below).
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
    DMA_CH1 => DmaCh1InterruptHandler;
});
25
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Clock setup: enable the 12 MHz FRO, undivided low-frequency output.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("LPUART DMA example starting...");

    // Enable DMA interrupts (per-channel, as needed)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH1);
    }

    // Create UART configuration
    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    // Create UART instance with DMA channels
    let mut lpuart = LpuartDma::new(
        p.LPUART2, p.P2_2, // TX pin
        p.P2_3, // RX pin
        p.DMA_CH0, // TX DMA channel
        p.DMA_CH1, // RX DMA channel
        config,
    )
    .unwrap();

    // Send a message using DMA (DMA request source is automatically derived from LPUART2)
    let tx_msg = b"Hello from LPUART2 DMA TX!\r\n";
    lpuart.write_dma(tx_msg).await.unwrap();

    defmt::info!("TX DMA complete");

    // Send prompt
    let prompt = b"Type 16 characters to echo via DMA:\r\n";
    lpuart.write_dma(prompt).await.unwrap();

    // Receive 16 characters using DMA (blocks here until all 16 arrive)
    let mut rx_buf = [0u8; 16];
    lpuart.read_dma(&mut rx_buf).await.unwrap();

    defmt::info!("RX DMA complete");

    // Echo back the received data
    let echo_prefix = b"\r\nReceived: ";
    lpuart.write_dma(echo_prefix).await.unwrap();
    lpuart.write_dma(&rx_buf).await.unwrap();
    let done_msg = b"\r\nDone!\r\n";
    lpuart.write_dma(done_msg).await.unwrap();

    defmt::info!("Example complete");
}
diff --git a/examples/mcxa/src/bin/lpuart_ring_buffer.rs b/examples/mcxa/src/bin/lpuart_ring_buffer.rs
new file mode 100644
index 000000000..1d1a51970
--- /dev/null
+++ b/examples/mcxa/src/bin/lpuart_ring_buffer.rs
@@ -0,0 +1,130 @@
1//! LPUART Ring Buffer DMA example for MCXA276.
2//!
3//! This example demonstrates using the high-level `LpuartRxDma::setup_ring_buffer()`
4//! API for continuous circular DMA reception from a UART peripheral.
5//!
6//! # Features demonstrated:
7//! - `LpuartRxDma::setup_ring_buffer()` for continuous peripheral-to-memory DMA
8//! - `RingBuffer` for async reading of received data
9//! - Handling of potential overrun conditions
10//! - Half-transfer and complete-transfer interrupts for timely wakeups
11//!
12//! # How it works:
13//! 1. Create an `LpuartRxDma` driver with a DMA channel
14//! 2. Call `setup_ring_buffer()` which handles all low-level DMA configuration
15//! 3. Application asynchronously reads data as it arrives via `ring_buf.read()`
16//! 4. Both half-transfer and complete-transfer interrupts wake the reader
17
18#![no_std]
19#![no_main]
20
21use embassy_executor::Spawner;
22use embassy_mcxa::bind_interrupts;
23use embassy_mcxa::clocks::config::Div8;
24use embassy_mcxa::dma::{DmaCh0InterruptHandler, DmaCh1InterruptHandler};
25use embassy_mcxa::lpuart::{Config, LpuartDma, LpuartTxDma};
26use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
27
// Bind DMA channel interrupts. Note the channel roles here: LpuartDma::new
// below is called with DMA_CH1 for TX and DMA_CH0 for RX.
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
    DMA_CH1 => DmaCh1InterruptHandler;
});

// Ring buffer for RX - power of 2 is ideal for modulo efficiency.
// `static mut` keeps it at a fixed RAM address for the circular DMA.
static mut RX_RING_BUFFER: [u8; 64] = [0; 64];
36
37/// Helper to write a byte as hex to UART
38fn write_hex<T: embassy_mcxa::lpuart::Instance, C: embassy_mcxa::dma::Channel>(
39 tx: &mut LpuartTxDma<'_, T, C>,
40 byte: u8,
41) {
42 const HEX: &[u8; 16] = b"0123456789ABCDEF";
43 let buf = [HEX[(byte >> 4) as usize], HEX[(byte & 0x0F) as usize]];
44 tx.blocking_write(&buf).ok();
45}
46
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Clock setup: enable the 12 MHz FRO, undivided low-frequency output.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("LPUART Ring Buffer DMA example starting...");

    // Create UART configuration
    let config = Config {
        baudrate_bps: 115_200,
        ..Default::default()
    };

    // Create LPUART with DMA support for both TX and RX, then split
    // This is the proper Embassy pattern - create once, split into TX and RX
    // (argument order: TX DMA = CH1, RX DMA = CH0 here).
    let lpuart = LpuartDma::new(p.LPUART2, p.P2_2, p.P2_3, p.DMA_CH1, p.DMA_CH0, config).unwrap();
    let (mut tx, rx) = lpuart.split();

    tx.blocking_write(b"LPUART Ring Buffer DMA Example\r\n").unwrap();
    tx.blocking_write(b"==============================\r\n\r\n").unwrap();

    tx.blocking_write(b"Setting up circular DMA for UART RX...\r\n")
        .unwrap();

    // Set up the ring buffer with circular DMA
    // The HAL handles: DMA request source, RDMAE enable, circular transfer config, NVIC enable
    let ring_buf = unsafe {
        let buf = &mut *core::ptr::addr_of_mut!(RX_RING_BUFFER);
        rx.setup_ring_buffer(buf)
    };

    // Enable DMA requests to start continuous reception
    unsafe {
        rx.enable_dma_request();
    }

    tx.blocking_write(b"Ring buffer ready! Type characters to see them echoed.\r\n")
        .unwrap();
    tx.blocking_write(b"The DMA continuously receives in the background.\r\n\r\n")
        .unwrap();

    // Main loop: read from ring buffer and echo back
    let mut read_buf = [0u8; 16];
    let mut total_received: usize = 0;

    loop {
        // Async read - waits until data is available
        match ring_buf.read(&mut read_buf).await {
            Ok(n) if n > 0 => {
                total_received += n;

                // Echo back what we received, first as hex bytes, then raw.
                tx.blocking_write(b"RX[").unwrap();
                for (i, &byte) in read_buf.iter().enumerate().take(n) {
                    write_hex(&mut tx, byte);
                    if i < n - 1 {
                        tx.blocking_write(b" ").unwrap();
                    }
                }
                tx.blocking_write(b"]: ").unwrap();
                tx.blocking_write(&read_buf[..n]).unwrap();
                tx.blocking_write(b"\r\n").unwrap();

                defmt::info!("Received {} bytes, total: {}", n, total_received);
            }
            Ok(_) => {
                // No data, shouldn't happen with async read
            }
            Err(_) => {
                // Overrun detected: reader fell behind the DMA writer.
                // Clear resets the read position so reception can continue.
                tx.blocking_write(b"ERROR: Ring buffer overrun!\r\n").unwrap();
                defmt::error!("Ring buffer overrun!");
                ring_buf.clear();
            }
        }
    }
}