aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJames Munns <[email protected]>2025-12-09 17:40:22 +0000
committerGitHub <[email protected]>2025-12-09 17:40:22 +0000
commit23623d634b88da7bc398f092ac4ab9e571c6e6e1 (patch)
treed631103d213340780c6bd4e7b8925df491ad4a1d
parent4f322f4a03d336e90d530045255f46cce93e6252 (diff)
parent2d7328d5839e196f7b6c275283a50fd4ac019440 (diff)
Merge pull request #5015 from jamesmunns/james/dma-suggestions
[MCXA]: Extend DMA interface
-rw-r--r--embassy-mcxa/src/clocks/mod.rs7
-rw-r--r--embassy-mcxa/src/dma.rs2602
-rw-r--r--embassy-mcxa/src/interrupt.rs8
-rw-r--r--embassy-mcxa/src/lib.rs47
-rw-r--r--embassy-mcxa/src/lpuart/mod.rs575
-rw-r--r--embassy-mcxa/src/pins.rs5
-rw-r--r--examples/mcxa/Cargo.toml1
-rw-r--r--examples/mcxa/src/bin/dma_mem_to_mem.rs118
-rw-r--r--examples/mcxa/src/bin/dma_scatter_gather_builder.rs130
-rw-r--r--examples/mcxa/src/bin/dma_wrap_transfer.rs184
-rw-r--r--examples/mcxa/src/bin/lpuart_dma.rs68
-rw-r--r--examples/mcxa/src/bin/lpuart_ring_buffer.rs115
-rw-r--r--examples/mcxa/src/bin/raw_dma_channel_link.rs278
-rw-r--r--examples/mcxa/src/bin/raw_dma_interleave_transfer.rs141
-rw-r--r--examples/mcxa/src/bin/raw_dma_memset.rs129
-rw-r--r--examples/mcxa/src/bin/raw_dma_ping_pong_transfer.rs244
-rw-r--r--examples/mcxa/src/bin/raw_dma_scatter_gather.rs165
17 files changed, 4740 insertions, 77 deletions
diff --git a/embassy-mcxa/src/clocks/mod.rs b/embassy-mcxa/src/clocks/mod.rs
index 1b23a9d9f..014a12519 100644
--- a/embassy-mcxa/src/clocks/mod.rs
+++ b/embassy-mcxa/src/clocks/mod.rs
@@ -399,6 +399,10 @@ pub unsafe fn assert_reset<G: Gate>() {
399} 399}
400 400
401/// Check whether the peripheral is held in reset. 401/// Check whether the peripheral is held in reset.
402///
403/// # Safety
404///
405/// Must be called with a valid peripheral gate type.
402#[inline] 406#[inline]
403pub unsafe fn is_reset_released<G: Gate>() -> bool { 407pub unsafe fn is_reset_released<G: Gate>() -> bool {
404 G::is_reset_released() 408 G::is_reset_released()
@@ -942,4 +946,7 @@ pub(crate) mod gate {
942 impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig); 946 impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig);
943 impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig); 947 impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig);
944 impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig); 948 impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig);
949
950 // DMA0 peripheral - uses NoConfig since it has no selectable clock source
951 impl_cc_gate!(DMA0, mrcc_glb_cc0, mrcc_glb_rst0, dma0, NoConfig);
945} 952}
diff --git a/embassy-mcxa/src/dma.rs b/embassy-mcxa/src/dma.rs
new file mode 100644
index 000000000..8d519d99b
--- /dev/null
+++ b/embassy-mcxa/src/dma.rs
@@ -0,0 +1,2602 @@
1//! DMA driver for MCXA276.
2//!
3//! This module provides a typed channel abstraction over the EDMA_0_TCD0 array
4//! and helpers for configuring the channel MUX. The driver supports both
5//! low-level TCD configuration and higher-level async transfer APIs.
6//!
7//! # Architecture
8//!
9//! The MCXA276 has 8 DMA channels (0-7), each with its own interrupt vector.
10//! Each channel has a Transfer Control Descriptor (TCD) that defines the
11//! transfer parameters.
12//!
13//! # Choosing the Right API
14//!
15//! This module provides several API levels to match different use cases:
16//!
17//! ## High-Level Async API (Recommended for Most Users)
18//!
19//! Use the async methods when you want simple, safe DMA transfers:
20//!
21//! | Method | Description |
22//! |--------|-------------|
23//! | [`DmaChannel::mem_to_mem()`] | Memory-to-memory copy |
24//! | [`DmaChannel::memset()`] | Fill memory with a pattern |
25//! | [`DmaChannel::write()`] | Memory-to-peripheral (TX) |
26//! | [`DmaChannel::read()`] | Peripheral-to-memory (RX) |
27//!
28//! These return a [`Transfer`] future that can be `.await`ed:
29//!
30//! ```no_run
31//! # use embassy_mcxa::dma::{DmaChannel, TransferOptions};
32//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
33//! # let src = [0u32; 4];
34//! # let mut dst = [0u32; 4];
35//! // Simple memory-to-memory transfer
36//! unsafe {
37//! dma_ch.mem_to_mem(&src, &mut dst, TransferOptions::default()).await;
38//! }
39//! ```
40//!
41//! ## Setup Methods (For Peripheral Drivers)
42//!
43//! Use setup methods when you need manual lifecycle control:
44//!
45//! | Method | Description |
46//! |--------|-------------|
47//! | [`DmaChannel::setup_write()`] | Configure TX without starting |
48//! | [`DmaChannel::setup_read()`] | Configure RX without starting |
49//!
50//! These configure the TCD but don't start the transfer. You control:
51//! 1. When to call [`DmaChannel::enable_request()`]
52//! 2. How to detect completion (polling or interrupts)
53//! 3. When to clean up with [`DmaChannel::clear_done()`]
54//!
55//! ## Circular/Ring Buffer API (For Continuous Reception)
56//!
57//! Use [`DmaChannel::setup_circular_read()`] for continuous data reception:
58//!
59//! ```no_run
60//! # use embassy_mcxa::dma::DmaChannel;
61//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
62//! # let uart_rx_addr = 0x4000_0000 as *const u8;
63//! static mut RX_BUF: [u8; 64] = [0; 64];
64//!
65//! let ring_buf = unsafe {
66//! dma_ch.setup_circular_read(uart_rx_addr, &mut RX_BUF)
67//! };
68//!
69//! // Read data as it arrives
70//! let mut buf = [0u8; 16];
71//! let n = ring_buf.read(&mut buf).await.unwrap();
72//! ```
73//!
74//! ## Scatter-Gather Builder (For Chained Transfers)
75//!
76//! Use [`ScatterGatherBuilder`] for complex multi-segment transfers:
77//!
78//! ```no_run
79//! # use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
80//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
81//! let mut builder = ScatterGatherBuilder::<u32>::new();
82//! builder.add_transfer(&src1, &mut dst1);
83//! builder.add_transfer(&src2, &mut dst2);
84//!
85//! let transfer = unsafe { builder.build(&dma_ch).unwrap() };
86//! transfer.await;
87//! ```
88//!
89//! ## Direct TCD Access (For Advanced Use Cases)
90//!
91//! For full control, use the channel's `tcd()` method to access TCD registers directly.
92//! See the `dma_*` examples for patterns.
93//!
94//! # Example
95//!
96//! ```no_run
97//! use embassy_mcxa::dma::{DmaChannel, TransferOptions, Direction};
98//!
99//! let dma_ch = DmaChannel::new(p.DMA_CH0);
100//! // Configure and trigger a transfer...
101//! ```
102
103use core::future::Future;
104use core::marker::PhantomData;
105use core::pin::Pin;
106use core::ptr::NonNull;
107use core::sync::atomic::{AtomicUsize, Ordering, fence};
108use core::task::{Context, Poll};
109
110use embassy_hal_internal::PeripheralType;
111use embassy_sync::waitqueue::AtomicWaker;
112
113use crate::clocks::Gate;
114use crate::pac;
115use crate::pac::Interrupt;
116use crate::peripherals::DMA0;
117
118/// Initialize DMA controller (clock enabled, reset released, controller configured).
119///
120/// This function is intended to be called ONCE during HAL initialization (`hal::init()`).
121///
122/// The function enables the DMA0 clock, releases reset, and configures the controller
123/// for normal operation with round-robin arbitration.
124pub(crate) fn init() {
125 unsafe {
126 // Enable DMA0 clock and release reset
127 DMA0::enable_clock();
128 DMA0::release_reset();
129
130 // Configure DMA controller
131 let dma = &(*pac::Dma0::ptr());
132 dma.mp_csr().modify(|_, w| {
133 w.edbg()
134 .enable()
135 .erca()
136 .enable()
137 .halt()
138 .normal_operation()
139 .gclc()
140 .available()
141 .gmrc()
142 .available()
143 });
144 }
145}
146
147// ============================================================================
148// Phase 1: Foundation Types (Embassy-aligned)
149// ============================================================================
150
151/// DMA transfer direction.
152#[derive(Debug, Copy, Clone, PartialEq, Eq)]
153#[cfg_attr(feature = "defmt", derive(defmt::Format))]
154pub enum Direction {
155 /// Transfer from memory to memory.
156 MemoryToMemory,
157 /// Transfer from memory to a peripheral register.
158 MemoryToPeripheral,
159 /// Transfer from a peripheral register to memory.
160 PeripheralToMemory,
161}
162
163/// DMA transfer priority.
164#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
165#[cfg_attr(feature = "defmt", derive(defmt::Format))]
166pub enum Priority {
167 /// Low priority (channel priority 7).
168 Low,
169 /// Medium priority (channel priority 4).
170 Medium,
171 /// High priority (channel priority 1).
172 #[default]
173 High,
174 /// Highest priority (channel priority 0).
175 Highest,
176}
177
178impl Priority {
179 /// Convert to hardware priority value (0 = highest, 7 = lowest).
180 pub fn to_hw_priority(self) -> u8 {
181 match self {
182 Priority::Low => 7,
183 Priority::Medium => 4,
184 Priority::High => 1,
185 Priority::Highest => 0,
186 }
187 }
188}
189
190/// DMA transfer data width.
191#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
192#[cfg_attr(feature = "defmt", derive(defmt::Format))]
193pub enum WordSize {
194 /// 8-bit (1 byte) transfers.
195 OneByte,
196 /// 16-bit (2 byte) transfers.
197 TwoBytes,
198 /// 32-bit (4 byte) transfers.
199 #[default]
200 FourBytes,
201}
202
203impl WordSize {
204 /// Size in bytes.
205 pub const fn bytes(self) -> usize {
206 match self {
207 WordSize::OneByte => 1,
208 WordSize::TwoBytes => 2,
209 WordSize::FourBytes => 4,
210 }
211 }
212
213 /// Convert to hardware SSIZE/DSIZE field value.
214 pub const fn to_hw_size(self) -> u8 {
215 match self {
216 WordSize::OneByte => 0,
217 WordSize::TwoBytes => 1,
218 WordSize::FourBytes => 2,
219 }
220 }
221
222 /// Create from byte width (1, 2, or 4).
223 pub const fn from_bytes(bytes: u8) -> Option<Self> {
224 match bytes {
225 1 => Some(WordSize::OneByte),
226 2 => Some(WordSize::TwoBytes),
227 4 => Some(WordSize::FourBytes),
228 _ => None,
229 }
230 }
231}
232
233/// Trait for types that can be transferred via DMA.
234///
235/// This provides compile-time type safety for DMA transfers.
236pub trait Word: Copy + 'static {
237 /// The word size for this type.
238 fn size() -> WordSize;
239}
240
241impl Word for u8 {
242 fn size() -> WordSize {
243 WordSize::OneByte
244 }
245}
246
247impl Word for u16 {
248 fn size() -> WordSize {
249 WordSize::TwoBytes
250 }
251}
252
253impl Word for u32 {
254 fn size() -> WordSize {
255 WordSize::FourBytes
256 }
257}
258
259/// DMA transfer options.
260///
261/// This struct configures various aspects of a DMA transfer.
262#[derive(Debug, Copy, Clone, PartialEq, Eq)]
263#[cfg_attr(feature = "defmt", derive(defmt::Format))]
264#[non_exhaustive]
265pub struct TransferOptions {
266 /// Transfer priority.
267 pub priority: Priority,
268 /// Enable circular (continuous) mode.
269 ///
270 /// When enabled, the transfer repeats automatically after completing.
271 pub circular: bool,
272 /// Enable interrupt on half transfer complete.
273 pub half_transfer_interrupt: bool,
274 /// Enable interrupt on transfer complete.
275 pub complete_transfer_interrupt: bool,
276}
277
278impl Default for TransferOptions {
279 fn default() -> Self {
280 Self {
281 priority: Priority::High,
282 circular: false,
283 half_transfer_interrupt: false,
284 complete_transfer_interrupt: true,
285 }
286 }
287}
288
289/// DMA error types.
290#[derive(Debug, Copy, Clone, PartialEq, Eq)]
291#[cfg_attr(feature = "defmt", derive(defmt::Format))]
292pub enum Error {
293 /// The DMA controller reported a bus error.
294 BusError,
295 /// The transfer was aborted.
296 Aborted,
297 /// Configuration error (e.g., invalid parameters).
298 Configuration,
299 /// Buffer overrun (for ring buffers).
300 Overrun,
301}
302
303/// Whether to enable the major loop completion interrupt.
304///
305/// This enum provides better readability than a boolean parameter
306/// for functions that configure DMA interrupt behavior.
307#[derive(Debug, Copy, Clone, PartialEq, Eq)]
308#[cfg_attr(feature = "defmt", derive(defmt::Format))]
309pub enum EnableInterrupt {
310 /// Enable the interrupt on major loop completion.
311 Yes,
312 /// Do not enable the interrupt.
313 No,
314}
315
316// ============================================================================
317// DMA Constants
318// ============================================================================
319
320/// Maximum bytes per DMA transfer (eDMA4 CITER/BITER are 15-bit fields).
321///
322/// This is a hardware limitation of the eDMA4 controller. Transfers larger
323/// than this must be split into multiple DMA operations.
324pub const DMA_MAX_TRANSFER_SIZE: usize = 0x7FFF;
325
326// ============================================================================
327// DMA Request Source Types (Type-Safe API)
328// ============================================================================
329
330/// Trait for type-safe DMA request sources.
331///
332/// Each peripheral that can trigger DMA requests implements this trait
333/// with marker types that encode the correct request source number at
334/// compile time. This prevents using the wrong request source for a
335/// peripheral.
336///
337/// # Example
338///
339/// ```ignore
340/// // The LPUART2 RX request source is automatically derived from the type:
341/// channel.set_request_source::<Lpuart2RxRequest>();
342/// ```
343///
344/// This trait is sealed and cannot be implemented outside this crate.
345#[allow(private_bounds)]
346pub trait DmaRequest: sealed::SealedDmaRequest {
347 /// The hardware request source number for the DMA mux.
348 const REQUEST_NUMBER: u8;
349}
350
351/// Macro to define a DMA request type.
352///
353/// Creates a zero-sized marker type that implements `DmaRequest` with
354/// the specified request number.
355macro_rules! define_dma_request {
356 ($(#[$meta:meta])* $name:ident = $num:expr) => {
357 $(#[$meta])*
358 #[derive(Debug, Copy, Clone)]
359 pub struct $name;
360
361 impl sealed::SealedDmaRequest for $name {}
362
363 impl DmaRequest for $name {
364 const REQUEST_NUMBER: u8 = $num;
365 }
366 };
367}
368
369// LPUART DMA request sources (from MCXA276 reference manual Table 4-8)
370define_dma_request!(
371 /// DMA request source for LPUART0 RX.
372 Lpuart0RxRequest = 21
373);
374define_dma_request!(
375 /// DMA request source for LPUART0 TX.
376 Lpuart0TxRequest = 22
377);
378define_dma_request!(
379 /// DMA request source for LPUART1 RX.
380 Lpuart1RxRequest = 23
381);
382define_dma_request!(
383 /// DMA request source for LPUART1 TX.
384 Lpuart1TxRequest = 24
385);
386define_dma_request!(
387 /// DMA request source for LPUART2 RX.
388 Lpuart2RxRequest = 25
389);
390define_dma_request!(
391 /// DMA request source for LPUART2 TX.
392 Lpuart2TxRequest = 26
393);
394define_dma_request!(
395 /// DMA request source for LPUART3 RX.
396 Lpuart3RxRequest = 27
397);
398define_dma_request!(
399 /// DMA request source for LPUART3 TX.
400 Lpuart3TxRequest = 28
401);
402define_dma_request!(
403 /// DMA request source for LPUART4 RX.
404 Lpuart4RxRequest = 29
405);
406define_dma_request!(
407 /// DMA request source for LPUART4 TX.
408 Lpuart4TxRequest = 30
409);
410define_dma_request!(
411 /// DMA request source for LPUART5 RX.
412 Lpuart5RxRequest = 31
413);
414define_dma_request!(
415 /// DMA request source for LPUART5 TX.
416 Lpuart5TxRequest = 32
417);
418
419// ============================================================================
420// Channel Trait (Sealed Pattern)
421// ============================================================================
422
423mod sealed {
424 use crate::pac::Interrupt;
425
426 /// Sealed trait for DMA channels.
427 pub trait SealedChannel {
428 /// Zero-based channel index into the TCD array.
429 fn index(&self) -> usize;
430 /// Interrupt vector for this channel.
431 fn interrupt(&self) -> Interrupt;
432 }
433
434 /// Sealed trait for DMA request sources.
435 pub trait SealedDmaRequest {}
436}
437
438/// Marker trait implemented by HAL peripheral tokens that map to a DMA0
439/// channel backed by one EDMA_0_TCD0 TCD slot.
440///
441/// This trait is sealed and cannot be implemented outside this crate.
442#[allow(private_bounds)]
443pub trait Channel: sealed::SealedChannel + PeripheralType + Into<AnyChannel> + 'static {
444 /// Zero-based channel index into the TCD array.
445 const INDEX: usize;
446 /// Interrupt vector for this channel.
447 const INTERRUPT: Interrupt;
448}
449
450/// Type-erased DMA channel.
451///
452/// This allows storing DMA channels in a uniform way regardless of their
453/// concrete type, useful for async transfer futures and runtime channel selection.
454#[derive(Debug, Clone, Copy)]
455pub struct AnyChannel {
456 index: usize,
457 interrupt: Interrupt,
458}
459
460impl AnyChannel {
461 /// Get the channel index.
462 #[inline]
463 pub const fn index(&self) -> usize {
464 self.index
465 }
466
467 /// Get the channel interrupt.
468 #[inline]
469 pub const fn interrupt(&self) -> Interrupt {
470 self.interrupt
471 }
472
473 /// Get a reference to the TCD register block for this channel.
474 ///
475 /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
476 #[inline]
477 fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
478 // Safety: MCXA276 has a single eDMA instance, and we're only accessing
479 // the TCD for this specific channel
480 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
481 edma.tcd(self.index)
482 }
483
484 /// Check if the channel's DONE flag is set.
485 pub fn is_done(&self) -> bool {
486 self.tcd().ch_csr().read().done().bit_is_set()
487 }
488
489 /// Get the waker for this channel.
490 pub fn waker(&self) -> &'static AtomicWaker {
491 &STATES[self.index].waker
492 }
493}
494
495impl sealed::SealedChannel for AnyChannel {
496 fn index(&self) -> usize {
497 self.index
498 }
499
500 fn interrupt(&self) -> Interrupt {
501 self.interrupt
502 }
503}
504
505/// Macro to implement Channel trait for a peripheral.
506macro_rules! impl_channel {
507 ($peri:ident, $index:expr, $irq:ident) => {
508 impl sealed::SealedChannel for crate::peripherals::$peri {
509 fn index(&self) -> usize {
510 $index
511 }
512
513 fn interrupt(&self) -> Interrupt {
514 Interrupt::$irq
515 }
516 }
517
518 impl Channel for crate::peripherals::$peri {
519 const INDEX: usize = $index;
520 const INTERRUPT: Interrupt = Interrupt::$irq;
521 }
522
523 impl From<crate::peripherals::$peri> for AnyChannel {
524 fn from(_: crate::peripherals::$peri) -> Self {
525 AnyChannel {
526 index: $index,
527 interrupt: Interrupt::$irq,
528 }
529 }
530 }
531 };
532}
533
534impl_channel!(DMA_CH0, 0, DMA_CH0);
535impl_channel!(DMA_CH1, 1, DMA_CH1);
536impl_channel!(DMA_CH2, 2, DMA_CH2);
537impl_channel!(DMA_CH3, 3, DMA_CH3);
538impl_channel!(DMA_CH4, 4, DMA_CH4);
539impl_channel!(DMA_CH5, 5, DMA_CH5);
540impl_channel!(DMA_CH6, 6, DMA_CH6);
541impl_channel!(DMA_CH7, 7, DMA_CH7);
542
543/// Strongly-typed handle to a DMA0 channel.
544///
545/// The lifetime of this value is tied to the unique peripheral token
546/// supplied by `embassy_hal_internal::peripherals!`, so safe code cannot
547/// create two `DmaChannel` instances for the same hardware channel.
548pub struct DmaChannel<C: Channel> {
549 _ch: core::marker::PhantomData<C>,
550}
551
552// ============================================================================
553// DMA Transfer Methods - API Overview
554// ============================================================================
555//
556// The DMA API provides two categories of methods for configuring transfers:
557//
558// ## 1. Async Methods (Return `Transfer` Future)
559//
560// These methods return a [`Transfer`] Future that must be `.await`ed:
561//
562// - [`write()`](DmaChannel::write) - Memory-to-peripheral using default eDMA TCD block
563// - [`read()`](DmaChannel::read) - Peripheral-to-memory using default eDMA TCD block
564// - [`write_to_peripheral()`](DmaChannel::write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
565// - [`read_from_peripheral()`](DmaChannel::read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
566// - [`mem_to_mem()`](DmaChannel::mem_to_mem) - Memory-to-memory using default eDMA TCD block
567//
568// The `Transfer` manages the DMA lifecycle automatically:
569// - Enables channel request
570// - Waits for completion via async/await
571// - Cleans up on completion
572//
573// **Important:** `Transfer::Drop` aborts the transfer if dropped before completion.
574// This means you MUST `.await` the Transfer or it will be aborted when it goes out of scope.
575//
576// **Use case:** When you want to use async/await and let the Transfer handle lifecycle management.
577//
578// ## 2. Setup Methods (Configure TCD Only)
579//
580// These methods configure the TCD but do NOT return a `Transfer`:
581//
582// - [`setup_write()`](DmaChannel::setup_write) - Memory-to-peripheral using default eDMA TCD block
583// - [`setup_read()`](DmaChannel::setup_read) - Peripheral-to-memory using default eDMA TCD block
584// - [`setup_write_to_peripheral()`](DmaChannel::setup_write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
585// - [`setup_read_from_peripheral()`](DmaChannel::setup_read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
586//
587// The caller is responsible for the complete DMA lifecycle:
588// 1. Call [`enable_request()`](DmaChannel::enable_request) to start the transfer
589// 2. Poll [`is_done()`](DmaChannel::is_done) or use interrupts to detect completion
590// 3. Call [`disable_request()`](DmaChannel::disable_request), [`clear_done()`](DmaChannel::clear_done),
591// [`clear_interrupt()`](DmaChannel::clear_interrupt) for cleanup
592//
593// **Use case:** Peripheral drivers (like LPUART) that need fine-grained control over
594// DMA setup before starting a `Transfer`.
595//
596// ============================================================================
597
598impl<C: Channel> DmaChannel<C> {
599 /// Wrap a DMA channel token (takes ownership of the Peri wrapper).
600 ///
601 /// Note: DMA is initialized during `hal::init()` via `dma::init()`.
602 #[inline]
603 pub fn new(_ch: embassy_hal_internal::Peri<'_, C>) -> Self {
604 unsafe {
605 cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
606 }
607 Self {
608 _ch: core::marker::PhantomData,
609 }
610 }
611
612 /// Channel index in the EDMA_0_TCD0 array.
613 #[inline]
614 pub const fn index(&self) -> usize {
615 C::INDEX
616 }
617
618 /// Convert this typed channel into a type-erased `AnyChannel`.
619 #[inline]
620 pub fn into_any(self) -> AnyChannel {
621 AnyChannel {
622 index: C::INDEX,
623 interrupt: C::INTERRUPT,
624 }
625 }
626
627 /// Get a reference to the type-erased channel info.
628 #[inline]
629 pub fn as_any(&self) -> AnyChannel {
630 AnyChannel {
631 index: C::INDEX,
632 interrupt: C::INTERRUPT,
633 }
634 }
635
636 /// Return a reference to the underlying TCD register block.
637 ///
638 /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
639 ///
640 /// # Note
641 ///
642 /// This is exposed for advanced use cases that need direct TCD access.
643 /// For most use cases, prefer the higher-level transfer methods.
644 #[inline]
645 pub fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
646 // Safety: MCXA276 has a single eDMA instance
647 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
648 edma.tcd(C::INDEX)
649 }
650
651 fn clear_tcd(t: &'static pac::edma_0_tcd0::Tcd) {
652 // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
653 // Reset ALL TCD registers to 0 to clear any stale configuration from
654 // previous transfers. This is critical when reusing a channel.
655 t.tcd_saddr().write(|w| unsafe { w.saddr().bits(0) });
656 t.tcd_soff().write(|w| unsafe { w.soff().bits(0) });
657 t.tcd_attr().write(|w| unsafe { w.bits(0) });
658 t.tcd_nbytes_mloffno().write(|w| unsafe { w.nbytes().bits(0) });
659 t.tcd_slast_sda().write(|w| unsafe { w.slast_sda().bits(0) });
660 t.tcd_daddr().write(|w| unsafe { w.daddr().bits(0) });
661 t.tcd_doff().write(|w| unsafe { w.doff().bits(0) });
662 t.tcd_citer_elinkno().write(|w| unsafe { w.bits(0) });
663 t.tcd_dlast_sga().write(|w| unsafe { w.dlast_sga().bits(0) });
664 t.tcd_csr().write(|w| unsafe { w.bits(0) }); // Clear CSR completely
665 t.tcd_biter_elinkno().write(|w| unsafe { w.bits(0) });
666 }
667
668 #[inline]
669 fn set_major_loop_ct_elinkno(t: &'static pac::edma_0_tcd0::Tcd, count: u16) {
670 t.tcd_biter_elinkno().write(|w| unsafe { w.biter().bits(count) });
671 t.tcd_citer_elinkno().write(|w| unsafe { w.citer().bits(count) });
672 }
673
674 #[inline]
675 fn set_minor_loop_ct_no_offsets(t: &'static pac::edma_0_tcd0::Tcd, count: u32) {
676 t.tcd_nbytes_mloffno().write(|w| unsafe { w.nbytes().bits(count) });
677 }
678
679 #[inline]
680 fn set_no_final_adjustments(t: &'static pac::edma_0_tcd0::Tcd) {
681 // No source/dest adjustment after major loop
682 t.tcd_slast_sda().write(|w| unsafe { w.slast_sda().bits(0) });
683 t.tcd_dlast_sga().write(|w| unsafe { w.dlast_sga().bits(0) });
684 }
685
686 #[inline]
687 fn set_source_ptr<T>(t: &'static pac::edma_0_tcd0::Tcd, p: *const T) {
688 t.tcd_saddr().write(|w| unsafe { w.saddr().bits(p as u32) });
689 }
690
691 #[inline]
692 fn set_source_increment(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) {
693 t.tcd_soff().write(|w| unsafe { w.soff().bits(sz.bytes() as u16) });
694 }
695
696 #[inline]
697 fn set_source_fixed(t: &'static pac::edma_0_tcd0::Tcd) {
698 t.tcd_soff().write(|w| unsafe { w.soff().bits(0) });
699 }
700
701 #[inline]
702 fn set_dest_ptr<T>(t: &'static pac::edma_0_tcd0::Tcd, p: *mut T) {
703 t.tcd_daddr().write(|w| unsafe { w.daddr().bits(p as u32) });
704 }
705
706 #[inline]
707 fn set_dest_increment(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) {
708 t.tcd_doff().write(|w| unsafe { w.doff().bits(sz.bytes() as u16) });
709 }
710
711 #[inline]
712 fn set_dest_fixed(t: &'static pac::edma_0_tcd0::Tcd) {
713 t.tcd_doff().write(|w| unsafe { w.doff().bits(0) });
714 }
715
716 #[inline]
717 fn set_even_transfer_size(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) {
718 let hw_size = sz.to_hw_size();
719 t.tcd_attr()
720 .write(|w| unsafe { w.ssize().bits(hw_size).dsize().bits(hw_size) });
721 }
722
723 #[inline]
724 fn reset_channel_state(t: &'static pac::edma_0_tcd0::Tcd) {
725 // CSR: Resets to all zeroes (disabled), "done" is cleared by writing 1
726 t.ch_csr().write(|w| w.done().clear_bit_by_one());
727 // ES: Resets to all zeroes (disabled), "err" is cleared by writing 1
728 t.ch_es().write(|w| w.err().clear_bit_by_one());
729 // INT: Resets to all zeroes (disabled), "int" is cleared by writing 1
730 t.ch_int().write(|w| w.int().clear_bit_by_one());
731 }
732
733 /// Start an async transfer.
734 ///
735 /// The channel must already be configured. This enables the channel
736 /// request and returns a `Transfer` future that resolves when the
737 /// DMA transfer completes.
738 ///
739 /// # Safety
740 ///
741 /// The caller must ensure the DMA channel has been properly configured
742 /// and that source/destination buffers remain valid for the duration
743 /// of the transfer.
744 pub unsafe fn start_transfer(&self) -> Transfer<'_> {
745 // Clear any previous DONE/INT flags
746 let t = self.tcd();
747 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
748 t.ch_int().write(|w| w.int().clear_bit_by_one());
749
750 // Enable the channel request
751 t.ch_csr().modify(|_, w| w.erq().enable());
752
753 Transfer::new(self.as_any())
754 }
755
756 // ========================================================================
757 // Type-Safe Transfer Methods (Embassy-style API)
758 // ========================================================================
759
760 /// Perform a memory-to-memory DMA transfer (simplified API).
761 ///
762 /// This is a type-safe wrapper that uses the `Word` trait to determine
763 /// the correct transfer width automatically. Uses the global eDMA TCD
764 /// register accessor internally.
765 ///
766 /// # Arguments
767 ///
768 /// * `src` - Source buffer
769 /// * `dst` - Destination buffer (must be at least as large as src)
770 /// * `options` - Transfer configuration options
771 ///
772 /// # Safety
773 ///
774 /// The source and destination buffers must remain valid for the
775 /// duration of the transfer.
776 pub fn mem_to_mem<W: Word>(
777 &self,
778 src: &[W],
779 dst: &mut [W],
780 options: TransferOptions,
781 ) -> Result<Transfer<'_>, Error> {
782 let mut invalid = false;
783 invalid |= src.is_empty();
784 invalid |= src.len() > dst.len();
785 invalid |= src.len() > 0x7fff;
786 if invalid {
787 return Err(Error::Configuration);
788 }
789
790 let size = W::size();
791 let byte_count = (src.len() * size.bytes()) as u32;
792
793 let t = self.tcd();
794
795 // Reset channel state - clear DONE, disable requests, clear errors
796 Self::reset_channel_state(t);
797
798 // Memory barrier to ensure channel state is fully reset before touching TCD
799 cortex_m::asm::dsb();
800
801 Self::clear_tcd(t);
802
803 // Memory barrier after TCD reset
804 cortex_m::asm::dsb();
805
806 // Note: Priority is managed by round-robin arbitration (set in init())
807 // Per-channel priority can be configured via ch_pri() if needed
808
809 // Now configure the new transfer
810
811 // Source address and increment
812 Self::set_source_ptr(t, src.as_ptr());
813 Self::set_source_increment(t, size);
814
815 // Destination address and increment
816 Self::set_dest_ptr(t, dst.as_mut_ptr());
817 Self::set_dest_increment(t, size);
818
819 // Transfer attributes (size)
820 Self::set_even_transfer_size(t, size);
821
822 // Minor loop: transfer all bytes in one minor loop
823 Self::set_minor_loop_ct_no_offsets(t, byte_count);
824
825 // No source/dest adjustment after major loop
826 Self::set_no_final_adjustments(t);
827
828 // Major loop count = 1 (single major loop)
829 // Write BITER first, then CITER (CITER must match BITER at start)
830 Self::set_major_loop_ct_elinkno(t, 1);
831
832 // Memory barrier before setting START
833 cortex_m::asm::dsb();
834
835 // Control/status: interrupt on major complete, start
836 // Write this last after all other TCD registers are configured
837 let int_major = options.complete_transfer_interrupt;
838 t.tcd_csr().write(|w| {
839 w.intmajor()
840 .bit(int_major)
841 .inthalf()
842 .bit(options.half_transfer_interrupt)
843 .dreq()
844 .set_bit() // Auto-disable request after major loop
845 .start()
846 .set_bit() // Start the channel
847 });
848
849 Ok(Transfer::new(self.as_any()))
850 }
851
852 /// Fill a memory buffer with a pattern value (memset).
853 ///
854 /// This performs a DMA transfer where the source address remains fixed
855 /// (pattern value) while the destination address increments through the buffer.
856 /// It's useful for quickly filling large memory regions with a constant value.
857 ///
858 /// # Arguments
859 ///
860 /// * `pattern` - Reference to the pattern value (will be read repeatedly)
861 /// * `dst` - Destination buffer to fill
862 /// * `options` - Transfer configuration options
863 ///
864 /// # Example
865 ///
866 /// ```no_run
867 /// use embassy_mcxa::dma::{DmaChannel, TransferOptions};
868 ///
869 /// let dma_ch = DmaChannel::new(p.DMA_CH0);
870 /// let pattern: u32 = 0xDEADBEEF;
871 /// let mut buffer = [0u32; 256];
872 ///
873 /// unsafe {
874 /// dma_ch.memset(&pattern, &mut buffer, TransferOptions::default()).await;
875 /// }
876 /// // buffer is now filled with 0xDEADBEEF
877 /// ```
878 ///
879 pub fn memset<W: Word>(&self, pattern: &W, dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
880 assert!(!dst.is_empty());
881 assert!(dst.len() <= 0x7fff);
882
883 let size = W::size();
884 let byte_size = size.bytes();
885 // Total bytes to transfer - all in one minor loop for software-triggered transfers
886 let total_bytes = (dst.len() * byte_size) as u32;
887
888 let t = self.tcd();
889
890 // Reset channel state - clear DONE, disable requests, clear errors
891 Self::reset_channel_state(t);
892
893 // Memory barrier to ensure channel state is fully reset before touching TCD
894 cortex_m::asm::dsb();
895
896 Self::clear_tcd(t);
897
898 // Memory barrier after TCD reset
899 cortex_m::asm::dsb();
900
901 // Now configure the new transfer
902 //
903 // For software-triggered memset, we use a SINGLE minor loop that transfers
904 // all bytes at once. The source address stays fixed (SOFF=0) while the
905 // destination increments (DOFF=byte_size). The eDMA will read from the
906 // same source address for each destination word.
907 //
908 // This is necessary because the START bit only triggers ONE minor loop
909 // iteration. Using CITER>1 with software trigger would require multiple
910 // START triggers.
911
912 // Source: pattern address, fixed (soff=0)
913 Self::set_source_ptr(t, pattern);
914 Self::set_source_fixed(t);
915
916 // Destination: memory buffer, incrementing by word size
917 Self::set_dest_ptr(t, dst.as_mut_ptr());
918 Self::set_dest_increment(t, size);
919
920 // Transfer attributes - source and dest are same word size
921 Self::set_even_transfer_size(t, size);
922
923 // Minor loop: transfer ALL bytes in one minor loop (like mem_to_mem)
924 // This allows the entire transfer to complete with a single START trigger
925 Self::set_minor_loop_ct_no_offsets(t, total_bytes);
926
927 // No address adjustment after major loop
928 Self::set_no_final_adjustments(t);
929
930 // Major loop count = 1 (single major loop, all data in minor loop)
931 // Write BITER first, then CITER (CITER must match BITER at start)
932 Self::set_major_loop_ct_elinkno(t, 1);
933
934 // Memory barrier before setting START
935 cortex_m::asm::dsb();
936
937 // Control/status: interrupt on major complete, start immediately
938 // Write this last after all other TCD registers are configured
939 let int_major = options.complete_transfer_interrupt;
940 t.tcd_csr().write(|w| {
941 w.intmajor()
942 .bit(int_major)
943 .inthalf()
944 .bit(options.half_transfer_interrupt)
945 .dreq()
946 .set_bit() // Auto-disable request after major loop
947 .start()
948 .set_bit() // Start the channel
949 });
950
951 Transfer::new(self.as_any())
952 }
953
954 /// Write data from memory to a peripheral register.
955 ///
956 /// The destination address remains fixed (peripheral register) while
957 /// the source address increments through the buffer.
958 ///
959 /// # Arguments
960 ///
961 /// * `buf` - Source buffer to write from
962 /// * `peri_addr` - Peripheral register address
963 /// * `options` - Transfer configuration options
964 ///
965 /// # Safety
966 ///
967 /// - The buffer must remain valid for the duration of the transfer.
968 /// - The peripheral address must be valid for writes.
969 pub unsafe fn write<W: Word>(&self, buf: &[W], peri_addr: *mut W, options: TransferOptions) -> Transfer<'_> {
970 self.write_to_peripheral(buf, peri_addr, options)
971 }
972
973 /// Configure a memory-to-peripheral DMA transfer without starting it.
974 ///
975 /// This is a convenience wrapper around [`setup_write_to_peripheral()`](Self::setup_write_to_peripheral)
976 /// that uses the default eDMA TCD register block.
977 ///
978 /// This method configures the TCD but does NOT return a `Transfer`. The caller
979 /// is responsible for the complete DMA lifecycle:
980 /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
981 /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
982 /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
983 /// [`clear_interrupt()`](Self::clear_interrupt) for cleanup
984 ///
985 /// # Example
986 ///
987 /// ```no_run
988 /// # use embassy_mcxa::dma::DmaChannel;
989 /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
990 /// # let uart_tx_addr = 0x4000_0000 as *mut u8;
991 /// let data = [0x48, 0x65, 0x6c, 0x6c, 0x6f]; // "Hello"
992 ///
993 /// unsafe {
994 /// // Configure the transfer
995 /// dma_ch.setup_write(&data, uart_tx_addr, EnableInterrupt::Yes);
996 ///
997 /// // Start when peripheral is ready
998 /// dma_ch.enable_request();
999 ///
1000 /// // Wait for completion (or use interrupt)
1001 /// while !dma_ch.is_done() {}
1002 ///
1003 /// // Clean up
1004 /// dma_ch.clear_done();
1005 /// dma_ch.clear_interrupt();
1006 /// }
1007 /// ```
1008 ///
1009 /// # Arguments
1010 ///
1011 /// * `buf` - Source buffer to write from
1012 /// * `peri_addr` - Peripheral register address
1013 /// * `enable_interrupt` - Whether to enable interrupt on completion
1014 ///
1015 /// # Safety
1016 ///
1017 /// - The buffer must remain valid for the duration of the transfer.
1018 /// - The peripheral address must be valid for writes.
1019 pub unsafe fn setup_write<W: Word>(&self, buf: &[W], peri_addr: *mut W, enable_interrupt: EnableInterrupt) {
1020 self.setup_write_to_peripheral(buf, peri_addr, enable_interrupt)
1021 }
1022
1023 /// Write data from memory to a peripheral register.
1024 ///
1025 /// The destination address remains fixed (peripheral register) while
1026 /// the source address increments through the buffer.
1027 ///
1028 /// # Arguments
1029 ///
1030 /// * `buf` - Source buffer to write from
1031 /// * `peri_addr` - Peripheral register address
1032 /// * `options` - Transfer configuration options
1033 ///
1034 /// # Safety
1035 ///
1036 /// - The buffer must remain valid for the duration of the transfer.
1037 /// - The peripheral address must be valid for writes.
1038 pub unsafe fn write_to_peripheral<W: Word>(
1039 &self,
1040 buf: &[W],
1041 peri_addr: *mut W,
1042 options: TransferOptions,
1043 ) -> Transfer<'_> {
1044 assert!(!buf.is_empty());
1045 assert!(buf.len() <= 0x7fff);
1046
1047 let size = W::size();
1048 let byte_size = size.bytes();
1049
1050 let t = self.tcd();
1051
1052 // Reset channel state
1053 Self::reset_channel_state(t);
1054
1055 // Addresses
1056 Self::set_source_ptr(t, buf.as_ptr());
1057 Self::set_dest_ptr(t, peri_addr);
1058
1059 // Offsets: Source increments, Dest fixed
1060 Self::set_source_increment(t, size);
1061 Self::set_dest_fixed(t);
1062
1063 // Attributes: set size and explicitly disable modulo
1064 Self::set_even_transfer_size(t, size);
1065
1066 // Minor loop: transfer one word per request (match old: only set nbytes)
1067 Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);
1068
1069 // No final adjustments
1070 Self::set_no_final_adjustments(t);
1071
1072 // Major loop count = number of words
1073 let count = buf.len() as u16;
1074 Self::set_major_loop_ct_elinkno(t, count);
1075
1076 // CSR: interrupt on major loop complete and auto-clear ERQ
1077 t.tcd_csr().write(|w| {
1078 let w = if options.complete_transfer_interrupt {
1079 w.intmajor().enable()
1080 } else {
1081 w.intmajor().disable()
1082 };
1083 w.inthalf()
1084 .disable()
1085 .dreq()
1086 .erq_field_clear() // Disable request when done
1087 .esg()
1088 .normal_format()
1089 .majorelink()
1090 .disable()
1091 .eeop()
1092 .disable()
1093 .esda()
1094 .disable()
1095 .bwc()
1096 .no_stall()
1097 });
1098
1099 // Ensure all TCD writes have completed before DMA engine reads them
1100 cortex_m::asm::dsb();
1101
1102 Transfer::new(self.as_any())
1103 }
1104
1105 /// Read data from a peripheral register to memory.
1106 ///
1107 /// The source address remains fixed (peripheral register) while
1108 /// the destination address increments through the buffer.
1109 ///
1110 /// # Arguments
1111 ///
1112 /// * `peri_addr` - Peripheral register address
1113 /// * `buf` - Destination buffer to read into
1114 /// * `options` - Transfer configuration options
1115 ///
1116 /// # Safety
1117 ///
1118 /// - The buffer must remain valid for the duration of the transfer.
1119 /// - The peripheral address must be valid for reads.
1120 pub unsafe fn read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], options: TransferOptions) -> Transfer<'_> {
1121 self.read_from_peripheral(peri_addr, buf, options)
1122 }
1123
1124 /// Configure a peripheral-to-memory DMA transfer without starting it.
1125 ///
1126 /// This is a convenience wrapper around [`setup_read_from_peripheral()`](Self::setup_read_from_peripheral)
1127 /// that uses the default eDMA TCD register block.
1128 ///
1129 /// This method configures the TCD but does NOT return a `Transfer`. The caller
1130 /// is responsible for the complete DMA lifecycle:
1131 /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
1132 /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
1133 /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
1134 /// [`clear_interrupt()`](Self::clear_interrupt) for cleanup
1135 ///
1136 /// # Example
1137 ///
1138 /// ```no_run
1139 /// # use embassy_mcxa::dma::DmaChannel;
1140 /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
1141 /// # let uart_rx_addr = 0x4000_0000 as *const u8;
1142 /// let mut buf = [0u8; 32];
1143 ///
1144 /// unsafe {
1145 /// // Configure the transfer
1146 /// dma_ch.setup_read(uart_rx_addr, &mut buf, EnableInterrupt::Yes);
1147 ///
1148 /// // Start when peripheral is ready
1149 /// dma_ch.enable_request();
1150 ///
1151 /// // Wait for completion (or use interrupt)
1152 /// while !dma_ch.is_done() {}
1153 ///
1154 /// // Clean up
1155 /// dma_ch.clear_done();
1156 /// dma_ch.clear_interrupt();
1157 /// }
1158 /// // buf now contains received data
1159 /// ```
1160 ///
1161 /// # Arguments
1162 ///
1163 /// * `peri_addr` - Peripheral register address
1164 /// * `buf` - Destination buffer to read into
1165 /// * `enable_interrupt` - Whether to enable interrupt on completion
1166 ///
1167 /// # Safety
1168 ///
1169 /// - The buffer must remain valid for the duration of the transfer.
1170 /// - The peripheral address must be valid for reads.
1171 pub unsafe fn setup_read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], enable_interrupt: EnableInterrupt) {
1172 self.setup_read_from_peripheral(peri_addr, buf, enable_interrupt)
1173 }
1174
1175 /// Read data from a peripheral register to memory.
1176 ///
1177 /// The source address remains fixed (peripheral register) while
1178 /// the destination address increments through the buffer.
1179 ///
1180 /// # Arguments
1181 ///
1182 /// * `peri_addr` - Peripheral register address
1183 /// * `buf` - Destination buffer to read into
1184 /// * `options` - Transfer configuration options
1185 ///
1186 /// # Safety
1187 ///
1188 /// - The buffer must remain valid for the duration of the transfer.
1189 /// - The peripheral address must be valid for reads.
1190 pub unsafe fn read_from_peripheral<W: Word>(
1191 &self,
1192 peri_addr: *const W,
1193 buf: &mut [W],
1194 options: TransferOptions,
1195 ) -> Transfer<'_> {
1196 assert!(!buf.is_empty());
1197 assert!(buf.len() <= 0x7fff);
1198
1199 let size = W::size();
1200 let byte_size = size.bytes();
1201
1202 let t = self.tcd();
1203
1204 // Reset channel control/error/interrupt state
1205 Self::reset_channel_state(t);
1206
1207 // Source: peripheral register, fixed
1208 Self::set_source_ptr(t, peri_addr);
1209 Self::set_source_fixed(t);
1210
1211 // Destination: memory buffer, incrementing
1212 Self::set_dest_ptr(t, buf.as_mut_ptr());
1213 Self::set_dest_increment(t, size);
1214
1215 // Transfer attributes: set size and explicitly disable modulo
1216 Self::set_even_transfer_size(t, size);
1217
1218 // Minor loop: transfer one word per request, no offsets
1219 Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);
1220
1221 // Major loop count = number of words
1222 let count = buf.len() as u16;
1223 Self::set_major_loop_ct_elinkno(t, count);
1224
1225 // No address adjustment after major loop
1226 Self::set_no_final_adjustments(t);
1227
1228 // Control/status: interrupt on major complete, auto-clear ERQ when done
1229 t.tcd_csr().write(|w| {
1230 let w = if options.complete_transfer_interrupt {
1231 w.intmajor().enable()
1232 } else {
1233 w.intmajor().disable()
1234 };
1235 let w = if options.half_transfer_interrupt {
1236 w.inthalf().enable()
1237 } else {
1238 w.inthalf().disable()
1239 };
1240 w.dreq()
1241 .erq_field_clear() // Disable request when done (important for peripheral DMA)
1242 .esg()
1243 .normal_format()
1244 .majorelink()
1245 .disable()
1246 .eeop()
1247 .disable()
1248 .esda()
1249 .disable()
1250 .bwc()
1251 .no_stall()
1252 });
1253
1254 // Ensure all TCD writes have completed before DMA engine reads them
1255 cortex_m::asm::dsb();
1256
1257 Transfer::new(self.as_any())
1258 }
1259
1260 /// Configure a memory-to-peripheral DMA transfer without starting it.
1261 ///
1262 /// This configures the TCD for a memory-to-peripheral transfer but does NOT
1263 /// return a Transfer object. The caller is responsible for:
1264 /// 1. Enabling the peripheral's DMA request
1265 /// 2. Calling `enable_request()` to start the transfer
1266 /// 3. Polling `is_done()` or using interrupts to detect completion
1267 /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
1268 ///
1269 /// Use this when you need manual control over the DMA lifecycle (e.g., in
1270 /// peripheral drivers that have their own completion polling).
1271 ///
1272 /// # Arguments
1273 ///
1274 /// * `buf` - Source buffer to write from
1275 /// * `peri_addr` - Peripheral register address
1276 /// * `enable_interrupt` - Whether to enable interrupt on completion
1277 ///
1278 /// # Safety
1279 ///
1280 /// - The buffer must remain valid for the duration of the transfer.
1281 /// - The peripheral address must be valid for writes.
1282 pub unsafe fn setup_write_to_peripheral<W: Word>(
1283 &self,
1284 buf: &[W],
1285 peri_addr: *mut W,
1286 enable_interrupt: EnableInterrupt,
1287 ) {
1288 assert!(!buf.is_empty());
1289 assert!(buf.len() <= 0x7fff);
1290
1291 let size = W::size();
1292 let byte_size = size.bytes();
1293
1294 let t = self.tcd();
1295
1296 // Reset channel state
1297 Self::reset_channel_state(t);
1298
1299 // Addresses
1300 Self::set_source_ptr(t, buf.as_ptr());
1301 Self::set_dest_ptr(t, peri_addr);
1302
1303 // Offsets: Source increments, Dest fixed
1304 Self::set_source_increment(t, size);
1305 Self::set_dest_fixed(t);
1306
1307 // Attributes: set size and explicitly disable modulo
1308 Self::set_even_transfer_size(t, size);
1309
1310 // Minor loop: transfer one word per request
1311 Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);
1312
1313 // No final adjustments
1314 Self::set_no_final_adjustments(t);
1315
1316 // Major loop count = number of words
1317 let count = buf.len() as u16;
1318 Self::set_major_loop_ct_elinkno(t, count);
1319
1320 // CSR: optional interrupt on major loop complete and auto-clear ERQ
1321 t.tcd_csr().write(|w| {
1322 let w = match enable_interrupt {
1323 EnableInterrupt::Yes => w.intmajor().enable(),
1324 EnableInterrupt::No => w.intmajor().disable(),
1325 };
1326 w.inthalf()
1327 .disable()
1328 .dreq()
1329 .erq_field_clear()
1330 .esg()
1331 .normal_format()
1332 .majorelink()
1333 .disable()
1334 .eeop()
1335 .disable()
1336 .esda()
1337 .disable()
1338 .bwc()
1339 .no_stall()
1340 });
1341
1342 // Ensure all TCD writes have completed before DMA engine reads them
1343 cortex_m::asm::dsb();
1344 }
1345
1346 /// Configure a peripheral-to-memory DMA transfer without starting it.
1347 ///
1348 /// This configures the TCD for a peripheral-to-memory transfer but does NOT
1349 /// return a Transfer object. The caller is responsible for:
1350 /// 1. Enabling the peripheral's DMA request
1351 /// 2. Calling `enable_request()` to start the transfer
1352 /// 3. Polling `is_done()` or using interrupts to detect completion
1353 /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
1354 ///
1355 /// Use this when you need manual control over the DMA lifecycle (e.g., in
1356 /// peripheral drivers that have their own completion polling).
1357 ///
1358 /// # Arguments
1359 ///
1360 /// * `peri_addr` - Peripheral register address
1361 /// * `buf` - Destination buffer to read into
1362 /// * `enable_interrupt` - Whether to enable interrupt on completion
1363 ///
1364 /// # Safety
1365 ///
1366 /// - The buffer must remain valid for the duration of the transfer.
1367 /// - The peripheral address must be valid for reads.
1368 pub unsafe fn setup_read_from_peripheral<W: Word>(
1369 &self,
1370 peri_addr: *const W,
1371 buf: &mut [W],
1372 enable_interrupt: EnableInterrupt,
1373 ) {
1374 assert!(!buf.is_empty());
1375 assert!(buf.len() <= 0x7fff);
1376
1377 let size = W::size();
1378 let byte_size = size.bytes();
1379
1380 let t = self.tcd();
1381
1382 // Reset channel control/error/interrupt state
1383 Self::reset_channel_state(t);
1384
1385 // Source: peripheral register, fixed
1386 Self::set_source_ptr(t, peri_addr);
1387 Self::set_source_fixed(t);
1388
1389 // Destination: memory buffer, incrementing
1390 Self::set_dest_ptr(t, buf.as_mut_ptr());
1391 Self::set_dest_increment(t, size);
1392
1393 // Attributes: set size and explicitly disable modulo
1394 Self::set_even_transfer_size(t, size);
1395
1396 // Minor loop: transfer one word per request
1397 Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);
1398
1399 // No final adjustments
1400 Self::set_no_final_adjustments(t);
1401
1402 // Major loop count = number of words
1403 let count = buf.len() as u16;
1404 Self::set_major_loop_ct_elinkno(t, count);
1405
1406 // CSR: optional interrupt on major loop complete and auto-clear ERQ
1407 t.tcd_csr().write(|w| {
1408 let w = match enable_interrupt {
1409 EnableInterrupt::Yes => w.intmajor().enable(),
1410 EnableInterrupt::No => w.intmajor().disable(),
1411 };
1412 w.inthalf()
1413 .disable()
1414 .dreq()
1415 .erq_field_clear()
1416 .esg()
1417 .normal_format()
1418 .majorelink()
1419 .disable()
1420 .eeop()
1421 .disable()
1422 .esda()
1423 .disable()
1424 .bwc()
1425 .no_stall()
1426 });
1427
1428 // Ensure all TCD writes have completed before DMA engine reads them
1429 cortex_m::asm::dsb();
1430 }
1431
1432 /// Configure the integrated channel MUX to use the given typed
1433 /// DMA request source (e.g., [`Lpuart2TxRequest`] or [`Lpuart2RxRequest`]).
1434 ///
1435 /// This is the type-safe version that uses marker types to ensure
1436 /// compile-time verification of request source validity.
1437 ///
1438 /// # Safety
1439 ///
1440 /// The channel must be properly configured before enabling requests.
1441 /// The caller must ensure the DMA request source matches the peripheral
1442 /// that will drive this channel.
1443 ///
1444 /// # Note
1445 ///
1446 /// The NXP SDK requires a two-step write sequence: first clear
1447 /// the mux to 0, then set the actual source. This is a hardware
1448 /// requirement on eDMA4 for the mux to properly latch.
1449 ///
1450 /// # Example
1451 ///
1452 /// ```ignore
1453 /// use embassy_mcxa::dma::{DmaChannel, Lpuart2RxRequest};
1454 ///
1455 /// // Type-safe: compiler verifies this is a valid DMA request type
1456 /// unsafe {
1457 /// channel.set_request_source::<Lpuart2RxRequest>();
1458 /// }
1459 /// ```
1460 #[inline]
1461 pub unsafe fn set_request_source<R: DmaRequest>(&self) {
1462 // Two-step write per NXP SDK: clear to 0, then set actual source.
1463 self.tcd().ch_mux().write(|w| w.src().bits(0));
1464 cortex_m::asm::dsb(); // Ensure the clear completes before setting new source
1465 self.tcd().ch_mux().write(|w| w.src().bits(R::REQUEST_NUMBER));
1466 }
1467
1468 /// Enable hardware requests for this channel (ERQ=1).
1469 ///
1470 /// # Safety
1471 ///
1472 /// The channel must be properly configured before enabling requests.
1473 pub unsafe fn enable_request(&self) {
1474 let t = self.tcd();
1475 t.ch_csr().modify(|_, w| w.erq().enable());
1476 }
1477
1478 /// Disable hardware requests for this channel (ERQ=0).
1479 ///
1480 /// # Safety
1481 ///
1482 /// Disabling requests on an active transfer may leave the transfer incomplete.
1483 pub unsafe fn disable_request(&self) {
1484 let t = self.tcd();
1485 t.ch_csr().modify(|_, w| w.erq().disable());
1486 }
1487
1488 /// Return true if the channel's DONE flag is set.
1489 pub fn is_done(&self) -> bool {
1490 let t = self.tcd();
1491 t.ch_csr().read().done().bit_is_set()
1492 }
1493
1494 /// Clear the DONE flag for this channel.
1495 ///
1496 /// Uses modify to preserve other bits (especially ERQ) unlike write
1497 /// which would clear ERQ and halt an active transfer.
1498 ///
1499 /// # Safety
1500 ///
1501 /// Clearing DONE while a transfer is in progress may cause undefined behavior.
1502 pub unsafe fn clear_done(&self) {
1503 let t = self.tcd();
1504 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
1505 }
1506
1507 /// Clear the channel interrupt flag (CH_INT.INT).
1508 ///
1509 /// # Safety
1510 ///
1511 /// Must be called from the correct interrupt context or with interrupts disabled.
1512 pub unsafe fn clear_interrupt(&self) {
1513 let t = self.tcd();
1514 t.ch_int().write(|w| w.int().clear_bit_by_one());
1515 }
1516
1517 /// Trigger a software start for this channel.
1518 ///
1519 /// # Safety
1520 ///
1521 /// The channel must be properly configured with a valid TCD before triggering.
1522 pub unsafe fn trigger_start(&self) {
1523 let t = self.tcd();
1524 t.tcd_csr().modify(|_, w| w.start().channel_started());
1525 }
1526
1527 /// Get the waker for this channel
1528 pub fn waker(&self) -> &'static AtomicWaker {
1529 &STATES[C::INDEX].waker
1530 }
1531
1532 /// Enable the interrupt for this channel in the NVIC.
1533 pub fn enable_interrupt(&self) {
1534 unsafe {
1535 cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
1536 }
1537 }
1538
1539 /// Enable Major Loop Linking.
1540 ///
1541 /// When the major loop completes, the hardware will trigger a service request
1542 /// on `link_ch`.
1543 ///
1544 /// # Arguments
1545 ///
1546 /// * `link_ch` - Target channel index (0-7) to link to
1547 ///
1548 /// # Safety
1549 ///
1550 /// The channel must be properly configured before setting up linking.
1551 pub unsafe fn set_major_link(&self, link_ch: usize) {
1552 let t = self.tcd();
1553 t.tcd_csr()
1554 .modify(|_, w| w.majorelink().enable().majorlinkch().bits(link_ch as u8));
1555 }
1556
1557 /// Disable Major Loop Linking.
1558 ///
1559 /// Removes any major loop channel linking previously configured.
1560 ///
1561 /// # Safety
1562 ///
1563 /// The caller must ensure this doesn't disrupt an active transfer that
1564 /// depends on the linking.
1565 pub unsafe fn clear_major_link(&self) {
1566 let t = self.tcd();
1567 t.tcd_csr().modify(|_, w| w.majorelink().disable());
1568 }
1569
1570 /// Enable Minor Loop Linking.
1571 ///
1572 /// After each minor loop, the hardware will trigger a service request
1573 /// on `link_ch`.
1574 ///
1575 /// # Arguments
1576 ///
1577 /// * `link_ch` - Target channel index (0-7) to link to
1578 ///
1579 /// # Note
1580 ///
1581 /// This rewrites CITER and BITER registers to the ELINKYES format.
1582 /// It preserves the current loop count.
1583 ///
1584 /// # Safety
1585 ///
1586 /// The channel must be properly configured before setting up linking.
1587 pub unsafe fn set_minor_link(&self, link_ch: usize) {
1588 let t = self.tcd();
1589
1590 // Read current CITER (assuming ELINKNO format initially)
1591 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1592 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1593
1594 // Write back using ELINKYES format
1595 t.tcd_citer_elinkyes().write(|w| {
1596 w.citer()
1597 .bits(current_citer)
1598 .elink()
1599 .enable()
1600 .linkch()
1601 .bits(link_ch as u8)
1602 });
1603
1604 t.tcd_biter_elinkyes().write(|w| {
1605 w.biter()
1606 .bits(current_biter)
1607 .elink()
1608 .enable()
1609 .linkch()
1610 .bits(link_ch as u8)
1611 });
1612 }
1613
1614 /// Disable Minor Loop Linking.
1615 ///
1616 /// Removes any minor loop channel linking previously configured.
1617 /// This rewrites CITER and BITER registers to the ELINKNO format,
1618 /// preserving the current loop count.
1619 ///
1620 /// # Safety
1621 ///
1622 /// The caller must ensure this doesn't disrupt an active transfer that
1623 /// depends on the linking.
1624 pub unsafe fn clear_minor_link(&self) {
1625 let t = self.tcd();
1626
1627 // Read current CITER (could be in either format, but we only need the count)
1628 // Note: In ELINKYES format, citer is 9 bits; in ELINKNO, it's 15 bits.
1629 // We read from ELINKNO which will give us the combined value.
1630 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1631 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1632
1633 // Write back using ELINKNO format (disabling link)
1634 t.tcd_citer_elinkno()
1635 .write(|w| w.citer().bits(current_citer).elink().disable());
1636
1637 t.tcd_biter_elinkno()
1638 .write(|w| w.biter().bits(current_biter).elink().disable());
1639 }
1640
1641 /// Load a TCD from memory into the hardware channel registers.
1642 ///
1643 /// This is useful for scatter/gather and ping-pong transfers where
1644 /// TCDs are prepared in RAM and then loaded into the hardware.
1645 ///
1646 /// # Safety
1647 ///
1648 /// - The TCD must be properly initialized.
1649 /// - The caller must ensure no concurrent access to the same channel.
1650 pub unsafe fn load_tcd(&self, tcd: &Tcd) {
1651 let t = self.tcd();
1652 t.tcd_saddr().write(|w| w.saddr().bits(tcd.saddr));
1653 t.tcd_soff().write(|w| w.soff().bits(tcd.soff as u16));
1654 t.tcd_attr().write(|w| w.bits(tcd.attr));
1655 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(tcd.nbytes));
1656 t.tcd_slast_sda().write(|w| w.slast_sda().bits(tcd.slast as u32));
1657 t.tcd_daddr().write(|w| w.daddr().bits(tcd.daddr));
1658 t.tcd_doff().write(|w| w.doff().bits(tcd.doff as u16));
1659 t.tcd_citer_elinkno().write(|w| w.citer().bits(tcd.citer));
1660 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(tcd.dlast_sga as u32));
1661 t.tcd_csr().write(|w| w.bits(tcd.csr));
1662 t.tcd_biter_elinkno().write(|w| w.biter().bits(tcd.biter));
1663 }
1664}
1665
1666/// In-memory representation of a Transfer Control Descriptor (TCD).
1667///
1668/// This matches the hardware layout (32 bytes).
1669#[repr(C, align(32))]
1670#[derive(Clone, Copy, Debug, Default)]
1671#[cfg_attr(feature = "defmt", derive(defmt::Format))]
1672pub struct Tcd {
1673 pub saddr: u32,
1674 pub soff: i16,
1675 pub attr: u16,
1676 pub nbytes: u32,
1677 pub slast: i32,
1678 pub daddr: u32,
1679 pub doff: i16,
1680 pub citer: u16,
1681 pub dlast_sga: i32,
1682 pub csr: u16,
1683 pub biter: u16,
1684}
1685
1686struct State {
1687 /// Waker for transfer complete interrupt
1688 waker: AtomicWaker,
1689 /// Waker for half-transfer interrupt
1690 half_waker: AtomicWaker,
1691}
1692
1693impl State {
1694 const fn new() -> Self {
1695 Self {
1696 waker: AtomicWaker::new(),
1697 half_waker: AtomicWaker::new(),
1698 }
1699 }
1700}
1701
1702static STATES: [State; 8] = [
1703 State::new(),
1704 State::new(),
1705 State::new(),
1706 State::new(),
1707 State::new(),
1708 State::new(),
1709 State::new(),
1710 State::new(),
1711];
1712
1713pub(crate) fn waker(idx: usize) -> &'static AtomicWaker {
1714 &STATES[idx].waker
1715}
1716
1717pub(crate) fn half_waker(idx: usize) -> &'static AtomicWaker {
1718 &STATES[idx].half_waker
1719}
1720
1721// ============================================================================
1722// Async Transfer Future
1723// ============================================================================
1724
1725/// An in-progress DMA transfer.
1726///
1727/// This type implements `Future` and can be `.await`ed to wait for the
1728/// transfer to complete. Dropping the transfer will abort it.
1729#[must_use = "futures do nothing unless you `.await` or poll them"]
1730pub struct Transfer<'a> {
1731 channel: AnyChannel,
1732 _phantom: core::marker::PhantomData<&'a ()>,
1733}
1734
1735impl<'a> Transfer<'a> {
1736 /// Create a new transfer for the given channel.
1737 ///
1738 /// The caller must have already configured and started the DMA channel.
1739 pub(crate) fn new(channel: AnyChannel) -> Self {
1740 Self {
1741 channel,
1742 _phantom: core::marker::PhantomData,
1743 }
1744 }
1745
1746 /// Check if the transfer is still running.
1747 pub fn is_running(&self) -> bool {
1748 !self.channel.is_done()
1749 }
1750
1751 /// Get the remaining transfer count.
1752 pub fn remaining(&self) -> u16 {
1753 let t = self.channel.tcd();
1754 t.tcd_citer_elinkno().read().citer().bits()
1755 }
1756
1757 /// Block until the transfer completes.
1758 pub fn blocking_wait(self) {
1759 while self.is_running() {
1760 core::hint::spin_loop();
1761 }
1762
1763 // Ensure all DMA writes are visible
1764 fence(Ordering::SeqCst);
1765
1766 // Don't run drop (which would abort)
1767 core::mem::forget(self);
1768 }
1769
1770 /// Wait for the half-transfer interrupt asynchronously.
1771 ///
1772 /// This is useful for double-buffering scenarios where you want to process
1773 /// the first half of the buffer while the second half is being filled.
1774 ///
1775 /// Returns `true` if the half-transfer occurred, `false` if the transfer
1776 /// completed before the half-transfer interrupt.
1777 ///
1778 /// # Note
1779 ///
1780 /// The transfer must be configured with `TransferOptions::half_transfer_interrupt = true`
1781 /// for this method to work correctly.
1782 pub async fn wait_half(&mut self) -> Result<bool, TransferErrorRaw> {
1783 use core::future::poll_fn;
1784
1785 poll_fn(|cx| {
1786 let state = &STATES[self.channel.index];
1787
1788 // Register the half-transfer waker
1789 state.half_waker.register(cx.waker());
1790
1791 // Check if there's an error
1792 let t = self.channel.tcd();
1793 let es = t.ch_es().read();
1794 if es.err().is_error() {
1795 // Currently, all error fields are in the lowest 8 bits, as-casting truncates
1796 let errs = es.bits() as u8;
1797 return Poll::Ready(Err(TransferErrorRaw(errs)));
1798 }
1799
1800 // Check if we're past the half-way point
1801 let biter = t.tcd_biter_elinkno().read().biter().bits();
1802 let citer = t.tcd_citer_elinkno().read().citer().bits();
1803 let half_point = biter / 2;
1804
1805 if self.channel.is_done() {
1806 // Transfer completed before half-transfer
1807 Poll::Ready(Ok(false))
1808 } else if citer <= half_point {
1809 // We're past the half-way point
1810 fence(Ordering::SeqCst);
1811 Poll::Ready(Ok(true))
1812 } else {
1813 Poll::Pending
1814 }
1815 })
1816 .await
1817 }
1818
1819 /// Abort the transfer.
1820 fn abort(&mut self) {
1821 let t = self.channel.tcd();
1822
1823 // Disable channel requests
1824 t.ch_csr().modify(|_, w| w.erq().disable());
1825
1826 // Clear any pending interrupt
1827 t.ch_int().write(|w| w.int().clear_bit_by_one());
1828
1829 // Clear DONE flag
1830 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
1831
1832 fence(Ordering::SeqCst);
1833 }
1834}
1835
1836/// Raw transfer error bits. Can be queried or all errors can be iterated over
1837#[cfg_attr(feature = "defmt", derive(defmt::Format))]
1838#[derive(Copy, Clone, Debug)]
1839pub struct TransferErrorRaw(u8);
1840
1841#[cfg_attr(feature = "defmt", derive(defmt::Format))]
1842#[derive(Copy, Clone, Debug)]
1843pub struct TransferErrorRawIter(u8);
1844
1845impl TransferErrorRaw {
1846 const MAP: &[(u8, TransferError)] = &[
1847 (1 << 0, TransferError::DestinationBus),
1848 (1 << 1, TransferError::SourceBus),
1849 (1 << 2, TransferError::ScatterGatherConfiguration),
1850 (1 << 3, TransferError::NbytesCiterConfiguration),
1851 (1 << 4, TransferError::DestinationOffset),
1852 (1 << 5, TransferError::DestinationAddress),
1853 (1 << 6, TransferError::SourceOffset),
1854 (1 << 7, TransferError::SourceAddress),
1855 ];
1856
1857 /// Convert to an iterator of contained errors
1858 pub fn err_iter(self) -> TransferErrorRawIter {
1859 TransferErrorRawIter(self.0)
1860 }
1861
1862 /// Destination Bus Error
1863 #[inline]
1864 pub fn has_destination_bus_err(&self) -> bool {
1865 (self.0 & (1 << 0)) != 0
1866 }
1867
1868 /// Source Bus Error
1869 #[inline]
1870 pub fn has_source_bus_err(&self) -> bool {
1871 (self.0 & (1 << 1)) != 0
1872 }
1873
1874 /// Indicates that `TCDn_DLAST_SGA` is not on a 32-byte boundary. This field is
1875 /// checked at the beginning of a scatter/gather operation after major loop completion
1876 /// if `TCDn_CSR[ESG]` is enabled.
1877 #[inline]
1878 pub fn has_scatter_gather_configuration_err(&self) -> bool {
1879 (self.0 & (1 << 2)) != 0
1880 }
1881
1882 /// This error indicates that one of the following has occurred:
1883 ///
1884 /// * `TCDn_NBYTES` is not a multiple of `TCDn_ATTR[SSIZE]` and `TCDn_ATTR[DSIZE]`
1885 /// * `TCDn_CITER[CITER]` is equal to zero
1886 /// * `TCDn_CITER[ELINK]` is not equal to `TCDn_BITER[ELINK]`
1887 #[inline]
1888 pub fn has_nbytes_citer_configuration_err(&self) -> bool {
1889 (self.0 & (1 << 3)) != 0
1890 }
1891
1892 /// `TCDn_DOFF` is inconsistent with `TCDn_ATTR[DSIZE]`.
1893 #[inline]
1894 pub fn has_destination_offset_err(&self) -> bool {
1895 (self.0 & (1 << 4)) != 0
1896 }
1897
1898 /// `TCDn_DADDR` is inconsistent with `TCDn_ATTR[DSIZE]`.
1899 #[inline]
1900 pub fn has_destination_address_err(&self) -> bool {
1901 (self.0 & (1 << 5)) != 0
1902 }
1903
1904 /// `TCDn_SOFF` is inconsistent with `TCDn_ATTR[SSIZE]`.
1905 #[inline]
1906 pub fn has_source_offset_err(&self) -> bool {
1907 (self.0 & (1 << 6)) != 0
1908 }
1909
1910 /// `TCDn_SADDR` is inconsistent with `TCDn_ATTR[SSIZE]`
1911 #[inline]
1912 pub fn has_source_address_err(&self) -> bool {
1913 (self.0 & (1 << 7)) != 0
1914 }
1915}
1916
1917impl Iterator for TransferErrorRawIter {
1918 type Item = TransferError;
1919
1920 fn next(&mut self) -> Option<Self::Item> {
1921 if self.0 == 0 {
1922 return None;
1923 }
1924
1925 for (mask, var) in TransferErrorRaw::MAP {
1926 // If the bit is set...
1927 if self.0 | mask != 0 {
1928 // clear the bit
1929 self.0 &= !mask;
1930 // and return the answer
1931 return Some(*var);
1932 }
1933 }
1934
1935 // Shouldn't happen, but oh well.
1936 None
1937 }
1938}
1939
/// A single eDMA transfer error condition, decoded from the channel
/// error-status register via [`TransferErrorRaw`].
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TransferError {
    /// `TCDn_SADDR` is inconsistent with `TCDn_ATTR[SSIZE]`
    SourceAddress,
    /// `TCDn_SOFF` is inconsistent with `TCDn_ATTR[SSIZE]`.
    SourceOffset,
    /// `TCDn_DADDR` is inconsistent with `TCDn_ATTR[DSIZE]`.
    DestinationAddress,
    /// `TCDn_DOFF` is inconsistent with `TCDn_ATTR[DSIZE]`.
    DestinationOffset,
    /// This error indicates that one of the following has occurred:
    ///
    /// * `TCDn_NBYTES` is not a multiple of `TCDn_ATTR[SSIZE]` and `TCDn_ATTR[DSIZE]`
    /// * `TCDn_CITER[CITER]` is equal to zero
    /// * `TCDn_CITER[ELINK]` is not equal to `TCDn_BITER[ELINK]`
    NbytesCiterConfiguration,
    /// Indicates that `TCDn_DLAST_SGA` is not on a 32-byte boundary. This field is
    /// checked at the beginning of a scatter/gather operation after major loop completion
    /// if `TCDn_CSR[ESG]` is enabled.
    ScatterGatherConfiguration,
    /// Source Bus Error
    SourceBus,
    /// Destination Bus Error
    DestinationBus,
}
1966
// NOTE(review): `Transfer` is assumed to hold no self-referential state (only
// a handle to the DMA channel), which makes moving it between polls sound —
// confirm against the `Transfer` definition before relying on this impl.
impl<'a> Unpin for Transfer<'a> {}
1968
1969impl<'a> Future for Transfer<'a> {
1970 type Output = Result<(), TransferErrorRaw>;
1971
1972 fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
1973 let state = &STATES[self.channel.index];
1974
1975 // Register waker first
1976 state.waker.register(cx.waker());
1977
1978 let done = self.channel.is_done();
1979
1980 if done {
1981 // Ensure all DMA writes are visible before returning
1982 fence(Ordering::SeqCst);
1983
1984 let es = self.channel.tcd().ch_es().read();
1985 if es.err().is_error() {
1986 // Currently, all error fields are in the lowest 8 bits, as-casting truncates
1987 let errs = es.bits() as u8;
1988 Poll::Ready(Err(TransferErrorRaw(errs)))
1989 } else {
1990 Poll::Ready(Ok(()))
1991 }
1992 } else {
1993 Poll::Pending
1994 }
1995 }
1996}
1997
1998impl<'a> Drop for Transfer<'a> {
1999 fn drop(&mut self) {
2000 // Only abort if the transfer is still running
2001 // If already complete, no need to abort
2002 if self.is_running() {
2003 self.abort();
2004
2005 // Wait for abort to complete
2006 while self.is_running() {
2007 core::hint::spin_loop();
2008 }
2009 }
2010
2011 fence(Ordering::SeqCst);
2012 }
2013}
2014
2015// ============================================================================
2016// Ring Buffer for Circular DMA
2017// ============================================================================
2018
/// A ring buffer for continuous DMA reception.
///
/// This structure manages a circular DMA transfer, allowing continuous
/// reception of data without losing bytes between reads. It uses both
/// half-transfer and complete-transfer interrupts to track available data.
///
/// The DMA engine is the producer (it advances `TCD_DADDR` through the
/// buffer and wraps via `DLAST_SGA`); the application is the consumer and
/// advances `read_pos`.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, RingBuffer, TransferOptions};
///
/// static mut RX_BUF: [u8; 64] = [0; 64];
///
/// let dma_ch = DmaChannel::new(p.DMA_CH0);
/// let ring_buf = unsafe {
///     dma_ch.setup_circular_read(
///         uart_rx_addr,
///         &mut RX_BUF,
///     )
/// };
///
/// // Read data as it arrives
/// let mut buf = [0u8; 16];
/// let n = ring_buf.read(&mut buf).await?;
/// ```
pub struct RingBuffer<'a, W: Word> {
    // Handle to the channel driving this circular transfer.
    channel: AnyChannel,
    /// Buffer pointer. We use NonNull instead of &mut because DMA acts like
    /// a separate thread writing to this buffer, and &mut claims exclusive
    /// access which the compiler could optimize incorrectly.
    buf: NonNull<[W]>,
    /// Buffer length cached for convenience
    buf_len: usize,
    /// Read position in the buffer (consumer side)
    read_pos: AtomicUsize,
    /// Phantom data to tie the lifetime to the original buffer
    _lt: PhantomData<&'a mut [W]>,
}
2057
impl<'a, W: Word> RingBuffer<'a, W> {
    /// Create a new ring buffer for the given channel and buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure:
    /// - The DMA channel has been configured for circular transfer
    /// - The buffer remains valid for the lifetime of the ring buffer
    /// - Only one RingBuffer exists per DMA channel at a time
    pub(crate) unsafe fn new(channel: AnyChannel, buf: &'a mut [W]) -> Self {
        let buf_len = buf.len();
        Self {
            channel,
            buf: NonNull::from(buf),
            buf_len,
            read_pos: AtomicUsize::new(0),
            _lt: PhantomData,
        }
    }

    /// Get a slice reference to the buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that DMA is not actively writing to the
    /// portion of the buffer being accessed, or that the access is
    /// appropriately synchronized.
    #[inline]
    unsafe fn buf_slice(&self) -> &[W] {
        self.buf.as_ref()
    }

    /// Get the current DMA write position in the buffer.
    ///
    /// This reads the current destination address from the DMA controller
    /// and calculates the buffer offset.
    fn dma_write_pos(&self) -> usize {
        let t = self.channel.tcd();
        let daddr = t.tcd_daddr().read().daddr().bits() as usize;
        let buf_start = self.buf.as_ptr() as *const W as usize;

        // Calculate offset from buffer start (in elements, not bytes)
        let offset = daddr.wrapping_sub(buf_start) / core::mem::size_of::<W>();

        // Ensure we're within bounds (DMA wraps around)
        offset % self.buf_len
    }

    /// Returns the number of elements available to read.
    ///
    /// NOTE(review): when the DMA write position has caught up to the read
    /// position exactly, this reports 0 — a completely full buffer is
    /// indistinguishable from an empty one. `is_overrun()` compensates by
    /// declaring overrun one element early.
    pub fn available(&self) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Distance from consumer to producer, accounting for wrap-around.
        if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        }
    }

    /// Check if the buffer has overrun (data was lost).
    ///
    /// This happens when DMA writes faster than the application reads.
    pub fn is_overrun(&self) -> bool {
        // In a true overrun, the DMA would have wrapped around and caught up
        // to our read position. We can detect this by checking if available()
        // equals the full buffer size (minus 1 to distinguish from empty).
        self.available() >= self.buf_len - 1
    }

    /// Read data from the ring buffer into the provided slice.
    ///
    /// Returns the number of elements read, which may be less than
    /// `dst.len()` if not enough data is available.
    ///
    /// This method does not block; use [`RingBuffer::read`] for async waiting.
    pub fn read_immediate(&self, dst: &mut [W]) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Calculate available elements (same wrap logic as `available()`,
        // recomputed here so both cursors come from a single snapshot)
        let available = if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        };

        let to_read = dst.len().min(available);
        if to_read == 0 {
            return 0;
        }

        // Safety: We only read from portions of the buffer that DMA has
        // already written to (between read_pos and write_pos).
        let buf = unsafe { self.buf_slice() };

        // Read data, handling wrap-around: first the run up to the physical
        // end of the buffer, then (if needed) the remainder from the start.
        let first_chunk = (self.buf_len - read_pos).min(to_read);
        dst[..first_chunk].copy_from_slice(&buf[read_pos..read_pos + first_chunk]);

        if to_read > first_chunk {
            let second_chunk = to_read - first_chunk;
            dst[first_chunk..to_read].copy_from_slice(&buf[..second_chunk]);
        }

        // Update read position (Release pairs with the Acquire loads above)
        let new_read_pos = (read_pos + to_read) % self.buf_len;
        self.read_pos.store(new_read_pos, Ordering::Release);

        to_read
    }

    /// Read data from the ring buffer asynchronously.
    ///
    /// This waits until at least one element is available, then reads as much
    /// as possible into the destination buffer.
    ///
    /// Returns the number of elements read, or [`Error::Overrun`] when the
    /// DMA producer has lapped the consumer.
    pub async fn read(&self, dst: &mut [W]) -> Result<usize, Error> {
        use core::future::poll_fn;

        if dst.is_empty() {
            return Ok(0);
        }

        poll_fn(|cx| {
            // Check for overrun
            if self.is_overrun() {
                return Poll::Ready(Err(Error::Overrun));
            }

            // Try to read immediately
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            // Register wakers for both half and complete interrupts
            let state = &STATES[self.channel.index()];
            state.waker.register(cx.waker());
            state.half_waker.register(cx.waker());

            // Check again after registering waker (avoid race): data that
            // arrived between the first check and registration would otherwise
            // go unnoticed until the next interrupt fires.
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            Poll::Pending
        })
        .await
    }

    /// Clear the ring buffer, discarding all unread data.
    pub fn clear(&self) {
        // Jump the consumer cursor to the producer's current position.
        let write_pos = self.dma_write_pos();
        self.read_pos.store(write_pos, Ordering::Release);
    }

    /// Stop the DMA transfer and consume the ring buffer.
    ///
    /// Returns any remaining unread data count.
    ///
    /// NOTE(review): `drop(self)` runs `Drop`, which calls `teardown()` a
    /// second time. The repeated register writes look idempotent, but confirm
    /// the double teardown is intentional.
    pub fn stop(mut self) -> usize {
        let res = self.teardown();
        drop(self);
        res
    }

    /// Stop the DMA transfer. Intended to be called by `stop()` or `Drop`.
    fn teardown(&mut self) -> usize {
        // Snapshot the unread count before quiescing the channel.
        let available = self.available();

        // Disable the channel
        let t = self.channel.tcd();
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear flags
        t.ch_int().write(|w| w.int().clear_bit_by_one());
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);

        available
    }
}
2243
2244impl<'a, W: Word> Drop for RingBuffer<'a, W> {
2245 fn drop(&mut self) {
2246 self.teardown();
2247 }
2248}
2249
impl<C: Channel> DmaChannel<C> {
    /// Set up a circular DMA transfer for continuous peripheral-to-memory reception.
    ///
    /// This configures the DMA channel for circular operation with both half-transfer
    /// and complete-transfer interrupts enabled. The transfer runs continuously until
    /// stopped via [`RingBuffer::stop()`].
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address to read from
    /// * `buf` - Destination buffer (should be power-of-2 size for best efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to read received data.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements (the major
    /// loop counter limit enforced below).
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - The peripheral address must be valid for reads.
    /// - The peripheral's DMA request must be configured to trigger this channel.
    pub unsafe fn setup_circular_read<'a, W: Word>(&self, peri_addr: *const W, buf: &'a mut [W]) -> RingBuffer<'a, W> {
        assert!(!buf.is_empty());
        // Major loop count is written to the ELINKNO CITER/BITER fields below,
        // which hold at most 0x7fff.
        assert!(buf.len() <= 0x7fff);
        // For circular mode, buffer size should ideally be power of 2
        // but we don't enforce it

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        Self::reset_channel_state(t);

        // Source: peripheral register, fixed
        Self::set_source_ptr(t, peri_addr);
        Self::set_source_fixed(t);

        // Destination: memory buffer, incrementing
        Self::set_dest_ptr(t, buf.as_mut_ptr());
        Self::set_dest_increment(t, size);

        // Transfer attributes
        Self::set_even_transfer_size(t, size);

        // Minor loop: transfer one word per request
        Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);

        // Major loop count = buffer size
        let count = buf.len() as u16;
        Self::set_major_loop_ct_elinkno(t, count);

        // After major loop: reset destination to buffer start (circular).
        // SLAST is zero because the (fixed) source register never moves;
        // DLAST_SGA rewinds the destination by the full buffer size in bytes.
        let buf_bytes = (buf.len() * byte_size) as i32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); // Source doesn't change
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits((-buf_bytes) as u32));

        // Control/status: enable both half and complete interrupts, NO DREQ (continuous)
        t.tcd_csr().write(|w| {
            w.intmajor()
                .enable()
                .inthalf()
                .enable()
                .dreq()
                .channel_not_affected() // Don't clear ERQ on complete (circular)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Commit the whole TCD before the request line is enabled.
        cortex_m::asm::dsb();

        // Enable the channel request
        t.ch_csr().modify(|_, w| w.erq().enable());

        // Enable NVIC interrupt for this channel so async wakeups work
        self.enable_interrupt();

        RingBuffer::new(self.as_any(), buf)
    }
}
2339
2340// ============================================================================
2341// Scatter-Gather Builder
2342// ============================================================================
2343
/// Maximum number of TCDs in a scatter-gather chain.
///
/// This bounds the inline TCD pool inside [`ScatterGatherBuilder`];
/// [`ScatterGatherBuilder::add_transfer`] panics once this many segments
/// have been added.
pub const MAX_SCATTER_GATHER_TCDS: usize = 16;
2346
/// A builder for constructing scatter-gather DMA transfer chains.
///
/// This provides a type-safe way to build TCD chains for scatter-gather
/// transfers without manual TCD manipulation.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
///
/// let mut builder = ScatterGatherBuilder::<u32>::new();
///
/// // Add transfer segments
/// builder.add_transfer(&src1, &mut dst1);
/// builder.add_transfer(&src2, &mut dst2);
/// builder.add_transfer(&src3, &mut dst3);
///
/// // Build and execute
/// let transfer = unsafe { builder.build(&dma_ch).unwrap() };
/// transfer.await;
/// ```
pub struct ScatterGatherBuilder<'a, W: Word> {
    /// TCD pool (must be 32-byte aligned)
    ///
    /// NOTE(review): nothing here forces 32-byte alignment of the array;
    /// this relies on the alignment declared on the `Tcd` type itself —
    /// verify `Tcd` carries `#[repr(align(32))]` (or equivalent), since a
    /// misaligned `DLAST_SGA` raises a scatter-gather configuration error.
    tcds: [Tcd; MAX_SCATTER_GATHER_TCDS],
    /// Number of TCDs configured
    count: usize,
    /// Phantom marker for word type
    _phantom: core::marker::PhantomData<W>,

    // Ties the builder to the lifetime of the buffers passed to `add_transfer`.
    _plt: core::marker::PhantomData<&'a mut W>,
}
2378
impl<'a, W: Word> ScatterGatherBuilder<'a, W> {
    /// Create a new scatter-gather builder with no segments.
    pub fn new() -> Self {
        ScatterGatherBuilder {
            tcds: [Tcd::default(); MAX_SCATTER_GATHER_TCDS],
            count: 0,
            _phantom: core::marker::PhantomData,
            _plt: core::marker::PhantomData,
        }
    }

    /// Add a memory-to-memory transfer segment to the chain.
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer for this segment
    /// * `dst` - Destination buffer for this segment (at least as long as `src`)
    ///
    /// # Panics
    ///
    /// Panics if the maximum number of segments (16) is exceeded, if `src`
    /// is empty, or if `dst` is shorter than `src`.
    pub fn add_transfer<'b: 'a>(&mut self, src: &'b [W], dst: &'b mut [W]) -> &mut Self {
        assert!(self.count < MAX_SCATTER_GATHER_TCDS, "Too many scatter-gather segments");
        assert!(!src.is_empty());
        assert!(dst.len() >= src.len());

        let size = W::size();
        let byte_size = size.bytes();
        let hw_size = size.to_hw_size();
        let nbytes = (src.len() * byte_size) as u32;

        // Build the TCD for this segment: a single major-loop iteration
        // (citer == biter == 1) that moves the whole segment as one minor loop.
        self.tcds[self.count] = Tcd {
            saddr: src.as_ptr() as u32,
            soff: byte_size as i16,
            attr: ((hw_size as u16) << 8) | (hw_size as u16), // SSIZE | DSIZE
            nbytes,
            slast: 0,
            daddr: dst.as_mut_ptr() as u32,
            doff: byte_size as i16,
            citer: 1,
            dlast_sga: 0, // Will be filled in by build()
            csr: 0x0002, // INTMAJOR only (ESG will be set for non-last TCDs)
            biter: 1,
        };

        self.count += 1;
        self
    }

    /// Get the number of transfer segments added.
    pub fn segment_count(&self) -> usize {
        self.count
    }

    /// Build the scatter-gather chain and start the transfer.
    ///
    /// # Arguments
    ///
    /// * `channel` - The DMA channel to use for the transfer
    ///
    /// # Returns
    ///
    /// A `Transfer` future that completes when the entire chain has executed,
    /// or [`Error::Configuration`] when no segments were added.
    ///
    /// NOTE(review): the hardware is handed raw pointers into `self.tcds`
    /// (via `dlast_sga`), but the returned `Transfer<'a>` does not borrow the
    /// builder — moving or dropping the builder while the chain is still
    /// executing would leave the engine loading stale TCDs. Confirm callers
    /// keep the builder alive and in place for the duration of the transfer,
    /// or consider tying the borrow of `self` to the returned transfer.
    pub fn build<C: Channel>(&mut self, channel: &DmaChannel<C>) -> Result<Transfer<'a>, Error> {
        if self.count == 0 {
            return Err(Error::Configuration);
        }

        // Link TCDs together
        //
        // CSR bit definitions:
        // - START = bit 0 = 0x0001 (triggers transfer when set)
        // - INTMAJOR = bit 1 = 0x0002 (interrupt on major loop complete)
        // - ESG = bit 4 = 0x0010 (enable scatter-gather, loads next TCD on complete)
        //
        // When hardware loads a TCD via scatter-gather (ESG), it copies the TCD's
        // CSR directly into the hardware register. If START is not set in that CSR,
        // the hardware will NOT auto-execute the loaded TCD.
        //
        // Strategy:
        // - First TCD: ESG | INTMAJOR (no START - we add it manually after loading)
        // - Middle TCDs: ESG | INTMAJOR | START (auto-execute when loaded via S/G)
        // - Last TCD: INTMAJOR | START (auto-execute, no further linking)
        for i in 0..self.count {
            let is_first = i == 0;
            let is_last = i == self.count - 1;

            if is_first {
                if is_last {
                    // Only one TCD - no ESG, no START (we add START manually)
                    self.tcds[i].dlast_sga = 0;
                    self.tcds[i].csr = 0x0002; // INTMAJOR only
                } else {
                    // First of multiple - ESG to link, no START (we add START manually)
                    self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                    self.tcds[i].csr = 0x0012; // ESG | INTMAJOR
                }
            } else if is_last {
                // Last TCD (not first) - no ESG, but START so it auto-executes
                self.tcds[i].dlast_sga = 0;
                self.tcds[i].csr = 0x0003; // INTMAJOR | START
            } else {
                // Middle TCD - ESG to link, and START so it auto-executes
                self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                self.tcds[i].csr = 0x0013; // ESG | INTMAJOR | START
            }
        }

        let t = channel.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        // This ensures the channel is in a clean state before loading the TCD
        DmaChannel::<C>::reset_channel_state(t);

        // Memory barrier to ensure channel state is reset before loading TCD
        cortex_m::asm::dsb();

        // Load first TCD into hardware
        unsafe {
            channel.load_tcd(&self.tcds[0]);
        }

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Start the transfer
        t.tcd_csr().modify(|_, w| w.start().channel_started());

        Ok(Transfer::new(channel.as_any()))
    }

    /// Reset the builder for reuse.
    pub fn clear(&mut self) {
        self.count = 0;
    }
}
2516
2517impl<W: Word> Default for ScatterGatherBuilder<'_, W> {
2518 fn default() -> Self {
2519 Self::new()
2520 }
2521}
2522
2523/// A completed scatter-gather transfer result.
2524///
2525/// This type is returned after a scatter-gather transfer completes,
2526/// providing access to any error information.
/// A completed scatter-gather transfer result.
///
/// This type is returned after a scatter-gather transfer completes,
/// providing access to any error information.
///
/// NOTE(review): nothing visible in this file constructs this type yet —
/// confirm a producer exists (or is planned) before relying on it.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ScatterGatherResult {
    /// Number of segments successfully transferred
    pub segments_completed: usize,
    /// Error if any occurred
    pub error: Option<Error>,
}
2534
2535// ============================================================================
2536// Interrupt Handler
2537// ============================================================================
2538
2539/// Interrupt handler helper.
2540///
2541/// Call this from your interrupt handler to clear the interrupt flag and wake the waker.
2542/// This handles both half-transfer and complete-transfer interrupts.
2543///
2544/// # Safety
2545/// Must be called from the correct DMA channel interrupt context.
2546pub unsafe fn on_interrupt(ch_index: usize) {
2547 let p = pac::Peripherals::steal();
2548 let edma = &p.edma_0_tcd0;
2549 let t = edma.tcd(ch_index);
2550
2551 // Read TCD CSR to determine interrupt source
2552 let csr = t.tcd_csr().read();
2553
2554 // Check if this is a half-transfer interrupt
2555 // INTHALF is set and we're at or past the half-way point
2556 if csr.inthalf().bit_is_set() {
2557 let biter = t.tcd_biter_elinkno().read().biter().bits();
2558 let citer = t.tcd_citer_elinkno().read().citer().bits();
2559 let half_point = biter / 2;
2560
2561 if citer <= half_point && citer > 0 {
2562 // Half-transfer interrupt - wake half_waker
2563 half_waker(ch_index).wake();
2564 }
2565 }
2566
2567 // Clear INT flag
2568 t.ch_int().write(|w| w.int().clear_bit_by_one());
2569
2570 // If DONE is set, this is a complete-transfer interrupt
2571 // Only wake the full-transfer waker when the transfer is actually complete
2572 if t.ch_csr().read().done().bit_is_set() {
2573 waker(ch_index).wake();
2574 }
2575}
2576
2577// ============================================================================
2578// Type-level Interrupt Handlers
2579// ============================================================================
2580
/// Macro to generate DMA channel interrupt handlers.
///
/// Expands to a `#[interrupt]` vector named `$irq` that forwards to
/// [`on_interrupt`] with the channel index `$ch`.
macro_rules! impl_dma_interrupt_handler {
    ($irq:ident, $ch:expr) => {
        #[interrupt]
        fn $irq() {
            // Safety: this handler only runs for its own channel's interrupt
            // line, which is exactly the context `on_interrupt` requires.
            unsafe {
                on_interrupt($ch);
            }
        }
    };
}

use crate::pac::interrupt;

// One handler per eDMA channel; the index argument selects the TCD instance.
impl_dma_interrupt_handler!(DMA_CH0, 0);
impl_dma_interrupt_handler!(DMA_CH1, 1);
impl_dma_interrupt_handler!(DMA_CH2, 2);
impl_dma_interrupt_handler!(DMA_CH3, 3);
impl_dma_interrupt_handler!(DMA_CH4, 4);
impl_dma_interrupt_handler!(DMA_CH5, 5);
impl_dma_interrupt_handler!(DMA_CH6, 6);
impl_dma_interrupt_handler!(DMA_CH7, 7);
diff --git a/embassy-mcxa/src/interrupt.rs b/embassy-mcxa/src/interrupt.rs
index c1f7e55a0..c960af7a2 100644
--- a/embassy-mcxa/src/interrupt.rs
+++ b/embassy-mcxa/src/interrupt.rs
@@ -10,6 +10,14 @@ mod generated {
10 #[rustfmt::skip] 10 #[rustfmt::skip]
11 embassy_hal_internal::interrupt_mod!( 11 embassy_hal_internal::interrupt_mod!(
12 ADC1, 12 ADC1,
13 DMA_CH0,
14 DMA_CH1,
15 DMA_CH2,
16 DMA_CH3,
17 DMA_CH4,
18 DMA_CH5,
19 DMA_CH6,
20 DMA_CH7,
13 GPIO0, 21 GPIO0,
14 GPIO1, 22 GPIO1,
15 GPIO2, 23 GPIO2,
diff --git a/embassy-mcxa/src/lib.rs b/embassy-mcxa/src/lib.rs
index 64eeb4012..1bbdffa06 100644
--- a/embassy-mcxa/src/lib.rs
+++ b/embassy-mcxa/src/lib.rs
@@ -6,6 +6,7 @@
6// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)] 6// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
7 7
8pub mod clocks; // still provide clock helpers 8pub mod clocks; // still provide clock helpers
9pub mod dma;
9pub mod gpio; 10pub mod gpio;
10pub mod pins; // pin mux helpers 11pub mod pins; // pin mux helpers
11 12
@@ -52,6 +53,14 @@ embassy_hal_internal::peripherals!(
52 53
53 DBGMAILBOX, 54 DBGMAILBOX,
54 DMA0, 55 DMA0,
56 DMA_CH0,
57 DMA_CH1,
58 DMA_CH2,
59 DMA_CH3,
60 DMA_CH4,
61 DMA_CH5,
62 DMA_CH6,
63 DMA_CH7,
55 EDMA0_TCD0, 64 EDMA0_TCD0,
56 EIM0, 65 EIM0,
57 EQDC0, 66 EQDC0,
@@ -364,6 +373,9 @@ pub fn init(cfg: crate::config::Config) -> Peripherals {
364 crate::gpio::init(); 373 crate::gpio::init();
365 } 374 }
366 375
376 // Initialize DMA controller (clock, reset, configuration)
377 crate::dma::init();
378
367 // Initialize embassy-time global driver backed by OSTIMER0 379 // Initialize embassy-time global driver backed by OSTIMER0
368 #[cfg(feature = "time")] 380 #[cfg(feature = "time")]
369 crate::ostimer::time_driver::init(crate::config::Config::default().time_interrupt_priority, 1_000_000); 381 crate::ostimer::time_driver::init(crate::config::Config::default().time_interrupt_priority, 1_000_000);
@@ -389,41 +401,6 @@ pub fn init(cfg: crate::config::Config) -> Peripherals {
389 peripherals 401 peripherals
390} 402}
391 403
392// /// Optional hook called by cortex-m-rt before RAM init.
393// /// We proactively mask and clear all NVIC IRQs to avoid wedges from stale state
394// /// left by soft resets/debug sessions.
395// ///
396// /// NOTE: Manual VTOR setup is required for RAM execution. The cortex-m-rt 'set-vtor'
397// /// feature is incompatible with our setup because it expects __vector_table to be
398// /// defined differently than how our RAM-based linker script arranges it.
399// #[no_mangle]
400// pub unsafe extern "C" fn __pre_init() {
401// // Set the VTOR to point to the interrupt vector table in RAM
402// // This is required since code runs from RAM on this MCU
403// crate::interrupt::vtor_set_ram_vector_base(0x2000_0000 as *const u32);
404
405// // Mask and clear pending for all NVIC lines (0..127) to avoid stale state across runs.
406// let nvic = &*cortex_m::peripheral::NVIC::PTR;
407// for i in 0..4 {
408// // 4 words x 32 = 128 IRQs
409// nvic.icer[i].write(0xFFFF_FFFF);
410// nvic.icpr[i].write(0xFFFF_FFFF);
411// }
412// // Do NOT touch peripheral registers here: clocks may be off and accesses can fault.
413// crate::interrupt::clear_default_handler_snapshot();
414// }
415
416/// Internal helper to dispatch a type-level interrupt handler.
417#[inline(always)]
418#[doc(hidden)]
419pub unsafe fn __handle_interrupt<T, H>()
420where
421 T: crate::interrupt::typelevel::Interrupt,
422 H: crate::interrupt::typelevel::Handler<T>,
423{
424 H::on_interrupt();
425}
426
427/// Macro to bind interrupts to handlers, similar to embassy-imxrt. 404/// Macro to bind interrupts to handlers, similar to embassy-imxrt.
428/// 405///
429/// Example: 406/// Example:
diff --git a/embassy-mcxa/src/lpuart/mod.rs b/embassy-mcxa/src/lpuart/mod.rs
index b8a2d5172..e59ce8140 100644
--- a/embassy-mcxa/src/lpuart/mod.rs
+++ b/embassy-mcxa/src/lpuart/mod.rs
@@ -1,3 +1,4 @@
1use core::future::Future;
1use core::marker::PhantomData; 2use core::marker::PhantomData;
2 3
3use embassy_hal_internal::{Peri, PeripheralType}; 4use embassy_hal_internal::{Peri, PeripheralType};
@@ -15,22 +16,12 @@ use crate::{AnyPin, interrupt, pac};
15pub mod buffered; 16pub mod buffered;
16 17
17// ============================================================================ 18// ============================================================================
18// STUB IMPLEMENTATION 19// DMA INTEGRATION
19// ============================================================================ 20// ============================================================================
20 21
21// Stub implementation for LIB (Peripherals), GPIO, DMA and CLOCK until stable API 22use crate::dma::{
22// Pin and Clock initialization is currently done at the examples level. 23 Channel as DmaChannelTrait, DMA_MAX_TRANSFER_SIZE, DmaChannel, DmaRequest, EnableInterrupt, RingBuffer,
23 24};
24// --- START DMA ---
25mod dma {
26 pub struct Channel<'d> {
27 pub(super) _lifetime: core::marker::PhantomData<&'d ()>,
28 }
29}
30
31use dma::Channel;
32
33// --- END DMA ---
34 25
35// ============================================================================ 26// ============================================================================
36// MISC 27// MISC
@@ -62,10 +53,14 @@ pub struct Info {
62pub trait Instance: SealedInstance + PeripheralType + 'static + Send + Gate<MrccPeriphConfig = LpuartConfig> { 53pub trait Instance: SealedInstance + PeripheralType + 'static + Send + Gate<MrccPeriphConfig = LpuartConfig> {
63 const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance; 54 const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance;
64 type Interrupt: interrupt::typelevel::Interrupt; 55 type Interrupt: interrupt::typelevel::Interrupt;
56 /// Type-safe DMA request source for TX
57 type TxDmaRequest: DmaRequest;
58 /// Type-safe DMA request source for RX
59 type RxDmaRequest: DmaRequest;
65} 60}
66 61
67macro_rules! impl_instance { 62macro_rules! impl_instance {
68 ($($n:expr),*) => { 63 ($($n:expr);* $(;)?) => {
69 $( 64 $(
70 paste!{ 65 paste!{
71 impl SealedInstance for crate::peripherals::[<LPUART $n>] { 66 impl SealedInstance for crate::peripherals::[<LPUART $n>] {
@@ -90,13 +85,23 @@ macro_rules! impl_instance {
90 const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance 85 const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance
91 = crate::clocks::periph_helpers::LpuartInstance::[<Lpuart $n>]; 86 = crate::clocks::periph_helpers::LpuartInstance::[<Lpuart $n>];
92 type Interrupt = crate::interrupt::typelevel::[<LPUART $n>]; 87 type Interrupt = crate::interrupt::typelevel::[<LPUART $n>];
88 type TxDmaRequest = crate::dma::[<Lpuart $n TxRequest>];
89 type RxDmaRequest = crate::dma::[<Lpuart $n RxRequest>];
93 } 90 }
94 } 91 }
95 )* 92 )*
96 }; 93 };
97} 94}
98 95
99impl_instance!(0, 1, 2, 3, 4, 5); 96// DMA request sources are now type-safe via associated types.
97// The request source numbers are defined in src/dma.rs:
98// LPUART0: RX=21, TX=22 -> Lpuart0RxRequest, Lpuart0TxRequest
99// LPUART1: RX=23, TX=24 -> Lpuart1RxRequest, Lpuart1TxRequest
100// LPUART2: RX=25, TX=26 -> Lpuart2RxRequest, Lpuart2TxRequest
101// LPUART3: RX=27, TX=28 -> Lpuart3RxRequest, Lpuart3TxRequest
102// LPUART4: RX=29, TX=30 -> Lpuart4RxRequest, Lpuart4TxRequest
103// LPUART5: RX=31, TX=32 -> Lpuart5RxRequest, Lpuart5TxRequest
104impl_instance!(0; 1; 2; 3; 4; 5);
100 105
101// ============================================================================ 106// ============================================================================
102// INSTANCE HELPER FUNCTIONS 107// INSTANCE HELPER FUNCTIONS
@@ -683,7 +688,6 @@ pub struct LpuartTx<'a, M: Mode> {
683 info: Info, 688 info: Info,
684 _tx_pin: Peri<'a, AnyPin>, 689 _tx_pin: Peri<'a, AnyPin>,
685 _cts_pin: Option<Peri<'a, AnyPin>>, 690 _cts_pin: Option<Peri<'a, AnyPin>>,
686 _tx_dma: Option<Channel<'a>>,
687 mode: PhantomData<(&'a (), M)>, 691 mode: PhantomData<(&'a (), M)>,
688} 692}
689 693
@@ -692,10 +696,37 @@ pub struct LpuartRx<'a, M: Mode> {
692 info: Info, 696 info: Info,
693 _rx_pin: Peri<'a, AnyPin>, 697 _rx_pin: Peri<'a, AnyPin>,
694 _rts_pin: Option<Peri<'a, AnyPin>>, 698 _rts_pin: Option<Peri<'a, AnyPin>>,
695 _rx_dma: Option<Channel<'a>>,
696 mode: PhantomData<(&'a (), M)>, 699 mode: PhantomData<(&'a (), M)>,
697} 700}
698 701
702/// Lpuart TX driver with DMA support.
703pub struct LpuartTxDma<'a, T: Instance, C: DmaChannelTrait> {
704 info: Info,
705 _tx_pin: Peri<'a, AnyPin>,
706 tx_dma: DmaChannel<C>,
707 _instance: core::marker::PhantomData<T>,
708}
709
710/// Lpuart RX driver with DMA support.
711pub struct LpuartRxDma<'a, T: Instance, C: DmaChannelTrait> {
712 info: Info,
713 _rx_pin: Peri<'a, AnyPin>,
714 rx_dma: DmaChannel<C>,
715 _instance: core::marker::PhantomData<T>,
716}
717
718/// Lpuart driver with DMA support for both TX and RX.
719pub struct LpuartDma<'a, T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> {
720 tx: LpuartTxDma<'a, T, TxC>,
721 rx: LpuartRxDma<'a, T, RxC>,
722}
723
724/// Lpuart RX driver with ring-buffered DMA support.
725pub struct LpuartRxRingDma<'peri, 'ring, T: Instance, C: DmaChannelTrait> {
726 _inner: LpuartRxDma<'peri, T, C>,
727 ring: RingBuffer<'ring, u8>,
728}
729
699// ============================================================================ 730// ============================================================================
700// LPUART CORE IMPLEMENTATION 731// LPUART CORE IMPLEMENTATION
701// ============================================================================ 732// ============================================================================
@@ -782,8 +813,8 @@ impl<'a> Lpuart<'a, Blocking> {
782 813
783 Ok(Self { 814 Ok(Self {
784 info: T::info(), 815 info: T::info(),
785 tx: LpuartTx::new_inner(T::info(), tx_pin.into(), None, None), 816 tx: LpuartTx::new_inner(T::info(), tx_pin.into(), None),
786 rx: LpuartRx::new_inner(T::info(), rx_pin.into(), None, None), 817 rx: LpuartRx::new_inner(T::info(), rx_pin.into(), None),
787 }) 818 })
788 } 819 }
789 820
@@ -807,8 +838,8 @@ impl<'a> Lpuart<'a, Blocking> {
807 838
808 Ok(Self { 839 Ok(Self {
809 info: T::info(), 840 info: T::info(),
810 rx: LpuartRx::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into()), None), 841 rx: LpuartRx::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into())),
811 tx: LpuartTx::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into()), None), 842 tx: LpuartTx::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into())),
812 }) 843 })
813 } 844 }
814} 845}
@@ -818,17 +849,11 @@ impl<'a> Lpuart<'a, Blocking> {
818// ---------------------------------------------------------------------------- 849// ----------------------------------------------------------------------------
819 850
820impl<'a, M: Mode> LpuartTx<'a, M> { 851impl<'a, M: Mode> LpuartTx<'a, M> {
821 fn new_inner( 852 fn new_inner(info: Info, tx_pin: Peri<'a, AnyPin>, cts_pin: Option<Peri<'a, AnyPin>>) -> Self {
822 info: Info,
823 tx_pin: Peri<'a, AnyPin>,
824 cts_pin: Option<Peri<'a, AnyPin>>,
825 tx_dma: Option<Channel<'a>>,
826 ) -> Self {
827 Self { 853 Self {
828 info, 854 info,
829 _tx_pin: tx_pin, 855 _tx_pin: tx_pin,
830 _cts_pin: cts_pin, 856 _cts_pin: cts_pin,
831 _tx_dma: tx_dma,
832 mode: PhantomData, 857 mode: PhantomData,
833 } 858 }
834 } 859 }
@@ -847,7 +872,7 @@ impl<'a> LpuartTx<'a, Blocking> {
847 // Initialize the peripheral 872 // Initialize the peripheral
848 Lpuart::<Blocking>::init::<T>(true, false, false, false, config)?; 873 Lpuart::<Blocking>::init::<T>(true, false, false, false, config)?;
849 874
850 Ok(Self::new_inner(T::info(), tx_pin.into(), None, None)) 875 Ok(Self::new_inner(T::info(), tx_pin.into(), None))
851 } 876 }
852 877
853 /// Create a new blocking LPUART transmitter instance with CTS flow control 878 /// Create a new blocking LPUART transmitter instance with CTS flow control
@@ -862,7 +887,7 @@ impl<'a> LpuartTx<'a, Blocking> {
862 887
863 Lpuart::<Blocking>::init::<T>(true, false, true, false, config)?; 888 Lpuart::<Blocking>::init::<T>(true, false, true, false, config)?;
864 889
865 Ok(Self::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into()), None)) 890 Ok(Self::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into())))
866 } 891 }
867 892
868 fn write_byte_internal(&mut self, byte: u8) -> Result<()> { 893 fn write_byte_internal(&mut self, byte: u8) -> Result<()> {
@@ -941,17 +966,11 @@ impl<'a> LpuartTx<'a, Blocking> {
941// ---------------------------------------------------------------------------- 966// ----------------------------------------------------------------------------
942 967
943impl<'a, M: Mode> LpuartRx<'a, M> { 968impl<'a, M: Mode> LpuartRx<'a, M> {
944 fn new_inner( 969 fn new_inner(info: Info, rx_pin: Peri<'a, AnyPin>, rts_pin: Option<Peri<'a, AnyPin>>) -> Self {
945 info: Info,
946 rx_pin: Peri<'a, AnyPin>,
947 rts_pin: Option<Peri<'a, AnyPin>>,
948 rx_dma: Option<Channel<'a>>,
949 ) -> Self {
950 Self { 970 Self {
951 info, 971 info,
952 _rx_pin: rx_pin, 972 _rx_pin: rx_pin,
953 _rts_pin: rts_pin, 973 _rts_pin: rts_pin,
954 _rx_dma: rx_dma,
955 mode: PhantomData, 974 mode: PhantomData,
956 } 975 }
957 } 976 }
@@ -968,7 +987,7 @@ impl<'a> LpuartRx<'a, Blocking> {
968 987
969 Lpuart::<Blocking>::init::<T>(false, true, false, false, config)?; 988 Lpuart::<Blocking>::init::<T>(false, true, false, false, config)?;
970 989
971 Ok(Self::new_inner(T::info(), rx_pin.into(), None, None)) 990 Ok(Self::new_inner(T::info(), rx_pin.into(), None))
972 } 991 }
973 992
974 /// Create a new blocking LPUART Receiver instance with RTS flow control 993 /// Create a new blocking LPUART Receiver instance with RTS flow control
@@ -983,7 +1002,7 @@ impl<'a> LpuartRx<'a, Blocking> {
983 1002
984 Lpuart::<Blocking>::init::<T>(false, true, false, true, config)?; 1003 Lpuart::<Blocking>::init::<T>(false, true, false, true, config)?;
985 1004
986 Ok(Self::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into()), None)) 1005 Ok(Self::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into())))
987 } 1006 }
988 1007
989 fn read_byte_internal(&mut self) -> Result<u8> { 1008 fn read_byte_internal(&mut self) -> Result<u8> {
@@ -1078,10 +1097,476 @@ impl<'a> Lpuart<'a, Blocking> {
1078} 1097}
1079 1098
1080// ============================================================================ 1099// ============================================================================
1081// ASYNC MODE IMPLEMENTATIONS 1100// ASYNC MODE IMPLEMENTATIONS (DMA-based)
1082// ============================================================================ 1101// ============================================================================
1083 1102
1084// TODO: Implement async mode for LPUART 1103/// Guard struct that ensures DMA is stopped if the async future is cancelled.
1104///
1105/// This implements the RAII pattern: if the future is dropped before completion
1106/// (e.g., due to a timeout), the DMA transfer is automatically aborted to prevent
1107/// use-after-free when the buffer goes out of scope.
1108struct TxDmaGuard<'a, C: DmaChannelTrait> {
1109 dma: &'a DmaChannel<C>,
1110 regs: Regs,
1111}
1112
1113impl<'a, C: DmaChannelTrait> TxDmaGuard<'a, C> {
1114 fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self {
1115 Self { dma, regs }
1116 }
1117
1118 /// Complete the transfer normally (don't abort on drop).
1119 fn complete(self) {
1120 // Cleanup
1121 self.regs.baud().modify(|_, w| w.tdmae().disabled());
1122 unsafe {
1123 self.dma.disable_request();
1124 self.dma.clear_done();
1125 }
1126 // Don't run drop since we've cleaned up
1127 core::mem::forget(self);
1128 }
1129}
1130
1131impl<C: DmaChannelTrait> Drop for TxDmaGuard<'_, C> {
1132 fn drop(&mut self) {
1133 // Abort the DMA transfer if still running
1134 unsafe {
1135 self.dma.disable_request();
1136 self.dma.clear_done();
1137 self.dma.clear_interrupt();
1138 }
1139 // Disable UART TX DMA request
1140 self.regs.baud().modify(|_, w| w.tdmae().disabled());
1141 }
1142}
1143
1144/// Guard struct for RX DMA transfers.
1145struct RxDmaGuard<'a, C: DmaChannelTrait> {
1146 dma: &'a DmaChannel<C>,
1147 regs: Regs,
1148}
1149
1150impl<'a, C: DmaChannelTrait> RxDmaGuard<'a, C> {
1151 fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self {
1152 Self { dma, regs }
1153 }
1154
1155 /// Complete the transfer normally (don't abort on drop).
1156 fn complete(self) {
1157 // Ensure DMA writes are visible to CPU
1158 cortex_m::asm::dsb();
1159 // Cleanup
1160 self.regs.baud().modify(|_, w| w.rdmae().disabled());
1161 unsafe {
1162 self.dma.disable_request();
1163 self.dma.clear_done();
1164 }
1165 // Don't run drop since we've cleaned up
1166 core::mem::forget(self);
1167 }
1168}
1169
1170impl<C: DmaChannelTrait> Drop for RxDmaGuard<'_, C> {
1171 fn drop(&mut self) {
1172 // Abort the DMA transfer if still running
1173 unsafe {
1174 self.dma.disable_request();
1175 self.dma.clear_done();
1176 self.dma.clear_interrupt();
1177 }
1178 // Disable UART RX DMA request
1179 self.regs.baud().modify(|_, w| w.rdmae().disabled());
1180 }
1181}
1182
1183impl<'a, T: Instance, C: DmaChannelTrait> LpuartTxDma<'a, T, C> {
1184 /// Create a new LPUART TX driver with DMA support.
1185 pub fn new(
1186 _inner: Peri<'a, T>,
1187 tx_pin: Peri<'a, impl TxPin<T>>,
1188 tx_dma_ch: Peri<'a, C>,
1189 config: Config,
1190 ) -> Result<Self> {
1191 tx_pin.as_tx();
1192 let tx_pin: Peri<'a, AnyPin> = tx_pin.into();
1193
1194 // Initialize LPUART with TX enabled, RX disabled, no flow control
1195 Lpuart::<Async>::init::<T>(true, false, false, false, config)?;
1196
1197 // Enable interrupt
1198 let tx_dma = DmaChannel::new(tx_dma_ch);
1199 tx_dma.enable_interrupt();
1200
1201 Ok(Self {
1202 info: T::info(),
1203 _tx_pin: tx_pin,
1204 tx_dma,
1205 _instance: core::marker::PhantomData,
1206 })
1207 }
1208
1209 /// Write data using DMA.
1210 ///
1211 /// This configures the DMA channel for a memory-to-peripheral transfer
1212 /// and waits for completion asynchronously. Large buffers are automatically
1213 /// split into chunks that fit within the DMA transfer limit.
1214 ///
1215 /// The DMA request source is automatically derived from the LPUART instance type.
1216 ///
1217 /// # Safety
1218 ///
1219 /// If the returned future is dropped before completion (e.g., due to a timeout),
1220 /// the DMA transfer is automatically aborted to prevent use-after-free.
1221 ///
1222 /// # Arguments
1223 /// * `buf` - Data buffer to transmit
1224 pub async fn write_dma(&mut self, buf: &[u8]) -> Result<usize> {
1225 if buf.is_empty() {
1226 return Ok(0);
1227 }
1228
1229 let mut total = 0;
1230 for chunk in buf.chunks(DMA_MAX_TRANSFER_SIZE) {
1231 total += self.write_dma_inner(chunk).await?;
1232 }
1233
1234 Ok(total)
1235 }
1236
1237 /// Internal helper to write a single chunk (max 0x7FFF bytes) using DMA.
1238 async fn write_dma_inner(&mut self, buf: &[u8]) -> Result<usize> {
1239 let len = buf.len();
1240 let peri_addr = self.info.regs.data().as_ptr() as *mut u8;
1241
1242 unsafe {
1243 // Clean up channel state
1244 self.tx_dma.disable_request();
1245 self.tx_dma.clear_done();
1246 self.tx_dma.clear_interrupt();
1247
1248 // Set DMA request source from instance type (type-safe)
1249 self.tx_dma.set_request_source::<T::TxDmaRequest>();
1250
1251 // Configure TCD for memory-to-peripheral transfer
1252 self.tx_dma
1253 .setup_write_to_peripheral(buf, peri_addr, EnableInterrupt::Yes);
1254
1255 // Enable UART TX DMA request
1256 self.info.regs.baud().modify(|_, w| w.tdmae().enabled());
1257
1258 // Enable DMA channel request
1259 self.tx_dma.enable_request();
1260 }
1261
1262 // Create guard that will abort DMA if this future is dropped
1263 let guard = TxDmaGuard::new(&self.tx_dma, self.info.regs);
1264
1265 // Wait for completion asynchronously
1266 core::future::poll_fn(|cx| {
1267 self.tx_dma.waker().register(cx.waker());
1268 if self.tx_dma.is_done() {
1269 core::task::Poll::Ready(())
1270 } else {
1271 core::task::Poll::Pending
1272 }
1273 })
1274 .await;
1275
1276 // Transfer completed successfully - clean up without aborting
1277 guard.complete();
1278
1279 Ok(len)
1280 }
1281
1282 /// Blocking write (fallback when DMA is not needed)
1283 pub fn blocking_write(&mut self, buf: &[u8]) -> Result<()> {
1284 for &byte in buf {
1285 while self.info.regs.stat().read().tdre().is_txdata() {}
1286 self.info.regs.data().modify(|_, w| unsafe { w.bits(u32::from(byte)) });
1287 }
1288 Ok(())
1289 }
1290
1291 /// Flush TX blocking
1292 pub fn blocking_flush(&mut self) -> Result<()> {
1293 while self.info.regs.water().read().txcount().bits() != 0 {}
1294 while self.info.regs.stat().read().tc().is_active() {}
1295 Ok(())
1296 }
1297}
1298
1299impl<'a, T: Instance, C: DmaChannelTrait> LpuartRxDma<'a, T, C> {
1300 /// Create a new LPUART RX driver with DMA support.
1301 pub fn new(
1302 _inner: Peri<'a, T>,
1303 rx_pin: Peri<'a, impl RxPin<T>>,
1304 rx_dma_ch: Peri<'a, C>,
1305 config: Config,
1306 ) -> Result<Self> {
1307 rx_pin.as_rx();
1308 let rx_pin: Peri<'a, AnyPin> = rx_pin.into();
1309
1310 // Initialize LPUART with TX disabled, RX enabled, no flow control
1311 Lpuart::<Async>::init::<T>(false, true, false, false, config)?;
1312
1313 // Enable dma interrupt
1314 let rx_dma = DmaChannel::new(rx_dma_ch);
1315 rx_dma.enable_interrupt();
1316
1317 Ok(Self {
1318 info: T::info(),
1319 _rx_pin: rx_pin,
1320 rx_dma,
1321 _instance: core::marker::PhantomData,
1322 })
1323 }
1324
1325 /// Read data using DMA.
1326 ///
1327 /// This configures the DMA channel for a peripheral-to-memory transfer
1328 /// and waits for completion asynchronously. Large buffers are automatically
1329 /// split into chunks that fit within the DMA transfer limit.
1330 ///
1331 /// The DMA request source is automatically derived from the LPUART instance type.
1332 ///
1333 /// # Safety
1334 ///
1335 /// If the returned future is dropped before completion (e.g., due to a timeout),
1336 /// the DMA transfer is automatically aborted to prevent use-after-free.
1337 ///
1338 /// # Arguments
1339 /// * `buf` - Buffer to receive data into
1340 pub async fn read_dma(&mut self, buf: &mut [u8]) -> Result<usize> {
1341 if buf.is_empty() {
1342 return Ok(0);
1343 }
1344
1345 let mut total = 0;
1346 for chunk in buf.chunks_mut(DMA_MAX_TRANSFER_SIZE) {
1347 total += self.read_dma_inner(chunk).await?;
1348 }
1349
1350 Ok(total)
1351 }
1352
1353 /// Internal helper to read a single chunk (max 0x7FFF bytes) using DMA.
1354 async fn read_dma_inner(&mut self, buf: &mut [u8]) -> Result<usize> {
1355 let len = buf.len();
1356 let peri_addr = self.info.regs.data().as_ptr() as *const u8;
1357
1358 unsafe {
1359 // Clean up channel state
1360 self.rx_dma.disable_request();
1361 self.rx_dma.clear_done();
1362 self.rx_dma.clear_interrupt();
1363
1364 // Set DMA request source from instance type (type-safe)
1365 self.rx_dma.set_request_source::<T::RxDmaRequest>();
1366
1367 // Configure TCD for peripheral-to-memory transfer
1368 self.rx_dma
1369 .setup_read_from_peripheral(peri_addr, buf, EnableInterrupt::Yes);
1370
1371 // Enable UART RX DMA request
1372 self.info.regs.baud().modify(|_, w| w.rdmae().enabled());
1373
1374 // Enable DMA channel request
1375 self.rx_dma.enable_request();
1376 }
1377
1378 // Create guard that will abort DMA if this future is dropped
1379 let guard = RxDmaGuard::new(&self.rx_dma, self.info.regs);
1380
1381 // Wait for completion asynchronously
1382 core::future::poll_fn(|cx| {
1383 self.rx_dma.waker().register(cx.waker());
1384 if self.rx_dma.is_done() {
1385 core::task::Poll::Ready(())
1386 } else {
1387 core::task::Poll::Pending
1388 }
1389 })
1390 .await;
1391
1392 // Transfer completed successfully - clean up without aborting
1393 guard.complete();
1394
1395 Ok(len)
1396 }
1397
1398 /// Blocking read (fallback when DMA is not needed)
1399 pub fn blocking_read(&mut self, buf: &mut [u8]) -> Result<()> {
1400 for byte in buf.iter_mut() {
1401 loop {
1402 if has_data(self.info.regs) {
1403 *byte = (self.info.regs.data().read().bits() & 0xFF) as u8;
1404 break;
1405 }
1406 check_and_clear_rx_errors(self.info.regs)?;
1407 }
1408 }
1409 Ok(())
1410 }
1411
1412 pub fn into_ring_dma_rx<'buf>(self, buf: &'buf mut [u8]) -> LpuartRxRingDma<'a, 'buf, T, C> {
1413 unsafe {
1414 let ring = self.setup_ring_buffer(buf);
1415 self.enable_dma_request();
1416 LpuartRxRingDma { _inner: self, ring }
1417 }
1418 }
1419
1420 /// Set up a ring buffer for continuous DMA reception.
1421 ///
1422 /// This configures the DMA channel for circular operation, enabling continuous
1423 /// reception of data without gaps. The DMA will continuously write received
1424 /// bytes into the buffer, wrapping around when it reaches the end.
1425 ///
1426 /// This method encapsulates all the low-level setup:
1427 /// - Configures the DMA request source for this LPUART instance
1428 /// - Enables the RX DMA request in the LPUART peripheral
1429 /// - Sets up the circular DMA transfer
1430 /// - Enables the NVIC interrupt for async wakeups
1431 ///
1432 /// # Arguments
1433 ///
1434 /// * `buf` - Destination buffer for received data (power-of-2 size is ideal for efficiency)
1435 ///
1436 /// # Returns
1437 ///
1438 /// A [`RingBuffer`] that can be used to asynchronously read received data.
1439 ///
1440 /// # Example
1441 ///
1442 /// ```no_run
1443 /// static mut RX_BUF: [u8; 64] = [0; 64];
1444 ///
1445 /// let rx = LpuartRxDma::new(p.LPUART2, p.P2_3, p.DMA_CH0, config).unwrap();
1446 /// let ring_buf = unsafe { rx.setup_ring_buffer(&mut RX_BUF) };
1447 ///
1448 /// // Read data as it arrives
1449 /// let mut buf = [0u8; 16];
1450 /// let n = ring_buf.read(&mut buf).await.unwrap();
1451 /// ```
1452 ///
1453 /// # Safety
1454 ///
1455 /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
1456 /// - Only one RingBuffer should exist per LPUART RX channel at a time.
1457 /// - The caller must ensure the static buffer is not accessed elsewhere while
1458 /// the ring buffer is active.
1459 unsafe fn setup_ring_buffer<'b>(&self, buf: &'b mut [u8]) -> RingBuffer<'b, u8> {
1460 // Get the peripheral data register address
1461 let peri_addr = self.info.regs.data().as_ptr() as *const u8;
1462
1463 // Configure DMA request source for this LPUART instance (type-safe)
1464 self.rx_dma.set_request_source::<T::RxDmaRequest>();
1465
1466 // Enable RX DMA request in the LPUART peripheral
1467 self.info.regs.baud().modify(|_, w| w.rdmae().enabled());
1468
1469 // Set up circular DMA transfer (this also enables NVIC interrupt)
1470 self.rx_dma.setup_circular_read(peri_addr, buf)
1471 }
1472
1473 /// Enable the DMA channel request.
1474 ///
1475 /// Call this after `setup_ring_buffer()` to start continuous reception.
1476 /// This is separated from setup to allow for any additional configuration
1477 /// before starting the transfer.
1478 unsafe fn enable_dma_request(&self) {
1479 self.rx_dma.enable_request();
1480 }
1481}
1482
1483impl<'peri, 'buf, T: Instance, C: DmaChannelTrait> LpuartRxRingDma<'peri, 'buf, T, C> {
1484 /// Read from the ring buffer
1485 pub fn read<'d>(
1486 &mut self,
1487 dst: &'d mut [u8],
1488 ) -> impl Future<Output = core::result::Result<usize, crate::dma::Error>> + use<'_, 'buf, 'd, T, C> {
1489 self.ring.read(dst)
1490 }
1491
1492 /// Clear the current contents of the ring buffer
1493 pub fn clear(&mut self) {
1494 self.ring.clear();
1495 }
1496}
1497
1498impl<'a, T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> LpuartDma<'a, T, TxC, RxC> {
1499 /// Create a new LPUART driver with DMA support for both TX and RX.
1500 pub fn new(
1501 _inner: Peri<'a, T>,
1502 tx_pin: Peri<'a, impl TxPin<T>>,
1503 rx_pin: Peri<'a, impl RxPin<T>>,
1504 tx_dma_ch: Peri<'a, TxC>,
1505 rx_dma_ch: Peri<'a, RxC>,
1506 config: Config,
1507 ) -> Result<Self> {
1508 tx_pin.as_tx();
1509 rx_pin.as_rx();
1510
1511 let tx_pin: Peri<'a, AnyPin> = tx_pin.into();
1512 let rx_pin: Peri<'a, AnyPin> = rx_pin.into();
1513
1514 // Initialize LPUART with both TX and RX enabled, no flow control
1515 Lpuart::<Async>::init::<T>(true, true, false, false, config)?;
1516
1517 // Enable DMA interrupts
1518 let tx_dma = DmaChannel::new(tx_dma_ch);
1519 let rx_dma = DmaChannel::new(rx_dma_ch);
1520 tx_dma.enable_interrupt();
1521 rx_dma.enable_interrupt();
1522
1523 Ok(Self {
1524 tx: LpuartTxDma {
1525 info: T::info(),
1526 _tx_pin: tx_pin,
1527 tx_dma,
1528 _instance: core::marker::PhantomData,
1529 },
1530 rx: LpuartRxDma {
1531 info: T::info(),
1532 _rx_pin: rx_pin,
1533 rx_dma,
1534 _instance: core::marker::PhantomData,
1535 },
1536 })
1537 }
1538
1539 /// Split into separate TX and RX drivers
1540 pub fn split(self) -> (LpuartTxDma<'a, T, TxC>, LpuartRxDma<'a, T, RxC>) {
1541 (self.tx, self.rx)
1542 }
1543
1544 /// Write data using DMA
1545 pub async fn write_dma(&mut self, buf: &[u8]) -> Result<usize> {
1546 self.tx.write_dma(buf).await
1547 }
1548
1549 /// Read data using DMA
1550 pub async fn read_dma(&mut self, buf: &mut [u8]) -> Result<usize> {
1551 self.rx.read_dma(buf).await
1552 }
1553}
1554
1555// ============================================================================
1556// EMBEDDED-IO-ASYNC TRAIT IMPLEMENTATIONS
1557// ============================================================================
1558
1559impl<T: Instance, C: DmaChannelTrait> embedded_io::ErrorType for LpuartTxDma<'_, T, C> {
1560 type Error = Error;
1561}
1562
1563impl<T: Instance, C: DmaChannelTrait> embedded_io::ErrorType for LpuartRxDma<'_, T, C> {
1564 type Error = Error;
1565}
1566
1567impl<T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> embedded_io::ErrorType for LpuartDma<'_, T, TxC, RxC> {
1568 type Error = Error;
1569}
1085 1570
1086// ============================================================================ 1571// ============================================================================
1087// EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS 1572// EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS
@@ -1221,6 +1706,12 @@ impl embedded_hal_nb::serial::Write for LpuartTx<'_, Blocking> {
1221 } 1706 }
1222} 1707}
1223 1708
1709impl core::fmt::Write for LpuartTx<'_, Blocking> {
1710 fn write_str(&mut self, s: &str) -> core::fmt::Result {
1711 self.blocking_write(s.as_bytes()).map_err(|_| core::fmt::Error)
1712 }
1713}
1714
1224impl embedded_hal_nb::serial::Read for Lpuart<'_, Blocking> { 1715impl embedded_hal_nb::serial::Read for Lpuart<'_, Blocking> {
1225 fn read(&mut self) -> nb::Result<u8, Self::Error> { 1716 fn read(&mut self) -> nb::Result<u8, Self::Error> {
1226 embedded_hal_nb::serial::Read::read(&mut self.rx) 1717 embedded_hal_nb::serial::Read::read(&mut self.rx)
diff --git a/embassy-mcxa/src/pins.rs b/embassy-mcxa/src/pins.rs
index fdf1b0a86..9adbe64c8 100644
--- a/embassy-mcxa/src/pins.rs
+++ b/embassy-mcxa/src/pins.rs
@@ -1,6 +1,11 @@
1//! Pin configuration helpers (separate from peripheral drivers). 1//! Pin configuration helpers (separate from peripheral drivers).
2use crate::pac; 2use crate::pac;
3 3
4/// Configure pins for ADC usage.
5///
6/// # Safety
7///
8/// Must be called after PORT clocks are enabled.
4pub unsafe fn configure_adc_pins() { 9pub unsafe fn configure_adc_pins() {
5 // P1_10 = ADC1_A8 10 // P1_10 = ADC1_A8
6 let port1 = &*pac::Port1::ptr(); 11 let port1 = &*pac::Port1::ptr();
diff --git a/examples/mcxa/Cargo.toml b/examples/mcxa/Cargo.toml
index 19d8d8657..d07cc4272 100644
--- a/examples/mcxa/Cargo.toml
+++ b/examples/mcxa/Cargo.toml
@@ -21,6 +21,7 @@ embassy-time-driver = "0.2.1"
21embedded-io-async = "0.6.1" 21embedded-io-async = "0.6.1"
22heapless = "0.9.2" 22heapless = "0.9.2"
23panic-probe = { version = "1.0", features = ["print-defmt"] } 23panic-probe = { version = "1.0", features = ["print-defmt"] }
24static_cell = "2.1.1"
24tmp108 = "0.4.0" 25tmp108 = "0.4.0"
25 26
26[profile.release] 27[profile.release]
diff --git a/examples/mcxa/src/bin/dma_mem_to_mem.rs b/examples/mcxa/src/bin/dma_mem_to_mem.rs
new file mode 100644
index 000000000..b38baccb5
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_mem_to_mem.rs
@@ -0,0 +1,118 @@
1//! DMA memory-to-memory transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA to copy data between memory buffers
4//! using the Embassy-style async API with type-safe transfers.
5//!
6//! # Embassy-style features demonstrated:
7//! - `TransferOptions` for configuration
8//! - Type-safe `mem_to_mem<u32>()` method with async `.await`
9//! - `Transfer` Future that can be `.await`ed
10//! - `Word` trait for automatic transfer width detection
11//! - `memset()` method for filling memory with a pattern
12
13#![no_std]
14#![no_main]
15
16use embassy_executor::Spawner;
17use embassy_mcxa::clocks::config::Div8;
18use embassy_mcxa::dma::{DmaChannel, TransferOptions};
19use static_cell::ConstStaticCell;
20use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
21
22const BUFFER_LENGTH: usize = 4;
23
24// Buffers in RAM (static mut is automatically placed in .bss/.data)
25static SRC_BUFFER: ConstStaticCell<[u32; BUFFER_LENGTH]> = ConstStaticCell::new([1, 2, 3, 4]);
26static DEST_BUFFER: ConstStaticCell<[u32; BUFFER_LENGTH]> = ConstStaticCell::new([0; BUFFER_LENGTH]);
27static MEMSET_BUFFER: ConstStaticCell<[u32; BUFFER_LENGTH]> = ConstStaticCell::new([0; BUFFER_LENGTH]);
28
29#[embassy_executor::main]
30async fn main(_spawner: Spawner) {
31 // Small delay to allow probe-rs to attach after reset
32 for _ in 0..100_000 {
33 cortex_m::asm::nop();
34 }
35
36 let mut cfg = hal::config::Config::default();
37 cfg.clock_cfg.sirc.fro_12m_enabled = true;
38 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
39 let p = hal::init(cfg);
40
41 defmt::info!("DMA memory-to-memory example starting...");
42
43 defmt::info!("EDMA memory to memory example begin.");
44
45 let src = SRC_BUFFER.take();
46 let dst = DEST_BUFFER.take();
47 let mst = MEMSET_BUFFER.take();
48
49 defmt::info!("Source Buffer: {=[?]}", src.as_slice());
50 defmt::info!("Destination Buffer (before): {=[?]}", dst.as_slice());
51 defmt::info!("Configuring DMA with Embassy-style API...");
52
53 // Create DMA channel
54 let dma_ch0 = DmaChannel::new(p.DMA_CH0);
55
56 // Configure transfer options (Embassy-style)
57 // TransferOptions defaults to: complete_transfer_interrupt = true
58 let options = TransferOptions::default();
59
60 // =========================================================================
61 // Part 1: Embassy-style async API demonstration (mem_to_mem)
62 // =========================================================================
63 //
64 // Use the new type-safe `mem_to_mem<u32>()` method:
65 // - Automatically determines transfer width from buffer element type (u32)
66 // - Returns a `Transfer` future that can be `.await`ed
67 // - Uses TransferOptions for consistent configuration
68 //
69 // Using async `.await` - the executor can run other tasks while waiting!
70
71 // Perform type-safe memory-to-memory transfer using Embassy-style async API
72 // Using async `.await` - the executor can run other tasks while waiting!
73 let transfer = dma_ch0.mem_to_mem(src, dst, options).unwrap();
74 transfer.await.unwrap();
75
76 defmt::info!("DMA mem-to-mem transfer complete!");
77 defmt::info!("Destination Buffer (after): {=[?]}", dst.as_slice());
78
79 // Verify data
80 if src != dst {
81 defmt::error!("FAIL: mem_to_mem mismatch!");
82 } else {
83 defmt::info!("PASS: mem_to_mem verified.");
84 }
85
86 // =========================================================================
87 // Part 2: memset() demonstration
88 // =========================================================================
89 //
90 // The `memset()` method fills a buffer with a pattern value:
91 // - Fixed source address (pattern is read repeatedly)
92 // - Incrementing destination address
93 // - Uses the same Transfer future pattern
94
95 defmt::info!("--- Demonstrating memset() feature ---");
96
97 defmt::info!("Memset Buffer (before): {=[?]}", mst.as_slice());
98
99 // Fill buffer with a pattern value using DMA memset
100 let pattern: u32 = 0xDEADBEEF;
101 defmt::info!("Filling with pattern 0xDEADBEEF...");
102
103 // Using blocking_wait() for demonstration - also shows non-async usage
104 let transfer = dma_ch0.memset(&pattern, mst, options);
105 transfer.blocking_wait();
106
107 defmt::info!("DMA memset complete!");
108 defmt::info!("Memset Buffer (after): {=[?]}", mst.as_slice());
109
110 // Verify memset result
111 if !mst.iter().all(|&v| v == pattern) {
112 defmt::error!("FAIL: memset mismatch!");
113 } else {
114 defmt::info!("PASS: memset verified.");
115 }
116
117 defmt::info!("=== All DMA tests complete ===");
118}
diff --git a/examples/mcxa/src/bin/dma_scatter_gather_builder.rs b/examples/mcxa/src/bin/dma_scatter_gather_builder.rs
new file mode 100644
index 000000000..30ce20c96
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_scatter_gather_builder.rs
@@ -0,0 +1,130 @@
1//! DMA Scatter-Gather Builder example for MCXA276.
2//!
3//! This example demonstrates using the new `ScatterGatherBuilder` API for
4//! chaining multiple DMA transfers with a type-safe builder pattern.
5//!
6//! # Features demonstrated:
7//! - `ScatterGatherBuilder::new()` for creating a builder
8//! - `add_transfer()` for adding memory-to-memory segments
9//! - `build()` to start the chained transfer
10//! - Automatic TCD linking and ESG bit management
11//!
12//! # Comparison with manual scatter-gather:
13//! The manual approach (see `dma_scatter_gather.rs`) requires:
14//! - Manual TCD pool allocation and alignment
15//! - Manual CSR/ESG/INTMAJOR bit manipulation
16//! - Manual dlast_sga address calculations
17//!
18//! The builder approach handles all of this automatically!
19
20#![no_std]
21#![no_main]
22
23use embassy_executor::Spawner;
24use embassy_mcxa::clocks::config::Div8;
25use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
26use static_cell::ConstStaticCell;
27use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
28
29// Source buffers (multiple segments)
30static SRC1: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0x11111111, 0x22222222, 0x33333333, 0x44444444]);
31static SRC2: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0xAAAAAAAA, 0xBBBBBBBB, 0xCCCCCCCC, 0xDDDDDDDD]);
32static SRC3: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0x12345678, 0x9ABCDEF0, 0xFEDCBA98, 0x76543210]);
33
34// Destination buffers (one per segment)
35static DST1: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0; 4]);
36static DST2: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0; 4]);
37static DST3: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0; 4]);
38
39#[embassy_executor::main]
40async fn main(_spawner: Spawner) {
41 // Small delay to allow probe-rs to attach after reset
42 for _ in 0..100_000 {
43 cortex_m::asm::nop();
44 }
45
46 let mut cfg = hal::config::Config::default();
47 cfg.clock_cfg.sirc.fro_12m_enabled = true;
48 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
49 let p = hal::init(cfg);
50
51 defmt::info!("DMA Scatter-Gather Builder example starting...");
52
53 defmt::info!("DMA Scatter-Gather Builder Example");
54 defmt::info!("===================================");
55 let src1 = SRC1.take();
56 let src2 = SRC2.take();
57 let src3 = SRC3.take();
58 let dst1 = DST1.take();
59 let dst2 = DST2.take();
60 let dst3 = DST3.take();
61
62 // Show source buffers
63 defmt::info!("Source buffers:");
64 defmt::info!(" SRC1: {=[?]}", src1.as_slice());
65 defmt::info!(" SRC2: {=[?]}", src2.as_slice());
66 defmt::info!(" SRC3: {=[?]}", src3.as_slice());
67
68 defmt::info!("Destination buffers (before):");
69 defmt::info!(" DST1: {=[?]}", dst1.as_slice());
70 defmt::info!(" DST2: {=[?]}", dst2.as_slice());
71 defmt::info!(" DST3: {=[?]}", dst3.as_slice());
72
73 // Create DMA channel
74 let dma_ch0 = DmaChannel::new(p.DMA_CH0);
75
76 defmt::info!("Building scatter-gather chain with builder API...");
77
78 // =========================================================================
79 // ScatterGatherBuilder API demonstration
80 // =========================================================================
81 //
82 // The builder pattern makes scatter-gather transfers much easier:
83 // 1. Create a builder
84 // 2. Add transfer segments with add_transfer()
85 // 3. Call build() to start the entire chain
86 // No manual TCD manipulation required!
87
88 let mut builder = ScatterGatherBuilder::<u32>::new();
89
90 // Add three transfer segments - the builder handles TCD linking automatically
91 builder.add_transfer(src1, dst1);
92 builder.add_transfer(src2, dst2);
93 builder.add_transfer(src3, dst3);
94
95 defmt::info!("Added 3 transfer segments to chain.");
96 defmt::info!("Starting scatter-gather transfer with .await...");
97
98 // Build and execute the scatter-gather chain
99 // The build() method:
100 // - Links all TCDs together with ESG bit
101 // - Sets INTMAJOR on all TCDs
102 // - Loads the first TCD into hardware
103 // - Returns a Transfer future
104 let transfer = builder.build(&dma_ch0).expect("Failed to build scatter-gather");
105 transfer.blocking_wait();
106
107 defmt::info!("Scatter-gather transfer complete!");
108
109 // Show results
110 defmt::info!("Destination buffers (after):");
111 defmt::info!(" DST1: {=[?]}", dst1.as_slice());
112 defmt::info!(" DST2: {=[?]}", dst2.as_slice());
113 defmt::info!(" DST3: {=[?]}", dst3.as_slice());
114
115 let comps = [(src1, dst1), (src2, dst2), (src3, dst3)];
116
117 // Verify all three segments
118 let mut all_ok = true;
119 for (src, dst) in comps {
120 all_ok &= src == dst;
121 }
122
123 if all_ok {
124 defmt::info!("PASS: All segments verified!");
125 } else {
126 defmt::error!("FAIL: Mismatch detected!");
127 }
128
129 defmt::info!("=== Scatter-Gather Builder example complete ===");
130}
diff --git a/examples/mcxa/src/bin/dma_wrap_transfer.rs b/examples/mcxa/src/bin/dma_wrap_transfer.rs
new file mode 100644
index 000000000..acfd29f08
--- /dev/null
+++ b/examples/mcxa/src/bin/dma_wrap_transfer.rs
@@ -0,0 +1,184 @@
1//! DMA wrap transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA with modulo addressing to wrap around
4//! a source buffer, effectively repeating the source data in the destination.
5//!
6//! # Embassy-style features demonstrated:
7//! - `DmaChannel::is_done()` and `clear_done()` helper methods
8//! - No need to pass register block around
9
10#![no_std]
11#![no_main]
12
13use core::fmt::Write as _;
14
15use embassy_executor::Spawner;
16use embassy_mcxa::clocks::config::Div8;
17use embassy_mcxa::dma::DmaChannel;
18use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
19use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
20
21// Source buffer: 4 words (16 bytes), aligned to 16 bytes for modulo
22#[repr(align(16))]
23struct AlignedSrc([u32; 4]);
24
25static mut SRC: AlignedSrc = AlignedSrc([0; 4]);
26static mut DST: [u32; 8] = [0; 8];
27
28/// Helper to print a buffer to UART
29fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
30 write!(tx, "{:?}", unsafe { core::slice::from_raw_parts(buf_ptr, len) }).ok();
31}
32
33#[embassy_executor::main]
34async fn main(_spawner: Spawner) {
35 // Small delay to allow probe-rs to attach after reset
36 for _ in 0..100_000 {
37 cortex_m::asm::nop();
38 }
39
40 let mut cfg = hal::config::Config::default();
41 cfg.clock_cfg.sirc.fro_12m_enabled = true;
42 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
43 let p = hal::init(cfg);
44
45 defmt::info!("DMA wrap transfer example starting...");
46
47 let config = Config {
48 baudrate_bps: 115_200,
49 ..Default::default()
50 };
51
52 let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
53 let (mut tx, _rx) = lpuart.split();
54
55 tx.blocking_write(b"EDMA wrap transfer example begin.\r\n\r\n").unwrap();
56
57 // Initialize buffers
58 unsafe {
59 SRC.0 = [1, 2, 3, 4];
60 DST = [0; 8];
61 }
62
63 tx.blocking_write(b"Source Buffer: ").unwrap();
64 print_buffer(&mut tx, unsafe { core::ptr::addr_of!(SRC.0) } as *const u32, 4);
65 tx.blocking_write(b"\r\n").unwrap();
66
67 tx.blocking_write(b"Destination Buffer (before): ").unwrap();
68 print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
69 tx.blocking_write(b"\r\n").unwrap();
70
71 tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
72 .unwrap();
73
74 // Create DMA channel using Embassy-style API
75 let dma_ch0 = DmaChannel::new(p.DMA_CH0);
76
77 // Configure wrap transfer using direct TCD access:
78 // SRC is 16 bytes (4 * u32). We want to transfer 32 bytes (8 * u32).
79 // SRC modulo is 16 bytes (2^4 = 16) - wraps source address.
80 // DST modulo is 0 (disabled).
81 // This causes the source address to wrap around after 16 bytes,
82 // effectively repeating the source data.
83 unsafe {
84 let t = dma_ch0.tcd();
85
86 // Reset channel state
87 t.ch_csr().write(|w| {
88 w.erq()
89 .disable()
90 .earq()
91 .disable()
92 .eei()
93 .no_error()
94 .ebw()
95 .disable()
96 .done()
97 .clear_bit_by_one()
98 });
99 t.ch_es().write(|w| w.bits(0));
100 t.ch_int().write(|w| w.int().clear_bit_by_one());
101
102 // Source/destination addresses
103 t.tcd_saddr()
104 .write(|w| w.saddr().bits(core::ptr::addr_of!(SRC.0) as u32));
105 t.tcd_daddr()
106 .write(|w| w.daddr().bits(core::ptr::addr_of_mut!(DST) as u32));
107
108 // Offsets: both increment by 4 bytes
109 t.tcd_soff().write(|w| w.soff().bits(4));
110 t.tcd_doff().write(|w| w.doff().bits(4));
111
112 // Attributes: 32-bit transfers (size = 2)
113 // SMOD = 4 (2^4 = 16 byte modulo for source), DMOD = 0 (disabled)
114 t.tcd_attr().write(|w| {
115 w.ssize()
116 .bits(2)
117 .dsize()
118 .bits(2)
119 .smod()
120 .bits(4) // Source modulo: 2^4 = 16 bytes
121 .dmod()
122 .bits(0) // Dest modulo: disabled
123 });
124
125 // Transfer 32 bytes total in one minor loop
126 let nbytes = 32u32;
127 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));
128
129 // Source wraps via modulo, no adjustment needed
130 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
131 // Reset dest address after major loop
132 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(-(nbytes as i32) as u32));
133
134 // Major loop count = 1
135 t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
136 t.tcd_citer_elinkno().write(|w| w.citer().bits(1));
137
138 // Enable interrupt on major loop completion
139 t.tcd_csr().write(|w| w.intmajor().set_bit());
140
141 cortex_m::asm::dsb();
142
143 tx.blocking_write(b"Triggering transfer...\r\n").unwrap();
144 dma_ch0.trigger_start();
145 }
146
147 // Wait for completion using channel helper method
148 while !dma_ch0.is_done() {
149 cortex_m::asm::nop();
150 }
151 unsafe {
152 dma_ch0.clear_done();
153 }
154
155 tx.blocking_write(b"\r\nEDMA wrap transfer example finish.\r\n\r\n")
156 .unwrap();
157 tx.blocking_write(b"Destination Buffer (after): ").unwrap();
158 print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
159 tx.blocking_write(b"\r\n\r\n").unwrap();
160
161 // Verify: DST should be [1, 2, 3, 4, 1, 2, 3, 4]
162 let expected = [1u32, 2, 3, 4, 1, 2, 3, 4];
163 let mut mismatch = false;
164 unsafe {
165 for i in 0..8 {
166 if DST[i] != expected[i] {
167 mismatch = true;
168 break;
169 }
170 }
171 }
172
173 if mismatch {
174 tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
175 defmt::error!("FAIL: Mismatch detected!");
176 } else {
177 tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
178 defmt::info!("PASS: Data verified.");
179 }
180
181 loop {
182 cortex_m::asm::wfe();
183 }
184}
diff --git a/examples/mcxa/src/bin/lpuart_dma.rs b/examples/mcxa/src/bin/lpuart_dma.rs
new file mode 100644
index 000000000..cc86f6a40
--- /dev/null
+++ b/examples/mcxa/src/bin/lpuart_dma.rs
@@ -0,0 +1,68 @@
1//! LPUART DMA example for MCXA276.
2//!
3//! This example demonstrates using DMA for UART TX and RX operations.
4//! It sends a message using DMA, then waits for 16 characters to be received
5//! via DMA and echoes them back.
6//!
7//! The DMA request sources are automatically derived from the LPUART instance type.
8//! DMA clock/reset/init is handled automatically by the HAL.
9
10#![no_std]
11#![no_main]
12
13use embassy_executor::Spawner;
14use embassy_mcxa::clocks::config::Div8;
15use embassy_mcxa::lpuart::{Config, LpuartDma};
16use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
17
18#[embassy_executor::main]
19async fn main(_spawner: Spawner) {
20 let mut cfg = hal::config::Config::default();
21 cfg.clock_cfg.sirc.fro_12m_enabled = true;
22 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
23 let p = hal::init(cfg);
24
25 defmt::info!("LPUART DMA example starting...");
26
27 // Create UART configuration
28 let config = Config {
29 baudrate_bps: 115_200,
30 ..Default::default()
31 };
32
33 // Create UART instance with DMA channels
34 let mut lpuart = LpuartDma::new(
35 p.LPUART2, // Instance
36 p.P2_2, // TX pin
37 p.P2_3, // RX pin
38 p.DMA_CH0, // TX DMA channel
39 p.DMA_CH1, // RX DMA channel
40 config,
41 )
42 .unwrap();
43
44 // Send a message using DMA (DMA request source is automatically derived from LPUART2)
45 let tx_msg = b"Hello from LPUART2 DMA TX!\r\n";
46 lpuart.write_dma(tx_msg).await.unwrap();
47
48 defmt::info!("TX DMA complete");
49
50 // Send prompt
51 let prompt = b"Type 16 characters to echo via DMA:\r\n";
52 lpuart.write_dma(prompt).await.unwrap();
53
54 // Receive 16 characters using DMA
55 let mut rx_buf = [0u8; 16];
56 lpuart.read_dma(&mut rx_buf).await.unwrap();
57
58 defmt::info!("RX DMA complete");
59
60 // Echo back the received data
61 let echo_prefix = b"\r\nReceived: ";
62 lpuart.write_dma(echo_prefix).await.unwrap();
63 lpuart.write_dma(&rx_buf).await.unwrap();
64 let done_msg = b"\r\nDone!\r\n";
65 lpuart.write_dma(done_msg).await.unwrap();
66
67 defmt::info!("Example complete");
68}
diff --git a/examples/mcxa/src/bin/lpuart_ring_buffer.rs b/examples/mcxa/src/bin/lpuart_ring_buffer.rs
new file mode 100644
index 000000000..be7fd4534
--- /dev/null
+++ b/examples/mcxa/src/bin/lpuart_ring_buffer.rs
@@ -0,0 +1,115 @@
1//! LPUART Ring Buffer DMA example for MCXA276.
2//!
3//! This example demonstrates using the high-level `LpuartRxDma::setup_ring_buffer()`
4//! API for continuous circular DMA reception from a UART peripheral.
5//!
6//! # Features demonstrated:
7//! - `LpuartRxDma::setup_ring_buffer()` for continuous peripheral-to-memory DMA
8//! - `RingBuffer` for async reading of received data
9//! - Handling of potential overrun conditions
10//! - Half-transfer and complete-transfer interrupts for timely wakeups
11//!
12//! # How it works:
13//! 1. Create an `LpuartRxDma` driver with a DMA channel
14//! 2. Call `setup_ring_buffer()` which handles all low-level DMA configuration
15//! 3. Application asynchronously reads data as it arrives via `ring_buf.read()`
16//! 4. Both half-transfer and complete-transfer interrupts wake the reader
17
18#![no_std]
19#![no_main]
20
21use embassy_executor::Spawner;
22use embassy_mcxa::clocks::config::Div8;
23use embassy_mcxa::lpuart::{Config, LpuartDma, LpuartTxDma};
24use static_cell::ConstStaticCell;
25use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
26
27// Ring buffer for RX - power of 2 is ideal for modulo efficiency
28static RX_RING_BUFFER: ConstStaticCell<[u8; 64]> = ConstStaticCell::new([0; 64]);
29
30/// Helper to write a byte as hex to UART
31fn write_hex<T: embassy_mcxa::lpuart::Instance, C: embassy_mcxa::dma::Channel>(
32 tx: &mut LpuartTxDma<'_, T, C>,
33 byte: u8,
34) {
35 const HEX: &[u8; 16] = b"0123456789ABCDEF";
36 let buf = [HEX[(byte >> 4) as usize], HEX[(byte & 0x0F) as usize]];
37 tx.blocking_write(&buf).ok();
38}
39
40#[embassy_executor::main]
41async fn main(_spawner: Spawner) {
42 // Small delay to allow probe-rs to attach after reset
43 for _ in 0..100_000 {
44 cortex_m::asm::nop();
45 }
46
47 let mut cfg = hal::config::Config::default();
48 cfg.clock_cfg.sirc.fro_12m_enabled = true;
49 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
50 let p = hal::init(cfg);
51
52 defmt::info!("LPUART Ring Buffer DMA example starting...");
53
54 // Create UART configuration
55 let config = Config {
56 baudrate_bps: 115_200,
57 ..Default::default()
58 };
59
60 // Create LPUART with DMA support for both TX and RX, then split
61 // This is the proper Embassy pattern - create once, split into TX and RX
62 let lpuart = LpuartDma::new(p.LPUART2, p.P2_2, p.P2_3, p.DMA_CH1, p.DMA_CH0, config).unwrap();
63 let (mut tx, rx) = lpuart.split();
64
65 tx.blocking_write(b"LPUART Ring Buffer DMA Example\r\n").unwrap();
66 tx.blocking_write(b"==============================\r\n\r\n").unwrap();
67
68 tx.blocking_write(b"Setting up circular DMA for UART RX...\r\n")
69 .unwrap();
70
71 let buf = RX_RING_BUFFER.take();
72 // Set up the ring buffer with circular DMA
73 let mut ring_buf = rx.into_ring_dma_rx(buf);
74
75 tx.blocking_write(b"Ring buffer ready! Type characters to see them echoed.\r\n")
76 .unwrap();
77 tx.blocking_write(b"The DMA continuously receives in the background.\r\n\r\n")
78 .unwrap();
79
80 // Main loop: read from ring buffer and echo back
81 let mut read_buf = [0u8; 16];
82 let mut total_received: usize = 0;
83
84 loop {
85 // Async read - waits until data is available
86 match ring_buf.read(&mut read_buf).await {
87 Ok(n) if n > 0 => {
88 total_received += n;
89
90 // Echo back what we received
91 tx.blocking_write(b"RX[").unwrap();
92 for (i, &byte) in read_buf.iter().enumerate().take(n) {
93 write_hex(&mut tx, byte);
94 if i < n - 1 {
95 tx.blocking_write(b" ").unwrap();
96 }
97 }
98 tx.blocking_write(b"]: ").unwrap();
99 tx.blocking_write(&read_buf[..n]).unwrap();
100 tx.blocking_write(b"\r\n").unwrap();
101
102 defmt::info!("Received {} bytes, total: {}", n, total_received);
103 }
104 Ok(_) => {
105 // No data, shouldn't happen with async read
106 }
107 Err(_) => {
108 // Overrun detected
109 tx.blocking_write(b"ERROR: Ring buffer overrun!\r\n").unwrap();
110 defmt::error!("Ring buffer overrun!");
111 ring_buf.clear();
112 }
113 }
114 }
115}
diff --git a/examples/mcxa/src/bin/raw_dma_channel_link.rs b/examples/mcxa/src/bin/raw_dma_channel_link.rs
new file mode 100644
index 000000000..74785e4f3
--- /dev/null
+++ b/examples/mcxa/src/bin/raw_dma_channel_link.rs
@@ -0,0 +1,278 @@
1//! DMA channel linking example for MCXA276.
2//!
3//! NOTE: this is a "raw dma" example! It exists as a proof of concept, as we don't have
 4//! a high-level and safe API for it. It should not be taken as typical, recommended, or
5//! stable usage!
6//!
7//! This example demonstrates DMA channel linking (minor and major loop linking):
8//! - Channel 0: Transfers SRC_BUFFER to DEST_BUFFER0, with:
9//! - Minor Link to Channel 1 (triggers CH1 after each minor loop)
10//! - Major Link to Channel 2 (triggers CH2 after major loop completes)
11//! - Channel 1: Transfers SRC_BUFFER to DEST_BUFFER1 (triggered by CH0 minor link)
12//! - Channel 2: Transfers SRC_BUFFER to DEST_BUFFER2 (triggered by CH0 major link)
13//!
14//! # Embassy-style features demonstrated:
15//! - `DmaChannel::new()` for channel creation
16//! - `DmaChannel::is_done()` and `clear_done()` helper methods
17//! - Channel linking with `set_minor_link()` and `set_major_link()`
18
19#![no_std]
20#![no_main]
21
22use embassy_executor::Spawner;
23use embassy_mcxa::clocks::config::Div8;
24use embassy_mcxa::dma::DmaChannel;
25use embassy_mcxa::pac;
26use static_cell::ConstStaticCell;
27use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
28
29// Buffers
30static SRC_BUFFER: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([1, 2, 3, 4]);
31static DEST_BUFFER0: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0; 4]);
32static DEST_BUFFER1: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0; 4]);
33static DEST_BUFFER2: ConstStaticCell<[u32; 4]> = ConstStaticCell::new([0; 4]);
34
35#[embassy_executor::main]
36async fn main(_spawner: Spawner) {
37 // Small delay to allow probe-rs to attach after reset
38 for _ in 0..100_000 {
39 cortex_m::asm::nop();
40 }
41
42 let mut cfg = hal::config::Config::default();
43 cfg.clock_cfg.sirc.fro_12m_enabled = true;
44 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
45 let p = hal::init(cfg);
46
47 defmt::info!("DMA channel link example starting...");
48
49 // DMA is initialized during hal::init() - no need to call ensure_init()
50
51 let pac_periphs = unsafe { pac::Peripherals::steal() };
52 let dma0 = &pac_periphs.dma0;
53 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
54
55 // Clear any residual state
56 for i in 0..3 {
57 let t = edma.tcd(i);
58 t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
59 t.ch_int().write(|w| w.int().clear_bit_by_one());
60 t.ch_es().write(|w| w.err().clear_bit_by_one());
61 t.ch_mux().write(|w| unsafe { w.bits(0) });
62 }
63
64 // Clear Global Halt/Error state
65 dma0.mp_csr().modify(|_, w| {
66 w.halt()
67 .normal_operation()
68 .hae()
69 .normal_operation()
70 .ecx()
71 .normal_operation()
72 .cx()
73 .normal_operation()
74 });
75
76 defmt::info!("EDMA channel link example begin.");
77
78 // Initialize buffers
79 let src = SRC_BUFFER.take();
80 let dst0 = DEST_BUFFER0.take();
81 let dst1 = DEST_BUFFER1.take();
82 let dst2 = DEST_BUFFER2.take();
83
84 defmt::info!("Source Buffer: {=[?]}", src.as_slice());
85 defmt::info!("DEST0 (before): {=[?]}", dst0.as_slice());
86 defmt::info!("DEST1 (before): {=[?]}", dst1.as_slice());
87 defmt::info!("DEST2 (before): {=[?]}", dst2.as_slice());
88
89 defmt::info!("Configuring DMA channels with Embassy-style API...");
90
91 let ch0 = DmaChannel::new(p.DMA_CH0);
92 let ch1 = DmaChannel::new(p.DMA_CH1);
93 let ch2 = DmaChannel::new(p.DMA_CH2);
94
95 // Configure channels using direct TCD access (advanced feature demo)
96 // This example demonstrates channel linking which requires direct TCD manipulation
97
98 // Helper to configure TCD for memory-to-memory transfer
99 // Parameters: channel, src, dst, width, nbytes (minor loop), count (major loop), interrupt
100 #[allow(clippy::too_many_arguments)]
101 unsafe fn configure_tcd(
102 edma: &embassy_mcxa::pac::edma_0_tcd0::RegisterBlock,
103 ch: usize,
104 src: u32,
105 dst: u32,
106 width: u8,
107 nbytes: u32,
108 count: u16,
109 enable_int: bool,
110 ) {
111 let t = edma.tcd(ch);
112
113 // Reset channel state
114 t.ch_csr().write(|w| {
115 w.erq()
116 .disable()
117 .earq()
118 .disable()
119 .eei()
120 .no_error()
121 .ebw()
122 .disable()
123 .done()
124 .clear_bit_by_one()
125 });
126 t.ch_es().write(|w| w.bits(0));
127 t.ch_int().write(|w| w.int().clear_bit_by_one());
128
129 // Source/destination addresses
130 t.tcd_saddr().write(|w| w.saddr().bits(src));
131 t.tcd_daddr().write(|w| w.daddr().bits(dst));
132
133 // Offsets: increment by width
134 t.tcd_soff().write(|w| w.soff().bits(width as u16));
135 t.tcd_doff().write(|w| w.doff().bits(width as u16));
136
137 // Attributes: size = log2(width)
138 let size = match width {
139 1 => 0,
140 2 => 1,
141 4 => 2,
142 _ => 0,
143 };
144 t.tcd_attr().write(|w| w.ssize().bits(size).dsize().bits(size));
145
146 // Number of bytes per minor loop
147 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));
148
149 // Major loop: reset source address after major loop
150 let total_bytes = nbytes * count as u32;
151 t.tcd_slast_sda()
152 .write(|w| w.slast_sda().bits(-(total_bytes as i32) as u32));
153 t.tcd_dlast_sga()
154 .write(|w| w.dlast_sga().bits(-(total_bytes as i32) as u32));
155
156 // Major loop count
157 t.tcd_biter_elinkno().write(|w| w.biter().bits(count));
158 t.tcd_citer_elinkno().write(|w| w.citer().bits(count));
159
160 // Control/status: enable interrupt if requested
161 if enable_int {
162 t.tcd_csr().write(|w| w.intmajor().set_bit());
163 } else {
164 t.tcd_csr().write(|w| w.intmajor().clear_bit());
165 }
166
167 cortex_m::asm::dsb();
168 }
169
170 unsafe {
171 // Channel 0: Transfer 16 bytes total (8 bytes per minor loop, 2 major iterations)
172 // Minor Link -> Channel 1
173 // Major Link -> Channel 2
174 configure_tcd(
175 edma,
176 0,
177 src.as_ptr() as u32,
178 dst0.as_mut_ptr() as u32,
179 4, // src width
180 8, // nbytes (minor loop = 2 words)
181 2, // count (major loop = 2 iterations)
182 false, // no interrupt
183 );
184 ch0.set_minor_link(1); // Link to CH1 after each minor loop
185 ch0.set_major_link(2); // Link to CH2 after major loop
186
187 // Channel 1: Transfer 16 bytes (triggered by CH0 minor link)
188 configure_tcd(
189 edma,
190 1,
191 src.as_ptr() as u32,
192 dst1.as_mut_ptr() as u32,
193 4,
194 16, // full buffer in one minor loop
195 1, // 1 major iteration
196 false,
197 );
198
199 // Channel 2: Transfer 16 bytes (triggered by CH0 major link)
200 configure_tcd(
201 edma,
202 2,
203 src.as_ptr() as u32,
204 dst2.as_mut_ptr() as u32,
205 4,
206 16, // full buffer in one minor loop
207 1, // 1 major iteration
208 true, // enable interrupt
209 );
210 }
211
212 defmt::info!("Triggering Channel 0 (1st minor loop)...");
213
214 // Trigger first minor loop of CH0
215 unsafe {
216 ch0.trigger_start();
217 }
218
219 // Wait for CH1 to complete (triggered by CH0 minor link)
220 while !ch1.is_done() {
221 cortex_m::asm::nop();
222 }
223 unsafe {
224 ch1.clear_done();
225 }
226
227 defmt::info!("CH1 done (via minor link).");
228 defmt::info!("Triggering Channel 0 (2nd minor loop)...");
229
230 // Trigger second minor loop of CH0
231 unsafe {
232 ch0.trigger_start();
233 }
234
235 // Wait for CH0 major loop to complete
236 while !ch0.is_done() {
237 cortex_m::asm::nop();
238 }
239 unsafe {
240 ch0.clear_done();
241 }
242
243 defmt::info!("CH0 major loop done.");
244
245 // Wait for CH2 to complete (triggered by CH0 major link)
246 // Using is_done() instead of AtomicBool - the standard interrupt handler
247 // clears the interrupt flag and wakes wakers, but DONE bit remains set
248 while !ch2.is_done() {
249 cortex_m::asm::nop();
250 }
251 unsafe {
252 ch2.clear_done();
253 }
254
255 defmt::info!("CH2 done (via major link).");
256
257 defmt::info!("EDMA channel link example finish.");
258
259 defmt::info!("DEST0 (after): {=[?]}", dst0.as_slice());
260 defmt::info!("DEST1 (after): {=[?]}", dst1.as_slice());
261 defmt::info!("DEST2 (after): {=[?]}", dst2.as_slice());
262
263 // Verify all buffers match source
264 let mut success = true;
265 for sli in [dst0, dst1, dst2] {
266 success &= sli == src;
267 }
268
269 if success {
270 defmt::info!("PASS: Data verified.");
271 } else {
272 defmt::error!("FAIL: Mismatch detected!");
273 }
274
275 loop {
276 cortex_m::asm::wfe();
277 }
278}
diff --git a/examples/mcxa/src/bin/raw_dma_interleave_transfer.rs b/examples/mcxa/src/bin/raw_dma_interleave_transfer.rs
new file mode 100644
index 000000000..a383b6cf4
--- /dev/null
+++ b/examples/mcxa/src/bin/raw_dma_interleave_transfer.rs
@@ -0,0 +1,141 @@
1//! DMA interleaved transfer example for MCXA276.
2//!
3//! NOTE: this is a "raw dma" example! It exists as a proof of concept, as we don't have
 4//! a high-level and safe API for it. It should not be taken as typical, recommended, or
5//! stable usage!
6//!
7//! This example demonstrates using DMA with custom source/destination offsets
8//! to interleave data during transfer.
9//!
10//! # Embassy-style features demonstrated:
11//! - `TransferOptions::default()` for configuration (used internally)
12//! - DMA channel with `DmaChannel::new()`
13
14#![no_std]
15#![no_main]
16
17use embassy_executor::Spawner;
18use embassy_mcxa::clocks::config::Div8;
19use embassy_mcxa::dma::DmaChannel;
20use static_cell::ConstStaticCell;
21use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
22
23const BUFFER_LENGTH: usize = 16;
24const HALF_BUFF_LENGTH: usize = BUFFER_LENGTH / 2;
25
26// Buffers in RAM
27static SRC_BUFFER: ConstStaticCell<[u32; HALF_BUFF_LENGTH]> = ConstStaticCell::new([0; HALF_BUFF_LENGTH]);
28static DEST_BUFFER: ConstStaticCell<[u32; BUFFER_LENGTH]> = ConstStaticCell::new([0; BUFFER_LENGTH]);
29
30#[embassy_executor::main]
31async fn main(_spawner: Spawner) {
32 // Small delay to allow probe-rs to attach after reset
33 for _ in 0..100_000 {
34 cortex_m::asm::nop();
35 }
36
37 let mut cfg = hal::config::Config::default();
38 cfg.clock_cfg.sirc.fro_12m_enabled = true;
39 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
40 let p = hal::init(cfg);
41
42 defmt::info!("DMA interleave transfer example starting...");
43
44 defmt::info!("EDMA interleave transfer example begin.");
45
46 // Initialize buffers
47 let src = SRC_BUFFER.take();
48 *src = [1, 2, 3, 4, 5, 6, 7, 8];
49 let dst = DEST_BUFFER.take();
50
51 defmt::info!("Source Buffer: {=[?]}", src.as_slice());
52 defmt::info!("Destination Buffer (before): {=[?]}", dst.as_slice());
53
54 defmt::info!("Configuring DMA with Embassy-style API...");
55
56 // Create DMA channel using Embassy-style API
57 let dma_ch0 = DmaChannel::new(p.DMA_CH0);
58
59 // Configure interleaved transfer using direct TCD access:
60 // - src_offset = 4: advance source by 4 bytes after each read
61 // - dst_offset = 8: advance dest by 8 bytes after each write
62 // This spreads source data across every other word in destination
63 unsafe {
64 let t = dma_ch0.tcd();
65
66 // Reset channel state
67 t.ch_csr().write(|w| {
68 w.erq()
69 .disable()
70 .earq()
71 .disable()
72 .eei()
73 .no_error()
74 .ebw()
75 .disable()
76 .done()
77 .clear_bit_by_one()
78 });
79 t.ch_es().write(|w| w.bits(0));
80 t.ch_int().write(|w| w.int().clear_bit_by_one());
81
82 // Source/destination addresses
83 t.tcd_saddr().write(|w| w.saddr().bits(src.as_ptr() as u32));
84 t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
85
86 // Custom offsets for interleaving
87 t.tcd_soff().write(|w| w.soff().bits(4)); // src: +4 bytes per read
88 t.tcd_doff().write(|w| w.doff().bits(8)); // dst: +8 bytes per write
89
90 // Attributes: 32-bit transfers (size = 2)
91 t.tcd_attr().write(|w| w.ssize().bits(2).dsize().bits(2));
92
93 // Transfer entire source buffer in one minor loop
94 let nbytes = (HALF_BUFF_LENGTH * 4) as u32;
95 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));
96
97 // Reset source address after major loop
98 t.tcd_slast_sda().write(|w| w.slast_sda().bits(-(nbytes as i32) as u32));
99 // Destination uses 2x offset, so adjust accordingly
100 let dst_total = (HALF_BUFF_LENGTH * 8) as u32;
101 t.tcd_dlast_sga()
102 .write(|w| w.dlast_sga().bits(-(dst_total as i32) as u32));
103
104 // Major loop count = 1
105 t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
106 t.tcd_citer_elinkno().write(|w| w.citer().bits(1));
107
108 // Enable interrupt on major loop completion
109 t.tcd_csr().write(|w| w.intmajor().set_bit());
110
111 cortex_m::asm::dsb();
112
113 defmt::info!("Triggering transfer...");
114 dma_ch0.trigger_start();
115 }
116
117 // Wait for completion using channel helper method
118 while !dma_ch0.is_done() {
119 cortex_m::asm::nop();
120 }
121 unsafe {
122 dma_ch0.clear_done();
123 }
124
125 defmt::info!("EDMA interleave transfer example finish.");
126 defmt::info!("Destination Buffer (after): {=[?]}", dst.as_slice());
127
128 // Verify: Even indices should match SRC_BUFFER[i/2], odd indices should be 0
129 let mut mismatch = false;
130 let diter = dst.chunks_exact(2);
131 let siter = src.iter();
132 for (ch, src) in diter.zip(siter) {
133 mismatch |= !matches!(ch, [a, 0] if a == src);
134 }
135
136 if mismatch {
137 defmt::error!("FAIL: Mismatch detected!");
138 } else {
139 defmt::info!("PASS: Data verified.");
140 }
141}
diff --git a/examples/mcxa/src/bin/raw_dma_memset.rs b/examples/mcxa/src/bin/raw_dma_memset.rs
new file mode 100644
index 000000000..7b3c06ffa
--- /dev/null
+++ b/examples/mcxa/src/bin/raw_dma_memset.rs
@@ -0,0 +1,129 @@
1//! DMA memset example for MCXA276.
2//!
3//! NOTE: this is a "raw dma" example! It exists as a proof of concept, as we don't have
 4//! a high-level and safe API for it. It should not be taken as typical, recommended, or
5//! stable usage!
6//!
7//! This example demonstrates using DMA to fill a buffer with a repeated pattern.
8//! The source address stays fixed while the destination increments.
9//!
10//! # Embassy-style features demonstrated:
11//! - `DmaChannel::is_done()` and `clear_done()` helper methods
12//! - No need to pass register block around
13
14#![no_std]
15#![no_main]
16
17use embassy_executor::Spawner;
18use embassy_mcxa::clocks::config::Div8;
19use embassy_mcxa::dma::DmaChannel;
20use static_cell::ConstStaticCell;
21use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
22
23const BUFFER_LENGTH: usize = 4;
24
25// Buffers in RAM
26static PATTERN: u32 = 0xDEADBEEF;
27static DEST_BUFFER: ConstStaticCell<[u32; BUFFER_LENGTH]> = ConstStaticCell::new([0; BUFFER_LENGTH]);
28
29#[embassy_executor::main]
30async fn main(_spawner: Spawner) {
31 // Small delay to allow probe-rs to attach after reset
32 for _ in 0..100_000 {
33 cortex_m::asm::nop();
34 }
35
36 let mut cfg = hal::config::Config::default();
37 cfg.clock_cfg.sirc.fro_12m_enabled = true;
38 cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
39 let p = hal::init(cfg);
40
41 defmt::info!("DMA memset example starting...");
42 defmt::info!("EDMA memset example begin.");
43
44 // Initialize buffers
45 let pat = &PATTERN;
46 let dst = DEST_BUFFER.take();
47 defmt::info!("Pattern Value: {=u32}", pat);
48 defmt::info!("Destination Buffer (before): {=[?]}", dst.as_slice());
49 defmt::info!("Configuring DMA with Embassy-style API...");
50
51 // Create DMA channel using Embassy-style API
52 let dma_ch0 = DmaChannel::new(p.DMA_CH0);
53
54 // Configure memset transfer using direct TCD access:
55 // Source stays fixed (soff = 0, reads same pattern repeatedly)
56 // Destination increments (doff = 4)
57 unsafe {
58 let t = dma_ch0.tcd();
59
60 // Reset channel state
61 t.ch_csr().write(|w| {
62 w.erq()
63 .disable()
64 .earq()
65 .disable()
66 .eei()
67 .no_error()
68 .ebw()
69 .disable()
70 .done()
71 .clear_bit_by_one()
72 });
73 t.ch_es().write(|w| w.bits(0));
74 t.ch_int().write(|w| w.int().clear_bit_by_one());
75
76 // Source address (pattern) - fixed
77 t.tcd_saddr().write(|w| w.saddr().bits(pat as *const _ as u32));
78 // Destination address - increments
79 t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
80
81 // Source offset = 0 (stays fixed), Dest offset = 4 (increments)
82 t.tcd_soff().write(|w| w.soff().bits(0));
83 t.tcd_doff().write(|w| w.doff().bits(4));
84
85 // Attributes: 32-bit transfers (size = 2)
86 t.tcd_attr().write(|w| w.ssize().bits(2).dsize().bits(2));
87
88 // Transfer entire buffer in one minor loop
89 let nbytes = (BUFFER_LENGTH * 4) as u32;
90 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));
91
92 // Source doesn't need adjustment (stays fixed)
93 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
94 // Reset dest address after major loop
95 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(-(nbytes as i32) as u32));
96
97 // Major loop count = 1
98 t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
99 t.tcd_citer_elinkno().write(|w| w.citer().bits(1));
100
101 // Enable interrupt on major loop completion
102 t.tcd_csr().write(|w| w.intmajor().set_bit());
103
104 cortex_m::asm::dsb();
105
106 defmt::info!("Triggering transfer...");
107 dma_ch0.trigger_start();
108 }
109
110 // Wait for completion using channel helper method
111 while !dma_ch0.is_done() {
112 cortex_m::asm::nop();
113 }
114 unsafe {
115 dma_ch0.clear_done();
116 }
117
118 defmt::info!("EDMA memset example finish.");
119 defmt::info!("Destination Buffer (after): {=[?]}", dst.as_slice());
120
121 // Verify: All elements should equal PATTERN
122 let mismatch = dst.iter().any(|i| *i != *pat);
123
124 if mismatch {
125 defmt::error!("FAIL: Mismatch detected!");
126 } else {
127 defmt::info!("PASS: Data verified.");
128 }
129}
diff --git a/examples/mcxa/src/bin/raw_dma_ping_pong_transfer.rs b/examples/mcxa/src/bin/raw_dma_ping_pong_transfer.rs
new file mode 100644
index 000000000..80df40449
--- /dev/null
+++ b/examples/mcxa/src/bin/raw_dma_ping_pong_transfer.rs
@@ -0,0 +1,244 @@
1//! DMA ping-pong/double-buffer transfer example for MCXA276.
2//!
3//! NOTE: this is a "raw dma" example! It exists as a proof of concept, as we don't have
//! a high-level and safe API for it yet. It should not be taken as typical, recommended, or
5//! stable usage!
6//!
7//! This example demonstrates two approaches for ping-pong/double-buffering:
8//!
9//! ## Approach 1: Scatter/Gather with linked TCDs (manual)
10//! - Two TCDs link to each other for alternating transfers
11//! - Uses custom handler that delegates to on_interrupt() then signals completion
12//! - Note: With ESG=1, DONE bit is cleared by hardware when next TCD loads,
13//! so we need an AtomicBool to track completion
14//!
15//! ## Approach 2: Half-transfer interrupt with wait_half() (NEW!)
16//! - Single continuous transfer over entire buffer
17//! - Uses half-transfer interrupt to know when first half is ready
18//! - Application can process first half while second half is being filled
19//!
20//! # Embassy-style features demonstrated:
21//! - `DmaChannel::new()` for channel creation
22//! - Scatter/gather with linked TCDs
23//! - Custom handler that delegates to HAL's `on_interrupt()` (best practice)
24//! - Standard `DmaCh1InterruptHandler` with `bind_interrupts!` macro
25//! - NEW: `wait_half()` for half-transfer interrupt handling
26
27#![no_std]
28#![no_main]
29
30use embassy_executor::Spawner;
31use embassy_mcxa::clocks::config::Div8;
32use embassy_mcxa::dma::{DmaChannel, Tcd, TransferOptions};
33use embassy_mcxa::pac;
34use static_cell::ConstStaticCell;
35use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
36
// Source and destination buffers for Approach 1 (scatter/gather).
// ConstStaticCell places each buffer at a fixed 'static address (the DMA engine
// is given raw pointers into them) and `take()` yields the `&'static mut` once.
static SRC: ConstStaticCell<[u32; 8]> = ConstStaticCell::new([1, 2, 3, 4, 5, 6, 7, 8]);
static DST: ConstStaticCell<[u32; 8]> = ConstStaticCell::new([0; 8]);

// Source and destination buffers for Approach 2 (wait_half).
// Distinct first/second-half markers (0xA_/0xB_) make the half boundary visible in logs.
static SRC2: ConstStaticCell<[u32; 8]> = ConstStaticCell::new([0xA1, 0xA2, 0xA3, 0xA4, 0xB1, 0xB2, 0xB3, 0xB4]);
static DST2: ConstStaticCell<[u32; 8]> = ConstStaticCell::new([0; 8]);
44
45// TCD pool for scatter/gather - must be 32-byte aligned
46#[repr(C, align(32))]
47struct TcdPool([Tcd; 2]);
48
49static TCD_POOL: ConstStaticCell<TcdPool> = ConstStaticCell::new(TcdPool(
50 [Tcd {
51 saddr: 0,
52 soff: 0,
53 attr: 0,
54 nbytes: 0,
55 slast: 0,
56 daddr: 0,
57 doff: 0,
58 citer: 0,
59 dlast_sga: 0,
60 csr: 0,
61 biter: 0,
62 }; 2],
63));
64
/// Entry point for the ping-pong DMA demo.
///
/// Approach 1 drives DMA channel 0 by hand with two RAM TCDs linked to each
/// other through `dlast_sga` (scatter/gather). Approach 2 uses the HAL's
/// `mem_to_mem` transfer on channel 1 plus the half-transfer interrupt.
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Enable the 12 MHz FRO and feed the LF clock undivided, then bring up the HAL.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA ping-pong transfer example starting...");

    defmt::info!("EDMA ping-pong transfer example begin.");

    // Claim the statically-allocated buffers (`take()` may only run once each).
    let src = SRC.take();
    let dst = DST.take();

    defmt::info!("Source Buffer: {=[?]}", src.as_slice());
    defmt::info!("Destination Buffer (before): {=[?]}", dst.as_slice());

    defmt::info!("Configuring ping-pong DMA with Embassy-style API...");

    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure ping-pong transfer using direct TCD access:
    // This sets up TCD0 and TCD1 in RAM, and loads TCD0 into the channel.
    // TCD0 transfers first half (SRC[0..4] -> DST[0..4]), links to TCD1.
    // TCD1 transfers second half (SRC[4..8] -> DST[4..8]), links to TCD0.
    let tcds = &mut TCD_POOL.take().0;

    // Each half is 4 words of 4 bytes.
    let half_len = 4usize;
    let half_bytes = (half_len * 4) as u32;

    // SAFETY(review): raw TCD programming. Every address written below points
    // into a 'static allocation, so it stays valid for the life of the transfer.
    unsafe {
        let tcd0_addr = &tcds[0] as *const _ as u32;
        let tcd1_addr = &tcds[1] as *const _ as u32;

        // TCD0: First half -> Links to TCD1
        tcds[0] = Tcd {
            saddr: src.as_ptr() as u32,
            soff: 4, // advance source by 4 bytes per read
            attr: 0x0202, // 32-bit src/dst
            nbytes: half_bytes, // whole half in one minor loop
            slast: 0,
            daddr: dst.as_mut_ptr() as u32,
            doff: 4, // advance destination by 4 bytes per write
            citer: 1,
            dlast_sga: tcd1_addr as i32, // scatter/gather: fetch TCD1 next
            csr: 0x0012, // ESG | INTMAJOR
            biter: 1,
        };

        // TCD1: Second half -> Links to TCD0 (the ping-pong)
        tcds[1] = Tcd {
            saddr: src.as_ptr().add(half_len) as u32,
            soff: 4,
            attr: 0x0202,
            nbytes: half_bytes,
            slast: 0,
            daddr: dst.as_mut_ptr().add(half_len) as u32,
            doff: 4,
            citer: 1,
            dlast_sga: tcd0_addr as i32, // scatter/gather: back to TCD0
            csr: 0x0012, // ESG | INTMAJOR
            biter: 1,
        };

        // Load TCD0 into hardware registers
        dma_ch0.load_tcd(&tcds[0]);
    }

    defmt::info!("Triggering first half transfer...");

    // Trigger first transfer (first half: SRC[0..4] -> DST[0..4])
    unsafe {
        dma_ch0.trigger_start();
    }

    let tcd = dma_ch0.tcd();
    // Wait for first half: with ESG=1 DONE is cleared when the next TCD loads
    // (see module docs), so poll SADDR instead — it changes once TCD1 is loaded.
    loop {
        if tcd.tcd_saddr().read().bits() != src.as_ptr() as u32 {
            break;
        }
    }

    defmt::info!("First half transferred.");
    defmt::info!("Triggering second half transfer...");

    // Trigger second transfer (second half: SRC[4..8] -> DST[4..8])
    unsafe {
        dma_ch0.trigger_start();
    }

    // Wait for second half: SADDR leaves the second-half pointer when TCD0 is reloaded.
    loop {
        if tcd.tcd_saddr().read().bits() != unsafe { src.as_ptr().add(half_len) } as u32 {
            break;
        }
    }

    defmt::info!("Second half transferred.");

    defmt::info!("EDMA ping-pong transfer example finish.");
    defmt::info!("Destination Buffer (after): {=[?]}", dst.as_slice());

    // Verify: DST should match SRC
    let mismatch = src != dst;

    if mismatch {
        defmt::error!("FAIL: Approach 1 mismatch detected!");
    } else {
        defmt::info!("PASS: Approach 1 data verified.");
    }

    // =========================================================================
    // Approach 2: Half-Transfer Interrupt with wait_half() (NEW!)
    // =========================================================================
    //
    // This approach uses a single continuous DMA transfer with half-transfer
    // interrupt enabled. The wait_half() method allows you to be notified
    // when the first half of the buffer is complete, so you can process it
    // while the second half is still being filled.
    //
    // Benefits:
    // - Simpler setup (no TCD pool needed)
    // - True async/await support
    // - Good for streaming data processing

    defmt::info!("--- Approach 2: wait_half() demo ---");

    // Enable DMA CH1 interrupt so the half/complete events can wake the futures.
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH1);
    }

    // Initialize approach 2 buffers
    let src2 = SRC2.take();
    let dst2 = DST2.take();

    defmt::info!("SRC2: {=[?]}", src2.as_slice());

    let dma_ch1 = DmaChannel::new(p.DMA_CH1);

    // Configure transfer with half-transfer interrupt enabled
    let mut options = TransferOptions::default();
    options.half_transfer_interrupt = true; // Enable half-transfer interrupt
    options.complete_transfer_interrupt = true;

    defmt::info!("Starting transfer with half_transfer_interrupt...");

    // Create the transfer (futures are lazy; progress happens as it is awaited).
    let mut transfer = dma_ch1.mem_to_mem(src2, dst2, options).unwrap();

    // Wait for half-transfer (first 4 elements)
    defmt::info!("Waiting for first half...");
    let _ok = transfer.wait_half().await.unwrap();

    defmt::info!("Half-transfer complete!");

    // Wait for complete transfer
    defmt::info!("Waiting for second half...");
    transfer.await.unwrap();

    defmt::info!("Transfer complete! Full DST2: {=[?]}", dst2.as_slice());

    // Verify approach 2
    let mismatch2 = src2 != dst2;

    if mismatch2 {
        defmt::error!("FAIL: Approach 2 mismatch!");
    } else {
        defmt::info!("PASS: Approach 2 verified.");
    }

    defmt::info!("=== All ping-pong demos complete ===");
}
diff --git a/examples/mcxa/src/bin/raw_dma_scatter_gather.rs b/examples/mcxa/src/bin/raw_dma_scatter_gather.rs
new file mode 100644
index 000000000..eb9960764
--- /dev/null
+++ b/examples/mcxa/src/bin/raw_dma_scatter_gather.rs
@@ -0,0 +1,165 @@
1//! DMA scatter-gather transfer example for MCXA276.
2//!
3//! NOTE: this is a "raw dma" example! It exists as a proof of concept, as we don't have
//! a high-level and safe API for it yet. It should not be taken as typical, recommended, or
5//! stable usage!
6//!
7//! This example demonstrates using DMA with scatter/gather to chain multiple
8//! transfer descriptors. The first TCD transfers the first half of the buffer,
9//! then automatically loads the second TCD to transfer the second half.
10//!
11//! # Embassy-style features demonstrated:
12//! - `DmaChannel::new()` for channel creation
13//! - Scatter/gather with chained TCDs
14//! - Custom handler that delegates to HAL's `on_interrupt()` (best practice)
15
16#![no_std]
17#![no_main]
18
19use embassy_executor::Spawner;
20use embassy_mcxa::clocks::config::Div8;
21use embassy_mcxa::dma::{DmaChannel, Tcd};
22use static_cell::ConstStaticCell;
23use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
24
// Source and destination buffers.
// ConstStaticCell places them at fixed 'static addresses (the eDMA is handed
// raw pointers into both) and `take()` yields each `&'static mut` exactly once.
static SRC: ConstStaticCell<[u32; 12]> = ConstStaticCell::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
static DST: ConstStaticCell<[u32; 12]> = ConstStaticCell::new([0; 12]);
28
29// TCD pool for scatter/gather - must be 32-byte aligned
30#[repr(C, align(32))]
31struct TcdPool([Tcd; 2]);
32
33static TCD_POOL: ConstStaticCell<TcdPool> = ConstStaticCell::new(TcdPool(
34 [Tcd {
35 saddr: 0,
36 soff: 0,
37 attr: 0,
38 nbytes: 0,
39 slast: 0,
40 daddr: 0,
41 doff: 0,
42 citer: 0,
43 dlast_sga: 0,
44 csr: 0,
45 biter: 0,
46 }; 2],
47));
48
/// Entry point for the scatter/gather DMA demo.
///
/// Builds two TCDs in RAM: TCD0 copies the first 4 words and, via ESG,
/// makes the eDMA engine fetch TCD1, which copies the remaining 8 words.
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Enable the 12 MHz FRO and feed the LF clock undivided, then bring up the HAL.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA scatter-gather transfer example starting...");

    defmt::info!("EDMA scatter-gather transfer example begin.");

    // Claim the statically-allocated buffers (`take()` may only run once each).
    let src = SRC.take();
    let dst = DST.take();

    defmt::info!("Source Buffer: {=[?]}", src.as_slice());
    defmt::info!("Destination Buffer (before): {=[?]}", dst.as_slice());
    defmt::info!("Configuring scatter-gather DMA with Embassy-style API...");

    let dma_ch0 = DmaChannel::new(p.DMA_CH0);
    let src_ptr = src.as_ptr();
    let dst_ptr = dst.as_mut_ptr();

    // Configure scatter-gather transfer using direct TCD access:
    // This sets up TCD0 and TCD1 in RAM, and loads TCD0 into the channel.
    // TCD0 transfers first half (SRC[0..4] -> DST[0..4]), then loads TCD1.
    // TCD1 transfers second half (SRC[4..12] -> DST[4..12]), last TCD.
    //
    // SAFETY(review): raw TCD programming. Every address written below points
    // into a 'static allocation, so it stays valid while the DMA runs.
    unsafe {
        let tcds = &mut TCD_POOL.take().0;

        // TCD0: copy SRC[0..4] -> DST[0..4]; dlast_sga points at TCD1.
        tcds[0] = Tcd {
            saddr: src_ptr as u32,
            soff: 4, // advance source by 4 bytes per read
            attr: 0x0202, // 32-bit src/dst
            nbytes: 4 * 4, // 4 words in one minor loop
            slast: 0,
            daddr: dst_ptr as u32,
            doff: 4, // advance destination by 4 bytes per write
            citer: 1,
            dlast_sga: tcds.as_ptr().add(1) as i32,
            // ESG (scatter/gather) + INTMAJOR
            csr: 0x0012,
            biter: 1,
        };

        // TCD1: copy SRC[4..12] -> DST[4..12]; final descriptor in the chain.
        tcds[1] = Tcd {
            saddr: src_ptr.add(4) as u32,
            soff: 4,
            attr: 0x0202, // 32-bit src/dst
            nbytes: 8 * 4, // remaining 8 words
            slast: 0,
            daddr: dst_ptr.add(4) as u32,
            doff: 4,
            citer: 1,
            dlast_sga: 0,
            // Last TCD: INTMAJOR only, no ESG
            csr: 0x0002,
            biter: 1,
        };

        // Load TCD0 into hardware registers
        dma_ch0.load_tcd(&tcds[0]);
    }

    defmt::info!("Triggering first half transfer...");

    let tcd = dma_ch0.tcd();

    // Trigger first transfer (first half: SRC[0..4] -> DST[0..4])
    // TCD0 is currently loaded.
    unsafe {
        dma_ch0.trigger_start();
    }

    // Wait for first half: with ESG=1 DONE is cleared when the next TCD loads,
    // so poll SADDR instead — it changes once the engine fetches TCD1.
    loop {
        if tcd.tcd_saddr().read().bits() != src_ptr as u32 {
            defmt::info!("saddr: {=u32}", tcd.tcd_saddr().read().bits());
            defmt::info!("srptr: {=u32}", src_ptr as u32);
            break;
        }
    }

    defmt::info!("First half transferred.");
    defmt::info!("Triggering second half transfer...");

    // Trigger second transfer (second half: SRC[4..12] -> DST[4..12])
    // TCD1 should have been loaded by the scatter/gather engine.
    unsafe {
        dma_ch0.trigger_start();
    }

    // Wait for second half; TCD1 has no ESG, so DONE is left set on completion.
    while !dma_ch0.is_done() {
        cortex_m::asm::nop();
    }

    defmt::info!("Second half transferred.");

    defmt::info!("EDMA scatter-gather transfer example finish.");
    defmt::info!("Destination Buffer (after): {=[?]}", dst.as_slice());

    // Verify: DST should match SRC
    let mismatch = src != dst;

    if mismatch {
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        defmt::info!("PASS: Data verified.");
    }
}