aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--examples/src/bin/dma_channel_link.rs396
-rw-r--r--examples/src/bin/dma_interleave_transfer.rs226
-rw-r--r--examples/src/bin/dma_mem_to_mem.rs248
-rw-r--r--examples/src/bin/dma_memset.rs232
-rw-r--r--examples/src/bin/dma_ping_pong_transfer.rs384
-rw-r--r--examples/src/bin/dma_scatter_gather.rs281
-rw-r--r--examples/src/bin/dma_scatter_gather_builder.rs244
-rw-r--r--examples/src/bin/dma_wrap_transfer.rs231
-rw-r--r--examples/src/bin/lpuart_dma.rs127
-rw-r--r--examples/src/bin/lpuart_ring_buffer.rs162
-rw-r--r--src/clocks/mod.rs7
-rw-r--r--src/dma.rs2467
-rw-r--r--src/interrupt.rs2
-rw-r--r--src/lib.rs9
-rw-r--r--src/lpuart/mod.rs421
-rw-r--r--src/pins.rs5
16 files changed, 5415 insertions, 27 deletions
diff --git a/examples/src/bin/dma_channel_link.rs b/examples/src/bin/dma_channel_link.rs
new file mode 100644
index 000000000..d585f8e3a
--- /dev/null
+++ b/examples/src/bin/dma_channel_link.rs
@@ -0,0 +1,396 @@
1//! DMA channel linking example for MCXA276.
2//!
3//! This example demonstrates DMA channel linking (minor and major loop linking):
4//! - Channel 0: Transfers SRC_BUFFER to DEST_BUFFER0, with:
5//! - Minor Link to Channel 1 (triggers CH1 after each minor loop)
6//! - Major Link to Channel 2 (triggers CH2 after major loop completes)
7//! - Channel 1: Transfers SRC_BUFFER to DEST_BUFFER1 (triggered by CH0 minor link)
8//! - Channel 2: Transfers SRC_BUFFER to DEST_BUFFER2 (triggered by CH0 major link)
9//!
10//! # Embassy-style features demonstrated:
11//! - `dma::edma_tcd()` accessor for simplified register access
12//! - `DmaChannel::new()` for channel creation
13//! - `DmaChannel::is_done()` and `clear_done()` helper methods
14//! - Channel linking with `set_minor_link()` and `set_major_link()`
15
16#![no_std]
17#![no_main]
18
19use core::sync::atomic::{AtomicBool, Ordering};
20use embassy_executor::Spawner;
21use embassy_mcxa::clocks::config::Div8;
22use embassy_mcxa::clocks::Gate;
23use embassy_mcxa::dma::{edma_tcd, DmaChannel};
24use embassy_mcxa::{bind_interrupts, dma};
25use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
26use embassy_mcxa::pac;
27use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
28
// Buffers
// DMA source/destination buffers. They must live in RAM with static addresses
// because the eDMA engine reads/writes them by physical address behind the
// compiler's back; `static mut` is accessed only via raw pointers below.
static mut SRC_BUFFER: [u32; 4] = [1, 2, 3, 4];     // data CH0/CH1/CH2 all copy from
static mut DEST_BUFFER0: [u32; 4] = [0; 4];          // written by CH0
static mut DEST_BUFFER1: [u32; 4] = [0; 4];          // written by CH1 (minor-link target)
static mut DEST_BUFFER2: [u32; 4] = [0; 4];          // written by CH2 (major-link target)

// Set by the CH2 interrupt handler (Release) and polled by main (Acquire).
static DMA_CH2_DONE: AtomicBool = AtomicBool::new(false);
36
37// Custom DMA interrupt handlers for channel linking
38// CH0 and CH1 just clear flags, CH2 signals completion
39
/// Interrupt handler for DMA channel 0 (the link-source channel).
/// Only acknowledges flags; completion is observed by main via polling.
pub struct Ch0Handler;
impl embassy_mcxa::interrupt::typelevel::Handler<embassy_mcxa::interrupt::typelevel::DMA_CH0> for Ch0Handler {
    unsafe fn on_interrupt() {
        let edma = edma_tcd();
        // Acknowledge the channel interrupt request (write-1-to-clear).
        edma.tcd(0).ch_int().write(|w| w.int().clear_bit_by_one());
        // If the major loop finished, clear DONE so the channel can be retriggered.
        if edma.tcd(0).ch_csr().read().done().bit_is_set() {
            edma.tcd(0).ch_csr().write(|w| w.done().clear_bit_by_one());
        }
    }
}
50
/// Interrupt handler for DMA channel 1 (triggered via CH0's minor-loop link).
/// Only acknowledges flags; main polls `is_done()` for completion.
pub struct Ch1Handler;
impl embassy_mcxa::interrupt::typelevel::Handler<embassy_mcxa::interrupt::typelevel::DMA_CH1> for Ch1Handler {
    unsafe fn on_interrupt() {
        let edma = edma_tcd();
        // Acknowledge the channel interrupt request (write-1-to-clear).
        edma.tcd(1).ch_int().write(|w| w.int().clear_bit_by_one());
        // Clear DONE if set so the channel returns to an idle, retriggerable state.
        if edma.tcd(1).ch_csr().read().done().bit_is_set() {
            edma.tcd(1).ch_csr().write(|w| w.done().clear_bit_by_one());
        }
    }
}
61
/// Interrupt handler for DMA channel 2 (triggered via CH0's major-loop link).
/// Acknowledges flags, then signals main through `DMA_CH2_DONE`.
pub struct Ch2Handler;
impl embassy_mcxa::interrupt::typelevel::Handler<embassy_mcxa::interrupt::typelevel::DMA_CH2> for Ch2Handler {
    unsafe fn on_interrupt() {
        let edma = edma_tcd();
        // Acknowledge the channel interrupt request (write-1-to-clear).
        edma.tcd(2).ch_int().write(|w| w.int().clear_bit_by_one());
        // Clear DONE if the major loop completed.
        if edma.tcd(2).ch_csr().read().done().bit_is_set() {
            edma.tcd(2).ch_csr().write(|w| w.done().clear_bit_by_one());
        }
        // Release pairs with the Acquire load in main's busy-wait loop.
        DMA_CH2_DONE.store(true, Ordering::Release);
    }
}
73
// Route the three DMA channel IRQ vectors to the custom handlers above.
bind_interrupts!(struct Irqs {
    DMA_CH0 => Ch0Handler;
    DMA_CH1 => Ch1Handler;
    DMA_CH2 => Ch2Handler;
});
79
80/// Helper to write a u32 as decimal ASCII to UART
81fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
82 let mut buf = [0u8; 10];
83 let mut n = val;
84 let mut i = buf.len();
85
86 if n == 0 {
87 tx.blocking_write(b"0").ok();
88 return;
89 }
90
91 while n > 0 {
92 i -= 1;
93 buf[i] = b'0' + (n % 10) as u8;
94 n /= 10;
95 }
96
97 tx.blocking_write(&buf[i..]).ok();
98}
99
100/// Helper to print a buffer to UART
101fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
102 tx.blocking_write(b"[").ok();
103 unsafe {
104 for i in 0..len {
105 write_u32(tx, *buf_ptr.add(i));
106 if i < len - 1 {
107 tx.blocking_write(b", ").ok();
108 }
109 }
110 }
111 tx.blocking_write(b"]").ok();
112}
113
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Clock setup: enable the 12 MHz FRO and run FRO_LF undivided
    // (needed by LPUART and the DMA engine).
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA channel link example starting...");

    // Enable DMA0 clock and release reset
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    let pac_periphs = unsafe { pac::Peripherals::steal() };

    unsafe {
        dma::init(&pac_periphs);
    }

    // Use edma_tcd() accessor instead of passing register block around
    let edma = edma_tcd();
    let dma0 = &pac_periphs.dma0;

    // Clear any residual state on channels 0..2
    // (INT/ES/DONE flags are write-1-to-clear; MUX 0 = no hardware request).
    for i in 0..3 {
        let t = edma.tcd(i);
        t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_mux().write(|w| unsafe { w.bits(0) });
    }

    // Clear Global Halt/Error state
    dma0.mp_csr().modify(|_, w| {
        w.halt().normal_operation()
            .hae().normal_operation()
            .ecx().normal_operation()
            .cx().normal_operation()
    });

    // Unmask the three channel IRQs in the NVIC (handlers bound via `Irqs`).
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH1);
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH2);
    }

    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: false,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA channel link example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        SRC_BUFFER = [1, 2, 3, 4];
        DEST_BUFFER0 = [0; 4];
        DEST_BUFFER1 = [0; 4];
        DEST_BUFFER2 = [0; 4];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC_BUFFER) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"DEST0 (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER0) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"DEST1 (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER1) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"DEST2 (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER2) as *const u32, 4);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA channels with Embassy-style API...\r\n")
        .unwrap();

    let ch0 = DmaChannel::new(p.DMA_CH0);
    let ch1 = DmaChannel::new(p.DMA_CH1);
    let _ch2 = DmaChannel::new(p.DMA_CH2);

    // Configure channels using direct TCD access (advanced feature demo)
    // This example demonstrates channel linking which requires direct TCD manipulation

    /// Program one channel's TCD for a software-triggered memory-to-memory
    /// transfer.
    ///
    /// - `ch`: channel index into the TCD array
    /// - `src`/`dst`: byte addresses of the buffers
    /// - `width`: element size in bytes (1, 2, or 4)
    /// - `nbytes`: bytes moved per minor loop
    /// - `count`: major loop iteration count
    /// - `enable_int`: raise the channel IRQ when the major loop completes
    ///
    /// # Safety
    /// `src`/`dst` must point to valid, DMA-reachable memory of at least
    /// `nbytes * count` bytes, and the channel must be idle.
    #[allow(clippy::too_many_arguments)]
    unsafe fn configure_tcd(
        edma: &embassy_mcxa::pac::edma_0_tcd0::RegisterBlock,
        ch: usize,
        src: u32,
        dst: u32,
        width: u8,
        nbytes: u32,
        count: u16,
        enable_int: bool,
    ) {
        let t = edma.tcd(ch);

        // Reset channel state
        t.ch_csr().write(|w| {
            w.erq().disable()
                .earq().disable()
                .eei().no_error()
                .ebw().disable()
                .done().clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source/destination addresses
        t.tcd_saddr().write(|w| w.saddr().bits(src));
        t.tcd_daddr().write(|w| w.daddr().bits(dst));

        // Offsets: increment by width
        t.tcd_soff().write(|w| w.soff().bits(width as u16));
        t.tcd_doff().write(|w| w.doff().bits(width as u16));

        // Attributes: size = log2(width)
        let size = match width {
            1 => 0,
            2 => 1,
            4 => 2,
            _ => 0, // unsupported widths fall back to byte transfers
        };
        t.tcd_attr().write(|w| w.ssize().bits(size).dsize().bits(size));

        // Number of bytes per minor loop
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));

        // Major loop: reset source address after major loop
        // (negative adjustment encoded as two's-complement u32).
        let total_bytes = nbytes * count as u32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(-(total_bytes as i32) as u32));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(-(total_bytes as i32) as u32));

        // Major loop count (CITER must start equal to BITER)
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count));

        // Control/status: enable interrupt if requested
        if enable_int {
            t.tcd_csr().write(|w| w.intmajor().set_bit());
        } else {
            t.tcd_csr().write(|w| w.intmajor().clear_bit());
        }

        // Ensure all TCD writes complete before the channel can be triggered.
        cortex_m::asm::dsb();
    }

    unsafe {

        // Channel 0: Transfer 16 bytes total (8 bytes per minor loop, 2 major iterations)
        // Minor Link -> Channel 1
        // Major Link -> Channel 2
        configure_tcd(
            edma,
            0,
            core::ptr::addr_of!(SRC_BUFFER) as u32,
            core::ptr::addr_of_mut!(DEST_BUFFER0) as u32,
            4, // src width
            8, // nbytes (minor loop = 2 words)
            2, // count (major loop = 2 iterations)
            false, // no interrupt
        );
        ch0.set_minor_link(edma, 1); // Link to CH1 after each minor loop
        ch0.set_major_link(edma, 2); // Link to CH2 after major loop

        // Channel 1: Transfer 16 bytes (triggered by CH0 minor link)
        configure_tcd(
            edma,
            1,
            core::ptr::addr_of!(SRC_BUFFER) as u32,
            core::ptr::addr_of_mut!(DEST_BUFFER1) as u32,
            4,
            16, // full buffer in one minor loop
            1, // 1 major iteration
            false,
        );

        // Channel 2: Transfer 16 bytes (triggered by CH0 major link)
        configure_tcd(
            edma,
            2,
            core::ptr::addr_of!(SRC_BUFFER) as u32,
            core::ptr::addr_of_mut!(DEST_BUFFER2) as u32,
            4,
            16, // full buffer in one minor loop
            1, // 1 major iteration
            true, // enable interrupt
        );
    }

    tx.blocking_write(b"Triggering Channel 0 (1st minor loop)...\r\n").unwrap();

    // Trigger first minor loop of CH0
    unsafe { ch0.trigger_start(edma); }

    // Wait for CH1 to complete (triggered by CH0 minor link)
    while !ch1.is_done(edma) {
        cortex_m::asm::nop();
    }
    unsafe { ch1.clear_done(edma); }

    tx.blocking_write(b"CH1 done (via minor link).\r\n").unwrap();
    tx.blocking_write(b"Triggering Channel 0 (2nd minor loop)...\r\n").unwrap();

    // Trigger second minor loop of CH0
    unsafe { ch0.trigger_start(edma); }

    // Wait for CH0 major loop to complete
    while !ch0.is_done(edma) {
        cortex_m::asm::nop();
    }
    unsafe { ch0.clear_done(edma); }

    tx.blocking_write(b"CH0 major loop done.\r\n").unwrap();

    // Wait for CH2 to complete (triggered by CH0 major link);
    // the flag is set by Ch2Handler with Release ordering.
    while !DMA_CH2_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }

    tx.blocking_write(b"CH2 done (via major link).\r\n\r\n").unwrap();

    tx.blocking_write(b"EDMA channel link example finish.\r\n\r\n")
        .unwrap();

    tx.blocking_write(b"DEST0 (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER0) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"DEST1 (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER1) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"DEST2 (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER2) as *const u32, 4);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify all buffers match source
    let mut success = true;
    unsafe {
        let src_ptr = core::ptr::addr_of!(SRC_BUFFER) as *const u32;
        let dst0_ptr = core::ptr::addr_of!(DEST_BUFFER0) as *const u32;
        let dst1_ptr = core::ptr::addr_of!(DEST_BUFFER1) as *const u32;
        let dst2_ptr = core::ptr::addr_of!(DEST_BUFFER2) as *const u32;

        for i in 0..4 {
            if *dst0_ptr.add(i) != *src_ptr.add(i) { success = false; }
            if *dst1_ptr.add(i) != *src_ptr.add(i) { success = false; }
            if *dst2_ptr.add(i) != *src_ptr.add(i) { success = false; }
        }
    }

    if success {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    } else {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    }

    // Example done; sleep forever.
    loop {
        cortex_m::asm::wfe();
    }
}
396
diff --git a/examples/src/bin/dma_interleave_transfer.rs b/examples/src/bin/dma_interleave_transfer.rs
new file mode 100644
index 000000000..710f18de3
--- /dev/null
+++ b/examples/src/bin/dma_interleave_transfer.rs
@@ -0,0 +1,226 @@
1//! DMA interleaved transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA with custom source/destination offsets
4//! to interleave data during transfer.
5//!
6//! # Embassy-style features demonstrated:
7//! - `dma::edma_tcd()` accessor for simplified register access
8//! - `TransferOptions::default()` for configuration (used internally)
9//! - DMA channel with `DmaChannel::new()`
10
11#![no_std]
12#![no_main]
13
14use embassy_executor::Spawner;
15use embassy_mcxa::clocks::config::Div8;
16use embassy_mcxa::clocks::Gate;
17use embassy_mcxa::dma::{edma_tcd, DmaChannel, DmaCh0InterruptHandler};
18use embassy_mcxa::{bind_interrupts, dma};
19use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
20use embassy_mcxa::pac;
21use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
22
// Bind DMA channel 0 interrupt using Embassy-style macro
// (routes the DMA_CH0 vector to the HAL-provided handler).
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
});
27
const BUFFER_LENGTH: usize = 16;                      // destination word count
const HALF_BUFF_LENGTH: usize = BUFFER_LENGTH / 2;    // source word count

// Buffers in RAM
// Source is half the size of the destination: the interleaved transfer
// writes one source word into every other destination slot.
static mut SRC_BUFFER: [u32; HALF_BUFF_LENGTH] = [0; HALF_BUFF_LENGTH];
static mut DEST_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];
34
35/// Helper to write a u32 as decimal ASCII to UART
36fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
37 let mut buf = [0u8; 10];
38 let mut n = val;
39 let mut i = buf.len();
40
41 if n == 0 {
42 tx.blocking_write(b"0").ok();
43 return;
44 }
45
46 while n > 0 {
47 i -= 1;
48 buf[i] = b'0' + (n % 10) as u8;
49 n /= 10;
50 }
51
52 tx.blocking_write(&buf[i..]).ok();
53}
54
55/// Helper to print a buffer to UART
56fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
57 tx.blocking_write(b"[").ok();
58 unsafe {
59 for i in 0..len {
60 write_u32(tx, *buf_ptr.add(i));
61 if i < len - 1 {
62 tx.blocking_write(b", ").ok();
63 }
64 }
65 }
66 tx.blocking_write(b"]").ok();
67}
68
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Clock setup: enable the 12 MHz FRO and run FRO_LF undivided.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA interleave transfer example starting...");

    // Enable DMA0 clock and release reset
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    let pac_periphs = unsafe { pac::Peripherals::steal() };

    unsafe {
        dma::init(&pac_periphs);
    }

    // Enable DMA interrupt
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: false,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA interleave transfer example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        SRC_BUFFER = [1, 2, 3, 4, 5, 6, 7, 8];
        DEST_BUFFER = [0; BUFFER_LENGTH];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC_BUFFER) as *const u32, HALF_BUFF_LENGTH);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER) as *const u32, BUFFER_LENGTH);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
        .unwrap();

    // Create DMA channel using Embassy-style API
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Use edma_tcd() accessor instead of passing register block around
    let edma = edma_tcd();

    // Configure interleaved transfer using direct TCD access:
    // - src_offset = 4: advance source by 4 bytes after each read
    // - dst_offset = 8: advance dest by 8 bytes after each write
    // This spreads source data across every other word in destination
    unsafe {
        let t = edma.tcd(0);

        // Reset channel state (flags are write-1-to-clear).
        t.ch_csr().write(|w| {
            w.erq().disable()
                .earq().disable()
                .eei().no_error()
                .ebw().disable()
                .done().clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source/destination addresses
        t.tcd_saddr().write(|w| w.saddr().bits(core::ptr::addr_of_mut!(SRC_BUFFER) as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(core::ptr::addr_of_mut!(DEST_BUFFER) as u32));

        // Custom offsets for interleaving
        t.tcd_soff().write(|w| w.soff().bits(4)); // src: +4 bytes per read
        t.tcd_doff().write(|w| w.doff().bits(8)); // dst: +8 bytes per write

        // Attributes: 32-bit transfers (size = 2)
        t.tcd_attr().write(|w| w.ssize().bits(2).dsize().bits(2));

        // Transfer entire source buffer in one minor loop
        let nbytes = (HALF_BUFF_LENGTH * 4) as u32;
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));

        // Reset source address after major loop
        // (negative adjustment encoded as two's-complement u32).
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(-(nbytes as i32) as u32));
        // Destination uses 2x offset, so adjust accordingly
        let dst_total = (HALF_BUFF_LENGTH * 8) as u32;
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(-(dst_total as i32) as u32));

        // Major loop count = 1
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Enable interrupt on major loop completion
        t.tcd_csr().write(|w| w.intmajor().set_bit());

        // Ensure all TCD writes land before the software trigger.
        cortex_m::asm::dsb();

        tx.blocking_write(b"Triggering transfer...\r\n").unwrap();
        dma_ch0.trigger_start(edma);
    }

    // Wait for completion using channel helper method
    while !dma_ch0.is_done(edma) {
        cortex_m::asm::nop();
    }
    unsafe { dma_ch0.clear_done(edma); }

    tx.blocking_write(b"\r\nEDMA interleave transfer example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER) as *const u32, BUFFER_LENGTH);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: Even indices should match SRC_BUFFER[i/2], odd indices should be 0
    let mut mismatch = false;
    unsafe {
        for i in 0..BUFFER_LENGTH {
            if i % 2 == 0 {
                if DEST_BUFFER[i] != SRC_BUFFER[i / 2] {
                    mismatch = true;
                }
            } else if DEST_BUFFER[i] != 0 {
                mismatch = true;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    }

    // Example done; sleep forever.
    loop {
        cortex_m::asm::wfe();
    }
}
226
diff --git a/examples/src/bin/dma_mem_to_mem.rs b/examples/src/bin/dma_mem_to_mem.rs
new file mode 100644
index 000000000..e193e8c6a
--- /dev/null
+++ b/examples/src/bin/dma_mem_to_mem.rs
@@ -0,0 +1,248 @@
1//! DMA memory-to-memory transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA to copy data between memory buffers
4//! using the Embassy-style async API with type-safe transfers.
5//!
6//! # Embassy-style features demonstrated:
7//! - `TransferOptions` for configuration
8//! - Type-safe `mem_to_mem<u32>()` method with async `.await`
9//! - `Transfer` Future that can be `.await`ed
10//! - `Word` trait for automatic transfer width detection
11//! - `memset()` method for filling memory with a pattern
12
13#![no_std]
14#![no_main]
15
16use embassy_executor::Spawner;
17use embassy_mcxa::clocks::config::Div8;
18use embassy_mcxa::clocks::Gate;
19use embassy_mcxa::dma::{DmaChannel, DmaCh0InterruptHandler, TransferOptions};
20use embassy_mcxa::{bind_interrupts, dma};
21use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
22use embassy_mcxa::pac;
23use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
24
// Bind DMA channel 0 interrupt using Embassy-style macro
// (routes the DMA_CH0 vector to the HAL-provided handler).
bind_interrupts!(struct Irqs {
    DMA_CH0 => DmaCh0InterruptHandler;
});
29
const BUFFER_LENGTH: usize = 4; // word count of each demo buffer

// Buffers in RAM (static mut is automatically placed in .bss/.data)
static mut SRC_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];    // mem_to_mem source
static mut DEST_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];   // mem_to_mem destination
static mut MEMSET_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH]; // filled by memset() demo
36
37/// Helper to write a u32 as decimal ASCII to UART
38fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
39 let mut buf = [0u8; 10]; // u32 max is 4294967295 (10 digits)
40 let mut n = val;
41 let mut i = buf.len();
42
43 if n == 0 {
44 tx.blocking_write(b"0").ok();
45 return;
46 }
47
48 while n > 0 {
49 i -= 1;
50 buf[i] = b'0' + (n % 10) as u8;
51 n /= 10;
52 }
53
54 tx.blocking_write(&buf[i..]).ok();
55}
56
57/// Helper to print a buffer as [v1, v2, v3, v4] to UART
58/// Takes a raw pointer to avoid warnings about shared references to mutable statics
59fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const [u32; BUFFER_LENGTH]) {
60 tx.blocking_write(b"[").ok();
61 unsafe {
62 let buf = &*buf_ptr;
63 for (i, val) in buf.iter().enumerate() {
64 write_u32(tx, *val);
65 if i < buf.len() - 1 {
66 tx.blocking_write(b", ").ok();
67 }
68 }
69 }
70 tx.blocking_write(b"]").ok();
71}
72
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Clock setup: enable the 12 MHz FRO and run FRO_LF undivided.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA memory-to-memory example starting...");

    // Enable DMA0 clock and release reset
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    // Get PAC peripherals for DMA init
    let pac_periphs = unsafe { pac::Peripherals::steal() };

    // Initialize DMA
    unsafe {
        dma::init(&pac_periphs);
    }

    // Enable DMA interrupt
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    // Create UART for debug output
    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: false,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA memory to memory example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        SRC_BUFFER = [1, 2, 3, 4];
        DEST_BUFFER = [0; BUFFER_LENGTH];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, &raw const SRC_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, &raw const DEST_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
        .unwrap();

    // Create DMA channel
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure transfer options (Embassy-style)
    // TransferOptions defaults to: complete_transfer_interrupt = true
    let options = TransferOptions::default();

    // =========================================================================
    // Part 1: Embassy-style async API demonstration (mem_to_mem)
    // =========================================================================
    //
    // Use the new type-safe `mem_to_mem<u32>()` method:
    // - Automatically determines transfer width from buffer element type (u32)
    // - Returns a `Transfer` future that can be `.await`ed
    // - Uses TransferOptions for consistent configuration
    //
    // Using async `.await` - the executor can run other tasks while waiting!

    // Perform type-safe memory-to-memory transfer using Embassy-style async API
    // SAFETY: the raw-pointer round-trip produces the only live references to
    // these static mut buffers while the transfer runs.
    unsafe {
        let src = &*core::ptr::addr_of!(SRC_BUFFER);
        let dst = &mut *core::ptr::addr_of_mut!(DEST_BUFFER);

        // Using async `.await` - the executor can run other tasks while waiting!
        let transfer = dma_ch0.mem_to_mem(src, dst, options);
        transfer.await;
    }

    tx.blocking_write(b"DMA mem-to-mem transfer complete!\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, &raw const DEST_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    // Verify data
    let mut mismatch = false;
    unsafe {
        for i in 0..BUFFER_LENGTH {
            if SRC_BUFFER[i] != DEST_BUFFER[i] {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: mem_to_mem mismatch!\r\n").unwrap();
        defmt::error!("FAIL: mem_to_mem mismatch!");
    } else {
        tx.blocking_write(b"PASS: mem_to_mem verified.\r\n\r\n").unwrap();
        defmt::info!("PASS: mem_to_mem verified.");
    }

    // =========================================================================
    // Part 2: memset() demonstration
    // =========================================================================
    //
    // The `memset()` method fills a buffer with a pattern value:
    // - Fixed source address (pattern is read repeatedly)
    // - Incrementing destination address
    // - Uses the same Transfer future pattern

    tx.blocking_write(b"--- Demonstrating memset() feature ---\r\n\r\n").unwrap();

    tx.blocking_write(b"Memset Buffer (before): ").unwrap();
    print_buffer(&mut tx, &raw const MEMSET_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    // Fill buffer with a pattern value using DMA memset
    let pattern: u32 = 0xDEADBEEF;
    tx.blocking_write(b"Filling with pattern 0xDEADBEEF...\r\n").unwrap();

    // SAFETY: same raw-pointer discipline as the mem_to_mem transfer above.
    unsafe {
        let dst = &mut *core::ptr::addr_of_mut!(MEMSET_BUFFER);

        // Using blocking_wait() for demonstration - also shows non-async usage
        let transfer = dma_ch0.memset(&pattern, dst, options);
        transfer.blocking_wait();
    }

    tx.blocking_write(b"DMA memset complete!\r\n\r\n").unwrap();
    tx.blocking_write(b"Memset Buffer (after): ").unwrap();
    print_buffer(&mut tx, &raw const MEMSET_BUFFER);
    tx.blocking_write(b"\r\n").unwrap();

    // Verify memset result
    let mut memset_ok = true;
    unsafe {
        #[allow(clippy::needless_range_loop)]
        for i in 0..BUFFER_LENGTH {
            if MEMSET_BUFFER[i] != pattern {
                memset_ok = false;
                break;
            }
        }
    }

    if !memset_ok {
        tx.blocking_write(b"FAIL: memset mismatch!\r\n").unwrap();
        defmt::error!("FAIL: memset mismatch!");
    } else {
        tx.blocking_write(b"PASS: memset verified.\r\n\r\n").unwrap();
        defmt::info!("PASS: memset verified.");
    }

    tx.blocking_write(b"=== All DMA tests complete ===\r\n").unwrap();

    // Example done; sleep forever.
    loop {
        cortex_m::asm::wfe();
    }
}
248
diff --git a/examples/src/bin/dma_memset.rs b/examples/src/bin/dma_memset.rs
new file mode 100644
index 000000000..b76ba988d
--- /dev/null
+++ b/examples/src/bin/dma_memset.rs
@@ -0,0 +1,232 @@
1//! DMA memset example for MCXA276.
2//!
3//! This example demonstrates using DMA to fill a buffer with a repeated pattern.
4//! The source address stays fixed while the destination increments.
5//!
6//! # Embassy-style features demonstrated:
7//! - `dma::edma_tcd()` accessor for simplified register access
8//! - `DmaChannel::is_done()` and `clear_done()` helper methods
9//! - No need to pass register block around
10
11#![no_std]
12#![no_main]
13
14use embassy_executor::Spawner;
15use embassy_mcxa::clocks::config::Div8;
16use embassy_mcxa::clocks::Gate;
17use embassy_mcxa::dma::{edma_tcd, DmaChannel, DmaCh0InterruptHandler};
18use embassy_mcxa::{bind_interrupts, dma};
19use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
20use embassy_mcxa::pac;
21use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
22
23// Bind DMA channel 0 interrupt using Embassy-style macro
24bind_interrupts!(struct Irqs {
25 DMA_CH0 => DmaCh0InterruptHandler;
26});
27
28const BUFFER_LENGTH: usize = 4;
29
30// Buffers in RAM
31static mut PATTERN: u32 = 0;
32static mut DEST_BUFFER: [u32; BUFFER_LENGTH] = [0; BUFFER_LENGTH];
33
34/// Helper to write a u32 as decimal ASCII to UART
35fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
36 let mut buf = [0u8; 10];
37 let mut n = val;
38 let mut i = buf.len();
39
40 if n == 0 {
41 tx.blocking_write(b"0").ok();
42 return;
43 }
44
45 while n > 0 {
46 i -= 1;
47 buf[i] = b'0' + (n % 10) as u8;
48 n /= 10;
49 }
50
51 tx.blocking_write(&buf[i..]).ok();
52}
53
54/// Helper to print a buffer to UART
55fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
56 tx.blocking_write(b"[").ok();
57 unsafe {
58 for i in 0..len {
59 write_u32(tx, *buf_ptr.add(i));
60 if i < len - 1 {
61 tx.blocking_write(b", ").ok();
62 }
63 }
64 }
65 tx.blocking_write(b"]").ok();
66}
67
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Enable the 12 MHz FRO and run FRO_LF undivided — clocks needed by
    // the peripherals used below.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA memset example starting...");

    // Enable DMA0 clock and release reset
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    let pac_periphs = unsafe { pac::Peripherals::steal() };

    // NOTE(review): presumably records the eDMA register block for the
    // `edma_tcd()` accessor used below — confirm against `dma::init` docs.
    unsafe {
        dma::init(&pac_periphs);
    }

    // Enable DMA interrupt
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: false,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA memset example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        PATTERN = 0xDEADBEEF;
        DEST_BUFFER = [0; BUFFER_LENGTH];
    }

    tx.blocking_write(b"Pattern value: 0x").unwrap();
    // Print pattern in hex (most-significant nibble first)
    unsafe {
        let hex_chars = b"0123456789ABCDEF";
        let mut hex_buf = [0u8; 8];
        let mut val = PATTERN;
        for i in (0..8).rev() {
            hex_buf[i] = hex_chars[(val & 0xF) as usize];
            val >>= 4;
        }
        tx.blocking_write(&hex_buf).ok();
    }
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER) as *const u32, BUFFER_LENGTH);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
        .unwrap();

    // Create DMA channel using Embassy-style API
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Use edma_tcd() accessor instead of passing register block around
    let edma = edma_tcd();

    // Configure memset transfer using direct TCD access:
    // Source stays fixed (soff = 0, reads same pattern repeatedly)
    // Destination increments (doff = 4)
    unsafe {
        let t = edma.tcd(0);

        // Reset channel state (DONE and INT are write-1-to-clear)
        t.ch_csr().write(|w| {
            w.erq().disable()
                .earq().disable()
                .eei().no_error()
                .ebw().disable()
                .done().clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source address (pattern) - fixed
        t.tcd_saddr().write(|w| w.saddr().bits(core::ptr::addr_of_mut!(PATTERN) as u32));
        // Destination address - increments
        t.tcd_daddr().write(|w| w.daddr().bits(core::ptr::addr_of_mut!(DEST_BUFFER) as u32));

        // Source offset = 0 (stays fixed), Dest offset = 4 (increments)
        t.tcd_soff().write(|w| w.soff().bits(0));
        t.tcd_doff().write(|w| w.doff().bits(4));

        // Attributes: 32-bit transfers (size = 2)
        t.tcd_attr().write(|w| w.ssize().bits(2).dsize().bits(2));

        // Transfer entire buffer in one minor loop
        let nbytes = (BUFFER_LENGTH * 4) as u32;
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));

        // Source doesn't need adjustment (stays fixed)
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        // Reset dest address after major loop (subtract total bytes written)
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(-(nbytes as i32) as u32));

        // Major loop count = 1
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Enable interrupt on major loop completion
        t.tcd_csr().write(|w| w.intmajor().set_bit());

        // Barrier so all TCD register writes complete before the trigger.
        cortex_m::asm::dsb();

        tx.blocking_write(b"Triggering transfer...\r\n").unwrap();
        dma_ch0.trigger_start(edma);
    }

    // Wait for completion using channel helper method (busy-poll of DONE)
    while !dma_ch0.is_done(edma) {
        cortex_m::asm::nop();
    }
    unsafe { dma_ch0.clear_done(edma); }

    tx.blocking_write(b"\r\nEDMA memset example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DEST_BUFFER) as *const u32, BUFFER_LENGTH);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: All elements should equal PATTERN
    let mut mismatch = false;
    unsafe {
        #[allow(clippy::needless_range_loop)]
        for i in 0..BUFFER_LENGTH {
            if DEST_BUFFER[i] != PATTERN {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    }

    // Park forever; the example is done.
    loop {
        cortex_m::asm::wfe();
    }
}
232
diff --git a/examples/src/bin/dma_ping_pong_transfer.rs b/examples/src/bin/dma_ping_pong_transfer.rs
new file mode 100644
index 000000000..13ad9782d
--- /dev/null
+++ b/examples/src/bin/dma_ping_pong_transfer.rs
@@ -0,0 +1,384 @@
1//! DMA ping-pong/double-buffer transfer example for MCXA276.
2//!
3//! This example demonstrates two approaches for ping-pong/double-buffering:
4//!
5//! ## Approach 1: Scatter/Gather with linked TCDs (manual)
6//! - Two TCDs link to each other for alternating transfers
7//! - Uses custom interrupt handler with AtomicBool flag
8//!
9//! ## Approach 2: Half-transfer interrupt with wait_half() (NEW!)
10//! - Single continuous transfer over entire buffer
11//! - Uses half-transfer interrupt to know when first half is ready
12//! - Application can process first half while second half is being filled
13//!
14//! # Embassy-style features demonstrated:
15//! - `dma::edma_tcd()` accessor for simplified register access
16//! - `DmaChannel::new()` for channel creation
17//! - Scatter/gather with linked TCDs
18//! - NEW: `wait_half()` for half-transfer interrupt handling
19
20#![no_std]
21#![no_main]
22
23use core::sync::atomic::{AtomicBool, Ordering};
24use embassy_executor::Spawner;
25use embassy_mcxa::clocks::config::Div8;
26use embassy_mcxa::clocks::Gate;
27use embassy_mcxa::dma::{edma_tcd, DmaChannel, DmaCh1InterruptHandler, Tcd, TransferOptions};
28use embassy_mcxa::{bind_interrupts, dma};
29use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
30use embassy_mcxa::pac;
31use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
32
33// Source and destination buffers for Approach 1 (scatter/gather)
34static mut SRC: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
35static mut DST: [u32; 8] = [0; 8];
36
37// Source and destination buffers for Approach 2 (wait_half)
38static mut SRC2: [u32; 8] = [0xA1, 0xA2, 0xA3, 0xA4, 0xB1, 0xB2, 0xB3, 0xB4];
39static mut DST2: [u32; 8] = [0; 8];
40
// TCD pool for scatter/gather - must be 32-byte aligned (the eDMA engine
// fetches the next descriptor from RAM via `dlast_sga`, so each in-memory
// TCD must satisfy the hardware alignment requirement).
#[repr(C, align(32))]
struct TcdPool([Tcd; 2]);

// Zero-initialized descriptor pool; both entries are fully rewritten in
// `main` before the channel is started.
static mut TCD_POOL: TcdPool = TcdPool([Tcd {
    saddr: 0,
    soff: 0,
    attr: 0,
    nbytes: 0,
    slast: 0,
    daddr: 0,
    doff: 0,
    citer: 0,
    dlast_sga: 0,
    csr: 0,
    biter: 0,
}; 2]);
58
// Completion flag: set by the ISR (Release), polled by `main` (Acquire).
static TRANSFER_DONE: AtomicBool = AtomicBool::new(false);

// Custom DMA interrupt handler for ping-pong transfer.
// We need a custom handler because we signal completion via TRANSFER_DONE flag
// and don't clear DONE bit when using Scatter/Gather (ESG=1).
pub struct PingPongDmaHandler;

impl embassy_mcxa::interrupt::typelevel::Handler<embassy_mcxa::interrupt::typelevel::DMA_CH0> for PingPongDmaHandler {
    unsafe fn on_interrupt() {
        let edma = edma_tcd();

        // Clear interrupt flag (write-1-to-clear)
        edma.tcd(0).ch_int().write(|w| w.int().clear_bit_by_one());

        // Do NOT clear DONE bit when using Scatter/Gather (ESG=1),
        // as the hardware loads the next TCD which resets the status.

        // Release pairs with the Acquire busy-wait loads in `main`.
        TRANSFER_DONE.store(true, Ordering::Release);
    }
}
79
80bind_interrupts!(struct Irqs {
81 DMA_CH0 => PingPongDmaHandler;
82 DMA_CH1 => DmaCh1InterruptHandler; // For wait_half() demo
83});
84
85/// Helper to write a u32 as decimal ASCII to UART
86fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
87 let mut buf = [0u8; 10];
88 let mut n = val;
89 let mut i = buf.len();
90
91 if n == 0 {
92 tx.blocking_write(b"0").ok();
93 return;
94 }
95
96 while n > 0 {
97 i -= 1;
98 buf[i] = b'0' + (n % 10) as u8;
99 n /= 10;
100 }
101
102 tx.blocking_write(&buf[i..]).ok();
103}
104
105/// Helper to print a buffer to UART
106fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
107 tx.blocking_write(b"[").ok();
108 unsafe {
109 for i in 0..len {
110 write_u32(tx, *buf_ptr.add(i));
111 if i < len - 1 {
112 tx.blocking_write(b", ").ok();
113 }
114 }
115 }
116 tx.blocking_write(b"]").ok();
117}
118
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA ping-pong transfer example starting...");

    // Enable DMA0 clock and release reset
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    let pac_periphs = unsafe { pac::Peripherals::steal() };

    // NOTE(review): presumably records the eDMA register block for the
    // `edma_tcd()` accessor — confirm against `dma::init` docs.
    unsafe {
        dma::init(&pac_periphs);
    }

    // Use edma_tcd() accessor instead of passing register block around
    let edma = edma_tcd();

    // Enable DMA interrupt
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: false,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA ping-pong transfer example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        SRC = [1, 2, 3, 4, 5, 6, 7, 8];
        DST = [0; 8];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring ping-pong DMA with Embassy-style API...\r\n")
        .unwrap();

    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure ping-pong transfer using direct TCD access:
    // This sets up TCD0 and TCD1 in RAM, and loads TCD0 into the channel.
    // TCD0 transfers first half (SRC[0..4] -> DST[0..4]), links to TCD1.
    // TCD1 transfers second half (SRC[4..8] -> DST[4..8]), links to TCD0.
    unsafe {
        let tcds = &mut *core::ptr::addr_of_mut!(TCD_POOL.0);
        let src_ptr = core::ptr::addr_of!(SRC) as *const u32;
        let dst_ptr = core::ptr::addr_of_mut!(DST) as *mut u32;

        let half_len = 4usize;
        let half_bytes = (half_len * 4) as u32;

        // RAM addresses of the two descriptors; each one's dlast_sga points
        // at the other so the hardware alternates between them.
        let tcd0_addr = &tcds[0] as *const _ as u32;
        let tcd1_addr = &tcds[1] as *const _ as u32;

        // TCD0: First half -> Links to TCD1
        tcds[0] = Tcd {
            saddr: src_ptr as u32,
            soff: 4,
            attr: 0x0202, // 32-bit src/dst
            nbytes: half_bytes,
            slast: 0,
            daddr: dst_ptr as u32,
            doff: 4,
            citer: 1,
            dlast_sga: tcd1_addr as i32,
            csr: 0x0012, // ESG | INTMAJOR
            biter: 1,
        };

        // TCD1: Second half -> Links to TCD0
        tcds[1] = Tcd {
            saddr: src_ptr.add(half_len) as u32,
            soff: 4,
            attr: 0x0202,
            nbytes: half_bytes,
            slast: 0,
            daddr: dst_ptr.add(half_len) as u32,
            doff: 4,
            citer: 1,
            dlast_sga: tcd0_addr as i32,
            csr: 0x0012,
            biter: 1,
        };

        // Load TCD0 into hardware registers
        dma_ch0.load_tcd(edma, &tcds[0]);
    }

    tx.blocking_write(b"Triggering first half transfer...\r\n").unwrap();

    // Trigger first transfer (first half: SRC[0..4] -> DST[0..4])
    unsafe {
        dma_ch0.trigger_start(edma);
    }

    // Wait for first half.
    // Acquire pairs with the Release store in PingPongDmaHandler.
    while !TRANSFER_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }
    TRANSFER_DONE.store(false, Ordering::Release);

    tx.blocking_write(b"First half transferred.\r\n").unwrap();
    tx.blocking_write(b"Triggering second half transfer...\r\n").unwrap();

    // Trigger second transfer (second half: SRC[4..8] -> DST[4..8])
    unsafe {
        dma_ch0.trigger_start(edma);
    }

    // Wait for second half
    while !TRANSFER_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }
    TRANSFER_DONE.store(false, Ordering::Release);

    tx.blocking_write(b"Second half transferred.\r\n\r\n").unwrap();

    tx.blocking_write(b"EDMA ping-pong transfer example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: DST should match SRC
    let mut mismatch = false;
    unsafe {
        let src_ptr = core::ptr::addr_of!(SRC) as *const u32;
        let dst_ptr = core::ptr::addr_of!(DST) as *const u32;
        for i in 0..8 {
            if *src_ptr.add(i) != *dst_ptr.add(i) {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Approach 1 mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Approach 1 mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Approach 1 data verified.\r\n\r\n").unwrap();
        defmt::info!("PASS: Approach 1 data verified.");
    }

    // =========================================================================
    // Approach 2: Half-Transfer Interrupt with wait_half() (NEW!)
    // =========================================================================
    //
    // This approach uses a single continuous DMA transfer with half-transfer
    // interrupt enabled. The wait_half() method allows you to be notified
    // when the first half of the buffer is complete, so you can process it
    // while the second half is still being filled.
    //
    // Benefits:
    // - Simpler setup (no TCD pool needed)
    // - True async/await support
    // - Good for streaming data processing

    tx.blocking_write(b"--- Approach 2: wait_half() demo ---\r\n\r\n").unwrap();

    // Enable DMA CH1 interrupt
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH1);
    }

    // Initialize approach 2 buffers
    unsafe {
        SRC2 = [0xA1, 0xA2, 0xA3, 0xA4, 0xB1, 0xB2, 0xB3, 0xB4];
        DST2 = [0; 8];
    }

    tx.blocking_write(b"SRC2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC2) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    let dma_ch1 = DmaChannel::new(p.DMA_CH1);

    // Configure transfer with half-transfer interrupt enabled
    let mut options = TransferOptions::default();
    options.half_transfer_interrupt = true; // Enable half-transfer interrupt
    options.complete_transfer_interrupt = true;

    tx.blocking_write(b"Starting transfer with half_transfer_interrupt...\r\n").unwrap();

    unsafe {
        let src = &*core::ptr::addr_of!(SRC2);
        let dst = &mut *core::ptr::addr_of_mut!(DST2);

        // Create the transfer
        let mut transfer = dma_ch1.mem_to_mem(src, dst, options);

        // Wait for half-transfer (first 4 elements)
        tx.blocking_write(b"Waiting for first half...\r\n").unwrap();
        let half_ok = transfer.wait_half().await;

        if half_ok {
            tx.blocking_write(b"Half-transfer complete! First half of DST2: ").unwrap();
            print_buffer(&mut tx, core::ptr::addr_of!(DST2) as *const u32, 4);
            tx.blocking_write(b"\r\n").unwrap();
            tx.blocking_write(b"(Processing first half while second half transfers...)\r\n").unwrap();
        }

        // Wait for complete transfer
        tx.blocking_write(b"Waiting for second half...\r\n").unwrap();
        transfer.await;
    }

    tx.blocking_write(b"Transfer complete! Full DST2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST2) as *const u32, 8);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify approach 2
    let mut mismatch2 = false;
    unsafe {
        let src_ptr = core::ptr::addr_of!(SRC2) as *const u32;
        let dst_ptr = core::ptr::addr_of!(DST2) as *const u32;
        for i in 0..8 {
            if *src_ptr.add(i) != *dst_ptr.add(i) {
                mismatch2 = true;
                break;
            }
        }
    }

    if mismatch2 {
        tx.blocking_write(b"FAIL: Approach 2 mismatch!\r\n").unwrap();
        defmt::error!("FAIL: Approach 2 mismatch!");
    } else {
        tx.blocking_write(b"PASS: Approach 2 verified.\r\n").unwrap();
        defmt::info!("PASS: Approach 2 verified.");
    }

    tx.blocking_write(b"\r\n=== All ping-pong demos complete ===\r\n").unwrap();

    // Park forever; the example is done.
    loop {
        cortex_m::asm::wfe();
    }
}
384
diff --git a/examples/src/bin/dma_scatter_gather.rs b/examples/src/bin/dma_scatter_gather.rs
new file mode 100644
index 000000000..86dd881cd
--- /dev/null
+++ b/examples/src/bin/dma_scatter_gather.rs
@@ -0,0 +1,281 @@
1//! DMA scatter-gather transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA with scatter/gather to chain multiple
4//! transfer descriptors. The first TCD transfers the first half of the buffer,
5//! then automatically loads the second TCD to transfer the second half.
6//!
7//! # Embassy-style features demonstrated:
8//! - `dma::edma_tcd()` accessor for simplified register access
9//! - `DmaChannel::new()` for channel creation
10//! - Scatter/gather with chained TCDs
11
12#![no_std]
13#![no_main]
14
15use core::sync::atomic::{AtomicBool, Ordering};
16use embassy_executor::Spawner;
17use embassy_mcxa::clocks::config::Div8;
18use embassy_mcxa::clocks::Gate;
19use embassy_mcxa::dma::{edma_tcd, DmaChannel, Tcd};
20use embassy_mcxa::{bind_interrupts, dma};
21use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
22use embassy_mcxa::pac;
23use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
24
25// Source and destination buffers
26static mut SRC: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
27static mut DST: [u32; 8] = [0; 8];
28
// TCD pool for scatter/gather - must be 32-byte aligned (the eDMA engine
// fetches the next descriptor from RAM via `dlast_sga`, so each in-memory
// TCD must satisfy the hardware alignment requirement).
#[repr(C, align(32))]
struct TcdPool([Tcd; 2]);

// Zero-initialized descriptor pool; both entries are fully rewritten in
// `main` before the channel is started.
static mut TCD_POOL: TcdPool = TcdPool([Tcd {
    saddr: 0,
    soff: 0,
    attr: 0,
    nbytes: 0,
    slast: 0,
    daddr: 0,
    doff: 0,
    citer: 0,
    dlast_sga: 0,
    csr: 0,
    biter: 0,
}; 2]);
46
// Completion flag: set by the ISR (Release), polled by `main` (Acquire).
static TRANSFER_DONE: AtomicBool = AtomicBool::new(false);

// Custom DMA interrupt handler for scatter-gather transfer.
// We need a custom handler because we signal completion via TRANSFER_DONE flag
// and need to conditionally clear DONE bit based on ESG status.
pub struct ScatterGatherDmaHandler;

impl embassy_mcxa::interrupt::typelevel::Handler<embassy_mcxa::interrupt::typelevel::DMA_CH0> for ScatterGatherDmaHandler {
    unsafe fn on_interrupt() {
        let edma = edma_tcd();

        // Clear interrupt flag (write-1-to-clear)
        edma.tcd(0).ch_int().write(|w| w.int().clear_bit_by_one());

        // If ESG=1 (Scatter/Gather), the hardware loads the next TCD and clears DONE.
        // If ESG=0 (Last TCD), DONE remains set and must be cleared.
        if edma.tcd(0).ch_csr().read().done().bit_is_set() {
            edma.tcd(0).ch_csr().write(|w| w.done().clear_bit_by_one());
        }

        // Release pairs with the Acquire busy-wait loads in `main`.
        TRANSFER_DONE.store(true, Ordering::Release);
    }
}
70
71bind_interrupts!(struct Irqs {
72 DMA_CH0 => ScatterGatherDmaHandler;
73});
74
75/// Helper to write a u32 as decimal ASCII to UART
76fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
77 let mut buf = [0u8; 10];
78 let mut n = val;
79 let mut i = buf.len();
80
81 if n == 0 {
82 tx.blocking_write(b"0").ok();
83 return;
84 }
85
86 while n > 0 {
87 i -= 1;
88 buf[i] = b'0' + (n % 10) as u8;
89 n /= 10;
90 }
91
92 tx.blocking_write(&buf[i..]).ok();
93}
94
95/// Helper to print a buffer to UART
96fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
97 tx.blocking_write(b"[").ok();
98 unsafe {
99 for i in 0..len {
100 write_u32(tx, *buf_ptr.add(i));
101 if i < len - 1 {
102 tx.blocking_write(b", ").ok();
103 }
104 }
105 }
106 tx.blocking_write(b"]").ok();
107}
108
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA scatter-gather transfer example starting...");

    // Enable DMA0 clock and release reset
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    let pac_periphs = unsafe { pac::Peripherals::steal() };

    // NOTE(review): presumably records the eDMA register block for the
    // `edma_tcd()` accessor — confirm against `dma::init` docs.
    unsafe {
        dma::init(&pac_periphs);
    }

    // Use edma_tcd() accessor instead of passing register block around
    let edma = edma_tcd();

    // Enable DMA interrupt
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: false,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA scatter-gather transfer example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers
    unsafe {
        SRC = [1, 2, 3, 4, 5, 6, 7, 8];
        DST = [0; 8];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring scatter-gather DMA with Embassy-style API...\r\n")
        .unwrap();

    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Configure scatter-gather transfer using direct TCD access:
    // This sets up TCD0 and TCD1 in RAM, and loads TCD0 into the channel.
    // TCD0 transfers first half (SRC[0..4] -> DST[0..4]), then loads TCD1.
    // TCD1 transfers second half (SRC[4..8] -> DST[4..8]), last TCD.
    unsafe {
        let tcds = core::slice::from_raw_parts_mut(
            core::ptr::addr_of_mut!(TCD_POOL.0) as *mut Tcd,
            2,
        );
        let src_ptr = core::ptr::addr_of!(SRC) as *const u32;
        let dst_ptr = core::ptr::addr_of_mut!(DST) as *mut u32;

        let num_tcds = 2usize;
        let chunk_len = 4usize; // 8 / 2
        let chunk_bytes = (chunk_len * 4) as u32;

        for i in 0..num_tcds {
            let is_last = i == num_tcds - 1;
            let next_tcd_addr = if is_last {
                0 // No next TCD
            } else {
                &tcds[i + 1] as *const _ as u32
            };

            tcds[i] = Tcd {
                saddr: src_ptr.add(i * chunk_len) as u32,
                soff: 4,
                attr: 0x0202, // 32-bit src/dst
                nbytes: chunk_bytes,
                slast: 0,
                daddr: dst_ptr.add(i * chunk_len) as u32,
                doff: 4,
                citer: 1,
                dlast_sga: next_tcd_addr as i32,
                // ESG (scatter/gather) for non-last, INTMAJOR for all
                csr: if is_last { 0x0002 } else { 0x0012 },
                biter: 1,
            };
        }

        // Load TCD0 into hardware registers
        dma_ch0.load_tcd(edma, &tcds[0]);
    }

    tx.blocking_write(b"Triggering first half transfer...\r\n").unwrap();

    // Trigger first transfer (first half: SRC[0..4] -> DST[0..4])
    // TCD0 is currently loaded.
    unsafe {
        dma_ch0.trigger_start(edma);
    }

    // Wait for first half.
    // Acquire pairs with the Release store in ScatterGatherDmaHandler.
    while !TRANSFER_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }
    TRANSFER_DONE.store(false, Ordering::Release);

    tx.blocking_write(b"First half transferred.\r\n").unwrap();
    tx.blocking_write(b"Triggering second half transfer...\r\n").unwrap();

    // Trigger second transfer (second half: SRC[4..8] -> DST[4..8])
    // TCD1 should have been loaded by the scatter/gather engine.
    unsafe {
        dma_ch0.trigger_start(edma);
    }

    // Wait for second half
    while !TRANSFER_DONE.load(Ordering::Acquire) {
        cortex_m::asm::nop();
    }
    TRANSFER_DONE.store(false, Ordering::Release);

    tx.blocking_write(b"Second half transferred.\r\n\r\n").unwrap();

    tx.blocking_write(b"EDMA scatter-gather transfer example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: DST should match SRC
    let mut mismatch = false;
    unsafe {
        let src_ptr = core::ptr::addr_of!(SRC) as *const u32;
        let dst_ptr = core::ptr::addr_of!(DST) as *const u32;
        for i in 0..8 {
            if *src_ptr.add(i) != *dst_ptr.add(i) {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    }

    // Park forever; the example is done.
    loop {
        cortex_m::asm::wfe();
    }
}
281
diff --git a/examples/src/bin/dma_scatter_gather_builder.rs b/examples/src/bin/dma_scatter_gather_builder.rs
new file mode 100644
index 000000000..078e26c60
--- /dev/null
+++ b/examples/src/bin/dma_scatter_gather_builder.rs
@@ -0,0 +1,244 @@
1//! DMA Scatter-Gather Builder example for MCXA276.
2//!
3//! This example demonstrates using the new `ScatterGatherBuilder` API for
4//! chaining multiple DMA transfers with a type-safe builder pattern.
5//!
6//! # Features demonstrated:
7//! - `ScatterGatherBuilder::new()` for creating a builder
8//! - `add_transfer()` for adding memory-to-memory segments
9//! - `build()` to start the chained transfer
10//! - Automatic TCD linking and ESG bit management
11//!
12//! # Comparison with manual scatter-gather:
13//! The manual approach (see `dma_scatter_gather.rs`) requires:
14//! - Manual TCD pool allocation and alignment
15//! - Manual CSR/ESG/INTMAJOR bit manipulation
16//! - Manual dlast_sga address calculations
17//!
18//! The builder approach handles all of this automatically!
19
20#![no_std]
21#![no_main]
22
23use embassy_executor::Spawner;
24use embassy_mcxa::clocks::config::Div8;
25use embassy_mcxa::clocks::Gate;
26use embassy_mcxa::dma::{DmaChannel, DmaCh0InterruptHandler, ScatterGatherBuilder};
27use embassy_mcxa::{bind_interrupts, dma};
28use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
29use embassy_mcxa::pac;
30use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
31
32// Bind DMA channel 0 interrupt
33bind_interrupts!(struct Irqs {
34 DMA_CH0 => DmaCh0InterruptHandler;
35});
36
37// Source buffers (multiple segments)
38static mut SRC1: [u32; 4] = [0x11111111, 0x22222222, 0x33333333, 0x44444444];
39static mut SRC2: [u32; 4] = [0xAAAAAAAA, 0xBBBBBBBB, 0xCCCCCCCC, 0xDDDDDDDD];
40static mut SRC3: [u32; 4] = [0x12345678, 0x9ABCDEF0, 0xFEDCBA98, 0x76543210];
41
42// Destination buffers (one per segment)
43static mut DST1: [u32; 4] = [0; 4];
44static mut DST2: [u32; 4] = [0; 4];
45static mut DST3: [u32; 4] = [0; 4];
46
47/// Helper to write a u32 as hex to UART
48fn write_hex(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
49 const HEX: &[u8; 16] = b"0123456789ABCDEF";
50 for i in (0..8).rev() {
51 let nibble = ((val >> (i * 4)) & 0xF) as usize;
52 tx.blocking_write(&[HEX[nibble]]).ok();
53 }
54}
55
56/// Helper to print a buffer to UART
57fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
58 tx.blocking_write(b"[").ok();
59 unsafe {
60 for i in 0..len {
61 write_hex(tx, *buf_ptr.add(i));
62 if i < len - 1 {
63 tx.blocking_write(b", ").ok();
64 }
65 }
66 }
67 tx.blocking_write(b"]").ok();
68}
69
/// Example entry point: builds a three-segment scatter-gather chain with
/// `ScatterGatherBuilder` and verifies all segments copied correctly.
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    // Bring up the 12 MHz FRO before hal::init; presumably the LPUART baud
    // generator is clocked from it — confirm against the clock tree config.
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA Scatter-Gather Builder example starting...");

    // Enable DMA0 clock and release reset.
    // Must happen before dma::init() touches the eDMA registers.
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    let pac_periphs = unsafe { pac::Peripherals::steal() };

    // Initialize DMA
    unsafe {
        dma::init(&pac_periphs);
    }

    // Enable DMA interrupt (CH0 drives the Transfer future's waker)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    // Create UART for debug output
    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: false,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"DMA Scatter-Gather Builder Example\r\n").unwrap();
    tx.blocking_write(b"===================================\r\n\r\n").unwrap();

    // Show source buffers
    tx.blocking_write(b"Source buffers:\r\n").unwrap();
    tx.blocking_write(b" SRC1: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC1) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" SRC2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC2) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" SRC3: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(SRC3) as *const u32, 4);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    tx.blocking_write(b"Destination buffers (before):\r\n").unwrap();
    tx.blocking_write(b" DST1: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST1) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" DST2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST2) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" DST3: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST3) as *const u32, 4);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Create DMA channel
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    tx.blocking_write(b"Building scatter-gather chain with builder API...\r\n").unwrap();

    // =========================================================================
    // ScatterGatherBuilder API demonstration
    // =========================================================================
    //
    // The builder pattern makes scatter-gather transfers much easier:
    // 1. Create a builder
    // 2. Add transfer segments with add_transfer()
    // 3. Call build() to start the entire chain
    // No manual TCD manipulation required!

    let mut builder = ScatterGatherBuilder::<u32>::new();

    // Add three transfer segments - the builder handles TCD linking automatically
    // (addr_of!/addr_of_mut! avoid creating intermediate references to
    // `static mut`, which would be instant UB under current Rust rules)
    unsafe {
        let src1 = &*core::ptr::addr_of!(SRC1);
        let dst1 = &mut *core::ptr::addr_of_mut!(DST1);
        builder.add_transfer(src1, dst1);
    }

    unsafe {
        let src2 = &*core::ptr::addr_of!(SRC2);
        let dst2 = &mut *core::ptr::addr_of_mut!(DST2);
        builder.add_transfer(src2, dst2);
    }

    unsafe {
        let src3 = &*core::ptr::addr_of!(SRC3);
        let dst3 = &mut *core::ptr::addr_of_mut!(DST3);
        builder.add_transfer(src3, dst3);
    }

    tx.blocking_write(b"Added 3 transfer segments to chain.\r\n").unwrap();
    tx.blocking_write(b"Starting scatter-gather transfer with .await...\r\n\r\n").unwrap();

    // Build and execute the scatter-gather chain
    // The build() method:
    // - Links all TCDs together with ESG bit
    // - Sets INTMAJOR on all TCDs
    // - Loads the first TCD into hardware
    // - Returns a Transfer future
    // NOTE(review): despite the prompt above, this example busy-waits via
    // blocking_wait() rather than .await-ing the future.
    unsafe {
        let transfer = builder.build(&dma_ch0).expect("Failed to build scatter-gather");
        transfer.blocking_wait();
    }

    tx.blocking_write(b"Scatter-gather transfer complete!\r\n\r\n").unwrap();

    // Show results
    tx.blocking_write(b"Destination buffers (after):\r\n").unwrap();
    tx.blocking_write(b" DST1: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST1) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" DST2: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST2) as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();
    tx.blocking_write(b" DST3: ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST3) as *const u32, 4);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify all three segments by raw-pointer comparison (avoids holding
    // references to static muts while comparing)
    let mut all_ok = true;
    unsafe {
        let src1 = core::ptr::addr_of!(SRC1) as *const u32;
        let dst1 = core::ptr::addr_of!(DST1) as *const u32;
        for i in 0..4 {
            if *src1.add(i) != *dst1.add(i) {
                all_ok = false;
            }
        }

        let src2 = core::ptr::addr_of!(SRC2) as *const u32;
        let dst2 = core::ptr::addr_of!(DST2) as *const u32;
        for i in 0..4 {
            if *src2.add(i) != *dst2.add(i) {
                all_ok = false;
            }
        }

        let src3 = core::ptr::addr_of!(SRC3) as *const u32;
        let dst3 = core::ptr::addr_of!(DST3) as *const u32;
        for i in 0..4 {
            if *src3.add(i) != *dst3.add(i) {
                all_ok = false;
            }
        }
    }

    if all_ok {
        tx.blocking_write(b"PASS: All segments verified!\r\n").unwrap();
        defmt::info!("PASS: All segments verified!");
    } else {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    }

    tx.blocking_write(b"\r\n=== Scatter-Gather Builder example complete ===\r\n").unwrap();

    // Park the core; wfe keeps power low while leaving the debugger attached.
    loop {
        cortex_m::asm::wfe();
    }
}
diff --git a/examples/src/bin/dma_wrap_transfer.rs b/examples/src/bin/dma_wrap_transfer.rs
new file mode 100644
index 000000000..b115a2c19
--- /dev/null
+++ b/examples/src/bin/dma_wrap_transfer.rs
@@ -0,0 +1,231 @@
1//! DMA wrap transfer example for MCXA276.
2//!
3//! This example demonstrates using DMA with modulo addressing to wrap around
4//! a source buffer, effectively repeating the source data in the destination.
5//!
6//! # Embassy-style features demonstrated:
7//! - `dma::edma_tcd()` accessor for simplified register access
8//! - `DmaChannel::is_done()` and `clear_done()` helper methods
9//! - No need to pass register block around
10
11#![no_std]
12#![no_main]
13
14use embassy_executor::Spawner;
15use embassy_mcxa::clocks::config::Div8;
16use embassy_mcxa::clocks::Gate;
17use embassy_mcxa::dma::{edma_tcd, DmaChannel, DmaCh0InterruptHandler};
18use embassy_mcxa::{bind_interrupts, dma};
19use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
20use embassy_mcxa::pac;
21use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
22
23// Bind DMA channel 0 interrupt using Embassy-style macro
24bind_interrupts!(struct Irqs {
25 DMA_CH0 => DmaCh0InterruptHandler;
26});
27
// Source buffer: 4 words (16 bytes), aligned to 16 bytes for modulo.
// The 16-byte alignment is mandatory: the eDMA source-modulo feature
// (SMOD = 4, i.e. a 2^4 = 16-byte window) wraps addresses within a
// *naturally aligned* region, so an unaligned buffer would wrap mid-data.
#[repr(align(16))]
struct AlignedSrc([u32; 4]);
31
32static mut SRC: AlignedSrc = AlignedSrc([0; 4]);
33static mut DST: [u32; 8] = [0; 8];
34
35/// Helper to write a u32 as decimal ASCII to UART
36fn write_u32(tx: &mut LpuartTx<'_, Blocking>, val: u32) {
37 let mut buf = [0u8; 10];
38 let mut n = val;
39 let mut i = buf.len();
40
41 if n == 0 {
42 tx.blocking_write(b"0").ok();
43 return;
44 }
45
46 while n > 0 {
47 i -= 1;
48 buf[i] = b'0' + (n % 10) as u8;
49 n /= 10;
50 }
51
52 tx.blocking_write(&buf[i..]).ok();
53}
54
55/// Helper to print a buffer to UART
56fn print_buffer(tx: &mut LpuartTx<'_, Blocking>, buf_ptr: *const u32, len: usize) {
57 tx.blocking_write(b"[").ok();
58 unsafe {
59 for i in 0..len {
60 write_u32(tx, *buf_ptr.add(i));
61 if i < len - 1 {
62 tx.blocking_write(b", ").ok();
63 }
64 }
65 }
66 tx.blocking_write(b"]").ok();
67}
68
/// Example entry point: programs channel 0's TCD directly to demonstrate
/// source-modulo ("wrap") addressing, then verifies the destination holds
/// two repetitions of the 4-word source.
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("DMA wrap transfer example starting...");

    // Enable DMA0 clock and release reset (must precede dma::init)
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    let pac_periphs = unsafe { pac::Peripherals::steal() };

    unsafe {
        dma::init(&pac_periphs);
    }

    // Enable DMA interrupt
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
    }

    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: false,
        ..Default::default()
    };

    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"EDMA wrap transfer example begin.\r\n\r\n")
        .unwrap();

    // Initialize buffers (statics, so the DMA can address them without
    // lifetime concerns)
    unsafe {
        SRC.0 = [1, 2, 3, 4];
        DST = [0; 8];
    }

    tx.blocking_write(b"Source Buffer: ").unwrap();
    print_buffer(&mut tx, unsafe { core::ptr::addr_of!(SRC.0) } as *const u32, 4);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Destination Buffer (before): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n").unwrap();

    tx.blocking_write(b"Configuring DMA with Embassy-style API...\r\n")
        .unwrap();

    // Create DMA channel using Embassy-style API
    let dma_ch0 = DmaChannel::new(p.DMA_CH0);

    // Use edma_tcd() accessor instead of passing register block around
    let edma = edma_tcd();

    // Configure wrap transfer using direct TCD access:
    // SRC is 16 bytes (4 * u32). We want to transfer 32 bytes (8 * u32).
    // SRC modulo is 16 bytes (2^4 = 16) - wraps source address.
    // DST modulo is 0 (disabled).
    // This causes the source address to wrap around after 16 bytes,
    // effectively repeating the source data.
    unsafe {
        let t = edma.tcd(0);

        // Reset channel state: clear request enables and any stale DONE flag
        t.ch_csr().write(|w| {
            w.erq().disable()
                .earq().disable()
                .eei().no_error()
                .ebw().disable()
                .done().clear_bit_by_one()
        });
        // Clear any latched error and pending interrupt from a prior run
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source/destination addresses
        t.tcd_saddr().write(|w| w.saddr().bits(core::ptr::addr_of!(SRC.0) as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(core::ptr::addr_of_mut!(DST) as u32));

        // Offsets: both increment by 4 bytes (one u32 per read/write)
        t.tcd_soff().write(|w| w.soff().bits(4));
        t.tcd_doff().write(|w| w.doff().bits(4));

        // Attributes: 32-bit transfers (size = 2)
        // SMOD = 4 (2^4 = 16 byte modulo for source), DMOD = 0 (disabled)
        t.tcd_attr().write(|w| {
            w.ssize().bits(2)
                .dsize().bits(2)
                .smod().bits(4) // Source modulo: 2^4 = 16 bytes
                .dmod().bits(0) // Dest modulo: disabled
        });

        // Transfer 32 bytes total in one minor loop
        let nbytes = 32u32;
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(nbytes));

        // Source wraps via modulo, no adjustment needed
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        // Reset dest address after major loop (two's-complement of nbytes)
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(-(nbytes as i32) as u32));

        // Major loop count = 1 (BITER and CITER must match at start)
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Enable interrupt on major loop completion
        t.tcd_csr().write(|w| w.intmajor().set_bit());

        // Make sure all TCD writes have landed before triggering
        cortex_m::asm::dsb();

        tx.blocking_write(b"Triggering transfer...\r\n").unwrap();
        dma_ch0.trigger_start(edma);
    }

    // Wait for completion using channel helper method
    while !dma_ch0.is_done(edma) {
        cortex_m::asm::nop();
    }
    unsafe { dma_ch0.clear_done(edma); }

    tx.blocking_write(b"\r\nEDMA wrap transfer example finish.\r\n\r\n")
        .unwrap();
    tx.blocking_write(b"Destination Buffer (after): ").unwrap();
    print_buffer(&mut tx, core::ptr::addr_of!(DST) as *const u32, 8);
    tx.blocking_write(b"\r\n\r\n").unwrap();

    // Verify: DST should be [1, 2, 3, 4, 1, 2, 3, 4]
    let expected = [1u32, 2, 3, 4, 1, 2, 3, 4];
    let mut mismatch = false;
    unsafe {
        for i in 0..8 {
            if DST[i] != expected[i] {
                mismatch = true;
                break;
            }
        }
    }

    if mismatch {
        tx.blocking_write(b"FAIL: Mismatch detected!\r\n").unwrap();
        defmt::error!("FAIL: Mismatch detected!");
    } else {
        tx.blocking_write(b"PASS: Data verified.\r\n").unwrap();
        defmt::info!("PASS: Data verified.");
    }

    loop {
        cortex_m::asm::wfe();
    }
}
231
diff --git a/examples/src/bin/lpuart_dma.rs b/examples/src/bin/lpuart_dma.rs
new file mode 100644
index 000000000..5ccf97ecc
--- /dev/null
+++ b/examples/src/bin/lpuart_dma.rs
@@ -0,0 +1,127 @@
1//! LPUART DMA example for MCXA276.
2//!
3//! This example demonstrates using DMA for UART TX and RX operations.
4//! It sends a message using DMA, then waits for 16 characters to be received
5//! via DMA and echoes them back.
6
7#![no_std]
8#![no_main]
9
10use embassy_executor::Spawner;
11use embassy_mcxa::clocks::config::Div8;
12use embassy_mcxa::clocks::Gate;
13use embassy_mcxa::dma::{self, DMA_REQ_LPUART2_RX, DMA_REQ_LPUART2_TX};
14use embassy_mcxa::lpuart::{Config, LpuartDma};
15use embassy_mcxa::pac;
16use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
17
// DMA interrupt handlers.
//
// These are raw vector-table entries wired via #[no_mangle] (other examples
// use the `bind_interrupts!` macro instead — NOTE(review): consider unifying).
// Each forwards its hardware channel index to the shared dispatcher.
#[no_mangle]
pub extern "C" fn DMA_CH0() {
    // Channel 0 is the TX DMA channel in this example.
    unsafe { dma::on_interrupt(0) };
}

#[no_mangle]
pub extern "C" fn DMA_CH1() {
    // Channel 1 is the RX DMA channel in this example.
    unsafe { dma::on_interrupt(1) };
}
28
/// Example entry point: sends a banner over LPUART2 via TX DMA, receives 16
/// characters via RX DMA, and echoes them back.
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("LPUART DMA example starting...");

    // Enable DMA0 clock and release reset (must precede dma::init)
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    // Get PAC peripherals for DMA init
    let pac_periphs = unsafe { pac::Peripherals::steal() };

    // Initialize DMA
    unsafe {
        dma::init(&pac_periphs);
    }

    // Get EDMA TCD register block for transfers
    let edma = &pac_periphs.edma_0_tcd0;

    // Enable DMA interrupts (CH0 = TX, CH1 = RX; the handlers above forward
    // to dma::on_interrupt, which wakes the transfer futures)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH1);
    }

    // Create UART configuration
    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: true,
        ..Default::default()
    };

    // Create UART instance with DMA channels
    let mut lpuart = LpuartDma::new(
        p.LPUART2,
        p.P2_2,    // TX pin
        p.P2_3,    // RX pin
        p.DMA_CH0, // TX DMA channel
        p.DMA_CH1, // RX DMA channel
        config,
    )
    .unwrap();

    // Send a message using DMA
    let tx_msg = b"Hello from LPUART2 DMA TX!\r\n";
    lpuart
        .write_dma(edma, DMA_REQ_LPUART2_TX, tx_msg)
        .await
        .unwrap();

    defmt::info!("TX DMA complete");

    // Send prompt
    let prompt = b"Type 16 characters to echo via DMA:\r\n";
    lpuart
        .write_dma(edma, DMA_REQ_LPUART2_TX, prompt)
        .await
        .unwrap();

    // Receive 16 characters using DMA (completes only when the buffer fills)
    let mut rx_buf = [0u8; 16];
    lpuart
        .read_dma(edma, DMA_REQ_LPUART2_RX, &mut rx_buf)
        .await
        .unwrap();

    defmt::info!("RX DMA complete");

    // Echo back the received data
    let echo_prefix = b"\r\nReceived: ";
    lpuart
        .write_dma(edma, DMA_REQ_LPUART2_TX, echo_prefix)
        .await
        .unwrap();
    lpuart
        .write_dma(edma, DMA_REQ_LPUART2_TX, &rx_buf)
        .await
        .unwrap();
    let done_msg = b"\r\nDone!\r\n";
    lpuart
        .write_dma(edma, DMA_REQ_LPUART2_TX, done_msg)
        .await
        .unwrap();

    defmt::info!("Example complete");

    loop {
        cortex_m::asm::wfe();
    }
}
127
diff --git a/examples/src/bin/lpuart_ring_buffer.rs b/examples/src/bin/lpuart_ring_buffer.rs
new file mode 100644
index 000000000..bc666560c
--- /dev/null
+++ b/examples/src/bin/lpuart_ring_buffer.rs
@@ -0,0 +1,162 @@
1//! LPUART Ring Buffer DMA example for MCXA276.
2//!
3//! This example demonstrates using the new `RingBuffer` API for continuous
4//! circular DMA reception from a UART peripheral.
5//!
6//! # Features demonstrated:
7//! - `setup_circular_read()` for continuous peripheral-to-memory DMA
8//! - `RingBuffer` for async reading of received data
9//! - Handling of potential overrun conditions
10//! - Half-transfer and complete-transfer interrupts for timely wakeups
11//!
12//! # How it works:
13//! 1. Set up a circular DMA transfer from LPUART RX to a ring buffer
14//! 2. DMA continuously writes received bytes into the buffer, wrapping around
15//! 3. Application asynchronously reads data as it arrives
16//! 4. Both half-transfer and complete-transfer interrupts wake the reader
17
18#![no_std]
19#![no_main]
20
21use embassy_executor::Spawner;
22use embassy_mcxa::clocks::config::Div8;
23use embassy_mcxa::clocks::Gate;
24use embassy_mcxa::dma::{self, DmaChannel, DmaCh0InterruptHandler, DmaCh1InterruptHandler, DMA_REQ_LPUART2_RX};
25use embassy_mcxa::lpuart::{Blocking, Config, Lpuart, LpuartTx};
26use embassy_mcxa::{bind_interrupts, pac};
27use {defmt_rtt as _, embassy_mcxa as hal, panic_probe as _};
28
29// Bind DMA channel interrupts
30bind_interrupts!(struct Irqs {
31 DMA_CH0 => DmaCh0InterruptHandler;
32 DMA_CH1 => DmaCh1InterruptHandler;
33});
34
35// Ring buffer for RX - power of 2 is ideal for modulo efficiency
36static mut RX_RING_BUFFER: [u8; 64] = [0; 64];
37
38/// Helper to write a byte as hex to UART
39fn write_hex(tx: &mut LpuartTx<'_, Blocking>, byte: u8) {
40 const HEX: &[u8; 16] = b"0123456789ABCDEF";
41 let buf = [HEX[(byte >> 4) as usize], HEX[(byte & 0x0F) as usize]];
42 tx.blocking_write(&buf).ok();
43}
44
/// Example entry point: sets up continuous circular DMA from LPUART2 RX into
/// a static ring buffer, then echoes received bytes (hex + ASCII) forever.
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Small delay to allow probe-rs to attach after reset
    for _ in 0..100_000 {
        cortex_m::asm::nop();
    }

    let mut cfg = hal::config::Config::default();
    cfg.clock_cfg.sirc.fro_12m_enabled = true;
    cfg.clock_cfg.sirc.fro_lf_div = Some(Div8::no_div());
    let p = hal::init(cfg);

    defmt::info!("LPUART Ring Buffer DMA example starting...");

    // Enable DMA0 clock and release reset (must precede dma::init)
    unsafe {
        hal::peripherals::DMA0::enable_clock();
        hal::peripherals::DMA0::release_reset();
    }

    let pac_periphs = unsafe { pac::Peripherals::steal() };

    // Initialize DMA
    unsafe {
        dma::init(&pac_periphs);
    }

    // Enable DMA interrupts (half- and full-transfer interrupts wake the
    // ring-buffer reader)
    unsafe {
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH0);
        cortex_m::peripheral::NVIC::unmask(pac::Interrupt::DMA_CH1);
    }

    // Create UART configuration
    let config = Config {
        baudrate_bps: 115_200,
        enable_tx: true,
        enable_rx: true,
        ..Default::default()
    };

    // Create blocking UART for TX (we'll use DMA for RX only)
    let lpuart = Lpuart::new_blocking(p.LPUART2, p.P2_2, p.P2_3, config).unwrap();
    let (mut tx, _rx) = lpuart.split();

    tx.blocking_write(b"LPUART Ring Buffer DMA Example\r\n").unwrap();
    tx.blocking_write(b"==============================\r\n\r\n").unwrap();

    // Get LPUART2 RX data register address for DMA (the DMA reads this
    // register once per request; the address never increments)
    let lpuart2 = unsafe { &*pac::Lpuart2::ptr() };
    let rx_data_addr = lpuart2.data().as_ptr() as *const u8;

    // Enable RX DMA request in LPUART
    lpuart2.baud().modify(|_, w| w.rdmae().enabled());

    // Create DMA channel for RX
    let dma_ch_rx = DmaChannel::new(p.DMA_CH0);
    let edma = dma::edma_tcd();

    // Configure the DMA mux for LPUART2 RX
    unsafe {
        dma_ch_rx.set_request_source(edma, DMA_REQ_LPUART2_RX);
    }

    tx.blocking_write(b"Setting up circular DMA for UART RX...\r\n").unwrap();

    // Set up the ring buffer with circular DMA
    // This configures the DMA for continuous reception
    let ring_buf = unsafe {
        // addr_of_mut! avoids taking a &mut to a `static mut` directly
        let buf = &mut *core::ptr::addr_of_mut!(RX_RING_BUFFER);
        dma_ch_rx.setup_circular_read(rx_data_addr, buf)
    };

    // Enable DMA requests to start continuous reception
    unsafe {
        dma_ch_rx.enable_request(edma);
    }

    tx.blocking_write(b"Ring buffer ready! Type characters to see them echoed.\r\n").unwrap();
    tx.blocking_write(b"The DMA continuously receives in the background.\r\n\r\n").unwrap();

    // Main loop: read from ring buffer and echo back
    let mut read_buf = [0u8; 16];
    let mut total_received: usize = 0;

    loop {
        // Async read - waits until data is available
        match ring_buf.read(&mut read_buf).await {
            Ok(n) if n > 0 => {
                total_received += n;

                // Echo back what we received, first as hex bytes...
                tx.blocking_write(b"RX[").unwrap();
                for (i, &byte) in read_buf.iter().enumerate().take(n) {
                    write_hex(&mut tx, byte);
                    if i < n - 1 {
                        tx.blocking_write(b" ").unwrap();
                    }
                }
                // ...then as raw characters
                tx.blocking_write(b"]: ").unwrap();
                tx.blocking_write(&read_buf[..n]).unwrap();
                tx.blocking_write(b"\r\n").unwrap();

                defmt::info!("Received {} bytes, total: {}", n, total_received);
            }
            Ok(_) => {
                // No data, shouldn't happen with async read
            }
            Err(_) => {
                // Overrun detected: the DMA lapped the reader. Reset the
                // read index and keep going (some bytes are lost).
                tx.blocking_write(b"ERROR: Ring buffer overrun!\r\n").unwrap();
                defmt::error!("Ring buffer overrun!");
                ring_buf.clear();
            }
        }
    }
}
162
diff --git a/src/clocks/mod.rs b/src/clocks/mod.rs
index 9c9e6ef3d..ac30115f6 100644
--- a/src/clocks/mod.rs
+++ b/src/clocks/mod.rs
@@ -399,6 +399,10 @@ pub unsafe fn assert_reset<G: Gate>() {
399} 399}
400 400
401/// Check whether the peripheral is held in reset. 401/// Check whether the peripheral is held in reset.
402///
403/// # Safety
404///
405/// Must be called with a valid peripheral gate type.
402#[inline] 406#[inline]
403pub unsafe fn is_reset_released<G: Gate>() -> bool { 407pub unsafe fn is_reset_released<G: Gate>() -> bool {
404 G::is_reset_released() 408 G::is_reset_released()
@@ -940,4 +944,7 @@ pub(crate) mod gate {
940 impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig); 944 impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig);
941 impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig); 945 impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig);
942 impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig); 946 impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig);
947
948 // DMA0 peripheral - uses NoConfig since it has no selectable clock source
949 impl_cc_gate!(DMA0, mrcc_glb_cc0, mrcc_glb_rst0, dma0, NoConfig);
943} 950}
diff --git a/src/dma.rs b/src/dma.rs
new file mode 100644
index 000000000..f6badc826
--- /dev/null
+++ b/src/dma.rs
@@ -0,0 +1,2467 @@
1//! DMA driver for MCXA276.
2//!
3//! This module provides a typed channel abstraction over the EDMA_0_TCD0 array
4//! and helpers for configuring the channel MUX. The driver supports both
5//! low-level TCD configuration and higher-level async transfer APIs.
6//!
7//! # Architecture
8//!
9//! The MCXA276 has 8 DMA channels (0-7), each with its own interrupt vector.
10//! Each channel has a Transfer Control Descriptor (TCD) that defines the
11//! transfer parameters.
12//!
13//! # Choosing the Right API
14//!
15//! This module provides several API levels to match different use cases:
16//!
17//! ## High-Level Async API (Recommended for Most Users)
18//!
19//! Use the async methods when you want simple, safe DMA transfers:
20//!
21//! | Method | Description |
22//! |--------|-------------|
23//! | [`DmaChannel::mem_to_mem()`] | Memory-to-memory copy |
24//! | [`DmaChannel::memset()`] | Fill memory with a pattern |
25//! | [`DmaChannel::write()`] | Memory-to-peripheral (TX) |
26//! | [`DmaChannel::read()`] | Peripheral-to-memory (RX) |
27//!
28//! These return a [`Transfer`] future that can be `.await`ed:
29//!
//! ```ignore
31//! # use embassy_mcxa::dma::{DmaChannel, TransferOptions};
32//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
33//! # let src = [0u32; 4];
34//! # let mut dst = [0u32; 4];
35//! // Simple memory-to-memory transfer
36//! unsafe {
37//! dma_ch.mem_to_mem(&src, &mut dst, TransferOptions::default()).await;
38//! }
39//! ```
40//!
41//! ## Setup Methods (For Peripheral Drivers)
42//!
43//! Use setup methods when you need manual lifecycle control:
44//!
45//! | Method | Description |
46//! |--------|-------------|
47//! | [`DmaChannel::setup_write()`] | Configure TX without starting |
48//! | [`DmaChannel::setup_read()`] | Configure RX without starting |
49//!
50//! These configure the TCD but don't start the transfer. You control:
51//! 1. When to call [`DmaChannel::enable_request()`]
52//! 2. How to detect completion (polling or interrupts)
53//! 3. When to clean up with [`DmaChannel::clear_done()`]
54//!
55//! ## Circular/Ring Buffer API (For Continuous Reception)
56//!
57//! Use [`DmaChannel::setup_circular_read()`] for continuous data reception:
58//!
//! ```ignore
60//! # use embassy_mcxa::dma::DmaChannel;
61//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
62//! # let uart_rx_addr = 0x4000_0000 as *const u8;
63//! static mut RX_BUF: [u8; 64] = [0; 64];
64//!
65//! let ring_buf = unsafe {
66//! dma_ch.setup_circular_read(uart_rx_addr, &mut RX_BUF)
67//! };
68//!
69//! // Read data as it arrives
70//! let mut buf = [0u8; 16];
71//! let n = ring_buf.read(&mut buf).await.unwrap();
72//! ```
73//!
74//! ## Scatter-Gather Builder (For Chained Transfers)
75//!
76//! Use [`ScatterGatherBuilder`] for complex multi-segment transfers:
77//!
//! ```ignore
79//! # use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
80//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
81//! let mut builder = ScatterGatherBuilder::<u32>::new();
82//! builder.add_transfer(&src1, &mut dst1);
83//! builder.add_transfer(&src2, &mut dst2);
84//!
85//! let transfer = unsafe { builder.build(&dma_ch).unwrap() };
86//! transfer.await;
87//! ```
88//!
89//! ## Direct TCD Access (For Advanced Use Cases)
90//!
91//! For full control, use the channel's `tcd()` method to access TCD registers directly.
92//! See the `dma_*` examples for patterns.
93//!
94//! # Example
95//!
//! ```ignore
97//! use embassy_mcxa::dma::{DmaChannel, TransferOptions, Direction};
98//!
99//! let dma_ch = DmaChannel::new(p.DMA_CH0);
100//! // Configure and trigger a transfer...
101//! ```
102
103use core::future::Future;
104use core::marker::PhantomData;
105use core::pin::Pin;
106use core::ptr::NonNull;
107use core::sync::atomic::{fence, AtomicUsize, Ordering};
108use core::task::{Context, Poll};
109
110use crate::pac;
111use crate::pac::Interrupt;
112use embassy_hal_internal::PeripheralType;
113use embassy_sync::waitqueue::AtomicWaker;
114
115// ============================================================================
116// Phase 1: Foundation Types (Embassy-aligned)
117// ============================================================================
118
/// DMA transfer direction.
///
/// Selects which side of the transfer holds its address fixed: a peripheral
/// register side does not increment, while memory sides do.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Direction {
    /// Transfer from memory to memory (both addresses increment).
    MemoryToMemory,
    /// Transfer from memory to a peripheral register (e.g. UART TX data).
    MemoryToPeripheral,
    /// Transfer from a peripheral register to memory (e.g. UART RX data).
    PeripheralToMemory,
}
130
/// DMA transfer priority.
///
/// Maps onto the eDMA channel-priority field, where numerically *lower*
/// hardware values arbitrate first; see [`Priority::to_hw_priority`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
    /// Low priority (channel priority 7).
    Low,
    /// Medium priority (channel priority 4).
    Medium,
    /// High priority (channel priority 1). This is the default.
    #[default]
    High,
    /// Highest priority (channel priority 0).
    Highest,
}
145
146impl Priority {
147 /// Convert to hardware priority value (0 = highest, 7 = lowest).
148 pub fn to_hw_priority(self) -> u8 {
149 match self {
150 Priority::Low => 7,
151 Priority::Medium => 4,
152 Priority::High => 1,
153 Priority::Highest => 0,
154 }
155 }
156}
157
/// DMA transfer data width.
///
/// Determines the unit size of each DMA read/write; see
/// [`WordSize::to_hw_size`] for the hardware SSIZE/DSIZE encoding.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum WordSize {
    /// 8-bit (1 byte) transfers.
    OneByte,
    /// 16-bit (2 byte) transfers.
    TwoBytes,
    /// 32-bit (4 byte) transfers. This is the default.
    #[default]
    FourBytes,
}
170
171impl WordSize {
172 /// Size in bytes.
173 pub const fn bytes(self) -> usize {
174 match self {
175 WordSize::OneByte => 1,
176 WordSize::TwoBytes => 2,
177 WordSize::FourBytes => 4,
178 }
179 }
180
181 /// Convert to hardware SSIZE/DSIZE field value.
182 pub const fn to_hw_size(self) -> u8 {
183 match self {
184 WordSize::OneByte => 0,
185 WordSize::TwoBytes => 1,
186 WordSize::FourBytes => 2,
187 }
188 }
189
190 /// Create from byte width (1, 2, or 4).
191 pub const fn from_bytes(bytes: u8) -> Option<Self> {
192 match bytes {
193 1 => Some(WordSize::OneByte),
194 2 => Some(WordSize::TwoBytes),
195 4 => Some(WordSize::FourBytes),
196 _ => None,
197 }
198 }
199}
200
/// Trait for types that can be transferred via DMA.
///
/// This provides compile-time type safety for DMA transfers: generic
/// transfer APIs take `T: Word` and derive the hardware transfer width
/// from [`Word::size`]. Implemented for `u8`, `u16`, and `u32`.
pub trait Word: Copy + 'static {
    /// The word size for this type.
    fn size() -> WordSize;
}
208
209impl Word for u8 {
210 fn size() -> WordSize {
211 WordSize::OneByte
212 }
213}
214
215impl Word for u16 {
216 fn size() -> WordSize {
217 WordSize::TwoBytes
218 }
219}
220
221impl Word for u32 {
222 fn size() -> WordSize {
223 WordSize::FourBytes
224 }
225}
226
/// DMA transfer options.
///
/// This struct configures various aspects of a DMA transfer. It is
/// `#[non_exhaustive]`, so construct it with struct-update syntax:
/// `TransferOptions { circular: true, ..Default::default() }`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Transfer priority.
    pub priority: Priority,
    /// Enable circular (continuous) mode.
    ///
    /// When enabled, the transfer repeats automatically after completing.
    pub circular: bool,
    /// Enable interrupt on half transfer complete.
    pub half_transfer_interrupt: bool,
    /// Enable interrupt on transfer complete.
    pub complete_transfer_interrupt: bool,
}
245
246impl Default for TransferOptions {
247 fn default() -> Self {
248 Self {
249 priority: Priority::High,
250 circular: false,
251 half_transfer_interrupt: false,
252 complete_transfer_interrupt: true,
253 }
254 }
255}
256
/// DMA error types.
///
/// NOTE(review): not `#[non_exhaustive]`, unlike `TransferOptions` — adding
/// a variant later will be a breaking change; consider marking it.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA controller reported a bus error.
    BusError,
    /// The transfer was aborted.
    Aborted,
    /// Configuration error (e.g., invalid parameters).
    Configuration,
    /// Buffer overrun (for ring buffers): the DMA lapped the reader.
    Overrun,
}
270
/// Whether to enable the major loop completion interrupt.
///
/// This enum provides better readability than a boolean parameter
/// for functions that configure DMA interrupt behavior.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum EnableInterrupt {
    /// Enable the interrupt on major loop completion.
    Yes,
    /// Do not enable the interrupt.
    No,
}
283
// ============================================================================
// DMA Request Source Constants
// ============================================================================

// DMA request source numbers for LPUART peripherals on DMA0.
// NOTE(review): values 21..=32 taken as-is; confirm against the MCXA276
// reference manual's DMA request mux table.

/// DMA request source: LPUART0 receive.
pub const DMA_REQ_LPUART0_RX: u8 = 21;
/// DMA request source: LPUART0 transmit.
pub const DMA_REQ_LPUART0_TX: u8 = 22;
/// DMA request source: LPUART1 receive.
pub const DMA_REQ_LPUART1_RX: u8 = 23;
/// DMA request source: LPUART1 transmit.
pub const DMA_REQ_LPUART1_TX: u8 = 24;
/// DMA request source: LPUART2 receive.
pub const DMA_REQ_LPUART2_RX: u8 = 25;
/// DMA request source: LPUART2 transmit.
pub const DMA_REQ_LPUART2_TX: u8 = 26;
/// DMA request source: LPUART3 receive.
pub const DMA_REQ_LPUART3_RX: u8 = 27;
/// DMA request source: LPUART3 transmit.
pub const DMA_REQ_LPUART3_TX: u8 = 28;
/// DMA request source: LPUART4 receive.
pub const DMA_REQ_LPUART4_RX: u8 = 29;
/// DMA request source: LPUART4 transmit.
pub const DMA_REQ_LPUART4_TX: u8 = 30;
/// DMA request source: LPUART5 receive.
pub const DMA_REQ_LPUART5_RX: u8 = 31;
/// DMA request source: LPUART5 transmit.
pub const DMA_REQ_LPUART5_TX: u8 = 32;
301
302// ============================================================================
303// Channel Trait (Sealed Pattern)
304// ============================================================================
305
// Private module implementing the sealed-trait pattern: external crates can
// name `Channel` but cannot implement it, because they cannot reach
// `sealed::SealedChannel`.
mod sealed {
    use crate::pac::Interrupt;

    /// Sealed trait for DMA channels.
    pub trait SealedChannel {
        /// Zero-based channel index into the TCD array.
        fn index(&self) -> usize;
        /// Interrupt vector for this channel.
        fn interrupt(&self) -> Interrupt;
    }
}
317
/// Marker trait implemented by HAL peripheral tokens that map to a DMA0
/// channel backed by one EDMA_0_TCD0 TCD slot.
///
/// This trait is sealed and cannot be implemented outside this crate.
/// The associated constants mirror the trait-object methods of
/// `sealed::SealedChannel` so the index/IRQ are available at compile time;
/// `Into<AnyChannel>` allows type-erasing a concrete channel token.
#[allow(private_bounds)]
pub trait Channel: sealed::SealedChannel + PeripheralType + Into<AnyChannel> + 'static {
    /// Zero-based channel index into the TCD array.
    const INDEX: usize;
    /// Interrupt vector for this channel.
    const INTERRUPT: Interrupt;
}
329
/// Type-erased DMA channel.
///
/// This allows storing DMA channels in a uniform way regardless of their
/// concrete type, useful for async transfer futures and runtime channel
/// selection. Constructed via `From<DMA_CHn>` (see `impl_channel!` below).
#[derive(Debug, Clone, Copy)]
pub struct AnyChannel {
    // Zero-based index into the EDMA_0_TCD0 TCD array.
    index: usize,
    // NVIC interrupt vector dedicated to this channel.
    interrupt: Interrupt,
}
339
impl AnyChannel {
    /// Get the channel index.
    #[inline]
    pub const fn index(&self) -> usize {
        self.index
    }

    /// Get the channel interrupt.
    #[inline]
    pub const fn interrupt(&self) -> Interrupt {
        self.interrupt
    }

    /// Get a reference to the TCD register block for this channel.
    ///
    /// This steals the eDMA pointer internally since MCXA276 has only one
    /// eDMA instance.
    #[inline]
    fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
        // Safety: MCXA276 has a single eDMA instance, and we're only accessing
        // the TCD for this specific channel. `index` is fixed at construction
        // by `impl_channel!`, so it is always a valid TCD slot.
        let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
        edma.tcd(self.index)
    }

    /// Check if the channel's DONE flag is set.
    ///
    /// DONE is set by hardware on major-loop completion and must be cleared
    /// (write-1-to-clear) before the channel can be reused.
    pub fn is_done(&self) -> bool {
        self.tcd().ch_csr().read().done().bit_is_set()
    }

    /// Get the waker for this channel.
    ///
    /// Backed by the per-channel `STATES` table, which the channel's
    /// interrupt handler uses to wake pending transfer futures.
    pub fn waker(&self) -> &'static AtomicWaker {
        &STATES[self.index].waker
    }
}
374
375impl sealed::SealedChannel for AnyChannel {
376 fn index(&self) -> usize {
377 self.index
378 }
379
380 fn interrupt(&self) -> Interrupt {
381 self.interrupt
382 }
383}
384
/// Macro to implement Channel trait for a peripheral.
///
/// For a peripheral token `$peri` bound to TCD slot `$index` with interrupt
/// vector `$irq`, this generates:
/// - the private `sealed::SealedChannel` impl (runtime accessors),
/// - the public `Channel` impl (compile-time `INDEX`/`INTERRUPT` consts),
/// - a `From<$peri> for AnyChannel` conversion used for type erasure.
macro_rules! impl_channel {
    ($peri:ident, $index:expr, $irq:ident) => {
        impl sealed::SealedChannel for crate::peripherals::$peri {
            fn index(&self) -> usize {
                $index
            }

            fn interrupt(&self) -> Interrupt {
                Interrupt::$irq
            }
        }

        impl Channel for crate::peripherals::$peri {
            const INDEX: usize = $index;
            const INTERRUPT: Interrupt = Interrupt::$irq;
        }

        impl From<crate::peripherals::$peri> for AnyChannel {
            fn from(_: crate::peripherals::$peri) -> Self {
                AnyChannel {
                    index: $index,
                    interrupt: Interrupt::$irq,
                }
            }
        }
    };
}
413
// DMA0 exposes eight channels; each maps 1:1 to a TCD slot (0-7) and to an
// interrupt vector of the same name.
impl_channel!(DMA_CH0, 0, DMA_CH0);
impl_channel!(DMA_CH1, 1, DMA_CH1);
impl_channel!(DMA_CH2, 2, DMA_CH2);
impl_channel!(DMA_CH3, 3, DMA_CH3);
impl_channel!(DMA_CH4, 4, DMA_CH4);
impl_channel!(DMA_CH5, 5, DMA_CH5);
impl_channel!(DMA_CH6, 6, DMA_CH6);
impl_channel!(DMA_CH7, 7, DMA_CH7);
422
/// Strongly-typed handle to a DMA0 channel.
///
/// The lifetime of this value is tied to the unique peripheral token
/// supplied by `embassy_hal_internal::peripherals!`, so safe code cannot
/// create two `DmaChannel` instances for the same hardware channel.
pub struct DmaChannel<C: Channel> {
    // Zero-sized marker tying this handle to channel type `C`;
    // all real state lives in the hardware TCD registers.
    _ch: core::marker::PhantomData<C>,
}
431
432// ============================================================================
433// DMA Transfer Methods - API Overview
434// ============================================================================
435//
436// The DMA API provides two categories of methods for configuring transfers:
437//
438// ## 1. Async Methods (Return `Transfer` Future)
439//
440// These methods return a [`Transfer`] Future that must be `.await`ed:
441//
// - [`write()`](DmaChannel::write) - Memory-to-peripheral (alias of `write_to_peripheral()`)
// - [`read()`](DmaChannel::read) - Peripheral-to-memory (alias of `read_from_peripheral()`)
// - [`write_to_peripheral()`](DmaChannel::write_to_peripheral) - Memory-to-peripheral transfer
// - [`read_from_peripheral()`](DmaChannel::read_from_peripheral) - Peripheral-to-memory transfer
// - [`mem_to_mem()`](DmaChannel::mem_to_mem) - Memory-to-memory (alias of `transfer_mem_to_mem()`)
// - [`transfer_mem_to_mem()`](DmaChannel::transfer_mem_to_mem) - Memory-to-memory transfer
448//
449// The `Transfer` manages the DMA lifecycle automatically:
450// - Enables channel request
451// - Waits for completion via async/await
452// - Cleans up on completion
453//
454// **Important:** `Transfer::Drop` aborts the transfer if dropped before completion.
455// This means you MUST `.await` the Transfer or it will be aborted when it goes out of scope.
456//
457// **Use case:** When you want to use async/await and let the Transfer handle lifecycle management.
458//
459// ## 2. Setup Methods (Configure TCD Only)
460//
461// These methods configure the TCD but do NOT return a `Transfer`:
462//
// - [`setup_write()`](DmaChannel::setup_write) - Memory-to-peripheral (alias of `setup_write_to_peripheral()`)
// - [`setup_read()`](DmaChannel::setup_read) - Peripheral-to-memory (alias of `setup_read_from_peripheral()`)
// - [`setup_write_to_peripheral()`](DmaChannel::setup_write_to_peripheral) - Memory-to-peripheral setup
// - [`setup_read_from_peripheral()`](DmaChannel::setup_read_from_peripheral) - Peripheral-to-memory setup
467//
468// The caller is responsible for the complete DMA lifecycle:
469// 1. Call [`enable_request()`](DmaChannel::enable_request) to start the transfer
470// 2. Poll [`is_done()`](DmaChannel::is_done) or use interrupts to detect completion
471// 3. Call [`disable_request()`](DmaChannel::disable_request), [`clear_done()`](DmaChannel::clear_done),
472// [`clear_interrupt()`](DmaChannel::clear_interrupt) for cleanup
473//
474// **Use case:** Peripheral drivers (like LPUART) that implement their own `poll_fn`-based
475// completion mechanism and cannot use the `Transfer` Future approach.
476//
477// ============================================================================
478
479impl<C: Channel> DmaChannel<C> {
    /// Wrap a DMA channel token (takes ownership of the Peri wrapper).
    ///
    /// Owning the unique peripheral token guarantees that safe code holds at
    /// most one `DmaChannel` per hardware channel.
    #[inline]
    pub fn new(_ch: embassy_hal_internal::Peri<'_, C>) -> Self {
        Self {
            _ch: core::marker::PhantomData,
        }
    }
487
    /// Wrap a DMA channel token directly (for internal use).
    ///
    /// Unlike [`new()`](Self::new) this consumes the raw token rather than a
    /// `Peri` wrapper; intended for HAL-internal construction paths.
    #[inline]
    pub fn from_token(_ch: C) -> Self {
        Self {
            _ch: core::marker::PhantomData,
        }
    }
495
    /// Channel index in the EDMA_0_TCD0 array.
    ///
    /// Always equal to `C::INDEX`; provided as a method for convenience.
    #[inline]
    pub const fn index(&self) -> usize {
        C::INDEX
    }
501
502 /// Convert this typed channel into a type-erased `AnyChannel`.
503 #[inline]
504 pub fn into_any(self) -> AnyChannel {
505 AnyChannel {
506 index: C::INDEX,
507 interrupt: C::INTERRUPT,
508 }
509 }
510
511 /// Get a reference to the type-erased channel info.
512 #[inline]
513 pub fn as_any(&self) -> AnyChannel {
514 AnyChannel {
515 index: C::INDEX,
516 interrupt: C::INTERRUPT,
517 }
518 }
519
520 /// Return a reference to the underlying TCD register block.
521 ///
522 /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
523 #[inline]
524 pub fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
525 // Safety: MCXA276 has a single eDMA instance
526 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
527 edma.tcd(C::INDEX)
528 }
529
    /// Start an async transfer.
    ///
    /// The channel must already be configured. This enables the channel
    /// request and returns a `Transfer` future that resolves when the
    /// DMA transfer completes.
    ///
    /// # Safety
    ///
    /// The caller must ensure the DMA channel has been properly configured
    /// and that source/destination buffers remain valid for the duration
    /// of the transfer.
    pub unsafe fn start_transfer(&self) -> Transfer<'_> {
        // Clear any previous DONE/INT flags so the future cannot observe
        // stale completion state from an earlier transfer.
        let t = self.tcd();
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Enable the channel request (ERQ) only after the flags above are
        // cleared, so the new transfer starts from a clean state.
        t.ch_csr().modify(|_, w| w.erq().enable());

        Transfer::new(self.as_any())
    }
552
553 // ========================================================================
554 // Type-Safe Transfer Methods (Embassy-style API)
555 // ========================================================================
556
    /// Perform a memory-to-memory DMA transfer (simplified API).
    ///
    /// This is a type-safe wrapper that uses the `Word` trait to determine
    /// the correct transfer width automatically. Uses the global eDMA TCD
    /// register accessor internally. Thin alias of
    /// [`transfer_mem_to_mem()`](Self::transfer_mem_to_mem).
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer
    /// * `dst` - Destination buffer (must be at least as large as src)
    /// * `options` - Transfer configuration options
    ///
    /// # Safety
    ///
    /// The source and destination buffers must remain valid for the
    /// duration of the transfer.
    pub unsafe fn mem_to_mem<W: Word>(&self, src: &[W], dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
        self.transfer_mem_to_mem(src, dst, options)
    }
576
    /// Perform a memory-to-memory DMA transfer.
    ///
    /// This is a type-safe wrapper that uses the `Word` trait to determine
    /// the correct transfer width automatically. The whole transfer runs as
    /// one software-started major loop (a single minor loop carrying all
    /// bytes), and the channel request auto-disables on completion (DREQ).
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer
    /// * `dst` - Destination buffer (must be at least as large as src)
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `src` is empty, longer than `0x7fff` elements, or longer
    /// than `dst`.
    ///
    /// # Safety
    ///
    /// The source and destination buffers must remain valid for the
    /// duration of the transfer.
    pub unsafe fn transfer_mem_to_mem<W: Word>(
        &self,
        src: &[W],
        dst: &mut [W],
        options: TransferOptions,
    ) -> Transfer<'_> {
        assert!(!src.is_empty());
        assert!(dst.len() >= src.len());
        assert!(src.len() <= 0x7fff);

        let size = W::size();
        let byte_count = (src.len() * size.bytes()) as u32;

        let t = self.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is fully reset before touching TCD
        cortex_m::asm::dsb();

        // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
        // Reset ALL TCD registers to 0 to clear any stale configuration from
        // previous transfers. This is critical when reusing a channel.
        t.tcd_saddr().write(|w| w.saddr().bits(0));
        t.tcd_soff().write(|w| w.soff().bits(0));
        t.tcd_attr().write(|w| w.bits(0));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_daddr().write(|w| w.daddr().bits(0));
        t.tcd_doff().write(|w| w.doff().bits(0));
        t.tcd_citer_elinkno().write(|w| w.bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
        t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
        t.tcd_biter_elinkno().write(|w| w.bits(0));

        // Memory barrier after TCD reset
        cortex_m::asm::dsb();

        // Note: Priority is managed by round-robin arbitration (set in init())
        // Per-channel priority can be configured via ch_pri() if needed

        // Now configure the new transfer

        // Source address and increment
        t.tcd_saddr().write(|w| w.saddr().bits(src.as_ptr() as u32));
        t.tcd_soff().write(|w| w.soff().bits(size.bytes() as u16));

        // Destination address and increment
        t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(size.bytes() as u16));

        // Transfer attributes (size)
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));

        // Minor loop: transfer all bytes in one minor loop
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_count));

        // No source/dest adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = 1 (single major loop)
        // Write BITER first, then CITER (CITER must match BITER at start)
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Control/status: interrupt on major complete, start
        // Write this last after all other TCD registers are configured
        let int_major = options.complete_transfer_interrupt;
        t.tcd_csr().write(|w| {
            w.intmajor()
                .bit(int_major)
                .inthalf()
                .bit(options.half_transfer_interrupt)
                .dreq()
                .set_bit() // Auto-disable request after major loop
                .start()
                .set_bit() // Start the channel
        });

        Transfer::new(self.as_any())
    }
691
    /// Fill a memory buffer with a pattern value (memset).
    ///
    /// This performs a DMA transfer where the source address remains fixed
    /// (pattern value) while the destination address increments through the buffer.
    /// It's useful for quickly filling large memory regions with a constant value.
    ///
    /// # Arguments
    ///
    /// * `pattern` - Reference to the pattern value (will be read repeatedly)
    /// * `dst` - Destination buffer to fill
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `dst` is empty or longer than `0x7fff` elements.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use embassy_mcxa::dma::{DmaChannel, TransferOptions};
    ///
    /// let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// let pattern: u32 = 0xDEADBEEF;
    /// let mut buffer = [0u32; 256];
    ///
    /// unsafe {
    ///     dma_ch.memset(&pattern, &mut buffer, TransferOptions::default()).await;
    /// }
    /// // buffer is now filled with 0xDEADBEEF
    /// ```
    ///
    /// # Safety
    ///
    /// - The pattern and destination buffer must remain valid for the duration of the transfer.
    pub unsafe fn memset<W: Word>(&self, pattern: &W, dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
        assert!(!dst.is_empty());
        assert!(dst.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();
        // Total bytes to transfer - all in one minor loop for software-triggered transfers
        let total_bytes = (dst.len() * byte_size) as u32;

        let t = self.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is fully reset before touching TCD
        cortex_m::asm::dsb();

        // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
        // Reset ALL TCD registers to 0 to clear any stale configuration from
        // previous transfers. This is critical when reusing a channel.
        t.tcd_saddr().write(|w| w.saddr().bits(0));
        t.tcd_soff().write(|w| w.soff().bits(0));
        t.tcd_attr().write(|w| w.bits(0));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_daddr().write(|w| w.daddr().bits(0));
        t.tcd_doff().write(|w| w.doff().bits(0));
        t.tcd_citer_elinkno().write(|w| w.bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
        t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
        t.tcd_biter_elinkno().write(|w| w.bits(0));

        // Memory barrier after TCD reset
        cortex_m::asm::dsb();

        // Now configure the new transfer
        //
        // For software-triggered memset, we use a SINGLE minor loop that transfers
        // all bytes at once. The source address stays fixed (SOFF=0) while the
        // destination increments (DOFF=byte_size). The eDMA will read from the
        // same source address for each destination word.
        //
        // This is necessary because the START bit only triggers ONE minor loop
        // iteration. Using CITER>1 with software trigger would require multiple
        // START triggers.

        // Source: pattern address, fixed (soff=0)
        t.tcd_saddr().write(|w| w.saddr().bits(pattern as *const W as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // Fixed source - reads pattern repeatedly

        // Destination: memory buffer, incrementing by word size
        t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes - source and dest are same word size
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));

        // Minor loop: transfer ALL bytes in one minor loop (like mem_to_mem)
        // This allows the entire transfer to complete with a single START trigger
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(total_bytes));

        // No address adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = 1 (single major loop, all data in minor loop)
        // Write BITER first, then CITER (CITER must match BITER at start)
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Control/status: interrupt on major complete, start immediately
        // Write this last after all other TCD registers are configured
        let int_major = options.complete_transfer_interrupt;
        t.tcd_csr().write(|w| {
            w.intmajor()
                .bit(int_major)
                .inthalf()
                .bit(options.half_transfer_interrupt)
                .dreq()
                .set_bit() // Auto-disable request after major loop
                .start()
                .set_bit() // Start the channel
        });

        Transfer::new(self.as_any())
    }
823
    /// Write data from memory to a peripheral register.
    ///
    /// The destination address remains fixed (peripheral register) while
    /// the source address increments through the buffer. Thin alias of
    /// [`write_to_peripheral()`](Self::write_to_peripheral).
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `options` - Transfer configuration options
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn write<W: Word>(&self, buf: &[W], peri_addr: *mut W, options: TransferOptions) -> Transfer<'_> {
        self.write_to_peripheral(buf, peri_addr, options)
    }
842
    /// Configure a memory-to-peripheral DMA transfer without starting it.
    ///
    /// This is a convenience wrapper around [`setup_write_to_peripheral()`](Self::setup_write_to_peripheral)
    /// that uses the default eDMA TCD register block.
    ///
    /// This method configures the TCD but does NOT return a `Transfer`. The caller
    /// is responsible for the complete DMA lifecycle:
    /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
    /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
    /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
    ///    [`clear_interrupt()`](Self::clear_interrupt) for cleanup
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use embassy_mcxa::dma::{DmaChannel, EnableInterrupt};
    /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// # let uart_tx_addr = 0x4000_0000 as *mut u8;
    /// let data = [0x48, 0x65, 0x6c, 0x6c, 0x6f]; // "Hello"
    ///
    /// unsafe {
    ///     // Configure the transfer
    ///     dma_ch.setup_write(&data, uart_tx_addr, EnableInterrupt::No);
    ///
    ///     // Start when peripheral is ready
    ///     dma_ch.enable_request();
    ///
    ///     // Wait for completion (or use interrupt)
    ///     while !dma_ch.is_done() {}
    ///
    ///     // Clean up
    ///     dma_ch.clear_done();
    ///     dma_ch.clear_interrupt();
    /// }
    /// ```
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn setup_write<W: Word>(&self, buf: &[W], peri_addr: *mut W, enable_interrupt: EnableInterrupt) {
        self.setup_write_to_peripheral(buf, peri_addr, enable_interrupt)
    }
892
    /// Write data from memory to a peripheral register.
    ///
    /// The destination address remains fixed (peripheral register) while
    /// the source address increments through the buffer. One word (`W`) is
    /// transferred per hardware DMA request (NBYTES = word size,
    /// CITER/BITER = `buf.len()`).
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn write_to_peripheral<W: Word>(
        &self,
        buf: &[W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Transfer<'_> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Addresses
        t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));

        // Offsets: Source increments, Dest fixed
        t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
        t.tcd_doff().write(|w| w.doff().bits(0));

        // Attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request (match old: only set nbytes)
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));

        // No final adjustments
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = number of words
        // NOTE(review): CITER is written before BITER here, while
        // transfer_mem_to_mem writes BITER first. Both writes happen before
        // the request is enabled, so ordering should not matter — confirm
        // against the eDMA reference manual.
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // CSR: interrupt on major loop complete and auto-clear ERQ
        t.tcd_csr().write(|w| {
            let w = if options.complete_transfer_interrupt {
                w.intmajor().enable()
            } else {
                w.intmajor().disable()
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear() // Disable request when done
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();

        Transfer::new(self.as_any())
    }
988
    /// Read data from a peripheral register to memory.
    ///
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer. Thin alias of
    /// [`read_from_peripheral()`](Self::read_from_peripheral).
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], options: TransferOptions) -> Transfer<'_> {
        self.read_from_peripheral(peri_addr, buf, options)
    }
1007
    /// Configure a peripheral-to-memory DMA transfer without starting it.
    ///
    /// This is a convenience wrapper around [`setup_read_from_peripheral()`](Self::setup_read_from_peripheral)
    /// that uses the default eDMA TCD register block.
    ///
    /// This method configures the TCD but does NOT return a `Transfer`. The caller
    /// is responsible for the complete DMA lifecycle:
    /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
    /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
    /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
    ///    [`clear_interrupt()`](Self::clear_interrupt) for cleanup
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use embassy_mcxa::dma::{DmaChannel, EnableInterrupt};
    /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// # let uart_rx_addr = 0x4000_0000 as *const u8;
    /// let mut buf = [0u8; 32];
    ///
    /// unsafe {
    ///     // Configure the transfer
    ///     dma_ch.setup_read(uart_rx_addr, &mut buf, EnableInterrupt::No);
    ///
    ///     // Start when peripheral is ready
    ///     dma_ch.enable_request();
    ///
    ///     // Wait for completion (or use interrupt)
    ///     while !dma_ch.is_done() {}
    ///
    ///     // Clean up
    ///     dma_ch.clear_done();
    ///     dma_ch.clear_interrupt();
    /// }
    /// // buf now contains received data
    /// ```
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], enable_interrupt: EnableInterrupt) {
        self.setup_read_from_peripheral(peri_addr, buf, enable_interrupt)
    }
1058
    /// Read data from a peripheral register to memory.
    ///
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer. One word (`W`)
    /// is transferred per hardware DMA request (NBYTES = word size,
    /// CITER/BITER = `buf.len()`).
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        options: TransferOptions,
    ) -> Transfer<'_> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request, no offsets
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // No address adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Control/status: interrupt on major complete, auto-clear ERQ when done
        t.tcd_csr().write(|w| {
            let w = if options.complete_transfer_interrupt {
                w.intmajor().enable()
            } else {
                w.intmajor().disable()
            };
            let w = if options.half_transfer_interrupt {
                w.inthalf().enable()
            } else {
                w.inthalf().disable()
            };
            w.dreq()
                .erq_field_clear() // Disable request when done (important for peripheral DMA)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();

        Transfer::new(self.as_any())
    }
1175
    /// Configure a memory-to-peripheral DMA transfer without starting it.
    ///
    /// This configures the TCD for a memory-to-peripheral transfer but does NOT
    /// return a Transfer object. The caller is responsible for:
    /// 1. Enabling the peripheral's DMA request
    /// 2. Calling `enable_request()` to start the transfer
    /// 3. Polling `is_done()` or using interrupts to detect completion
    /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
    ///
    /// Use this when you need manual control over the DMA lifecycle (e.g., in
    /// peripheral drivers that have their own completion polling).
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn setup_write_to_peripheral<W: Word>(
        &self,
        buf: &[W],
        peri_addr: *mut W,
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Addresses
        t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));

        // Offsets: Source increments, Dest fixed
        t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
        t.tcd_doff().write(|w| w.doff().bits(0));

        // Attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));

        // No final adjustments
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear()
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
1275
    /// Configure a peripheral-to-memory DMA transfer without starting it.
    ///
    /// This configures the TCD for a peripheral-to-memory transfer but does NOT
    /// return a Transfer object. The caller is responsible for:
    /// 1. Enabling the peripheral's DMA request
    /// 2. Calling `enable_request()` to start the transfer
    /// 3. Polling `is_done()` or using interrupts to detect completion
    /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
    ///
    /// Use this when you need manual control over the DMA lifecycle (e.g., in
    /// peripheral drivers that have their own completion polling).
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than 0x7fff elements (the 15-bit
    /// major loop count limit).
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0));

        // Destination: memory buffer, incrementing one element per write
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));

        // No final adjustments
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        // (DREQ) so the channel stops once the buffer is full.
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear()
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
1386
1387 /// Configure the integrated channel MUX to use the given request
1388 /// source value (for example [`DMA_REQ_LPUART2_TX`] or
1389 /// [`DMA_REQ_LPUART2_RX`]).
1390 ///
1391 /// # Safety
1392 ///
1393 /// Caller must ensure the request source mapping matches the
1394 /// peripheral that will drive this channel.
1395 ///
1396 /// # Note
1397 ///
1398 /// The NXP SDK requires a two-step write sequence: first clear
1399 /// the mux to 0, then set the actual source. This is a hardware
1400 /// requirement on eDMA4 for the mux to properly latch.
1401 #[inline]
1402 pub unsafe fn set_request_source(&self, request: u8) {
1403 // Two-step write per NXP SDK: clear to 0, then set actual source.
1404 self.tcd().ch_mux().write(|w| w.src().bits(0));
1405 cortex_m::asm::dsb(); // Ensure the clear completes before setting new source
1406 self.tcd().ch_mux().write(|w| w.src().bits(request));
1407 }
1408
1409 /// Enable hardware requests for this channel (ERQ=1).
1410 ///
1411 /// # Safety
1412 ///
1413 /// The channel must be properly configured before enabling requests.
1414 pub unsafe fn enable_request(&self) {
1415 let t = self.tcd();
1416 t.ch_csr().modify(|_, w| w.erq().enable());
1417 }
1418
1419 /// Disable hardware requests for this channel (ERQ=0).
1420 ///
1421 /// # Safety
1422 ///
1423 /// Disabling requests on an active transfer may leave the transfer incomplete.
1424 pub unsafe fn disable_request(&self) {
1425 let t = self.tcd();
1426 t.ch_csr().modify(|_, w| w.erq().disable());
1427 }
1428
1429 /// Return true if the channel's DONE flag is set.
1430 pub fn is_done(&self) -> bool {
1431 let t = self.tcd();
1432 t.ch_csr().read().done().bit_is_set()
1433 }
1434
1435 /// Clear the DONE flag for this channel.
1436 ///
1437 /// Uses modify to preserve other bits (especially ERQ) unlike write
1438 /// which would clear ERQ and halt an active transfer.
1439 ///
1440 /// # Safety
1441 ///
1442 /// Clearing DONE while a transfer is in progress may cause undefined behavior.
1443 pub unsafe fn clear_done(&self) {
1444 let t = self.tcd();
1445 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
1446 }
1447
1448 /// Clear the channel interrupt flag (CH_INT.INT).
1449 ///
1450 /// # Safety
1451 ///
1452 /// Must be called from the correct interrupt context or with interrupts disabled.
1453 pub unsafe fn clear_interrupt(&self) {
1454 let t = self.tcd();
1455 t.ch_int().write(|w| w.int().clear_bit_by_one());
1456 }
1457
1458 /// Trigger a software start for this channel.
1459 ///
1460 /// # Safety
1461 ///
1462 /// The channel must be properly configured with a valid TCD before triggering.
1463 pub unsafe fn trigger_start(&self) {
1464 let t = self.tcd();
1465 t.tcd_csr().modify(|_, w| w.start().channel_started());
1466 }
1467
1468 /// Get the waker for this channel
1469 pub fn waker(&self) -> &'static AtomicWaker {
1470 &STATES[C::INDEX].waker
1471 }
1472
    /// Enable the interrupt for this channel in the NVIC.
    pub fn enable_interrupt(&self) {
        // SAFETY: unmasking an NVIC interrupt is only a soundness concern
        // if some critical section relies on this IRQ staying masked.
        // NOTE(review): confirm no mask-based critical section depends on
        // this channel's interrupt remaining disabled.
        unsafe {
            cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
        }
    }
1479
1480 /// Enable Major Loop Linking.
1481 ///
1482 /// When the major loop completes, the hardware will trigger a service request
1483 /// on `link_ch`.
1484 ///
1485 /// # Arguments
1486 ///
1487 /// * `link_ch` - Target channel index (0-7) to link to
1488 ///
1489 /// # Safety
1490 ///
1491 /// The channel must be properly configured before setting up linking.
1492 pub unsafe fn set_major_link(&self, link_ch: usize) {
1493 let t = self.tcd();
1494 t.tcd_csr()
1495 .modify(|_, w| w.majorelink().enable().majorlinkch().bits(link_ch as u8));
1496 }
1497
1498 /// Disable Major Loop Linking.
1499 ///
1500 /// Removes any major loop channel linking previously configured.
1501 ///
1502 /// # Safety
1503 ///
1504 /// The caller must ensure this doesn't disrupt an active transfer that
1505 /// depends on the linking.
1506 pub unsafe fn clear_major_link(&self) {
1507 let t = self.tcd();
1508 t.tcd_csr().modify(|_, w| w.majorelink().disable());
1509 }
1510
1511 /// Enable Minor Loop Linking.
1512 ///
1513 /// After each minor loop, the hardware will trigger a service request
1514 /// on `link_ch`.
1515 ///
1516 /// # Arguments
1517 ///
1518 /// * `link_ch` - Target channel index (0-7) to link to
1519 ///
1520 /// # Note
1521 ///
1522 /// This rewrites CITER and BITER registers to the ELINKYES format.
1523 /// It preserves the current loop count.
1524 ///
1525 /// # Safety
1526 ///
1527 /// The channel must be properly configured before setting up linking.
1528 pub unsafe fn set_minor_link(&self, link_ch: usize) {
1529 let t = self.tcd();
1530
1531 // Read current CITER (assuming ELINKNO format initially)
1532 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1533 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1534
1535 // Write back using ELINKYES format
1536 t.tcd_citer_elinkyes().write(|w| {
1537 w.citer()
1538 .bits(current_citer)
1539 .elink()
1540 .enable()
1541 .linkch()
1542 .bits(link_ch as u8)
1543 });
1544
1545 t.tcd_biter_elinkyes().write(|w| {
1546 w.biter()
1547 .bits(current_biter)
1548 .elink()
1549 .enable()
1550 .linkch()
1551 .bits(link_ch as u8)
1552 });
1553 }
1554
1555 /// Disable Minor Loop Linking.
1556 ///
1557 /// Removes any minor loop channel linking previously configured.
1558 /// This rewrites CITER and BITER registers to the ELINKNO format,
1559 /// preserving the current loop count.
1560 ///
1561 /// # Safety
1562 ///
1563 /// The caller must ensure this doesn't disrupt an active transfer that
1564 /// depends on the linking.
1565 pub unsafe fn clear_minor_link(&self) {
1566 let t = self.tcd();
1567
1568 // Read current CITER (could be in either format, but we only need the count)
1569 // Note: In ELINKYES format, citer is 9 bits; in ELINKNO, it's 15 bits.
1570 // We read from ELINKNO which will give us the combined value.
1571 let current_citer = t.tcd_citer_elinkno().read().citer().bits();
1572 let current_biter = t.tcd_biter_elinkno().read().biter().bits();
1573
1574 // Write back using ELINKNO format (disabling link)
1575 t.tcd_citer_elinkno()
1576 .write(|w| w.citer().bits(current_citer).elink().disable());
1577
1578 t.tcd_biter_elinkno()
1579 .write(|w| w.biter().bits(current_biter).elink().disable());
1580 }
1581
1582 /// Load a TCD from memory into the hardware channel registers.
1583 ///
1584 /// This is useful for scatter/gather and ping-pong transfers where
1585 /// TCDs are prepared in RAM and then loaded into the hardware.
1586 ///
1587 /// # Safety
1588 ///
1589 /// - The TCD must be properly initialized.
1590 /// - The caller must ensure no concurrent access to the same channel.
1591 pub unsafe fn load_tcd(&self, tcd: &Tcd) {
1592 let t = self.tcd();
1593 t.tcd_saddr().write(|w| w.saddr().bits(tcd.saddr));
1594 t.tcd_soff().write(|w| w.soff().bits(tcd.soff as u16));
1595 t.tcd_attr().write(|w| w.bits(tcd.attr));
1596 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(tcd.nbytes));
1597 t.tcd_slast_sda().write(|w| w.slast_sda().bits(tcd.slast as u32));
1598 t.tcd_daddr().write(|w| w.daddr().bits(tcd.daddr));
1599 t.tcd_doff().write(|w| w.doff().bits(tcd.doff as u16));
1600 t.tcd_citer_elinkno().write(|w| w.citer().bits(tcd.citer));
1601 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(tcd.dlast_sga as u32));
1602 t.tcd_csr().write(|w| w.bits(tcd.csr));
1603 t.tcd_biter_elinkno().write(|w| w.biter().bits(tcd.biter));
1604 }
1605}
1606
1607// ============================================================================
1608// Global DMA Initialization
1609// ============================================================================
1610
/// Basic global DMA0 init.
///
/// This enables debug mode and round-robin arbitration and makes sure
/// the controller is not halted. Clock gate and reset must be handled
/// separately via `crate::clocks` and `crate::reset`.
///
/// # Safety
///
/// Must be called after DMA clock is enabled and reset is released.
/// Should only be called once during system initialization.
pub unsafe fn init(peripherals: &pac::Peripherals) {
    let dma = &peripherals.dma0;

    // Read-modify-write keeps any fields not named below at their
    // current values.
    dma.mp_csr().modify(|_, w| {
        w.edbg()
            .enable()
            .erca()
            .enable()
            // Leave HAE/ECX/CX at reset defaults.
            .halt()
            .normal_operation()
            // Allow per-channel linking and master-ID replication if used.
            .gclc()
            .available()
            .gmrc()
            .available()
    });
}
1639
/// In-memory representation of a Transfer Control Descriptor (TCD).
///
/// This matches the hardware layout (32 bytes): `repr(C)` keeps the
/// fields in declaration order so they pack exactly like the eDMA TCD
/// registers, and the 32-byte alignment allows the descriptor's address
/// to be used as a scatter-gather target (see [`ScatterGatherBuilder`]).
#[repr(C, align(32))]
#[derive(Clone, Copy, Debug, Default)]
pub struct Tcd {
    /// Source address (TCD_SADDR).
    pub saddr: u32,
    /// Signed source address offset applied after each read (TCD_SOFF).
    pub soff: i16,
    /// Transfer attributes — SSIZE/DSIZE/modulo fields (TCD_ATTR).
    pub attr: u16,
    /// Bytes to transfer per minor loop (TCD_NBYTES).
    pub nbytes: u32,
    /// Signed source adjustment applied after the major loop (TCD_SLAST_SDA).
    pub slast: i32,
    /// Destination address (TCD_DADDR).
    pub daddr: u32,
    /// Signed destination address offset applied after each write (TCD_DOFF).
    pub doff: i16,
    /// Current major loop count, ELINKNO format (TCD_CITER).
    pub citer: u16,
    /// Destination adjustment after the major loop, or the next TCD's
    /// address when scatter-gather (ESG) is enabled (TCD_DLAST_SGA).
    pub dlast_sga: i32,
    /// Control/status bits — START/INTMAJOR/ESG/etc. (TCD_CSR).
    pub csr: u16,
    /// Beginning major loop count, ELINKNO format (TCD_BITER).
    pub biter: u16,
}
1658
/// Per-channel interrupt bookkeeping shared between user code and the
/// DMA interrupt handlers.
struct State {
    /// Waker for transfer complete interrupt
    waker: AtomicWaker,
    /// Waker for half-transfer interrupt
    half_waker: AtomicWaker,
}
1665
impl State {
    /// Const-construct an empty state so `STATES` can be a `static`.
    const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
            half_waker: AtomicWaker::new(),
        }
    }
}
1674
// One State entry per DMA channel; indexed by the channel number.
static STATES: [State; 8] = [
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
];
1685
1686pub(crate) fn waker(idx: usize) -> &'static AtomicWaker {
1687 &STATES[idx].waker
1688}
1689
1690pub(crate) fn half_waker(idx: usize) -> &'static AtomicWaker {
1691 &STATES[idx].half_waker
1692}
1693
1694// ============================================================================
1695// Async Transfer Future
1696// ============================================================================
1697
/// An in-progress DMA transfer.
///
/// This type implements `Future` and can be `.await`ed to wait for the
/// transfer to complete. Dropping the transfer will abort it.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    /// The (type-erased) channel this transfer runs on.
    channel: AnyChannel,
    /// Ties the transfer's lifetime to the buffers the caller configured.
    _phantom: core::marker::PhantomData<&'a ()>,
}
1707
impl<'a> Transfer<'a> {
    /// Create a new transfer for the given channel.
    ///
    /// The caller must have already configured and started the DMA channel.
    pub(crate) fn new(channel: AnyChannel) -> Self {
        Self {
            channel,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Check if the transfer is still running (DONE flag not yet set).
    pub fn is_running(&self) -> bool {
        !self.channel.is_done()
    }

    /// Get the remaining transfer count (current CITER value).
    ///
    /// NOTE(review): this reads the ELINKNO view; if minor linking was
    /// enabled on this channel the raw value includes LINKCH bits —
    /// confirm callers only use this on unlinked transfers.
    pub fn remaining(&self) -> u16 {
        let t = self.channel.tcd();
        t.tcd_citer_elinkno().read().citer().bits()
    }

    /// Block until the transfer completes.
    pub fn blocking_wait(self) {
        while self.is_running() {
            core::hint::spin_loop();
        }

        // Ensure all DMA writes are visible
        fence(Ordering::SeqCst);

        // Don't run drop (which would abort)
        core::mem::forget(self);
    }

    /// Wait for the half-transfer interrupt asynchronously.
    ///
    /// This is useful for double-buffering scenarios where you want to process
    /// the first half of the buffer while the second half is being filled.
    ///
    /// Returns `true` if the half-transfer occurred, `false` if the transfer
    /// completed before the half-transfer interrupt.
    ///
    /// # Note
    ///
    /// The transfer must be configured with `TransferOptions::half_transfer_interrupt = true`
    /// for this method to work correctly.
    ///
    /// NOTE(review): only `half_waker` is registered here; the
    /// completion path (`Poll::Ready(false)`) relies on something also
    /// waking `half_waker` when the transfer finishes — confirm against
    /// the interrupt handler.
    pub async fn wait_half(&mut self) -> bool {
        use core::future::poll_fn;

        poll_fn(|cx| {
            let state = &STATES[self.channel.index];

            // Register the half-transfer waker before inspecting the
            // hardware so an interrupt in between cannot be missed.
            state.half_waker.register(cx.waker());

            // Check if we're past the half-way point
            let t = self.channel.tcd();
            let biter = t.tcd_biter_elinkno().read().biter().bits();
            let citer = t.tcd_citer_elinkno().read().citer().bits();
            let half_point = biter / 2;

            if self.channel.is_done() {
                // Transfer completed before half-transfer
                Poll::Ready(false)
            } else if citer <= half_point {
                // We're past the half-way point; make the DMA engine's
                // writes to the first half visible before returning.
                fence(Ordering::SeqCst);
                Poll::Ready(true)
            } else {
                Poll::Pending
            }
        })
        .await
    }

    /// Abort the transfer: stop hardware requests and clear the
    /// channel's interrupt and DONE flags.
    fn abort(&mut self) {
        let t = self.channel.tcd();

        // Disable channel requests
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear any pending interrupt
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Clear DONE flag
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);
    }
}
1800
// Transfer stores no self-referential data, so it is safe to move after
// being polled.
impl<'a> Unpin for Transfer<'a> {}
1802
1803impl<'a> Future for Transfer<'a> {
1804 type Output = ();
1805
1806 fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
1807 let state = &STATES[self.channel.index];
1808
1809 // Register waker first
1810 state.waker.register(cx.waker());
1811
1812 let done = self.channel.is_done();
1813
1814 if done {
1815 // Ensure all DMA writes are visible before returning
1816 fence(Ordering::SeqCst);
1817 Poll::Ready(())
1818 } else {
1819 Poll::Pending
1820 }
1821 }
1822}
1823
impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        // Only abort if the transfer is still running.
        // If already complete, no need to abort.
        if self.is_running() {
            self.abort();

            // Wait for abort to complete so the buffers are no longer
            // being touched by the DMA engine when they go out of scope.
            while self.is_running() {
                core::hint::spin_loop();
            }
        }

        // Make any DMA writes visible before the borrowed buffers are
        // released back to the caller.
        fence(Ordering::SeqCst);
    }
}
1840
1841// ============================================================================
1842// Ring Buffer for Circular DMA
1843// ============================================================================
1844
/// A ring buffer for continuous DMA reception.
///
/// This structure manages a circular DMA transfer, allowing continuous
/// reception of data without losing bytes between reads. It uses both
/// half-transfer and complete-transfer interrupts to track available data.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, RingBuffer, TransferOptions};
///
/// static mut RX_BUF: [u8; 64] = [0; 64];
///
/// let dma_ch = DmaChannel::new(p.DMA_CH0);
/// let ring_buf = unsafe {
///     dma_ch.setup_circular_read(
///         uart_rx_addr,
///         &mut RX_BUF,
///     )
/// };
///
/// // Read data as it arrives
/// let mut buf = [0u8; 16];
/// let n = ring_buf.read(&mut buf).await?;
/// ```
pub struct RingBuffer<'a, W: Word> {
    /// The (type-erased) channel performing the circular transfer.
    channel: AnyChannel,
    /// Buffer pointer. We use NonNull instead of &mut because DMA acts like
    /// a separate thread writing to this buffer, and &mut claims exclusive
    /// access which the compiler could optimize incorrectly.
    buf: NonNull<[W]>,
    /// Buffer length cached for convenience
    buf_len: usize,
    /// Read position in the buffer (consumer side)
    read_pos: AtomicUsize,
    /// Phantom data to tie the lifetime to the original buffer
    _lt: PhantomData<&'a mut [W]>,
}
1883
impl<'a, W: Word> RingBuffer<'a, W> {
    /// Create a new ring buffer for the given channel and buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure:
    /// - The DMA channel has been configured for circular transfer
    /// - The buffer remains valid for the lifetime of the ring buffer
    /// - Only one RingBuffer exists per DMA channel at a time
    pub(crate) unsafe fn new(channel: AnyChannel, buf: &'a mut [W]) -> Self {
        let buf_len = buf.len();
        Self {
            channel,
            buf: NonNull::from(buf),
            buf_len,
            read_pos: AtomicUsize::new(0),
            _lt: PhantomData,
        }
    }

    /// Get a slice reference to the buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that DMA is not actively writing to the
    /// portion of the buffer being accessed, or that the access is
    /// appropriately synchronized.
    #[inline]
    unsafe fn buf_slice(&self) -> &[W] {
        self.buf.as_ref()
    }

    /// Get the current DMA write position in the buffer.
    ///
    /// This reads the current destination address from the DMA controller
    /// and calculates the buffer offset.
    fn dma_write_pos(&self) -> usize {
        let t = self.channel.tcd();
        let daddr = t.tcd_daddr().read().daddr().bits() as usize;
        let buf_start = self.buf.as_ptr() as *const W as usize;

        // Calculate offset from buffer start (in elements, not bytes).
        let offset = daddr.wrapping_sub(buf_start) / core::mem::size_of::<W>();

        // Ensure we're within bounds. Between the end of the major loop
        // and the DLAST adjustment, DADDR can momentarily equal
        // buf_start + buf_len; the modulo maps that back to 0.
        offset % self.buf_len
    }

    /// Returns the number of elements available to read.
    pub fn available(&self) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Account for wrap-around: the writer may be behind the reader
        // index while being logically ahead of it.
        if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        }
    }

    /// Check if the buffer has overrun (data was lost).
    ///
    /// This happens when DMA writes faster than the application reads.
    pub fn is_overrun(&self) -> bool {
        // In a true overrun, the DMA would have wrapped around and caught up
        // to our read position. We can detect this by checking if available()
        // equals the full buffer size (minus 1 to distinguish from empty).
        // NOTE(review): this is a heuristic — a writer that wrapped more
        // than once between polls is indistinguishable from a near-full
        // buffer.
        self.available() >= self.buf_len - 1
    }

    /// Read data from the ring buffer into the provided slice.
    ///
    /// Returns the number of elements read, which may be less than
    /// `dst.len()` if not enough data is available.
    ///
    /// This method does not block; use `read_async()` for async waiting.
    pub fn read_immediate(&self, dst: &mut [W]) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Calculate available elements (same wrap-around logic as
        // `available()`, but on a single write_pos snapshot).
        let available = if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        };

        let to_read = dst.len().min(available);
        if to_read == 0 {
            return 0;
        }

        // Safety: We only read from portions of the buffer that DMA has
        // already written to (between read_pos and write_pos).
        let buf = unsafe { self.buf_slice() };

        // Read data in at most two chunks: from read_pos to the end of
        // the buffer, then (if needed) from the start of the buffer.
        let first_chunk = (self.buf_len - read_pos).min(to_read);
        dst[..first_chunk].copy_from_slice(&buf[read_pos..read_pos + first_chunk]);

        if to_read > first_chunk {
            let second_chunk = to_read - first_chunk;
            dst[first_chunk..to_read].copy_from_slice(&buf[..second_chunk]);
        }

        // Update read position (Release pairs with the Acquire loads above).
        let new_read_pos = (read_pos + to_read) % self.buf_len;
        self.read_pos.store(new_read_pos, Ordering::Release);

        to_read
    }

    /// Read data from the ring buffer asynchronously.
    ///
    /// This waits until at least one element is available, then reads as much
    /// as possible into the destination buffer.
    ///
    /// Returns the number of elements read.
    pub async fn read(&self, dst: &mut [W]) -> Result<usize, Error> {
        use core::future::poll_fn;

        if dst.is_empty() {
            return Ok(0);
        }

        poll_fn(|cx| {
            // Check for overrun
            if self.is_overrun() {
                return Poll::Ready(Err(Error::Overrun));
            }

            // Try to read immediately
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            // Register wakers for both half and complete interrupts
            let state = &STATES[self.channel.index()];
            state.waker.register(cx.waker());
            state.half_waker.register(cx.waker());

            // Check again after registering waker (avoid race: data may
            // have arrived between the first read and registration).
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            Poll::Pending
        })
        .await
    }

    /// Clear the ring buffer, discarding all unread data.
    pub fn clear(&self) {
        let write_pos = self.dma_write_pos();
        self.read_pos.store(write_pos, Ordering::Release);
    }

    /// Stop the DMA transfer and consume the ring buffer.
    ///
    /// Returns any remaining unread data count.
    pub fn stop(self) -> usize {
        let available = self.available();

        // Disable the channel
        let t = self.channel.tcd();
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear flags
        t.ch_int().write(|w| w.int().clear_bit_by_one());
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);

        available
    }
}
2062
impl<C: Channel> DmaChannel<C> {
    /// Set up a circular DMA transfer for continuous peripheral-to-memory reception.
    ///
    /// This configures the DMA channel for circular operation with both half-transfer
    /// and complete-transfer interrupts enabled. The transfer runs continuously until
    /// stopped via [`RingBuffer::stop()`].
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address to read from
    /// * `buf` - Destination buffer (should be power-of-2 size for best efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to read received data.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than 0x7fff elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - The peripheral address must be valid for reads.
    /// - The peripheral's DMA request must be configured to trigger this channel.
    pub unsafe fn setup_circular_read<'a, W: Word>(&self, peri_addr: *const W, buf: &'a mut [W]) -> RingBuffer<'a, W> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);
        // For circular mode, buffer size should ideally be power of 2
        // but we don't enforce it
        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = buffer size
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // After major loop: reset destination to buffer start (circular)
        // by applying a negative adjustment equal to the buffer's byte size.
        let buf_bytes = (buf.len() * byte_size) as i32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); // Source doesn't change
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits((-buf_bytes) as u32));

        // Control/status: enable both half and complete interrupts, NO DREQ (continuous)
        t.tcd_csr().write(|w| {
            w.intmajor()
                .enable()
                .inthalf()
                .enable()
                .dreq()
                .channel_not_affected() // Don't clear ERQ on complete (circular)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them.
        cortex_m::asm::dsb();

        // Enable the channel request
        t.ch_csr().modify(|_, w| w.erq().enable());

        RingBuffer::new(self.as_any(), buf)
    }
}
2180
2181// ============================================================================
2182// Scatter-Gather Builder
2183// ============================================================================
2184
/// Maximum number of TCDs in a scatter-gather chain.
///
/// Bounds the fixed TCD pool embedded in [`ScatterGatherBuilder`].
pub const MAX_SCATTER_GATHER_TCDS: usize = 16;
2187
/// A builder for constructing scatter-gather DMA transfer chains.
///
/// This provides a type-safe way to build TCD chains for scatter-gather
/// transfers without manual TCD manipulation.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
///
/// let mut builder = ScatterGatherBuilder::<u32>::new();
///
/// // Add transfer segments
/// builder.add_transfer(&src1, &mut dst1);
/// builder.add_transfer(&src2, &mut dst2);
/// builder.add_transfer(&src3, &mut dst3);
///
/// // Build and execute
/// let transfer = unsafe { builder.build(&dma_ch).unwrap() };
/// transfer.await;
/// ```
pub struct ScatterGatherBuilder<W: Word> {
    /// TCD pool (each `Tcd` is declared `align(32)`, satisfying the
    /// hardware's alignment requirement for scatter-gather descriptors)
    tcds: [Tcd; MAX_SCATTER_GATHER_TCDS],
    /// Number of TCDs configured
    count: usize,
    /// Phantom marker for word type
    _phantom: core::marker::PhantomData<W>,
}
2217
2218impl<W: Word> ScatterGatherBuilder<W> {
2219 /// Create a new scatter-gather builder.
2220 pub fn new() -> Self {
2221 Self {
2222 tcds: [Tcd::default(); MAX_SCATTER_GATHER_TCDS],
2223 count: 0,
2224 _phantom: core::marker::PhantomData,
2225 }
2226 }
2227
    /// Add a memory-to-memory transfer segment to the chain.
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer for this segment
    /// * `dst` - Destination buffer for this segment
    ///
    /// # Panics
    ///
    /// Panics if the maximum number of segments (16) is exceeded, if
    /// `src` is empty, or if `dst` is shorter than `src`.
    pub fn add_transfer(&mut self, src: &[W], dst: &mut [W]) -> &mut Self {
        assert!(self.count < MAX_SCATTER_GATHER_TCDS, "Too many scatter-gather segments");
        assert!(!src.is_empty());
        assert!(dst.len() >= src.len());

        let size = W::size();
        let byte_size = size.bytes();
        let hw_size = size.to_hw_size();
        // The whole segment is moved in a single minor loop (citer = 1).
        let nbytes = (src.len() * byte_size) as u32;

        // Build the TCD for this segment
        self.tcds[self.count] = Tcd {
            saddr: src.as_ptr() as u32,
            soff: byte_size as i16,
            attr: ((hw_size as u16) << 8) | (hw_size as u16), // SSIZE | DSIZE
            nbytes,
            slast: 0,
            daddr: dst.as_mut_ptr() as u32,
            doff: byte_size as i16,
            citer: 1,
            dlast_sga: 0, // Will be filled in by build()
            csr: 0x0002, // INTMAJOR only (ESG will be set for non-last TCDs)
            biter: 1,
        };

        self.count += 1;
        // Return &mut Self so calls can be chained.
        self
    }
2266
    /// Get the number of transfer segments added so far via
    /// [`Self::add_transfer`].
    pub fn segment_count(&self) -> usize {
        self.count
    }
2271
2272 /// Build the scatter-gather chain and start the transfer.
2273 ///
2274 /// # Arguments
2275 ///
2276 /// * `channel` - The DMA channel to use for the transfer
2277 ///
2278 /// # Returns
2279 ///
2280 /// A `Transfer` future that completes when the entire chain has executed.
2281 ///
2282 /// # Safety
2283 ///
2284 /// All source and destination buffers passed to `add_transfer()` must
2285 /// remain valid for the duration of the transfer.
2286 pub unsafe fn build<C: Channel>(&mut self, channel: &DmaChannel<C>) -> Result<Transfer<'_>, Error> {
2287 if self.count == 0 {
2288 return Err(Error::Configuration);
2289 }
2290
2291 // Link TCDs together
2292 //
2293 // CSR bit definitions:
2294 // - START = bit 0 = 0x0001 (triggers transfer when set)
2295 // - INTMAJOR = bit 1 = 0x0002 (interrupt on major loop complete)
2296 // - ESG = bit 4 = 0x0010 (enable scatter-gather, loads next TCD on complete)
2297 //
2298 // When hardware loads a TCD via scatter-gather (ESG), it copies the TCD's
2299 // CSR directly into the hardware register. If START is not set in that CSR,
2300 // the hardware will NOT auto-execute the loaded TCD.
2301 //
2302 // Strategy:
2303 // - First TCD: ESG | INTMAJOR (no START - we add it manually after loading)
2304 // - Middle TCDs: ESG | INTMAJOR | START (auto-execute when loaded via S/G)
2305 // - Last TCD: INTMAJOR | START (auto-execute, no further linking)
2306 for i in 0..self.count {
2307 let is_first = i == 0;
2308 let is_last = i == self.count - 1;
2309
2310 if is_first {
2311 if is_last {
2312 // Only one TCD - no ESG, no START (we add START manually)
2313 self.tcds[i].dlast_sga = 0;
2314 self.tcds[i].csr = 0x0002; // INTMAJOR only
2315 } else {
2316 // First of multiple - ESG to link, no START (we add START manually)
2317 self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
2318 self.tcds[i].csr = 0x0012; // ESG | INTMAJOR
2319 }
2320 } else if is_last {
2321 // Last TCD (not first) - no ESG, but START so it auto-executes
2322 self.tcds[i].dlast_sga = 0;
2323 self.tcds[i].csr = 0x0003; // INTMAJOR | START
2324 } else {
2325 // Middle TCD - ESG to link, and START so it auto-executes
2326 self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
2327 self.tcds[i].csr = 0x0013; // ESG | INTMAJOR | START
2328 }
2329 }
2330
2331 let t = channel.tcd();
2332
2333 // Reset channel state - clear DONE, disable requests, clear errors
2334 // This ensures the channel is in a clean state before loading the TCD
2335 t.ch_csr().write(|w| {
2336 w.erq()
2337 .disable()
2338 .earq()
2339 .disable()
2340 .eei()
2341 .no_error()
2342 .done()
2343 .clear_bit_by_one()
2344 });
2345 t.ch_es().write(|w| w.err().clear_bit_by_one());
2346 t.ch_int().write(|w| w.int().clear_bit_by_one());
2347
2348 // Memory barrier to ensure channel state is reset before loading TCD
2349 cortex_m::asm::dsb();
2350
2351 // Load first TCD into hardware
2352 channel.load_tcd(&self.tcds[0]);
2353
2354 // Memory barrier before setting START
2355 cortex_m::asm::dsb();
2356
2357 // Start the transfer
2358 t.tcd_csr().modify(|_, w| w.start().channel_started());
2359
2360 Ok(Transfer::new(channel.as_any()))
2361 }
2362
2363 /// Reset the builder for reuse.
2364 pub fn clear(&mut self) {
2365 self.count = 0;
2366 }
2367}
2368
2369impl<W: Word> Default for ScatterGatherBuilder<W> {
2370 fn default() -> Self {
2371 Self::new()
2372 }
2373}
2374
2375/// A completed scatter-gather transfer result.
2376///
2377/// This type is returned after a scatter-gather transfer completes,
2378/// providing access to any error information.
2379#[derive(Debug, Clone, Copy, PartialEq, Eq)]
2380pub struct ScatterGatherResult {
2381 /// Number of segments successfully transferred
2382 pub segments_completed: usize,
2383 /// Error if any occurred
2384 pub error: Option<Error>,
2385}
2386
2387// ============================================================================
2388// Interrupt Handler
2389// ============================================================================
2390
2391/// Interrupt handler helper.
2392///
2393/// Call this from your interrupt handler to clear the interrupt flag and wake the waker.
2394/// This handles both half-transfer and complete-transfer interrupts.
2395///
2396/// # Safety
2397/// Must be called from the correct DMA channel interrupt context.
2398pub unsafe fn on_interrupt(ch_index: usize) {
2399 let p = pac::Peripherals::steal();
2400 let edma = &p.edma_0_tcd0;
2401 let t = edma.tcd(ch_index);
2402
2403 // Read TCD CSR to determine interrupt source
2404 let csr = t.tcd_csr().read();
2405
2406 // Check if this is a half-transfer interrupt
2407 // INTHALF is set and we're at or past the half-way point
2408 if csr.inthalf().bit_is_set() {
2409 let biter = t.tcd_biter_elinkno().read().biter().bits();
2410 let citer = t.tcd_citer_elinkno().read().citer().bits();
2411 let half_point = biter / 2;
2412
2413 if citer <= half_point && citer > 0 {
2414 // Half-transfer interrupt - wake half_waker
2415 half_waker(ch_index).wake();
2416 }
2417 }
2418
2419 // Clear INT flag
2420 t.ch_int().write(|w| w.int().clear_bit_by_one());
2421
2422 // If DONE is set, this is a complete-transfer interrupt
2423 let done = t.ch_csr().read().done().bit_is_set();
2424 if done {
2425 waker(ch_index).wake();
2426 } else {
2427 // Also wake the complete waker in case we're polling for progress
2428 waker(ch_index).wake();
2429 }
2430}
2431
2432// ============================================================================
2433// Type-level Interrupt Handlers for bind_interrupts! macro
2434// ============================================================================
2435
/// Generates a typelevel interrupt handler struct for one DMA channel.
///
/// The generated struct plugs into the `bind_interrupts!` macro and forwards
/// the interrupt to `on_interrupt` with the channel index baked in at
/// expansion time.
macro_rules! impl_dma_interrupt_handler {
    ($name:ident, $irq:ident, $ch:expr) => {
        /// Interrupt handler for DMA channel.
        ///
        /// Use this with the `bind_interrupts!` macro:
        /// ```ignore
        /// bind_interrupts!(struct Irqs {
        #[doc = concat!(" ", stringify!($irq), " => dma::", stringify!($name), ";")]
        /// });
        /// ```
        pub struct $name;

        impl crate::interrupt::typelevel::Handler<crate::interrupt::typelevel::$irq> for $name {
            unsafe fn on_interrupt() {
                on_interrupt($ch);
            }
        }
    };
}
2459
// One handler type per eDMA channel, matching the DMA_CH0..DMA_CH7
// interrupt vectors declared in src/interrupt.rs.
impl_dma_interrupt_handler!(DmaCh0InterruptHandler, DMA_CH0, 0);
impl_dma_interrupt_handler!(DmaCh1InterruptHandler, DMA_CH1, 1);
impl_dma_interrupt_handler!(DmaCh2InterruptHandler, DMA_CH2, 2);
impl_dma_interrupt_handler!(DmaCh3InterruptHandler, DMA_CH3, 3);
impl_dma_interrupt_handler!(DmaCh4InterruptHandler, DMA_CH4, 4);
impl_dma_interrupt_handler!(DmaCh5InterruptHandler, DMA_CH5, 5);
impl_dma_interrupt_handler!(DmaCh6InterruptHandler, DMA_CH6, 6);
impl_dma_interrupt_handler!(DmaCh7InterruptHandler, DMA_CH7, 7);
diff --git a/src/interrupt.rs b/src/interrupt.rs
index 0490e3a66..000b2f9cd 100644
--- a/src/interrupt.rs
+++ b/src/interrupt.rs
@@ -9,7 +9,7 @@
9mod generated { 9mod generated {
10 embassy_hal_internal::interrupt_mod!( 10 embassy_hal_internal::interrupt_mod!(
11 OS_EVENT, RTC, ADC1, GPIO0, GPIO1, GPIO2, GPIO3, GPIO4, LPI2C0, LPI2C1, LPI2C2, LPI2C3, LPUART0, LPUART1, 11 OS_EVENT, RTC, ADC1, GPIO0, GPIO1, GPIO2, GPIO3, GPIO4, LPI2C0, LPI2C1, LPI2C2, LPI2C3, LPUART0, LPUART1,
12 LPUART2, LPUART3, LPUART4, LPUART5, 12 LPUART2, LPUART3, LPUART4, LPUART5, DMA_CH0, DMA_CH1, DMA_CH2, DMA_CH3, DMA_CH4, DMA_CH5, DMA_CH6, DMA_CH7,
13 ); 13 );
14} 14}
15 15
diff --git a/src/lib.rs b/src/lib.rs
index fb204d27b..d3560e651 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -6,6 +6,7 @@
6// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)] 6// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
7 7
8pub mod clocks; // still provide clock helpers 8pub mod clocks; // still provide clock helpers
9pub mod dma;
9pub mod gpio; 10pub mod gpio;
10pub mod pins; // pin mux helpers 11pub mod pins; // pin mux helpers
11 12
@@ -51,6 +52,14 @@ embassy_hal_internal::peripherals!(
51 52
52 DBGMAILBOX, 53 DBGMAILBOX,
53 DMA0, 54 DMA0,
55 DMA_CH0,
56 DMA_CH1,
57 DMA_CH2,
58 DMA_CH3,
59 DMA_CH4,
60 DMA_CH5,
61 DMA_CH6,
62 DMA_CH7,
54 EDMA0_TCD0, 63 EDMA0_TCD0,
55 EIM0, 64 EIM0,
56 EQDC0, 65 EQDC0,
diff --git a/src/lpuart/mod.rs b/src/lpuart/mod.rs
index 317274a79..b29fe287d 100644
--- a/src/lpuart/mod.rs
+++ b/src/lpuart/mod.rs
@@ -15,22 +15,10 @@ use crate::{interrupt, pac, AnyPin};
15pub mod buffered; 15pub mod buffered;
16 16
17// ============================================================================ 17// ============================================================================
18// STUB IMPLEMENTATION 18// DMA INTEGRATION
19// ============================================================================ 19// ============================================================================
20 20
21// Stub implementation for LIB (Peripherals), GPIO, DMA and CLOCK until stable API 21use crate::dma::{Channel as DmaChannelTrait, DmaChannel, EnableInterrupt};
22// Pin and Clock initialization is currently done at the examples level.
23
24// --- START DMA ---
25mod dma {
26 pub struct Channel<'d> {
27 pub(super) _lifetime: core::marker::PhantomData<&'d ()>,
28 }
29}
30
31use dma::Channel;
32
33// --- END DMA ---
34 22
35// ============================================================================ 23// ============================================================================
36// MISC 24// MISC
@@ -694,7 +682,6 @@ pub struct Lpuart<'a, M: Mode> {
694pub struct LpuartTx<'a, M: Mode> { 682pub struct LpuartTx<'a, M: Mode> {
695 info: Info, 683 info: Info,
696 _tx_pin: Peri<'a, AnyPin>, 684 _tx_pin: Peri<'a, AnyPin>,
697 _tx_dma: Option<Channel<'a>>,
698 mode: PhantomData<(&'a (), M)>, 685 mode: PhantomData<(&'a (), M)>,
699} 686}
700 687
@@ -702,10 +689,31 @@ pub struct LpuartTx<'a, M: Mode> {
702pub struct LpuartRx<'a, M: Mode> { 689pub struct LpuartRx<'a, M: Mode> {
703 info: Info, 690 info: Info,
704 _rx_pin: Peri<'a, AnyPin>, 691 _rx_pin: Peri<'a, AnyPin>,
705 _rx_dma: Option<Channel<'a>>,
706 mode: PhantomData<(&'a (), M)>, 692 mode: PhantomData<(&'a (), M)>,
707} 693}
708 694
695/// Lpuart TX driver with DMA support.
696pub struct LpuartTxDma<'a, C: DmaChannelTrait> {
697 info: Info,
698 _tx_pin: Peri<'a, AnyPin>,
699 tx_dma: DmaChannel<C>,
700}
701
702/// Lpuart RX driver with DMA support.
703pub struct LpuartRxDma<'a, C: DmaChannelTrait> {
704 info: Info,
705 _rx_pin: Peri<'a, AnyPin>,
706 rx_dma: DmaChannel<C>,
707}
708
709/// Lpuart driver with DMA support for both TX and RX.
710pub struct LpuartDma<'a, TxC: DmaChannelTrait, RxC: DmaChannelTrait> {
711 #[allow(dead_code)]
712 info: Info,
713 tx: LpuartTxDma<'a, TxC>,
714 rx: LpuartRxDma<'a, RxC>,
715}
716
709// ============================================================================ 717// ============================================================================
710// LPUART CORE IMPLEMENTATION 718// LPUART CORE IMPLEMENTATION
711// ============================================================================ 719// ============================================================================
@@ -796,8 +804,8 @@ impl<'a> Lpuart<'a, Blocking> {
796 804
797 Ok(Self { 805 Ok(Self {
798 info: T::info(), 806 info: T::info(),
799 tx: LpuartTx::new_inner(T::info(), tx_pin, None), 807 tx: LpuartTx::new_inner(T::info(), tx_pin),
800 rx: LpuartRx::new_inner(T::info(), rx_pin, None), 808 rx: LpuartRx::new_inner(T::info(), rx_pin),
801 }) 809 })
802 } 810 }
803} 811}
@@ -807,11 +815,10 @@ impl<'a> Lpuart<'a, Blocking> {
807// ---------------------------------------------------------------------------- 815// ----------------------------------------------------------------------------
808 816
809impl<'a, M: Mode> LpuartTx<'a, M> { 817impl<'a, M: Mode> LpuartTx<'a, M> {
810 fn new_inner(info: Info, tx_pin: Peri<'a, AnyPin>, tx_dma: Option<Channel<'a>>) -> Self { 818 fn new_inner(info: Info, tx_pin: Peri<'a, AnyPin>) -> Self {
811 Self { 819 Self {
812 info, 820 info,
813 _tx_pin: tx_pin, 821 _tx_pin: tx_pin,
814 _tx_dma: tx_dma,
815 mode: PhantomData, 822 mode: PhantomData,
816 } 823 }
817 } 824 }
@@ -830,7 +837,7 @@ impl<'a> LpuartTx<'a, Blocking> {
830 837
831 Lpuart::<Blocking>::init::<T>(Some(&tx_pin), None, None, None, config)?; 838 Lpuart::<Blocking>::init::<T>(Some(&tx_pin), None, None, None, config)?;
832 839
833 Ok(Self::new_inner(T::info(), tx_pin, None)) 840 Ok(Self::new_inner(T::info(), tx_pin))
834 } 841 }
835 842
836 fn write_byte_internal(&mut self, byte: u8) -> Result<()> { 843 fn write_byte_internal(&mut self, byte: u8) -> Result<()> {
@@ -909,11 +916,10 @@ impl<'a> LpuartTx<'a, Blocking> {
909// ---------------------------------------------------------------------------- 916// ----------------------------------------------------------------------------
910 917
911impl<'a, M: Mode> LpuartRx<'a, M> { 918impl<'a, M: Mode> LpuartRx<'a, M> {
912 fn new_inner(info: Info, rx_pin: Peri<'a, AnyPin>, rx_dma: Option<Channel<'a>>) -> Self { 919 fn new_inner(info: Info, rx_pin: Peri<'a, AnyPin>) -> Self {
913 Self { 920 Self {
914 info, 921 info,
915 _rx_pin: rx_pin, 922 _rx_pin: rx_pin,
916 _rx_dma: rx_dma,
917 mode: PhantomData, 923 mode: PhantomData,
918 } 924 }
919 } 925 }
@@ -932,7 +938,7 @@ impl<'a> LpuartRx<'a, Blocking> {
932 938
933 Lpuart::<Blocking>::init::<T>(None, Some(&rx_pin), None, None, config)?; 939 Lpuart::<Blocking>::init::<T>(None, Some(&rx_pin), None, None, config)?;
934 940
935 Ok(Self::new_inner(T::info(), rx_pin, None)) 941 Ok(Self::new_inner(T::info(), rx_pin))
936 } 942 }
937 943
938 fn read_byte_internal(&mut self) -> Result<u8> { 944 fn read_byte_internal(&mut self) -> Result<u8> {
@@ -1027,10 +1033,373 @@ impl<'a> Lpuart<'a, Blocking> {
1027} 1033}
1028 1034
1029// ============================================================================ 1035// ============================================================================
1030// ASYNC MODE IMPLEMENTATIONS 1036// ASYNC MODE IMPLEMENTATIONS (DMA-based)
1037// ============================================================================
1038
1039/// Maximum bytes per DMA transfer (eDMA CITER/BITER are 15-bit fields).
1040const DMA_MAX_TRANSFER_SIZE: usize = 0x7FFF;
1041
1042/// Guard struct that ensures DMA is stopped if the async future is cancelled.
1043///
1044/// This implements the RAII pattern: if the future is dropped before completion
1045/// (e.g., due to a timeout), the DMA transfer is automatically aborted to prevent
1046/// use-after-free when the buffer goes out of scope.
1047struct TxDmaGuard<'a, C: DmaChannelTrait> {
1048 dma: &'a DmaChannel<C>,
1049 regs: Regs,
1050}
1051
1052impl<'a, C: DmaChannelTrait> TxDmaGuard<'a, C> {
1053 fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self {
1054 Self { dma, regs }
1055 }
1056
1057 /// Complete the transfer normally (don't abort on drop).
1058 fn complete(self) {
1059 // Cleanup
1060 self.regs.baud().modify(|_, w| w.tdmae().disabled());
1061 unsafe {
1062 self.dma.disable_request();
1063 self.dma.clear_done();
1064 }
1065 // Don't run drop since we've cleaned up
1066 core::mem::forget(self);
1067 }
1068}
1069
1070impl<C: DmaChannelTrait> Drop for TxDmaGuard<'_, C> {
1071 fn drop(&mut self) {
1072 // Abort the DMA transfer if still running
1073 unsafe {
1074 self.dma.disable_request();
1075 self.dma.clear_done();
1076 self.dma.clear_interrupt();
1077 }
1078 // Disable UART TX DMA request
1079 self.regs.baud().modify(|_, w| w.tdmae().disabled());
1080 }
1081}
1082
1083/// Guard struct for RX DMA transfers.
1084struct RxDmaGuard<'a, C: DmaChannelTrait> {
1085 dma: &'a DmaChannel<C>,
1086 regs: Regs,
1087}
1088
1089impl<'a, C: DmaChannelTrait> RxDmaGuard<'a, C> {
1090 fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self {
1091 Self { dma, regs }
1092 }
1093
1094 /// Complete the transfer normally (don't abort on drop).
1095 fn complete(self) {
1096 // Ensure DMA writes are visible to CPU
1097 cortex_m::asm::dsb();
1098 // Cleanup
1099 self.regs.baud().modify(|_, w| w.rdmae().disabled());
1100 unsafe {
1101 self.dma.disable_request();
1102 self.dma.clear_done();
1103 }
1104 // Don't run drop since we've cleaned up
1105 core::mem::forget(self);
1106 }
1107}
1108
1109impl<C: DmaChannelTrait> Drop for RxDmaGuard<'_, C> {
1110 fn drop(&mut self) {
1111 // Abort the DMA transfer if still running
1112 unsafe {
1113 self.dma.disable_request();
1114 self.dma.clear_done();
1115 self.dma.clear_interrupt();
1116 }
1117 // Disable UART RX DMA request
1118 self.regs.baud().modify(|_, w| w.rdmae().disabled());
1119 }
1120}
1121
1122impl<'a, C: DmaChannelTrait> LpuartTxDma<'a, C> {
1123 /// Create a new LPUART TX driver with DMA support.
1124 pub fn new<T: Instance>(
1125 _inner: Peri<'a, T>,
1126 tx_pin: Peri<'a, impl TxPin<T>>,
1127 tx_dma_ch: Peri<'a, C>,
1128 config: Config,
1129 ) -> Result<Self> {
1130 tx_pin.as_tx();
1131 let tx_pin: Peri<'a, AnyPin> = tx_pin.into();
1132
1133 Lpuart::<Blocking>::init::<T>(Some(&tx_pin), None, None, None, config)?;
1134
1135 Ok(Self {
1136 info: T::info(),
1137 _tx_pin: tx_pin,
1138 tx_dma: DmaChannel::new(tx_dma_ch),
1139 })
1140 }
1141
1142 /// Write data using DMA.
1143 ///
1144 /// This configures the DMA channel for a memory-to-peripheral transfer
1145 /// and waits for completion asynchronously. Large buffers are automatically
1146 /// split into chunks that fit within the DMA transfer limit.
1147 ///
1148 /// # Safety
1149 ///
1150 /// If the returned future is dropped before completion (e.g., due to a timeout),
1151 /// the DMA transfer is automatically aborted to prevent use-after-free.
1152 ///
1153 /// # Arguments
1154 /// * `edma` - Reference to the EDMA TCD register block
1155 /// * `request_source` - DMA request source number (e.g., `dma::DMA_REQ_LPUART2_TX`)
1156 /// * `buf` - Data buffer to transmit
1157 pub async fn write_dma(&mut self, request_source: u8, buf: &[u8]) -> Result<usize> {
1158 if buf.is_empty() {
1159 return Ok(0);
1160 }
1161
1162 let mut total = 0;
1163 for chunk in buf.chunks(DMA_MAX_TRANSFER_SIZE) {
1164 total += self.write_dma_inner(request_source, chunk).await?;
1165 }
1166
1167 Ok(total)
1168 }
1169
1170 /// Internal helper to write a single chunk (max 0x7FFF bytes) using DMA.
1171 async fn write_dma_inner(&mut self, request_source: u8, buf: &[u8]) -> Result<usize> {
1172 let len = buf.len();
1173 let peri_addr = self.info.regs.data().as_ptr() as *mut u8;
1174
1175 unsafe {
1176 // Clean up channel state
1177 self.tx_dma.disable_request();
1178 self.tx_dma.clear_done();
1179 self.tx_dma.clear_interrupt();
1180
1181 // Set DMA request source
1182 self.tx_dma.set_request_source(request_source);
1183
1184 // Configure TCD for memory-to-peripheral transfer
1185 self.tx_dma
1186 .setup_write_to_peripheral(buf, peri_addr, EnableInterrupt::Yes);
1187
1188 // Enable UART TX DMA request
1189 self.info.regs.baud().modify(|_, w| w.tdmae().enabled());
1190
1191 // Enable DMA channel request
1192 self.tx_dma.enable_request();
1193 }
1194
1195 // Create guard that will abort DMA if this future is dropped
1196 let guard = TxDmaGuard::new(&self.tx_dma, self.info.regs);
1197
1198 // Wait for completion asynchronously
1199 core::future::poll_fn(|cx| {
1200 self.tx_dma.waker().register(cx.waker());
1201 if self.tx_dma.is_done() {
1202 core::task::Poll::Ready(())
1203 } else {
1204 core::task::Poll::Pending
1205 }
1206 })
1207 .await;
1208
1209 // Transfer completed successfully - clean up without aborting
1210 guard.complete();
1211
1212 Ok(len)
1213 }
1214
1215 /// Blocking write (fallback when DMA is not needed)
1216 pub fn blocking_write(&mut self, buf: &[u8]) -> Result<()> {
1217 for &byte in buf {
1218 while self.info.regs.stat().read().tdre().is_txdata() {}
1219 self.info.regs.data().modify(|_, w| unsafe { w.bits(u32::from(byte)) });
1220 }
1221 Ok(())
1222 }
1223
1224 /// Flush TX blocking
1225 pub fn blocking_flush(&mut self) -> Result<()> {
1226 while self.info.regs.water().read().txcount().bits() != 0 {}
1227 while self.info.regs.stat().read().tc().is_active() {}
1228 Ok(())
1229 }
1230}
1231
1232impl<'a, C: DmaChannelTrait> LpuartRxDma<'a, C> {
1233 /// Create a new LPUART RX driver with DMA support.
1234 pub fn new<T: Instance>(
1235 _inner: Peri<'a, T>,
1236 rx_pin: Peri<'a, impl RxPin<T>>,
1237 rx_dma_ch: Peri<'a, C>,
1238 config: Config,
1239 ) -> Result<Self> {
1240 rx_pin.as_rx();
1241 let rx_pin: Peri<'a, AnyPin> = rx_pin.into();
1242
1243 Lpuart::<Blocking>::init::<T>(None, Some(&rx_pin), None, None, config)?;
1244
1245 Ok(Self {
1246 info: T::info(),
1247 _rx_pin: rx_pin,
1248 rx_dma: DmaChannel::new(rx_dma_ch),
1249 })
1250 }
1251
1252 /// Read data using DMA.
1253 ///
1254 /// This configures the DMA channel for a peripheral-to-memory transfer
1255 /// and waits for completion asynchronously. Large buffers are automatically
1256 /// split into chunks that fit within the DMA transfer limit.
1257 ///
1258 /// # Safety
1259 ///
1260 /// If the returned future is dropped before completion (e.g., due to a timeout),
1261 /// the DMA transfer is automatically aborted to prevent use-after-free.
1262 ///
1263 /// # Arguments
1264 /// * `request_source` - DMA request source number (e.g., `dma::DMA_REQ_LPUART2_RX`)
1265 /// * `buf` - Buffer to receive data into
1266 pub async fn read_dma(&mut self, request_source: u8, buf: &mut [u8]) -> Result<usize> {
1267 if buf.is_empty() {
1268 return Ok(0);
1269 }
1270
1271 let mut total = 0;
1272 for chunk in buf.chunks_mut(DMA_MAX_TRANSFER_SIZE) {
1273 total += self.read_dma_inner(request_source, chunk).await?;
1274 }
1275
1276 Ok(total)
1277 }
1278
1279 /// Internal helper to read a single chunk (max 0x7FFF bytes) using DMA.
1280 async fn read_dma_inner(&mut self, request_source: u8, buf: &mut [u8]) -> Result<usize> {
1281 let len = buf.len();
1282 let peri_addr = self.info.regs.data().as_ptr() as *const u8;
1283
1284 unsafe {
1285 // Clean up channel state
1286 self.rx_dma.disable_request();
1287 self.rx_dma.clear_done();
1288 self.rx_dma.clear_interrupt();
1289
1290 // Set DMA request source
1291 self.rx_dma.set_request_source(request_source);
1292
1293 // Configure TCD for peripheral-to-memory transfer
1294 self.rx_dma
1295 .setup_read_from_peripheral(peri_addr, buf, EnableInterrupt::Yes);
1296
1297 // Enable UART RX DMA request
1298 self.info.regs.baud().modify(|_, w| w.rdmae().enabled());
1299
1300 // Enable DMA channel request
1301 self.rx_dma.enable_request();
1302 }
1303
1304 // Create guard that will abort DMA if this future is dropped
1305 let guard = RxDmaGuard::new(&self.rx_dma, self.info.regs);
1306
1307 // Wait for completion asynchronously
1308 core::future::poll_fn(|cx| {
1309 self.rx_dma.waker().register(cx.waker());
1310 if self.rx_dma.is_done() {
1311 core::task::Poll::Ready(())
1312 } else {
1313 core::task::Poll::Pending
1314 }
1315 })
1316 .await;
1317
1318 // Transfer completed successfully - clean up without aborting
1319 guard.complete();
1320
1321 Ok(len)
1322 }
1323
1324 /// Blocking read (fallback when DMA is not needed)
1325 pub fn blocking_read(&mut self, buf: &mut [u8]) -> Result<()> {
1326 for byte in buf.iter_mut() {
1327 loop {
1328 if has_data(self.info.regs) {
1329 *byte = (self.info.regs.data().read().bits() & 0xFF) as u8;
1330 break;
1331 }
1332 check_and_clear_rx_errors(self.info.regs)?;
1333 }
1334 }
1335 Ok(())
1336 }
1337}
1338
1339impl<'a, TxC: DmaChannelTrait, RxC: DmaChannelTrait> LpuartDma<'a, TxC, RxC> {
1340 /// Create a new LPUART driver with DMA support for both TX and RX.
1341 pub fn new<T: Instance>(
1342 _inner: Peri<'a, T>,
1343 tx_pin: Peri<'a, impl TxPin<T>>,
1344 rx_pin: Peri<'a, impl RxPin<T>>,
1345 tx_dma_ch: Peri<'a, TxC>,
1346 rx_dma_ch: Peri<'a, RxC>,
1347 config: Config,
1348 ) -> Result<Self> {
1349 tx_pin.as_tx();
1350 rx_pin.as_rx();
1351
1352 let tx_pin: Peri<'a, AnyPin> = tx_pin.into();
1353 let rx_pin: Peri<'a, AnyPin> = rx_pin.into();
1354
1355 Lpuart::<Blocking>::init::<T>(Some(&tx_pin), Some(&rx_pin), None, None, config)?;
1356
1357 Ok(Self {
1358 info: T::info(),
1359 tx: LpuartTxDma {
1360 info: T::info(),
1361 _tx_pin: tx_pin,
1362 tx_dma: DmaChannel::new(tx_dma_ch),
1363 },
1364 rx: LpuartRxDma {
1365 info: T::info(),
1366 _rx_pin: rx_pin,
1367 rx_dma: DmaChannel::new(rx_dma_ch),
1368 },
1369 })
1370 }
1371
1372 /// Split into separate TX and RX drivers
1373 pub fn split(self) -> (LpuartTxDma<'a, TxC>, LpuartRxDma<'a, RxC>) {
1374 (self.tx, self.rx)
1375 }
1376
1377 /// Write data using DMA
1378 pub async fn write_dma(&mut self, request_source: u8, buf: &[u8]) -> Result<usize> {
1379 self.tx.write_dma(request_source, buf).await
1380 }
1381
1382 /// Read data using DMA
1383 pub async fn read_dma(&mut self, request_source: u8, buf: &mut [u8]) -> Result<usize> {
1384 self.rx.read_dma(request_source, buf).await
1385 }
1386}
1387
1388// ============================================================================
1389// EMBEDDED-IO-ASYNC TRAIT IMPLEMENTATIONS
1031// ============================================================================ 1390// ============================================================================
1032 1391
1033// TODO: Implement async mode for LPUART 1392impl<C: DmaChannelTrait> embedded_io::ErrorType for LpuartTxDma<'_, C> {
1393 type Error = Error;
1394}
1395
1396impl<C: DmaChannelTrait> embedded_io::ErrorType for LpuartRxDma<'_, C> {
1397 type Error = Error;
1398}
1399
1400impl<TxC: DmaChannelTrait, RxC: DmaChannelTrait> embedded_io::ErrorType for LpuartDma<'_, TxC, RxC> {
1401 type Error = Error;
1402}
1034 1403
1035// ============================================================================ 1404// ============================================================================
1036// EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS 1405// EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS
diff --git a/src/pins.rs b/src/pins.rs
index fdf1b0a86..9adbe64c8 100644
--- a/src/pins.rs
+++ b/src/pins.rs
@@ -1,6 +1,11 @@
1//! Pin configuration helpers (separate from peripheral drivers). 1//! Pin configuration helpers (separate from peripheral drivers).
2use crate::pac; 2use crate::pac;
3 3
4/// Configure pins for ADC usage.
5///
6/// # Safety
7///
8/// Must be called after PORT clocks are enabled.
4pub unsafe fn configure_adc_pins() { 9pub unsafe fn configure_adc_pins() {
5 // P1_10 = ADC1_A8 10 // P1_10 = ADC1_A8
6 let port1 = &*pac::Port1::ptr(); 11 let port1 = &*pac::Port1::ptr();