diff options
Diffstat (limited to 'src')
| -rw-r--r-- | src/clocks/mod.rs | 7 | ||||
| -rw-r--r-- | src/dma.rs | 2467 | ||||
| -rw-r--r-- | src/interrupt.rs | 2 | ||||
| -rw-r--r-- | src/lib.rs | 9 | ||||
| -rw-r--r-- | src/lpuart/mod.rs | 421 | ||||
| -rw-r--r-- | src/pins.rs | 5 |
6 files changed, 2884 insertions, 27 deletions
diff --git a/src/clocks/mod.rs b/src/clocks/mod.rs index 9c9e6ef3d..ac30115f6 100644 --- a/src/clocks/mod.rs +++ b/src/clocks/mod.rs | |||
| @@ -399,6 +399,10 @@ pub unsafe fn assert_reset<G: Gate>() { | |||
| 399 | } | 399 | } |
| 400 | 400 | ||
| 401 | /// Check whether the peripheral is held in reset. | 401 | /// Check whether the peripheral is held in reset. |
| 402 | /// | ||
| 403 | /// # Safety | ||
| 404 | /// | ||
| 405 | /// Must be called with a valid peripheral gate type. | ||
| 402 | #[inline] | 406 | #[inline] |
| 403 | pub unsafe fn is_reset_released<G: Gate>() -> bool { | 407 | pub unsafe fn is_reset_released<G: Gate>() -> bool { |
| 404 | G::is_reset_released() | 408 | G::is_reset_released() |
| @@ -940,4 +944,7 @@ pub(crate) mod gate { | |||
| 940 | impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig); | 944 | impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig); |
| 941 | impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig); | 945 | impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig); |
| 942 | impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig); | 946 | impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig); |
| 947 | |||
| 948 | // DMA0 peripheral - uses NoConfig since it has no selectable clock source | ||
| 949 | impl_cc_gate!(DMA0, mrcc_glb_cc0, mrcc_glb_rst0, dma0, NoConfig); | ||
| 943 | } | 950 | } |
diff --git a/src/dma.rs b/src/dma.rs new file mode 100644 index 000000000..f6badc826 --- /dev/null +++ b/src/dma.rs | |||
| @@ -0,0 +1,2467 @@ | |||
| 1 | //! DMA driver for MCXA276. | ||
| 2 | //! | ||
| 3 | //! This module provides a typed channel abstraction over the EDMA_0_TCD0 array | ||
| 4 | //! and helpers for configuring the channel MUX. The driver supports both | ||
| 5 | //! low-level TCD configuration and higher-level async transfer APIs. | ||
| 6 | //! | ||
| 7 | //! # Architecture | ||
| 8 | //! | ||
| 9 | //! The MCXA276 has 8 DMA channels (0-7), each with its own interrupt vector. | ||
| 10 | //! Each channel has a Transfer Control Descriptor (TCD) that defines the | ||
| 11 | //! transfer parameters. | ||
| 12 | //! | ||
| 13 | //! # Choosing the Right API | ||
| 14 | //! | ||
| 15 | //! This module provides several API levels to match different use cases: | ||
| 16 | //! | ||
| 17 | //! ## High-Level Async API (Recommended for Most Users) | ||
| 18 | //! | ||
| 19 | //! Use the async methods when you want simple, safe DMA transfers: | ||
| 20 | //! | ||
| 21 | //! | Method | Description | | ||
| 22 | //! |--------|-------------| | ||
| 23 | //! | [`DmaChannel::mem_to_mem()`] | Memory-to-memory copy | | ||
| 24 | //! | [`DmaChannel::memset()`] | Fill memory with a pattern | | ||
| 25 | //! | [`DmaChannel::write()`] | Memory-to-peripheral (TX) | | ||
| 26 | //! | [`DmaChannel::read()`] | Peripheral-to-memory (RX) | | ||
| 27 | //! | ||
| 28 | //! These return a [`Transfer`] future that can be `.await`ed: | ||
| 29 | //! | ||
| 30 | //! ```no_run | ||
| 31 | //! # use embassy_mcxa::dma::{DmaChannel, TransferOptions}; | ||
| 32 | //! # let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 33 | //! # let src = [0u32; 4]; | ||
| 34 | //! # let mut dst = [0u32; 4]; | ||
| 35 | //! // Simple memory-to-memory transfer | ||
| 36 | //! unsafe { | ||
| 37 | //! dma_ch.mem_to_mem(&src, &mut dst, TransferOptions::default()).await; | ||
| 38 | //! } | ||
| 39 | //! ``` | ||
| 40 | //! | ||
| 41 | //! ## Setup Methods (For Peripheral Drivers) | ||
| 42 | //! | ||
| 43 | //! Use setup methods when you need manual lifecycle control: | ||
| 44 | //! | ||
| 45 | //! | Method | Description | | ||
| 46 | //! |--------|-------------| | ||
| 47 | //! | [`DmaChannel::setup_write()`] | Configure TX without starting | | ||
| 48 | //! | [`DmaChannel::setup_read()`] | Configure RX without starting | | ||
| 49 | //! | ||
| 50 | //! These configure the TCD but don't start the transfer. You control: | ||
| 51 | //! 1. When to call [`DmaChannel::enable_request()`] | ||
| 52 | //! 2. How to detect completion (polling or interrupts) | ||
| 53 | //! 3. When to clean up with [`DmaChannel::clear_done()`] | ||
| 54 | //! | ||
| 55 | //! ## Circular/Ring Buffer API (For Continuous Reception) | ||
| 56 | //! | ||
| 57 | //! Use [`DmaChannel::setup_circular_read()`] for continuous data reception: | ||
| 58 | //! | ||
| 59 | //! ```no_run | ||
| 60 | //! # use embassy_mcxa::dma::DmaChannel; | ||
| 61 | //! # let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 62 | //! # let uart_rx_addr = 0x4000_0000 as *const u8; | ||
| 63 | //! static mut RX_BUF: [u8; 64] = [0; 64]; | ||
| 64 | //! | ||
| 65 | //! let ring_buf = unsafe { | ||
| 66 | //! dma_ch.setup_circular_read(uart_rx_addr, &mut RX_BUF) | ||
| 67 | //! }; | ||
| 68 | //! | ||
| 69 | //! // Read data as it arrives | ||
| 70 | //! let mut buf = [0u8; 16]; | ||
| 71 | //! let n = ring_buf.read(&mut buf).await.unwrap(); | ||
| 72 | //! ``` | ||
| 73 | //! | ||
| 74 | //! ## Scatter-Gather Builder (For Chained Transfers) | ||
| 75 | //! | ||
| 76 | //! Use [`ScatterGatherBuilder`] for complex multi-segment transfers: | ||
| 77 | //! | ||
| 78 | //! ```no_run | ||
| 79 | //! # use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder}; | ||
| 80 | //! # let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 81 | //! let mut builder = ScatterGatherBuilder::<u32>::new(); | ||
| 82 | //! builder.add_transfer(&src1, &mut dst1); | ||
| 83 | //! builder.add_transfer(&src2, &mut dst2); | ||
| 84 | //! | ||
| 85 | //! let transfer = unsafe { builder.build(&dma_ch).unwrap() }; | ||
| 86 | //! transfer.await; | ||
| 87 | //! ``` | ||
| 88 | //! | ||
| 89 | //! ## Direct TCD Access (For Advanced Use Cases) | ||
| 90 | //! | ||
| 91 | //! For full control, use the channel's `tcd()` method to access TCD registers directly. | ||
| 92 | //! See the `dma_*` examples for patterns. | ||
| 93 | //! | ||
| 94 | //! # Example | ||
| 95 | //! | ||
| 96 | //! ```no_run | ||
| 97 | //! use embassy_mcxa::dma::{DmaChannel, TransferOptions, Direction}; | ||
| 98 | //! | ||
| 99 | //! let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 100 | //! // Configure and trigger a transfer... | ||
| 101 | //! ``` | ||
| 102 | |||
| 103 | use core::future::Future; | ||
| 104 | use core::marker::PhantomData; | ||
| 105 | use core::pin::Pin; | ||
| 106 | use core::ptr::NonNull; | ||
| 107 | use core::sync::atomic::{fence, AtomicUsize, Ordering}; | ||
| 108 | use core::task::{Context, Poll}; | ||
| 109 | |||
| 110 | use crate::pac; | ||
| 111 | use crate::pac::Interrupt; | ||
| 112 | use embassy_hal_internal::PeripheralType; | ||
| 113 | use embassy_sync::waitqueue::AtomicWaker; | ||
| 114 | |||
| 115 | // ============================================================================ | ||
| 116 | // Phase 1: Foundation Types (Embassy-aligned) | ||
| 117 | // ============================================================================ | ||
| 118 | |||
| 119 | /// DMA transfer direction. | ||
| 120 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | ||
| 121 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 122 | pub enum Direction { | ||
| 123 | /// Transfer from memory to memory. | ||
| 124 | MemoryToMemory, | ||
| 125 | /// Transfer from memory to a peripheral register. | ||
| 126 | MemoryToPeripheral, | ||
| 127 | /// Transfer from a peripheral register to memory. | ||
| 128 | PeripheralToMemory, | ||
| 129 | } | ||
| 130 | |||
| 131 | /// DMA transfer priority. | ||
| 132 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] | ||
| 133 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 134 | pub enum Priority { | ||
| 135 | /// Low priority (channel priority 7). | ||
| 136 | Low, | ||
| 137 | /// Medium priority (channel priority 4). | ||
| 138 | Medium, | ||
| 139 | /// High priority (channel priority 1). | ||
| 140 | #[default] | ||
| 141 | High, | ||
| 142 | /// Highest priority (channel priority 0). | ||
| 143 | Highest, | ||
| 144 | } | ||
| 145 | |||
| 146 | impl Priority { | ||
| 147 | /// Convert to hardware priority value (0 = highest, 7 = lowest). | ||
| 148 | pub fn to_hw_priority(self) -> u8 { | ||
| 149 | match self { | ||
| 150 | Priority::Low => 7, | ||
| 151 | Priority::Medium => 4, | ||
| 152 | Priority::High => 1, | ||
| 153 | Priority::Highest => 0, | ||
| 154 | } | ||
| 155 | } | ||
| 156 | } | ||
| 157 | |||
| 158 | /// DMA transfer data width. | ||
| 159 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] | ||
| 160 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 161 | pub enum WordSize { | ||
| 162 | /// 8-bit (1 byte) transfers. | ||
| 163 | OneByte, | ||
| 164 | /// 16-bit (2 byte) transfers. | ||
| 165 | TwoBytes, | ||
| 166 | /// 32-bit (4 byte) transfers. | ||
| 167 | #[default] | ||
| 168 | FourBytes, | ||
| 169 | } | ||
| 170 | |||
| 171 | impl WordSize { | ||
| 172 | /// Size in bytes. | ||
| 173 | pub const fn bytes(self) -> usize { | ||
| 174 | match self { | ||
| 175 | WordSize::OneByte => 1, | ||
| 176 | WordSize::TwoBytes => 2, | ||
| 177 | WordSize::FourBytes => 4, | ||
| 178 | } | ||
| 179 | } | ||
| 180 | |||
| 181 | /// Convert to hardware SSIZE/DSIZE field value. | ||
| 182 | pub const fn to_hw_size(self) -> u8 { | ||
| 183 | match self { | ||
| 184 | WordSize::OneByte => 0, | ||
| 185 | WordSize::TwoBytes => 1, | ||
| 186 | WordSize::FourBytes => 2, | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 190 | /// Create from byte width (1, 2, or 4). | ||
| 191 | pub const fn from_bytes(bytes: u8) -> Option<Self> { | ||
| 192 | match bytes { | ||
| 193 | 1 => Some(WordSize::OneByte), | ||
| 194 | 2 => Some(WordSize::TwoBytes), | ||
| 195 | 4 => Some(WordSize::FourBytes), | ||
| 196 | _ => None, | ||
| 197 | } | ||
| 198 | } | ||
| 199 | } | ||
| 200 | |||
| 201 | /// Trait for types that can be transferred via DMA. | ||
| 202 | /// | ||
| 203 | /// This provides compile-time type safety for DMA transfers. | ||
| 204 | pub trait Word: Copy + 'static { | ||
| 205 | /// The word size for this type. | ||
| 206 | fn size() -> WordSize; | ||
| 207 | } | ||
| 208 | |||
| 209 | impl Word for u8 { | ||
| 210 | fn size() -> WordSize { | ||
| 211 | WordSize::OneByte | ||
| 212 | } | ||
| 213 | } | ||
| 214 | |||
| 215 | impl Word for u16 { | ||
| 216 | fn size() -> WordSize { | ||
| 217 | WordSize::TwoBytes | ||
| 218 | } | ||
| 219 | } | ||
| 220 | |||
| 221 | impl Word for u32 { | ||
| 222 | fn size() -> WordSize { | ||
| 223 | WordSize::FourBytes | ||
| 224 | } | ||
| 225 | } | ||
| 226 | |||
| 227 | /// DMA transfer options. | ||
| 228 | /// | ||
| 229 | /// This struct configures various aspects of a DMA transfer. | ||
| 230 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | ||
| 231 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 232 | #[non_exhaustive] | ||
| 233 | pub struct TransferOptions { | ||
| 234 | /// Transfer priority. | ||
| 235 | pub priority: Priority, | ||
| 236 | /// Enable circular (continuous) mode. | ||
| 237 | /// | ||
| 238 | /// When enabled, the transfer repeats automatically after completing. | ||
| 239 | pub circular: bool, | ||
| 240 | /// Enable interrupt on half transfer complete. | ||
| 241 | pub half_transfer_interrupt: bool, | ||
| 242 | /// Enable interrupt on transfer complete. | ||
| 243 | pub complete_transfer_interrupt: bool, | ||
| 244 | } | ||
| 245 | |||
| 246 | impl Default for TransferOptions { | ||
| 247 | fn default() -> Self { | ||
| 248 | Self { | ||
| 249 | priority: Priority::High, | ||
| 250 | circular: false, | ||
| 251 | half_transfer_interrupt: false, | ||
| 252 | complete_transfer_interrupt: true, | ||
| 253 | } | ||
| 254 | } | ||
| 255 | } | ||
| 256 | |||
| 257 | /// DMA error types. | ||
| 258 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | ||
| 259 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 260 | pub enum Error { | ||
| 261 | /// The DMA controller reported a bus error. | ||
| 262 | BusError, | ||
| 263 | /// The transfer was aborted. | ||
| 264 | Aborted, | ||
| 265 | /// Configuration error (e.g., invalid parameters). | ||
| 266 | Configuration, | ||
| 267 | /// Buffer overrun (for ring buffers). | ||
| 268 | Overrun, | ||
| 269 | } | ||
| 270 | |||
| 271 | /// Whether to enable the major loop completion interrupt. | ||
| 272 | /// | ||
| 273 | /// This enum provides better readability than a boolean parameter | ||
| 274 | /// for functions that configure DMA interrupt behavior. | ||
| 275 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | ||
| 276 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 277 | pub enum EnableInterrupt { | ||
| 278 | /// Enable the interrupt on major loop completion. | ||
| 279 | Yes, | ||
| 280 | /// Do not enable the interrupt. | ||
| 281 | No, | ||
| 282 | } | ||
| 283 | |||
| 284 | // ============================================================================ | ||
| 285 | // DMA Request Source Constants | ||
| 286 | // ============================================================================ | ||
| 287 | |||
| 288 | /// DMA request source numbers for LPUART peripherals on DMA0. | ||
| 289 | pub const DMA_REQ_LPUART0_RX: u8 = 21; | ||
| 290 | pub const DMA_REQ_LPUART0_TX: u8 = 22; | ||
| 291 | pub const DMA_REQ_LPUART1_RX: u8 = 23; | ||
| 292 | pub const DMA_REQ_LPUART1_TX: u8 = 24; | ||
| 293 | pub const DMA_REQ_LPUART2_RX: u8 = 25; | ||
| 294 | pub const DMA_REQ_LPUART2_TX: u8 = 26; | ||
| 295 | pub const DMA_REQ_LPUART3_RX: u8 = 27; | ||
| 296 | pub const DMA_REQ_LPUART3_TX: u8 = 28; | ||
| 297 | pub const DMA_REQ_LPUART4_RX: u8 = 29; | ||
| 298 | pub const DMA_REQ_LPUART4_TX: u8 = 30; | ||
| 299 | pub const DMA_REQ_LPUART5_RX: u8 = 31; | ||
| 300 | pub const DMA_REQ_LPUART5_TX: u8 = 32; | ||
| 301 | |||
| 302 | // ============================================================================ | ||
| 303 | // Channel Trait (Sealed Pattern) | ||
| 304 | // ============================================================================ | ||
| 305 | |||
| 306 | mod sealed { | ||
| 307 | use crate::pac::Interrupt; | ||
| 308 | |||
| 309 | /// Sealed trait for DMA channels. | ||
| 310 | pub trait SealedChannel { | ||
| 311 | /// Zero-based channel index into the TCD array. | ||
| 312 | fn index(&self) -> usize; | ||
| 313 | /// Interrupt vector for this channel. | ||
| 314 | fn interrupt(&self) -> Interrupt; | ||
| 315 | } | ||
| 316 | } | ||
| 317 | |||
| 318 | /// Marker trait implemented by HAL peripheral tokens that map to a DMA0 | ||
| 319 | /// channel backed by one EDMA_0_TCD0 TCD slot. | ||
| 320 | /// | ||
| 321 | /// This trait is sealed and cannot be implemented outside this crate. | ||
| 322 | #[allow(private_bounds)] | ||
| 323 | pub trait Channel: sealed::SealedChannel + PeripheralType + Into<AnyChannel> + 'static { | ||
| 324 | /// Zero-based channel index into the TCD array. | ||
| 325 | const INDEX: usize; | ||
| 326 | /// Interrupt vector for this channel. | ||
| 327 | const INTERRUPT: Interrupt; | ||
| 328 | } | ||
| 329 | |||
| 330 | /// Type-erased DMA channel. | ||
| 331 | /// | ||
| 332 | /// This allows storing DMA channels in a uniform way regardless of their | ||
| 333 | /// concrete type, useful for async transfer futures and runtime channel selection. | ||
| 334 | #[derive(Debug, Clone, Copy)] | ||
| 335 | pub struct AnyChannel { | ||
| 336 | index: usize, | ||
| 337 | interrupt: Interrupt, | ||
| 338 | } | ||
| 339 | |||
| 340 | impl AnyChannel { | ||
| 341 | /// Get the channel index. | ||
| 342 | #[inline] | ||
| 343 | pub const fn index(&self) -> usize { | ||
| 344 | self.index | ||
| 345 | } | ||
| 346 | |||
| 347 | /// Get the channel interrupt. | ||
| 348 | #[inline] | ||
| 349 | pub const fn interrupt(&self) -> Interrupt { | ||
| 350 | self.interrupt | ||
| 351 | } | ||
| 352 | |||
| 353 | /// Get a reference to the TCD register block for this channel. | ||
| 354 | /// | ||
| 355 | /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance. | ||
| 356 | #[inline] | ||
| 357 | fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd { | ||
| 358 | // Safety: MCXA276 has a single eDMA instance, and we're only accessing | ||
| 359 | // the TCD for this specific channel | ||
| 360 | let edma = unsafe { &*pac::Edma0Tcd0::ptr() }; | ||
| 361 | edma.tcd(self.index) | ||
| 362 | } | ||
| 363 | |||
| 364 | /// Check if the channel's DONE flag is set. | ||
| 365 | pub fn is_done(&self) -> bool { | ||
| 366 | self.tcd().ch_csr().read().done().bit_is_set() | ||
| 367 | } | ||
| 368 | |||
| 369 | /// Get the waker for this channel. | ||
| 370 | pub fn waker(&self) -> &'static AtomicWaker { | ||
| 371 | &STATES[self.index].waker | ||
| 372 | } | ||
| 373 | } | ||
| 374 | |||
| 375 | impl sealed::SealedChannel for AnyChannel { | ||
| 376 | fn index(&self) -> usize { | ||
| 377 | self.index | ||
| 378 | } | ||
| 379 | |||
| 380 | fn interrupt(&self) -> Interrupt { | ||
| 381 | self.interrupt | ||
| 382 | } | ||
| 383 | } | ||
| 384 | |||
| 385 | /// Macro to implement Channel trait for a peripheral. | ||
| 386 | macro_rules! impl_channel { | ||
| 387 | ($peri:ident, $index:expr, $irq:ident) => { | ||
| 388 | impl sealed::SealedChannel for crate::peripherals::$peri { | ||
| 389 | fn index(&self) -> usize { | ||
| 390 | $index | ||
| 391 | } | ||
| 392 | |||
| 393 | fn interrupt(&self) -> Interrupt { | ||
| 394 | Interrupt::$irq | ||
| 395 | } | ||
| 396 | } | ||
| 397 | |||
| 398 | impl Channel for crate::peripherals::$peri { | ||
| 399 | const INDEX: usize = $index; | ||
| 400 | const INTERRUPT: Interrupt = Interrupt::$irq; | ||
| 401 | } | ||
| 402 | |||
| 403 | impl From<crate::peripherals::$peri> for AnyChannel { | ||
| 404 | fn from(_: crate::peripherals::$peri) -> Self { | ||
| 405 | AnyChannel { | ||
| 406 | index: $index, | ||
| 407 | interrupt: Interrupt::$irq, | ||
| 408 | } | ||
| 409 | } | ||
| 410 | } | ||
| 411 | }; | ||
| 412 | } | ||
| 413 | |||
| 414 | impl_channel!(DMA_CH0, 0, DMA_CH0); | ||
| 415 | impl_channel!(DMA_CH1, 1, DMA_CH1); | ||
| 416 | impl_channel!(DMA_CH2, 2, DMA_CH2); | ||
| 417 | impl_channel!(DMA_CH3, 3, DMA_CH3); | ||
| 418 | impl_channel!(DMA_CH4, 4, DMA_CH4); | ||
| 419 | impl_channel!(DMA_CH5, 5, DMA_CH5); | ||
| 420 | impl_channel!(DMA_CH6, 6, DMA_CH6); | ||
| 421 | impl_channel!(DMA_CH7, 7, DMA_CH7); | ||
| 422 | |||
| 423 | /// Strongly-typed handle to a DMA0 channel. | ||
| 424 | /// | ||
| 425 | /// The lifetime of this value is tied to the unique peripheral token | ||
| 426 | /// supplied by `embassy_hal_internal::peripherals!`, so safe code cannot | ||
| 427 | /// create two `DmaChannel` instances for the same hardware channel. | ||
| 428 | pub struct DmaChannel<C: Channel> { | ||
| 429 | _ch: core::marker::PhantomData<C>, | ||
| 430 | } | ||
| 431 | |||
| 432 | // ============================================================================ | ||
| 433 | // DMA Transfer Methods - API Overview | ||
| 434 | // ============================================================================ | ||
| 435 | // | ||
| 436 | // The DMA API provides two categories of methods for configuring transfers: | ||
| 437 | // | ||
| 438 | // ## 1. Async Methods (Return `Transfer` Future) | ||
| 439 | // | ||
| 440 | // These methods return a [`Transfer`] Future that must be `.await`ed: | ||
| 441 | // | ||
| 442 | // - [`write()`](DmaChannel::write) - Memory-to-peripheral using default eDMA TCD block | ||
| 443 | // - [`read()`](DmaChannel::read) - Peripheral-to-memory using default eDMA TCD block | ||
| 444 | // - [`write_to_peripheral()`](DmaChannel::write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block | ||
| 445 | // - [`read_from_peripheral()`](DmaChannel::read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block | ||
| 446 | // - [`mem_to_mem()`](DmaChannel::mem_to_mem) - Memory-to-memory using default eDMA TCD block | ||
| 447 | // - [`transfer_mem_to_mem()`](DmaChannel::transfer_mem_to_mem) - Memory-to-memory with custom eDMA TCD block | ||
| 448 | // | ||
| 449 | // The `Transfer` manages the DMA lifecycle automatically: | ||
| 450 | // - Enables channel request | ||
| 451 | // - Waits for completion via async/await | ||
| 452 | // - Cleans up on completion | ||
| 453 | // | ||
| 454 | // **Important:** `Transfer::Drop` aborts the transfer if dropped before completion. | ||
| 455 | // This means you MUST `.await` the Transfer or it will be aborted when it goes out of scope. | ||
| 456 | // | ||
| 457 | // **Use case:** When you want to use async/await and let the Transfer handle lifecycle management. | ||
| 458 | // | ||
| 459 | // ## 2. Setup Methods (Configure TCD Only) | ||
| 460 | // | ||
| 461 | // These methods configure the TCD but do NOT return a `Transfer`: | ||
| 462 | // | ||
| 463 | // - [`setup_write()`](DmaChannel::setup_write) - Memory-to-peripheral using default eDMA TCD block | ||
| 464 | // - [`setup_read()`](DmaChannel::setup_read) - Peripheral-to-memory using default eDMA TCD block | ||
| 465 | // - [`setup_write_to_peripheral()`](DmaChannel::setup_write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block | ||
| 466 | // - [`setup_read_from_peripheral()`](DmaChannel::setup_read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block | ||
| 467 | // | ||
| 468 | // The caller is responsible for the complete DMA lifecycle: | ||
| 469 | // 1. Call [`enable_request()`](DmaChannel::enable_request) to start the transfer | ||
| 470 | // 2. Poll [`is_done()`](DmaChannel::is_done) or use interrupts to detect completion | ||
| 471 | // 3. Call [`disable_request()`](DmaChannel::disable_request), [`clear_done()`](DmaChannel::clear_done), | ||
| 472 | // [`clear_interrupt()`](DmaChannel::clear_interrupt) for cleanup | ||
| 473 | // | ||
| 474 | // **Use case:** Peripheral drivers (like LPUART) that implement their own `poll_fn`-based | ||
| 475 | // completion mechanism and cannot use the `Transfer` Future approach. | ||
| 476 | // | ||
| 477 | // ============================================================================ | ||
| 478 | |||
| 479 | impl<C: Channel> DmaChannel<C> { | ||
| 480 | /// Wrap a DMA channel token (takes ownership of the Peri wrapper). | ||
| 481 | #[inline] | ||
| 482 | pub fn new(_ch: embassy_hal_internal::Peri<'_, C>) -> Self { | ||
| 483 | Self { | ||
| 484 | _ch: core::marker::PhantomData, | ||
| 485 | } | ||
| 486 | } | ||
| 487 | |||
| 488 | /// Wrap a DMA channel token directly (for internal use). | ||
| 489 | #[inline] | ||
| 490 | pub fn from_token(_ch: C) -> Self { | ||
| 491 | Self { | ||
| 492 | _ch: core::marker::PhantomData, | ||
| 493 | } | ||
| 494 | } | ||
| 495 | |||
| 496 | /// Channel index in the EDMA_0_TCD0 array. | ||
| 497 | #[inline] | ||
| 498 | pub const fn index(&self) -> usize { | ||
| 499 | C::INDEX | ||
| 500 | } | ||
| 501 | |||
| 502 | /// Convert this typed channel into a type-erased `AnyChannel`. | ||
| 503 | #[inline] | ||
| 504 | pub fn into_any(self) -> AnyChannel { | ||
| 505 | AnyChannel { | ||
| 506 | index: C::INDEX, | ||
| 507 | interrupt: C::INTERRUPT, | ||
| 508 | } | ||
| 509 | } | ||
| 510 | |||
| 511 | /// Get a reference to the type-erased channel info. | ||
| 512 | #[inline] | ||
| 513 | pub fn as_any(&self) -> AnyChannel { | ||
| 514 | AnyChannel { | ||
| 515 | index: C::INDEX, | ||
| 516 | interrupt: C::INTERRUPT, | ||
| 517 | } | ||
| 518 | } | ||
| 519 | |||
| 520 | /// Return a reference to the underlying TCD register block. | ||
| 521 | /// | ||
| 522 | /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance. | ||
| 523 | #[inline] | ||
| 524 | pub fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd { | ||
| 525 | // Safety: MCXA276 has a single eDMA instance | ||
| 526 | let edma = unsafe { &*pac::Edma0Tcd0::ptr() }; | ||
| 527 | edma.tcd(C::INDEX) | ||
| 528 | } | ||
| 529 | |||
| 530 | /// Start an async transfer. | ||
| 531 | /// | ||
| 532 | /// The channel must already be configured. This enables the channel | ||
| 533 | /// request and returns a `Transfer` future that resolves when the | ||
| 534 | /// DMA transfer completes. | ||
| 535 | /// | ||
| 536 | /// # Safety | ||
| 537 | /// | ||
| 538 | /// The caller must ensure the DMA channel has been properly configured | ||
| 539 | /// and that source/destination buffers remain valid for the duration | ||
| 540 | /// of the transfer. | ||
| 541 | pub unsafe fn start_transfer(&self) -> Transfer<'_> { | ||
| 542 | // Clear any previous DONE/INT flags | ||
| 543 | let t = self.tcd(); | ||
| 544 | t.ch_csr().modify(|_, w| w.done().clear_bit_by_one()); | ||
| 545 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 546 | |||
| 547 | // Enable the channel request | ||
| 548 | t.ch_csr().modify(|_, w| w.erq().enable()); | ||
| 549 | |||
| 550 | Transfer::new(self.as_any()) | ||
| 551 | } | ||
| 552 | |||
| 553 | // ======================================================================== | ||
| 554 | // Type-Safe Transfer Methods (Embassy-style API) | ||
| 555 | // ======================================================================== | ||
| 556 | |||
| 557 | /// Perform a memory-to-memory DMA transfer (simplified API). | ||
| 558 | /// | ||
| 559 | /// This is a type-safe wrapper that uses the `Word` trait to determine | ||
| 560 | /// the correct transfer width automatically. Uses the global eDMA TCD | ||
| 561 | /// register accessor internally. | ||
| 562 | /// | ||
| 563 | /// # Arguments | ||
| 564 | /// | ||
| 565 | /// * `src` - Source buffer | ||
| 566 | /// * `dst` - Destination buffer (must be at least as large as src) | ||
| 567 | /// * `options` - Transfer configuration options | ||
| 568 | /// | ||
| 569 | /// # Safety | ||
| 570 | /// | ||
| 571 | /// The source and destination buffers must remain valid for the | ||
| 572 | /// duration of the transfer. | ||
| 573 | pub unsafe fn mem_to_mem<W: Word>(&self, src: &[W], dst: &mut [W], options: TransferOptions) -> Transfer<'_> { | ||
| 574 | self.transfer_mem_to_mem(src, dst, options) | ||
| 575 | } | ||
| 576 | |||
| 577 | /// Perform a memory-to-memory DMA transfer. | ||
| 578 | /// | ||
| 579 | /// This is a type-safe wrapper that uses the `Word` trait to determine | ||
| 580 | /// the correct transfer width automatically. | ||
| 581 | /// | ||
| 582 | /// # Arguments | ||
| 583 | /// | ||
| 584 | /// * `edma` - Reference to the eDMA TCD register block | ||
| 585 | /// * `src` - Source buffer | ||
| 586 | /// * `dst` - Destination buffer (must be at least as large as src) | ||
| 587 | /// * `options` - Transfer configuration options | ||
| 588 | /// | ||
| 589 | /// # Safety | ||
| 590 | /// | ||
| 591 | /// The source and destination buffers must remain valid for the | ||
| 592 | /// duration of the transfer. | ||
| 593 | pub unsafe fn transfer_mem_to_mem<W: Word>( | ||
| 594 | &self, | ||
| 595 | src: &[W], | ||
| 596 | dst: &mut [W], | ||
| 597 | options: TransferOptions, | ||
| 598 | ) -> Transfer<'_> { | ||
| 599 | assert!(!src.is_empty()); | ||
| 600 | assert!(dst.len() >= src.len()); | ||
| 601 | assert!(src.len() <= 0x7fff); | ||
| 602 | |||
| 603 | let size = W::size(); | ||
| 604 | let byte_count = (src.len() * size.bytes()) as u32; | ||
| 605 | |||
| 606 | let t = self.tcd(); | ||
| 607 | |||
| 608 | // Reset channel state - clear DONE, disable requests, clear errors | ||
| 609 | t.ch_csr().write(|w| { | ||
| 610 | w.erq() | ||
| 611 | .disable() | ||
| 612 | .earq() | ||
| 613 | .disable() | ||
| 614 | .eei() | ||
| 615 | .no_error() | ||
| 616 | .done() | ||
| 617 | .clear_bit_by_one() | ||
| 618 | }); | ||
| 619 | t.ch_es().write(|w| w.err().clear_bit_by_one()); | ||
| 620 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 621 | |||
| 622 | // Memory barrier to ensure channel state is fully reset before touching TCD | ||
| 623 | cortex_m::asm::dsb(); | ||
| 624 | |||
| 625 | // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt). | ||
| 626 | // Reset ALL TCD registers to 0 to clear any stale configuration from | ||
| 627 | // previous transfers. This is critical when reusing a channel. | ||
| 628 | t.tcd_saddr().write(|w| w.saddr().bits(0)); | ||
| 629 | t.tcd_soff().write(|w| w.soff().bits(0)); | ||
| 630 | t.tcd_attr().write(|w| w.bits(0)); | ||
| 631 | t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0)); | ||
| 632 | t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); | ||
| 633 | t.tcd_daddr().write(|w| w.daddr().bits(0)); | ||
| 634 | t.tcd_doff().write(|w| w.doff().bits(0)); | ||
| 635 | t.tcd_citer_elinkno().write(|w| w.bits(0)); | ||
| 636 | t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0)); | ||
| 637 | t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely | ||
| 638 | t.tcd_biter_elinkno().write(|w| w.bits(0)); | ||
| 639 | |||
| 640 | // Memory barrier after TCD reset | ||
| 641 | cortex_m::asm::dsb(); | ||
| 642 | |||
| 643 | // Note: Priority is managed by round-robin arbitration (set in init()) | ||
| 644 | // Per-channel priority can be configured via ch_pri() if needed | ||
| 645 | |||
| 646 | // Now configure the new transfer | ||
| 647 | |||
| 648 | // Source address and increment | ||
| 649 | t.tcd_saddr().write(|w| w.saddr().bits(src.as_ptr() as u32)); | ||
| 650 | t.tcd_soff().write(|w| w.soff().bits(size.bytes() as u16)); | ||
| 651 | |||
| 652 | // Destination address and increment | ||
| 653 | t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32)); | ||
| 654 | t.tcd_doff().write(|w| w.doff().bits(size.bytes() as u16)); | ||
| 655 | |||
| 656 | // Transfer attributes (size) | ||
| 657 | let hw_size = size.to_hw_size(); | ||
| 658 | t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size)); | ||
| 659 | |||
| 660 | // Minor loop: transfer all bytes in one minor loop | ||
| 661 | t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_count)); | ||
| 662 | |||
| 663 | // No source/dest adjustment after major loop | ||
| 664 | t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); | ||
| 665 | t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0)); | ||
| 666 | |||
| 667 | // Major loop count = 1 (single major loop) | ||
| 668 | // Write BITER first, then CITER (CITER must match BITER at start) | ||
| 669 | t.tcd_biter_elinkno().write(|w| w.biter().bits(1)); | ||
| 670 | t.tcd_citer_elinkno().write(|w| w.citer().bits(1)); | ||
| 671 | |||
| 672 | // Memory barrier before setting START | ||
| 673 | cortex_m::asm::dsb(); | ||
| 674 | |||
| 675 | // Control/status: interrupt on major complete, start | ||
| 676 | // Write this last after all other TCD registers are configured | ||
| 677 | let int_major = options.complete_transfer_interrupt; | ||
| 678 | t.tcd_csr().write(|w| { | ||
| 679 | w.intmajor() | ||
| 680 | .bit(int_major) | ||
| 681 | .inthalf() | ||
| 682 | .bit(options.half_transfer_interrupt) | ||
| 683 | .dreq() | ||
| 684 | .set_bit() // Auto-disable request after major loop | ||
| 685 | .start() | ||
| 686 | .set_bit() // Start the channel | ||
| 687 | }); | ||
| 688 | |||
| 689 | Transfer::new(self.as_any()) | ||
| 690 | } | ||
| 691 | |||
    /// Fill a memory buffer with a pattern value (memset).
    ///
    /// This performs a DMA transfer where the source address remains fixed
    /// (pattern value) while the destination address increments through the buffer.
    /// It's useful for quickly filling large memory regions with a constant value.
    ///
    /// The transfer is software-triggered: the whole buffer is moved in a
    /// single minor loop started by one START pulse (see inline comments).
    ///
    /// # Arguments
    ///
    /// * `pattern` - Reference to the pattern value (will be read repeatedly)
    /// * `dst` - Destination buffer to fill
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `dst` is empty or longer than `0x7fff` elements.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use embassy_mcxa::dma::{DmaChannel, TransferOptions};
    ///
    /// let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// let pattern: u32 = 0xDEADBEEF;
    /// let mut buffer = [0u32; 256];
    ///
    /// unsafe {
    ///     dma_ch.memset(&pattern, &mut buffer, TransferOptions::default()).await;
    /// }
    /// // buffer is now filled with 0xDEADBEEF
    /// ```
    ///
    /// # Safety
    ///
    /// - The pattern and destination buffer must remain valid for the duration of the transfer.
    pub unsafe fn memset<W: Word>(&self, pattern: &W, dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
        assert!(!dst.is_empty());
        // 0x7fff mirrors the 15-bit CITER/BITER limit used by the other
        // transfer methods; here it also bounds `total_bytes` written to
        // NBYTES below (presumably a 30-bit field on this eDMA — TODO confirm
        // against the reference manual).
        assert!(dst.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();
        // Total bytes to transfer - all in one minor loop for software-triggered transfers
        let total_bytes = (dst.len() * byte_size) as u32;

        let t = self.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is fully reset before touching TCD
        cortex_m::asm::dsb();

        // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
        // Reset ALL TCD registers to 0 to clear any stale configuration from
        // previous transfers. This is critical when reusing a channel.
        t.tcd_saddr().write(|w| w.saddr().bits(0));
        t.tcd_soff().write(|w| w.soff().bits(0));
        t.tcd_attr().write(|w| w.bits(0));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_daddr().write(|w| w.daddr().bits(0));
        t.tcd_doff().write(|w| w.doff().bits(0));
        t.tcd_citer_elinkno().write(|w| w.bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
        t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
        t.tcd_biter_elinkno().write(|w| w.bits(0));

        // Memory barrier after TCD reset
        cortex_m::asm::dsb();

        // Now configure the new transfer
        //
        // For software-triggered memset, we use a SINGLE minor loop that transfers
        // all bytes at once. The source address stays fixed (SOFF=0) while the
        // destination increments (DOFF=byte_size). The eDMA will read from the
        // same source address for each destination word.
        //
        // This is necessary because the START bit only triggers ONE minor loop
        // iteration. Using CITER>1 with software trigger would require multiple
        // START triggers.

        // Source: pattern address, fixed (soff=0)
        t.tcd_saddr().write(|w| w.saddr().bits(pattern as *const W as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // Fixed source - reads pattern repeatedly

        // Destination: memory buffer, incrementing by word size
        t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes - source and dest are same word size
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));

        // Minor loop: transfer ALL bytes in one minor loop (like mem_to_mem)
        // This allows the entire transfer to complete with a single START trigger
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(total_bytes));

        // No address adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = 1 (single major loop, all data in minor loop)
        // Write BITER first, then CITER (CITER must match BITER at start)
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Control/status: interrupt on major complete, start immediately
        // Write this last after all other TCD registers are configured
        let int_major = options.complete_transfer_interrupt;
        t.tcd_csr().write(|w| {
            w.intmajor()
                .bit(int_major)
                .inthalf()
                .bit(options.half_transfer_interrupt)
                .dreq()
                .set_bit() // Auto-disable request after major loop
                .start()
                .set_bit() // Start the channel
        });

        Transfer::new(self.as_any())
    }
| 823 | |||
| 824 | /// Write data from memory to a peripheral register. | ||
| 825 | /// | ||
| 826 | /// The destination address remains fixed (peripheral register) while | ||
| 827 | /// the source address increments through the buffer. | ||
| 828 | /// | ||
| 829 | /// # Arguments | ||
| 830 | /// | ||
| 831 | /// * `buf` - Source buffer to write from | ||
| 832 | /// * `peri_addr` - Peripheral register address | ||
| 833 | /// * `options` - Transfer configuration options | ||
| 834 | /// | ||
| 835 | /// # Safety | ||
| 836 | /// | ||
| 837 | /// - The buffer must remain valid for the duration of the transfer. | ||
| 838 | /// - The peripheral address must be valid for writes. | ||
| 839 | pub unsafe fn write<W: Word>(&self, buf: &[W], peri_addr: *mut W, options: TransferOptions) -> Transfer<'_> { | ||
| 840 | self.write_to_peripheral(buf, peri_addr, options) | ||
| 841 | } | ||
| 842 | |||
| 843 | /// Configure a memory-to-peripheral DMA transfer without starting it. | ||
| 844 | /// | ||
| 845 | /// This is a convenience wrapper around [`setup_write_to_peripheral()`](Self::setup_write_to_peripheral) | ||
| 846 | /// that uses the default eDMA TCD register block. | ||
| 847 | /// | ||
| 848 | /// This method configures the TCD but does NOT return a `Transfer`. The caller | ||
| 849 | /// is responsible for the complete DMA lifecycle: | ||
| 850 | /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer | ||
| 851 | /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion | ||
| 852 | /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done), | ||
| 853 | /// [`clear_interrupt()`](Self::clear_interrupt) for cleanup | ||
| 854 | /// | ||
| 855 | /// # Example | ||
| 856 | /// | ||
| 857 | /// ```no_run | ||
| 858 | /// # use embassy_mcxa::dma::DmaChannel; | ||
| 859 | /// # let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 860 | /// # let uart_tx_addr = 0x4000_0000 as *mut u8; | ||
| 861 | /// let data = [0x48, 0x65, 0x6c, 0x6c, 0x6f]; // "Hello" | ||
| 862 | /// | ||
| 863 | /// unsafe { | ||
| 864 | /// // Configure the transfer | ||
| 865 | /// dma_ch.setup_write(&data, uart_tx_addr, true); | ||
| 866 | /// | ||
| 867 | /// // Start when peripheral is ready | ||
| 868 | /// dma_ch.enable_request(); | ||
| 869 | /// | ||
| 870 | /// // Wait for completion (or use interrupt) | ||
| 871 | /// while !dma_ch.is_done() {} | ||
| 872 | /// | ||
| 873 | /// // Clean up | ||
| 874 | /// dma_ch.clear_done(); | ||
| 875 | /// dma_ch.clear_interrupt(); | ||
| 876 | /// } | ||
| 877 | /// ``` | ||
| 878 | /// | ||
| 879 | /// # Arguments | ||
| 880 | /// | ||
| 881 | /// * `buf` - Source buffer to write from | ||
| 882 | /// * `peri_addr` - Peripheral register address | ||
| 883 | /// * `enable_interrupt` - Whether to enable interrupt on completion | ||
| 884 | /// | ||
| 885 | /// # Safety | ||
| 886 | /// | ||
| 887 | /// - The buffer must remain valid for the duration of the transfer. | ||
| 888 | /// - The peripheral address must be valid for writes. | ||
| 889 | pub unsafe fn setup_write<W: Word>(&self, buf: &[W], peri_addr: *mut W, enable_interrupt: EnableInterrupt) { | ||
| 890 | self.setup_write_to_peripheral(buf, peri_addr, enable_interrupt) | ||
| 891 | } | ||
| 892 | |||
| 893 | /// Write data from memory to a peripheral register. | ||
| 894 | /// | ||
| 895 | /// The destination address remains fixed (peripheral register) while | ||
| 896 | /// the source address increments through the buffer. | ||
| 897 | /// | ||
| 898 | /// # Arguments | ||
| 899 | /// | ||
| 900 | /// * `buf` - Source buffer to write from | ||
| 901 | /// * `peri_addr` - Peripheral register address | ||
| 902 | /// * `options` - Transfer configuration options | ||
| 903 | /// | ||
| 904 | /// # Safety | ||
| 905 | /// | ||
| 906 | /// - The buffer must remain valid for the duration of the transfer. | ||
| 907 | /// - The peripheral address must be valid for writes. | ||
| 908 | pub unsafe fn write_to_peripheral<W: Word>( | ||
| 909 | &self, | ||
| 910 | buf: &[W], | ||
| 911 | peri_addr: *mut W, | ||
| 912 | options: TransferOptions, | ||
| 913 | ) -> Transfer<'_> { | ||
| 914 | assert!(!buf.is_empty()); | ||
| 915 | assert!(buf.len() <= 0x7fff); | ||
| 916 | |||
| 917 | let size = W::size(); | ||
| 918 | let byte_size = size.bytes(); | ||
| 919 | |||
| 920 | let t = self.tcd(); | ||
| 921 | |||
| 922 | // Reset channel state | ||
| 923 | t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one()); | ||
| 924 | t.ch_es().write(|w| w.bits(0)); | ||
| 925 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 926 | |||
| 927 | // Addresses | ||
| 928 | t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32)); | ||
| 929 | t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32)); | ||
| 930 | |||
| 931 | // Offsets: Source increments, Dest fixed | ||
| 932 | t.tcd_soff().write(|w| w.soff().bits(byte_size as u16)); | ||
| 933 | t.tcd_doff().write(|w| w.doff().bits(0)); | ||
| 934 | |||
| 935 | // Attributes: set size and explicitly disable modulo | ||
| 936 | let hw_size = size.to_hw_size(); | ||
| 937 | t.tcd_attr().write(|w| { | ||
| 938 | w.ssize() | ||
| 939 | .bits(hw_size) | ||
| 940 | .dsize() | ||
| 941 | .bits(hw_size) | ||
| 942 | .smod() | ||
| 943 | .disable() | ||
| 944 | .dmod() | ||
| 945 | .bits(0) | ||
| 946 | }); | ||
| 947 | |||
| 948 | // Minor loop: transfer one word per request (match old: only set nbytes) | ||
| 949 | t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32)); | ||
| 950 | |||
| 951 | // No final adjustments | ||
| 952 | t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); | ||
| 953 | t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0)); | ||
| 954 | |||
| 955 | // Major loop count = number of words | ||
| 956 | let count = buf.len() as u16; | ||
| 957 | t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable()); | ||
| 958 | t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable()); | ||
| 959 | |||
| 960 | // CSR: interrupt on major loop complete and auto-clear ERQ | ||
| 961 | t.tcd_csr().write(|w| { | ||
| 962 | let w = if options.complete_transfer_interrupt { | ||
| 963 | w.intmajor().enable() | ||
| 964 | } else { | ||
| 965 | w.intmajor().disable() | ||
| 966 | }; | ||
| 967 | w.inthalf() | ||
| 968 | .disable() | ||
| 969 | .dreq() | ||
| 970 | .erq_field_clear() // Disable request when done | ||
| 971 | .esg() | ||
| 972 | .normal_format() | ||
| 973 | .majorelink() | ||
| 974 | .disable() | ||
| 975 | .eeop() | ||
| 976 | .disable() | ||
| 977 | .esda() | ||
| 978 | .disable() | ||
| 979 | .bwc() | ||
| 980 | .no_stall() | ||
| 981 | }); | ||
| 982 | |||
| 983 | // Ensure all TCD writes have completed before DMA engine reads them | ||
| 984 | cortex_m::asm::dsb(); | ||
| 985 | |||
| 986 | Transfer::new(self.as_any()) | ||
| 987 | } | ||
| 988 | |||
| 989 | /// Read data from a peripheral register to memory. | ||
| 990 | /// | ||
| 991 | /// The source address remains fixed (peripheral register) while | ||
| 992 | /// the destination address increments through the buffer. | ||
| 993 | /// | ||
| 994 | /// # Arguments | ||
| 995 | /// | ||
| 996 | /// * `peri_addr` - Peripheral register address | ||
| 997 | /// * `buf` - Destination buffer to read into | ||
| 998 | /// * `options` - Transfer configuration options | ||
| 999 | /// | ||
| 1000 | /// # Safety | ||
| 1001 | /// | ||
| 1002 | /// - The buffer must remain valid for the duration of the transfer. | ||
| 1003 | /// - The peripheral address must be valid for reads. | ||
| 1004 | pub unsafe fn read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], options: TransferOptions) -> Transfer<'_> { | ||
| 1005 | self.read_from_peripheral(peri_addr, buf, options) | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | /// Configure a peripheral-to-memory DMA transfer without starting it. | ||
| 1009 | /// | ||
| 1010 | /// This is a convenience wrapper around [`setup_read_from_peripheral()`](Self::setup_read_from_peripheral) | ||
| 1011 | /// that uses the default eDMA TCD register block. | ||
| 1012 | /// | ||
| 1013 | /// This method configures the TCD but does NOT return a `Transfer`. The caller | ||
| 1014 | /// is responsible for the complete DMA lifecycle: | ||
| 1015 | /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer | ||
| 1016 | /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion | ||
| 1017 | /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done), | ||
| 1018 | /// [`clear_interrupt()`](Self::clear_interrupt) for cleanup | ||
| 1019 | /// | ||
| 1020 | /// # Example | ||
| 1021 | /// | ||
| 1022 | /// ```no_run | ||
| 1023 | /// # use embassy_mcxa::dma::DmaChannel; | ||
| 1024 | /// # let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 1025 | /// # let uart_rx_addr = 0x4000_0000 as *const u8; | ||
| 1026 | /// let mut buf = [0u8; 32]; | ||
| 1027 | /// | ||
| 1028 | /// unsafe { | ||
| 1029 | /// // Configure the transfer | ||
| 1030 | /// dma_ch.setup_read(uart_rx_addr, &mut buf, true); | ||
| 1031 | /// | ||
| 1032 | /// // Start when peripheral is ready | ||
| 1033 | /// dma_ch.enable_request(); | ||
| 1034 | /// | ||
| 1035 | /// // Wait for completion (or use interrupt) | ||
| 1036 | /// while !dma_ch.is_done() {} | ||
| 1037 | /// | ||
| 1038 | /// // Clean up | ||
| 1039 | /// dma_ch.clear_done(); | ||
| 1040 | /// dma_ch.clear_interrupt(); | ||
| 1041 | /// } | ||
| 1042 | /// // buf now contains received data | ||
| 1043 | /// ``` | ||
| 1044 | /// | ||
| 1045 | /// # Arguments | ||
| 1046 | /// | ||
| 1047 | /// * `peri_addr` - Peripheral register address | ||
| 1048 | /// * `buf` - Destination buffer to read into | ||
| 1049 | /// * `enable_interrupt` - Whether to enable interrupt on completion | ||
| 1050 | /// | ||
| 1051 | /// # Safety | ||
| 1052 | /// | ||
| 1053 | /// - The buffer must remain valid for the duration of the transfer. | ||
| 1054 | /// - The peripheral address must be valid for reads. | ||
| 1055 | pub unsafe fn setup_read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], enable_interrupt: EnableInterrupt) { | ||
| 1056 | self.setup_read_from_peripheral(peri_addr, buf, enable_interrupt) | ||
| 1057 | } | ||
| 1058 | |||
    /// Read data from a peripheral register to memory.
    ///
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer.
    ///
    /// One word is transferred per hardware request; the major loop count is
    /// the buffer length, and ERQ is cleared automatically once the major
    /// loop completes (DREQ).
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        options: TransferOptions,
    ) -> Transfer<'_> {
        assert!(!buf.is_empty());
        // CITER/BITER are 15-bit fields when channel linking is disabled.
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request, no offsets
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // No address adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Control/status: interrupt on major complete, auto-clear ERQ when done
        t.tcd_csr().write(|w| {
            // Shadowed rebinding applies each optional interrupt setting in turn.
            let w = if options.complete_transfer_interrupt {
                w.intmajor().enable()
            } else {
                w.intmajor().disable()
            };
            let w = if options.half_transfer_interrupt {
                w.inthalf().enable()
            } else {
                w.inthalf().disable()
            };
            w.dreq()
                .erq_field_clear() // Disable request when done (important for peripheral DMA)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();

        Transfer::new(self.as_any())
    }
| 1175 | |||
    /// Configure a memory-to-peripheral DMA transfer without starting it.
    ///
    /// This configures the TCD for a memory-to-peripheral transfer but does NOT
    /// return a Transfer object. The caller is responsible for:
    /// 1. Enabling the peripheral's DMA request
    /// 2. Calling `enable_request()` to start the transfer
    /// 3. Polling `is_done()` or using interrupts to detect completion
    /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
    ///
    /// Use this when you need manual control over the DMA lifecycle (e.g., in
    /// peripheral drivers that have their own completion polling).
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn setup_write_to_peripheral<W: Word>(
        &self,
        buf: &[W],
        peri_addr: *mut W,
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        // CITER/BITER are 15-bit fields when channel linking is disabled.
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Addresses
        t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));

        // Offsets: Source increments, Dest fixed
        t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
        t.tcd_doff().write(|w| w.doff().bits(0));

        // Attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));

        // No final adjustments
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear() // Hardware clears ERQ after the major loop
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
| 1275 | |||
    /// Configure a peripheral-to-memory DMA transfer without starting it.
    ///
    /// This configures the TCD for a peripheral-to-memory transfer but does NOT
    /// return a Transfer object. The caller is responsible for:
    /// 1. Enabling the peripheral's DMA request
    /// 2. Calling `enable_request()` to start the transfer
    /// 3. Polling `is_done()` or using interrupts to detect completion
    /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
    ///
    /// Use this when you need manual control over the DMA lifecycle (e.g., in
    /// peripheral drivers that have their own completion polling).
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        // CITER/BITER are 15-bit fields when channel linking is disabled.
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0));

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));

        // No final adjustments
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear() // Hardware clears ERQ after the major loop
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
| 1386 | |||
| 1387 | /// Configure the integrated channel MUX to use the given request | ||
| 1388 | /// source value (for example [`DMA_REQ_LPUART2_TX`] or | ||
| 1389 | /// [`DMA_REQ_LPUART2_RX`]). | ||
| 1390 | /// | ||
| 1391 | /// # Safety | ||
| 1392 | /// | ||
| 1393 | /// Caller must ensure the request source mapping matches the | ||
| 1394 | /// peripheral that will drive this channel. | ||
| 1395 | /// | ||
| 1396 | /// # Note | ||
| 1397 | /// | ||
| 1398 | /// The NXP SDK requires a two-step write sequence: first clear | ||
| 1399 | /// the mux to 0, then set the actual source. This is a hardware | ||
| 1400 | /// requirement on eDMA4 for the mux to properly latch. | ||
| 1401 | #[inline] | ||
| 1402 | pub unsafe fn set_request_source(&self, request: u8) { | ||
| 1403 | // Two-step write per NXP SDK: clear to 0, then set actual source. | ||
| 1404 | self.tcd().ch_mux().write(|w| w.src().bits(0)); | ||
| 1405 | cortex_m::asm::dsb(); // Ensure the clear completes before setting new source | ||
| 1406 | self.tcd().ch_mux().write(|w| w.src().bits(request)); | ||
| 1407 | } | ||
| 1408 | |||
| 1409 | /// Enable hardware requests for this channel (ERQ=1). | ||
| 1410 | /// | ||
| 1411 | /// # Safety | ||
| 1412 | /// | ||
| 1413 | /// The channel must be properly configured before enabling requests. | ||
| 1414 | pub unsafe fn enable_request(&self) { | ||
| 1415 | let t = self.tcd(); | ||
| 1416 | t.ch_csr().modify(|_, w| w.erq().enable()); | ||
| 1417 | } | ||
| 1418 | |||
| 1419 | /// Disable hardware requests for this channel (ERQ=0). | ||
| 1420 | /// | ||
| 1421 | /// # Safety | ||
| 1422 | /// | ||
| 1423 | /// Disabling requests on an active transfer may leave the transfer incomplete. | ||
| 1424 | pub unsafe fn disable_request(&self) { | ||
| 1425 | let t = self.tcd(); | ||
| 1426 | t.ch_csr().modify(|_, w| w.erq().disable()); | ||
| 1427 | } | ||
| 1428 | |||
| 1429 | /// Return true if the channel's DONE flag is set. | ||
| 1430 | pub fn is_done(&self) -> bool { | ||
| 1431 | let t = self.tcd(); | ||
| 1432 | t.ch_csr().read().done().bit_is_set() | ||
| 1433 | } | ||
| 1434 | |||
| 1435 | /// Clear the DONE flag for this channel. | ||
| 1436 | /// | ||
| 1437 | /// Uses modify to preserve other bits (especially ERQ) unlike write | ||
| 1438 | /// which would clear ERQ and halt an active transfer. | ||
| 1439 | /// | ||
| 1440 | /// # Safety | ||
| 1441 | /// | ||
| 1442 | /// Clearing DONE while a transfer is in progress may cause undefined behavior. | ||
| 1443 | pub unsafe fn clear_done(&self) { | ||
| 1444 | let t = self.tcd(); | ||
| 1445 | t.ch_csr().modify(|_, w| w.done().clear_bit_by_one()); | ||
| 1446 | } | ||
| 1447 | |||
| 1448 | /// Clear the channel interrupt flag (CH_INT.INT). | ||
| 1449 | /// | ||
| 1450 | /// # Safety | ||
| 1451 | /// | ||
| 1452 | /// Must be called from the correct interrupt context or with interrupts disabled. | ||
| 1453 | pub unsafe fn clear_interrupt(&self) { | ||
| 1454 | let t = self.tcd(); | ||
| 1455 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 1456 | } | ||
| 1457 | |||
| 1458 | /// Trigger a software start for this channel. | ||
| 1459 | /// | ||
| 1460 | /// # Safety | ||
| 1461 | /// | ||
| 1462 | /// The channel must be properly configured with a valid TCD before triggering. | ||
| 1463 | pub unsafe fn trigger_start(&self) { | ||
| 1464 | let t = self.tcd(); | ||
| 1465 | t.tcd_csr().modify(|_, w| w.start().channel_started()); | ||
| 1466 | } | ||
| 1467 | |||
| 1468 | /// Get the waker for this channel | ||
| 1469 | pub fn waker(&self) -> &'static AtomicWaker { | ||
| 1470 | &STATES[C::INDEX].waker | ||
| 1471 | } | ||
| 1472 | |||
| 1473 | /// Enable the interrupt for this channel in the NVIC. | ||
| 1474 | pub fn enable_interrupt(&self) { | ||
| 1475 | unsafe { | ||
| 1476 | cortex_m::peripheral::NVIC::unmask(C::INTERRUPT); | ||
| 1477 | } | ||
| 1478 | } | ||
| 1479 | |||
| 1480 | /// Enable Major Loop Linking. | ||
| 1481 | /// | ||
| 1482 | /// When the major loop completes, the hardware will trigger a service request | ||
| 1483 | /// on `link_ch`. | ||
| 1484 | /// | ||
| 1485 | /// # Arguments | ||
| 1486 | /// | ||
| 1487 | /// * `link_ch` - Target channel index (0-7) to link to | ||
| 1488 | /// | ||
| 1489 | /// # Safety | ||
| 1490 | /// | ||
| 1491 | /// The channel must be properly configured before setting up linking. | ||
| 1492 | pub unsafe fn set_major_link(&self, link_ch: usize) { | ||
| 1493 | let t = self.tcd(); | ||
| 1494 | t.tcd_csr() | ||
| 1495 | .modify(|_, w| w.majorelink().enable().majorlinkch().bits(link_ch as u8)); | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | /// Disable Major Loop Linking. | ||
| 1499 | /// | ||
| 1500 | /// Removes any major loop channel linking previously configured. | ||
| 1501 | /// | ||
| 1502 | /// # Safety | ||
| 1503 | /// | ||
| 1504 | /// The caller must ensure this doesn't disrupt an active transfer that | ||
| 1505 | /// depends on the linking. | ||
| 1506 | pub unsafe fn clear_major_link(&self) { | ||
| 1507 | let t = self.tcd(); | ||
| 1508 | t.tcd_csr().modify(|_, w| w.majorelink().disable()); | ||
| 1509 | } | ||
| 1510 | |||
    /// Enable Minor Loop Linking.
    ///
    /// After each minor loop, the hardware will trigger a service request
    /// on `link_ch`.
    ///
    /// # Arguments
    ///
    /// * `link_ch` - Target channel index (0-7) to link to
    ///
    /// # Note
    ///
    /// This rewrites CITER and BITER registers to the ELINKYES format.
    /// It preserves the current loop count.
    ///
    /// NOTE(review): the ELINKYES count field is narrower than the ELINKNO
    /// one (9 vs 15 bits per the comment in `clear_minor_link`), so a large
    /// count read here could be truncated by the `.bits()` writes below —
    /// confirm against the PAC field widths / reference manual.
    ///
    /// # Safety
    ///
    /// The channel must be properly configured before setting up linking.
    pub unsafe fn set_minor_link(&self, link_ch: usize) {
        let t = self.tcd();

        // Read current CITER (assuming ELINKNO format initially)
        let current_citer = t.tcd_citer_elinkno().read().citer().bits();
        let current_biter = t.tcd_biter_elinkno().read().biter().bits();

        // Write back using ELINKYES format
        t.tcd_citer_elinkyes().write(|w| {
            w.citer()
                .bits(current_citer)
                .elink()
                .enable()
                .linkch()
                .bits(link_ch as u8)
        });

        t.tcd_biter_elinkyes().write(|w| {
            w.biter()
                .bits(current_biter)
                .elink()
                .enable()
                .linkch()
                .bits(link_ch as u8)
        });
    }
| 1554 | |||
    /// Disable Minor Loop Linking.
    ///
    /// Removes any minor loop channel linking previously configured.
    /// This rewrites CITER and BITER registers to the ELINKNO format,
    /// preserving the current loop count.
    ///
    /// # Safety
    ///
    /// The caller must ensure this doesn't disrupt an active transfer that
    /// depends on the linking.
    pub unsafe fn clear_minor_link(&self) {
        let t = self.tcd();

        // Read current CITER (could be in either format, but we only need the count)
        // Note: In ELINKYES format, citer is 9 bits; in ELINKNO, it's 15 bits.
        // We read from ELINKNO which will give us the combined value.
        // NOTE(review): if ELINK was set, this ELINKNO read folds the LINKCH
        // bits into the high bits of the count — confirm the preserved value
        // is what callers expect after un-linking.
        let current_citer = t.tcd_citer_elinkno().read().citer().bits();
        let current_biter = t.tcd_biter_elinkno().read().biter().bits();

        // Write back using ELINKNO format (disabling link)
        t.tcd_citer_elinkno()
            .write(|w| w.citer().bits(current_citer).elink().disable());

        t.tcd_biter_elinkno()
            .write(|w| w.biter().bits(current_biter).elink().disable());
    }
| 1581 | |||
    /// Load a TCD from memory into the hardware channel registers.
    ///
    /// This is useful for scatter/gather and ping-pong transfers where
    /// TCDs are prepared in RAM and then loaded into the hardware.
    ///
    /// NOTE(review): CITER/BITER are written through the ELINKNO view, so an
    /// ELINK/LINKCH encoding carried in the top bits of `tcd.citer` /
    /// `tcd.biter` is not transferred — confirm whether linked TCDs must be
    /// loadable through this path.
    ///
    /// # Safety
    ///
    /// - The TCD must be properly initialized.
    /// - The caller must ensure no concurrent access to the same channel.
    pub unsafe fn load_tcd(&self, tcd: &Tcd) {
        let t = self.tcd();
        t.tcd_saddr().write(|w| w.saddr().bits(tcd.saddr));
        t.tcd_soff().write(|w| w.soff().bits(tcd.soff as u16));
        // ATTR and CSR are stored as raw bit fields in `Tcd`; write wholesale.
        t.tcd_attr().write(|w| w.bits(tcd.attr));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(tcd.nbytes));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(tcd.slast as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(tcd.daddr));
        t.tcd_doff().write(|w| w.doff().bits(tcd.doff as u16));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(tcd.citer));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(tcd.dlast_sga as u32));
        t.tcd_csr().write(|w| w.bits(tcd.csr));
        t.tcd_biter_elinkno().write(|w| w.biter().bits(tcd.biter));
    }
| 1605 | } | ||
| 1606 | |||
| 1607 | // ============================================================================ | ||
| 1608 | // Global DMA Initialization | ||
| 1609 | // ============================================================================ | ||
| 1610 | |||
/// Basic global DMA0 init.
///
/// This enables debug mode and round-robin arbitration and makes sure
/// the controller is not halted. Clock gate and reset must be handled
/// separately via `crate::clocks` and `crate::reset`.
///
/// # Safety
///
/// Must be called after DMA clock is enabled and reset is released.
/// Should only be called once during system initialization.
pub unsafe fn init(peripherals: &pac::Peripherals) {
    let dma = &peripherals.dma0;

    // Single read-modify-write of the management-page control register:
    // EDBG (debug), ERCA (round-robin channel arbitration), HALT cleared.
    dma.mp_csr().modify(|_, w| {
        w.edbg()
            .enable()
            .erca()
            .enable()
            // Leave HAE/ECX/CX at reset defaults.
            .halt()
            .normal_operation()
            // Allow per-channel linking and master-ID replication if used.
            .gclc()
            .available()
            .gmrc()
            .available()
    });
}
| 1639 | |||
/// In-memory representation of a Transfer Control Descriptor (TCD).
///
/// This matches the hardware layout (32 bytes); `align(32)` satisfies the
/// eDMA requirement that descriptors loaded via scatter/gather be 32-byte
/// aligned.
#[repr(C, align(32))]
#[derive(Clone, Copy, Debug, Default)]
pub struct Tcd {
    /// Source address (SADDR).
    pub saddr: u32,
    /// Signed source offset added after each source read (SOFF).
    pub soff: i16,
    /// Transfer attribute bit field: SSIZE/DSIZE (and modulo fields) (ATTR).
    pub attr: u16,
    /// Byte count per minor loop, minor-loop-offset-disabled layout (NBYTES).
    pub nbytes: u32,
    /// Signed source adjustment applied after the major loop (SLAST).
    pub slast: i32,
    /// Destination address (DADDR).
    pub daddr: u32,
    /// Signed destination offset added after each write (DOFF).
    pub doff: i16,
    /// Current major-loop iteration count (CITER).
    pub citer: u16,
    /// Destination last adjustment, or next-TCD address when scatter/gather
    /// (ESG) is enabled (DLAST_SGA).
    pub dlast_sga: i32,
    /// Control/status bit field: START, INTMAJOR, ESG, ... (CSR).
    pub csr: u16,
    /// Beginning (reload) major-loop iteration count (BITER).
    pub biter: u16,
}

// Compile-time guard: the struct must stay exactly as large as the 32-byte
// hardware TCD, otherwise register-by-register loads and scatter/gather
// descriptor fetches would read garbage.
const _: () = assert!(core::mem::size_of::<Tcd>() == 32);
| 1658 | |||
/// Per-channel interrupt bookkeeping shared between the driver futures and
/// the DMA interrupt handlers.
struct State {
    /// Waker for transfer complete interrupt
    waker: AtomicWaker,
    /// Waker for half-transfer interrupt
    half_waker: AtomicWaker,
}

impl State {
    /// Const constructor so the `STATES` array can be a plain `static`.
    const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
            half_waker: AtomicWaker::new(),
        }
    }
}
| 1674 | |||
/// One [`State`] per DMA channel; channel indices 0-7 index directly into
/// this array.
static STATES: [State; 8] = [
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
];
| 1685 | |||
| 1686 | pub(crate) fn waker(idx: usize) -> &'static AtomicWaker { | ||
| 1687 | &STATES[idx].waker | ||
| 1688 | } | ||
| 1689 | |||
| 1690 | pub(crate) fn half_waker(idx: usize) -> &'static AtomicWaker { | ||
| 1691 | &STATES[idx].half_waker | ||
| 1692 | } | ||
| 1693 | |||
| 1694 | // ============================================================================ | ||
| 1695 | // Async Transfer Future | ||
| 1696 | // ============================================================================ | ||
| 1697 | |||
/// An in-progress DMA transfer.
///
/// This type implements `Future` and can be `.await`ed to wait for the
/// transfer to complete. Dropping the transfer will abort it.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    // Type-erased channel executing this transfer.
    channel: AnyChannel,
    // Carries the `'a` borrow — presumably of the source/destination
    // buffers configured by the caller — so they outlive the transfer.
    _phantom: core::marker::PhantomData<&'a ()>,
}
| 1707 | |||
impl<'a> Transfer<'a> {
    /// Create a new transfer for the given channel.
    ///
    /// The caller must have already configured and started the DMA channel.
    pub(crate) fn new(channel: AnyChannel) -> Self {
        Self {
            channel,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Check if the transfer is still running.
    pub fn is_running(&self) -> bool {
        !self.channel.is_done()
    }

    /// Get the remaining transfer count.
    ///
    /// NOTE(review): this reads CITER through the ELINKNO view; if minor-loop
    /// linking is enabled on this channel the returned value may include the
    /// link bits — confirm against the PAC field layout.
    pub fn remaining(&self) -> u16 {
        let t = self.channel.tcd();
        t.tcd_citer_elinkno().read().citer().bits()
    }

    /// Block until the transfer completes.
    pub fn blocking_wait(self) {
        while self.is_running() {
            core::hint::spin_loop();
        }

        // Ensure all DMA writes are visible
        fence(Ordering::SeqCst);

        // Don't run drop (which would abort)
        core::mem::forget(self);
    }

    /// Wait for the half-transfer interrupt asynchronously.
    ///
    /// This is useful for double-buffering scenarios where you want to process
    /// the first half of the buffer while the second half is being filled.
    ///
    /// Returns `true` if the half-transfer occurred, `false` if the transfer
    /// completed before the half-transfer interrupt.
    ///
    /// # Note
    ///
    /// The transfer must be configured with `TransferOptions::half_transfer_interrupt = true`
    /// for this method to work correctly.
    pub async fn wait_half(&mut self) -> bool {
        use core::future::poll_fn;

        poll_fn(|cx| {
            let state = &STATES[self.channel.index];

            // Register the half-transfer waker (before sampling the counters,
            // so a wakeup racing this poll is not lost)
            state.half_waker.register(cx.waker());

            // Check if we're past the half-way point.
            // CITER counts down from BITER, so "past half-way" means
            // citer <= biter / 2.
            let t = self.channel.tcd();
            let biter = t.tcd_biter_elinkno().read().biter().bits();
            let citer = t.tcd_citer_elinkno().read().citer().bits();
            let half_point = biter / 2;

            if self.channel.is_done() {
                // Transfer completed before half-transfer
                Poll::Ready(false)
            } else if citer <= half_point {
                // We're past the half-way point
                fence(Ordering::SeqCst);
                Poll::Ready(true)
            } else {
                Poll::Pending
            }
        })
        .await
    }

    /// Abort the transfer.
    fn abort(&mut self) {
        let t = self.channel.tcd();

        // Disable channel requests
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear any pending interrupt
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Clear DONE flag
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);
    }
}
| 1800 | |||
// `Transfer` holds no self-referential data (just a channel handle and a
// marker), so it may be moved freely even while being polled.
impl<'a> Unpin for Transfer<'a> {}
| 1802 | |||
| 1803 | impl<'a> Future for Transfer<'a> { | ||
| 1804 | type Output = (); | ||
| 1805 | |||
| 1806 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { | ||
| 1807 | let state = &STATES[self.channel.index]; | ||
| 1808 | |||
| 1809 | // Register waker first | ||
| 1810 | state.waker.register(cx.waker()); | ||
| 1811 | |||
| 1812 | let done = self.channel.is_done(); | ||
| 1813 | |||
| 1814 | if done { | ||
| 1815 | // Ensure all DMA writes are visible before returning | ||
| 1816 | fence(Ordering::SeqCst); | ||
| 1817 | Poll::Ready(()) | ||
| 1818 | } else { | ||
| 1819 | Poll::Pending | ||
| 1820 | } | ||
| 1821 | } | ||
| 1822 | } | ||
| 1823 | |||
| 1824 | impl<'a> Drop for Transfer<'a> { | ||
| 1825 | fn drop(&mut self) { | ||
| 1826 | // Only abort if the transfer is still running | ||
| 1827 | // If already complete, no need to abort | ||
| 1828 | if self.is_running() { | ||
| 1829 | self.abort(); | ||
| 1830 | |||
| 1831 | // Wait for abort to complete | ||
| 1832 | while self.is_running() { | ||
| 1833 | core::hint::spin_loop(); | ||
| 1834 | } | ||
| 1835 | } | ||
| 1836 | |||
| 1837 | fence(Ordering::SeqCst); | ||
| 1838 | } | ||
| 1839 | } | ||
| 1840 | |||
| 1841 | // ============================================================================ | ||
| 1842 | // Ring Buffer for Circular DMA | ||
| 1843 | // ============================================================================ | ||
| 1844 | |||
/// A ring buffer for continuous DMA reception.
///
/// This structure manages a circular DMA transfer, allowing continuous
/// reception of data without losing bytes between reads. It uses both
/// half-transfer and complete-transfer interrupts to track available data.
/// Call [`RingBuffer::stop`] to end the transfer and release the channel.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, RingBuffer, TransferOptions};
///
/// static mut RX_BUF: [u8; 64] = [0; 64];
///
/// let dma_ch = DmaChannel::new(p.DMA_CH0);
/// let ring_buf = unsafe {
///     dma_ch.setup_circular_read(
///         uart_rx_addr,
///         &mut RX_BUF,
///     )
/// };
///
/// // Read data as it arrives
/// let mut buf = [0u8; 16];
/// let n = ring_buf.read(&mut buf).await?;
/// ```
pub struct RingBuffer<'a, W: Word> {
    // Type-erased channel performing the circular transfer.
    channel: AnyChannel,
    /// Buffer pointer. We use NonNull instead of &mut because DMA acts like
    /// a separate thread writing to this buffer, and &mut claims exclusive
    /// access which the compiler could optimize incorrectly.
    buf: NonNull<[W]>,
    /// Buffer length cached for convenience
    buf_len: usize,
    /// Read position in the buffer (consumer side)
    read_pos: AtomicUsize,
    /// Phantom data to tie the lifetime to the original buffer
    _lt: PhantomData<&'a mut [W]>,
}
| 1883 | |||
impl<'a, W: Word> RingBuffer<'a, W> {
    /// Create a new ring buffer for the given channel and buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure:
    /// - The DMA channel has been configured for circular transfer
    /// - The buffer remains valid for the lifetime of the ring buffer
    /// - Only one RingBuffer exists per DMA channel at a time
    pub(crate) unsafe fn new(channel: AnyChannel, buf: &'a mut [W]) -> Self {
        // Capture the length before `buf` is converted into a raw pointer.
        let buf_len = buf.len();
        Self {
            channel,
            buf: NonNull::from(buf),
            buf_len,
            read_pos: AtomicUsize::new(0),
            _lt: PhantomData,
        }
    }

    /// Get a slice reference to the buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that DMA is not actively writing to the
    /// portion of the buffer being accessed, or that the access is
    /// appropriately synchronized.
    #[inline]
    unsafe fn buf_slice(&self) -> &[W] {
        self.buf.as_ref()
    }

    /// Get the current DMA write position in the buffer.
    ///
    /// This reads the current destination address from the DMA controller
    /// and calculates the buffer offset.
    fn dma_write_pos(&self) -> usize {
        let t = self.channel.tcd();
        let daddr = t.tcd_daddr().read().daddr().bits() as usize;
        let buf_start = self.buf.as_ptr() as *const W as usize;

        // Calculate offset from buffer start
        let offset = daddr.wrapping_sub(buf_start) / core::mem::size_of::<W>();

        // Ensure we're within bounds (DMA wraps around); this also maps a
        // transient one-past-the-end DADDR back to index 0.
        offset % self.buf_len
    }

    /// Returns the number of `W` elements available to read.
    pub fn available(&self) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        }
    }

    /// Check if the buffer has overrun (data was lost).
    ///
    /// This happens when DMA writes faster than the application reads.
    pub fn is_overrun(&self) -> bool {
        // In a true overrun, the DMA would have wrapped around and caught up
        // to our read position. We can detect this by checking if available()
        // equals the full buffer size (minus 1 to distinguish from empty).
        // NOTE(review): this is a heuristic — a wrap that passes the read
        // position entirely between polls is indistinguishable from a
        // nearly-full buffer.
        self.available() >= self.buf_len - 1
    }

    /// Read data from the ring buffer into the provided slice.
    ///
    /// Returns the number of elements read, which may be less than
    /// `dst.len()` if not enough data is available.
    ///
    /// This method does not block; use [`Self::read`] for async waiting.
    pub fn read_immediate(&self, dst: &mut [W]) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Calculate available elements
        let available = if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        };

        let to_read = dst.len().min(available);
        if to_read == 0 {
            return 0;
        }

        // Safety: We only read from portions of the buffer that DMA has
        // already written to (between read_pos and write_pos).
        let buf = unsafe { self.buf_slice() };

        // Read data, handling wrap-around
        let first_chunk = (self.buf_len - read_pos).min(to_read);
        dst[..first_chunk].copy_from_slice(&buf[read_pos..read_pos + first_chunk]);

        if to_read > first_chunk {
            let second_chunk = to_read - first_chunk;
            dst[first_chunk..to_read].copy_from_slice(&buf[..second_chunk]);
        }

        // Update read position
        let new_read_pos = (read_pos + to_read) % self.buf_len;
        self.read_pos.store(new_read_pos, Ordering::Release);

        to_read
    }

    /// Read data from the ring buffer asynchronously.
    ///
    /// This waits until at least one element is available, then reads as much
    /// as possible into the destination buffer.
    ///
    /// Returns the number of elements read.
    pub async fn read(&self, dst: &mut [W]) -> Result<usize, Error> {
        use core::future::poll_fn;

        if dst.is_empty() {
            return Ok(0);
        }

        poll_fn(|cx| {
            // Check for overrun
            if self.is_overrun() {
                return Poll::Ready(Err(Error::Overrun));
            }

            // Try to read immediately
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            // Register wakers for both half and complete interrupts
            let state = &STATES[self.channel.index()];
            state.waker.register(cx.waker());
            state.half_waker.register(cx.waker());

            // Check again after registering waker (avoid race)
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            Poll::Pending
        })
        .await
    }

    /// Clear the ring buffer, discarding all unread data.
    pub fn clear(&self) {
        let write_pos = self.dma_write_pos();
        self.read_pos.store(write_pos, Ordering::Release);
    }

    /// Stop the DMA transfer and consume the ring buffer.
    ///
    /// Returns any remaining unread data count.
    ///
    /// NOTE(review): no `Drop` impl for `RingBuffer` is visible in this
    /// module. If none exists elsewhere, dropping without calling `stop()`
    /// leaves the channel writing into a buffer whose borrow has ended —
    /// confirm, and add a `Drop` that disables ERQ if so.
    pub fn stop(self) -> usize {
        let available = self.available();

        // Disable the channel
        let t = self.channel.tcd();
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear flags
        t.ch_int().write(|w| w.int().clear_bit_by_one());
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);

        available
    }
}
| 2062 | |||
impl<C: Channel> DmaChannel<C> {
    /// Set up a circular DMA transfer for continuous peripheral-to-memory reception.
    ///
    /// This configures the DMA channel for circular operation with both half-transfer
    /// and complete-transfer interrupts enabled. The transfer runs continuously until
    /// stopped via [`RingBuffer::stop()`].
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address to read from
    /// * `buf` - Destination buffer (should be power-of-2 size for best efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to read received data.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - The peripheral address must be valid for reads.
    /// - The peripheral's DMA request must be configured to trigger this channel.
    pub unsafe fn setup_circular_read<'a, W: Word>(&self, peri_addr: *const W, buf: &'a mut [W]) -> RingBuffer<'a, W> {
        assert!(!buf.is_empty());
        // CITER/BITER (ELINKNO) hold a 15-bit count, so cap the length.
        assert!(buf.len() <= 0x7fff);
        // For circular mode, buffer size should ideally be power of 2
        // but we don't enforce it

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes: same element size on both sides, no
        // address-modulo wrapping.
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = buffer size
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // After major loop: reset destination to buffer start (circular)
        let buf_bytes = (buf.len() * byte_size) as i32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); // Source doesn't change
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits((-buf_bytes) as u32));

        // Control/status: enable both half and complete interrupts, NO DREQ (continuous)
        t.tcd_csr().write(|w| {
            w.intmajor()
                .enable()
                .inthalf()
                .enable()
                .dreq()
                .channel_not_affected() // Don't clear ERQ on complete (circular)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Make sure the whole TCD is committed before the request is enabled.
        cortex_m::asm::dsb();

        // Enable the channel request
        t.ch_csr().modify(|_, w| w.erq().enable());

        // SAFETY (RingBuffer::new): the channel was just configured for
        // circular operation above; buffer validity and single-RingBuffer
        // use are this method's caller's obligations.
        RingBuffer::new(self.as_any(), buf)
    }
}
| 2180 | |||
| 2181 | // ============================================================================ | ||
| 2182 | // Scatter-Gather Builder | ||
| 2183 | // ============================================================================ | ||
| 2184 | |||
/// Maximum number of TCDs in a scatter-gather chain.
///
/// Bounds the fixed descriptor pool embedded in [`ScatterGatherBuilder`].
pub const MAX_SCATTER_GATHER_TCDS: usize = 16;
| 2187 | |||
| 2188 | /// A builder for constructing scatter-gather DMA transfer chains. | ||
| 2189 | /// | ||
| 2190 | /// This provides a type-safe way to build TCD chains for scatter-gather | ||
| 2191 | /// transfers without manual TCD manipulation. | ||
| 2192 | /// | ||
| 2193 | /// # Example | ||
| 2194 | /// | ||
| 2195 | /// ```no_run | ||
| 2196 | /// use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder}; | ||
| 2197 | /// | ||
| 2198 | /// let mut builder = ScatterGatherBuilder::<u32>::new(); | ||
| 2199 | /// | ||
| 2200 | /// // Add transfer segments | ||
| 2201 | /// builder.add_transfer(&src1, &mut dst1); | ||
| 2202 | /// builder.add_transfer(&src2, &mut dst2); | ||
| 2203 | /// builder.add_transfer(&src3, &mut dst3); | ||
| 2204 | /// | ||
| 2205 | /// // Build and execute | ||
| 2206 | /// let transfer = unsafe { builder.build(&dma_ch).unwrap() }; | ||
| 2207 | /// transfer.await; | ||
| 2208 | /// ``` | ||
| 2209 | pub struct ScatterGatherBuilder<W: Word> { | ||
| 2210 | /// TCD pool (must be 32-byte aligned) | ||
| 2211 | tcds: [Tcd; MAX_SCATTER_GATHER_TCDS], | ||
| 2212 | /// Number of TCDs configured | ||
| 2213 | count: usize, | ||
| 2214 | /// Phantom marker for word type | ||
| 2215 | _phantom: core::marker::PhantomData<W>, | ||
| 2216 | } | ||
| 2217 | |||
| 2218 | impl<W: Word> ScatterGatherBuilder<W> { | ||
| 2219 | /// Create a new scatter-gather builder. | ||
| 2220 | pub fn new() -> Self { | ||
| 2221 | Self { | ||
| 2222 | tcds: [Tcd::default(); MAX_SCATTER_GATHER_TCDS], | ||
| 2223 | count: 0, | ||
| 2224 | _phantom: core::marker::PhantomData, | ||
| 2225 | } | ||
| 2226 | } | ||
| 2227 | |||
| 2228 | /// Add a memory-to-memory transfer segment to the chain. | ||
| 2229 | /// | ||
| 2230 | /// # Arguments | ||
| 2231 | /// | ||
| 2232 | /// * `src` - Source buffer for this segment | ||
| 2233 | /// * `dst` - Destination buffer for this segment | ||
| 2234 | /// | ||
| 2235 | /// # Panics | ||
| 2236 | /// | ||
| 2237 | /// Panics if the maximum number of segments (16) is exceeded. | ||
| 2238 | pub fn add_transfer(&mut self, src: &[W], dst: &mut [W]) -> &mut Self { | ||
| 2239 | assert!(self.count < MAX_SCATTER_GATHER_TCDS, "Too many scatter-gather segments"); | ||
| 2240 | assert!(!src.is_empty()); | ||
| 2241 | assert!(dst.len() >= src.len()); | ||
| 2242 | |||
| 2243 | let size = W::size(); | ||
| 2244 | let byte_size = size.bytes(); | ||
| 2245 | let hw_size = size.to_hw_size(); | ||
| 2246 | let nbytes = (src.len() * byte_size) as u32; | ||
| 2247 | |||
| 2248 | // Build the TCD for this segment | ||
| 2249 | self.tcds[self.count] = Tcd { | ||
| 2250 | saddr: src.as_ptr() as u32, | ||
| 2251 | soff: byte_size as i16, | ||
| 2252 | attr: ((hw_size as u16) << 8) | (hw_size as u16), // SSIZE | DSIZE | ||
| 2253 | nbytes, | ||
| 2254 | slast: 0, | ||
| 2255 | daddr: dst.as_mut_ptr() as u32, | ||
| 2256 | doff: byte_size as i16, | ||
| 2257 | citer: 1, | ||
| 2258 | dlast_sga: 0, // Will be filled in by build() | ||
| 2259 | csr: 0x0002, // INTMAJOR only (ESG will be set for non-last TCDs) | ||
| 2260 | biter: 1, | ||
| 2261 | }; | ||
| 2262 | |||
| 2263 | self.count += 1; | ||
| 2264 | self | ||
| 2265 | } | ||
| 2266 | |||
| 2267 | /// Get the number of transfer segments added. | ||
| 2268 | pub fn segment_count(&self) -> usize { | ||
| 2269 | self.count | ||
| 2270 | } | ||
| 2271 | |||
| 2272 | /// Build the scatter-gather chain and start the transfer. | ||
| 2273 | /// | ||
| 2274 | /// # Arguments | ||
| 2275 | /// | ||
| 2276 | /// * `channel` - The DMA channel to use for the transfer | ||
| 2277 | /// | ||
| 2278 | /// # Returns | ||
| 2279 | /// | ||
| 2280 | /// A `Transfer` future that completes when the entire chain has executed. | ||
| 2281 | /// | ||
| 2282 | /// # Safety | ||
| 2283 | /// | ||
| 2284 | /// All source and destination buffers passed to `add_transfer()` must | ||
| 2285 | /// remain valid for the duration of the transfer. | ||
| 2286 | pub unsafe fn build<C: Channel>(&mut self, channel: &DmaChannel<C>) -> Result<Transfer<'_>, Error> { | ||
| 2287 | if self.count == 0 { | ||
| 2288 | return Err(Error::Configuration); | ||
| 2289 | } | ||
| 2290 | |||
| 2291 | // Link TCDs together | ||
| 2292 | // | ||
| 2293 | // CSR bit definitions: | ||
| 2294 | // - START = bit 0 = 0x0001 (triggers transfer when set) | ||
| 2295 | // - INTMAJOR = bit 1 = 0x0002 (interrupt on major loop complete) | ||
| 2296 | // - ESG = bit 4 = 0x0010 (enable scatter-gather, loads next TCD on complete) | ||
| 2297 | // | ||
| 2298 | // When hardware loads a TCD via scatter-gather (ESG), it copies the TCD's | ||
| 2299 | // CSR directly into the hardware register. If START is not set in that CSR, | ||
| 2300 | // the hardware will NOT auto-execute the loaded TCD. | ||
| 2301 | // | ||
| 2302 | // Strategy: | ||
| 2303 | // - First TCD: ESG | INTMAJOR (no START - we add it manually after loading) | ||
| 2304 | // - Middle TCDs: ESG | INTMAJOR | START (auto-execute when loaded via S/G) | ||
| 2305 | // - Last TCD: INTMAJOR | START (auto-execute, no further linking) | ||
| 2306 | for i in 0..self.count { | ||
| 2307 | let is_first = i == 0; | ||
| 2308 | let is_last = i == self.count - 1; | ||
| 2309 | |||
| 2310 | if is_first { | ||
| 2311 | if is_last { | ||
| 2312 | // Only one TCD - no ESG, no START (we add START manually) | ||
| 2313 | self.tcds[i].dlast_sga = 0; | ||
| 2314 | self.tcds[i].csr = 0x0002; // INTMAJOR only | ||
| 2315 | } else { | ||
| 2316 | // First of multiple - ESG to link, no START (we add START manually) | ||
| 2317 | self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32; | ||
| 2318 | self.tcds[i].csr = 0x0012; // ESG | INTMAJOR | ||
| 2319 | } | ||
| 2320 | } else if is_last { | ||
| 2321 | // Last TCD (not first) - no ESG, but START so it auto-executes | ||
| 2322 | self.tcds[i].dlast_sga = 0; | ||
| 2323 | self.tcds[i].csr = 0x0003; // INTMAJOR | START | ||
| 2324 | } else { | ||
| 2325 | // Middle TCD - ESG to link, and START so it auto-executes | ||
| 2326 | self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32; | ||
| 2327 | self.tcds[i].csr = 0x0013; // ESG | INTMAJOR | START | ||
| 2328 | } | ||
| 2329 | } | ||
| 2330 | |||
| 2331 | let t = channel.tcd(); | ||
| 2332 | |||
| 2333 | // Reset channel state - clear DONE, disable requests, clear errors | ||
| 2334 | // This ensures the channel is in a clean state before loading the TCD | ||
| 2335 | t.ch_csr().write(|w| { | ||
| 2336 | w.erq() | ||
| 2337 | .disable() | ||
| 2338 | .earq() | ||
| 2339 | .disable() | ||
| 2340 | .eei() | ||
| 2341 | .no_error() | ||
| 2342 | .done() | ||
| 2343 | .clear_bit_by_one() | ||
| 2344 | }); | ||
| 2345 | t.ch_es().write(|w| w.err().clear_bit_by_one()); | ||
| 2346 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 2347 | |||
| 2348 | // Memory barrier to ensure channel state is reset before loading TCD | ||
| 2349 | cortex_m::asm::dsb(); | ||
| 2350 | |||
| 2351 | // Load first TCD into hardware | ||
| 2352 | channel.load_tcd(&self.tcds[0]); | ||
| 2353 | |||
| 2354 | // Memory barrier before setting START | ||
| 2355 | cortex_m::asm::dsb(); | ||
| 2356 | |||
| 2357 | // Start the transfer | ||
| 2358 | t.tcd_csr().modify(|_, w| w.start().channel_started()); | ||
| 2359 | |||
| 2360 | Ok(Transfer::new(channel.as_any())) | ||
| 2361 | } | ||
| 2362 | |||
| 2363 | /// Reset the builder for reuse. | ||
| 2364 | pub fn clear(&mut self) { | ||
| 2365 | self.count = 0; | ||
| 2366 | } | ||
| 2367 | } | ||
| 2368 | |||
| 2369 | impl<W: Word> Default for ScatterGatherBuilder<W> { | ||
| 2370 | fn default() -> Self { | ||
| 2371 | Self::new() | ||
| 2372 | } | ||
| 2373 | } | ||
| 2374 | |||
| 2375 | /// A completed scatter-gather transfer result. | ||
| 2376 | /// | ||
| 2377 | /// This type is returned after a scatter-gather transfer completes, | ||
| 2378 | /// providing access to any error information. | ||
| 2379 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] | ||
| 2380 | pub struct ScatterGatherResult { | ||
| 2381 | /// Number of segments successfully transferred | ||
| 2382 | pub segments_completed: usize, | ||
| 2383 | /// Error if any occurred | ||
| 2384 | pub error: Option<Error>, | ||
| 2385 | } | ||
| 2386 | |||
| 2387 | // ============================================================================ | ||
| 2388 | // Interrupt Handler | ||
| 2389 | // ============================================================================ | ||
| 2390 | |||
| 2391 | /// Interrupt handler helper. | ||
| 2392 | /// | ||
| 2393 | /// Call this from your interrupt handler to clear the interrupt flag and wake the waker. | ||
| 2394 | /// This handles both half-transfer and complete-transfer interrupts. | ||
| 2395 | /// | ||
| 2396 | /// # Safety | ||
| 2397 | /// Must be called from the correct DMA channel interrupt context. | ||
| 2398 | pub unsafe fn on_interrupt(ch_index: usize) { | ||
| 2399 | let p = pac::Peripherals::steal(); | ||
| 2400 | let edma = &p.edma_0_tcd0; | ||
| 2401 | let t = edma.tcd(ch_index); | ||
| 2402 | |||
| 2403 | // Read TCD CSR to determine interrupt source | ||
| 2404 | let csr = t.tcd_csr().read(); | ||
| 2405 | |||
| 2406 | // Check if this is a half-transfer interrupt | ||
| 2407 | // INTHALF is set and we're at or past the half-way point | ||
| 2408 | if csr.inthalf().bit_is_set() { | ||
| 2409 | let biter = t.tcd_biter_elinkno().read().biter().bits(); | ||
| 2410 | let citer = t.tcd_citer_elinkno().read().citer().bits(); | ||
| 2411 | let half_point = biter / 2; | ||
| 2412 | |||
| 2413 | if citer <= half_point && citer > 0 { | ||
| 2414 | // Half-transfer interrupt - wake half_waker | ||
| 2415 | half_waker(ch_index).wake(); | ||
| 2416 | } | ||
| 2417 | } | ||
| 2418 | |||
| 2419 | // Clear INT flag | ||
| 2420 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 2421 | |||
| 2422 | // If DONE is set, this is a complete-transfer interrupt | ||
| 2423 | let done = t.ch_csr().read().done().bit_is_set(); | ||
| 2424 | if done { | ||
| 2425 | waker(ch_index).wake(); | ||
| 2426 | } else { | ||
| 2427 | // Also wake the complete waker in case we're polling for progress | ||
| 2428 | waker(ch_index).wake(); | ||
| 2429 | } | ||
| 2430 | } | ||
| 2431 | |||
| 2432 | // ============================================================================ | ||
| 2433 | // Type-level Interrupt Handlers for bind_interrupts! macro | ||
| 2434 | // ============================================================================ | ||
| 2435 | |||
| 2436 | /// Macro to generate DMA channel interrupt handlers. | ||
| 2437 | /// | ||
| 2438 | /// This generates handler structs that implement the `Handler` trait for use | ||
| 2439 | /// with the `bind_interrupts!` macro. | ||
| 2440 | macro_rules! impl_dma_interrupt_handler { | ||
| 2441 | ($name:ident, $irq:ident, $ch:expr) => { | ||
| 2442 | /// Interrupt handler for DMA channel. | ||
| 2443 | /// | ||
| 2444 | /// Use this with the `bind_interrupts!` macro: | ||
| 2445 | /// ```ignore | ||
| 2446 | /// bind_interrupts!(struct Irqs { | ||
| 2447 | #[doc = concat!(" ", stringify!($irq), " => dma::", stringify!($name), ";")] | ||
| 2448 | /// }); | ||
| 2449 | /// ``` | ||
| 2450 | pub struct $name; | ||
| 2451 | |||
| 2452 | impl crate::interrupt::typelevel::Handler<crate::interrupt::typelevel::$irq> for $name { | ||
| 2453 | unsafe fn on_interrupt() { | ||
| 2454 | on_interrupt($ch); | ||
| 2455 | } | ||
| 2456 | } | ||
| 2457 | }; | ||
| 2458 | } | ||
| 2459 | |||
| 2460 | impl_dma_interrupt_handler!(DmaCh0InterruptHandler, DMA_CH0, 0); | ||
| 2461 | impl_dma_interrupt_handler!(DmaCh1InterruptHandler, DMA_CH1, 1); | ||
| 2462 | impl_dma_interrupt_handler!(DmaCh2InterruptHandler, DMA_CH2, 2); | ||
| 2463 | impl_dma_interrupt_handler!(DmaCh3InterruptHandler, DMA_CH3, 3); | ||
| 2464 | impl_dma_interrupt_handler!(DmaCh4InterruptHandler, DMA_CH4, 4); | ||
| 2465 | impl_dma_interrupt_handler!(DmaCh5InterruptHandler, DMA_CH5, 5); | ||
| 2466 | impl_dma_interrupt_handler!(DmaCh6InterruptHandler, DMA_CH6, 6); | ||
| 2467 | impl_dma_interrupt_handler!(DmaCh7InterruptHandler, DMA_CH7, 7); | ||
diff --git a/src/interrupt.rs b/src/interrupt.rs index 0490e3a66..000b2f9cd 100644 --- a/src/interrupt.rs +++ b/src/interrupt.rs | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | mod generated { | 9 | mod generated { |
| 10 | embassy_hal_internal::interrupt_mod!( | 10 | embassy_hal_internal::interrupt_mod!( |
| 11 | OS_EVENT, RTC, ADC1, GPIO0, GPIO1, GPIO2, GPIO3, GPIO4, LPI2C0, LPI2C1, LPI2C2, LPI2C3, LPUART0, LPUART1, | 11 | OS_EVENT, RTC, ADC1, GPIO0, GPIO1, GPIO2, GPIO3, GPIO4, LPI2C0, LPI2C1, LPI2C2, LPI2C3, LPUART0, LPUART1, |
| 12 | LPUART2, LPUART3, LPUART4, LPUART5, | 12 | LPUART2, LPUART3, LPUART4, LPUART5, DMA_CH0, DMA_CH1, DMA_CH2, DMA_CH3, DMA_CH4, DMA_CH5, DMA_CH6, DMA_CH7, |
| 13 | ); | 13 | ); |
| 14 | } | 14 | } |
| 15 | 15 | ||
diff --git a/src/lib.rs b/src/lib.rs index fb204d27b..d3560e651 100644 --- a/src/lib.rs +++ b/src/lib.rs | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | // #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)] | 6 | // #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)] |
| 7 | 7 | ||
| 8 | pub mod clocks; // still provide clock helpers | 8 | pub mod clocks; // still provide clock helpers |
| 9 | pub mod dma; | ||
| 9 | pub mod gpio; | 10 | pub mod gpio; |
| 10 | pub mod pins; // pin mux helpers | 11 | pub mod pins; // pin mux helpers |
| 11 | 12 | ||
| @@ -51,6 +52,14 @@ embassy_hal_internal::peripherals!( | |||
| 51 | 52 | ||
| 52 | DBGMAILBOX, | 53 | DBGMAILBOX, |
| 53 | DMA0, | 54 | DMA0, |
| 55 | DMA_CH0, | ||
| 56 | DMA_CH1, | ||
| 57 | DMA_CH2, | ||
| 58 | DMA_CH3, | ||
| 59 | DMA_CH4, | ||
| 60 | DMA_CH5, | ||
| 61 | DMA_CH6, | ||
| 62 | DMA_CH7, | ||
| 54 | EDMA0_TCD0, | 63 | EDMA0_TCD0, |
| 55 | EIM0, | 64 | EIM0, |
| 56 | EQDC0, | 65 | EQDC0, |
diff --git a/src/lpuart/mod.rs b/src/lpuart/mod.rs index 317274a79..b29fe287d 100644 --- a/src/lpuart/mod.rs +++ b/src/lpuart/mod.rs | |||
| @@ -15,22 +15,10 @@ use crate::{interrupt, pac, AnyPin}; | |||
| 15 | pub mod buffered; | 15 | pub mod buffered; |
| 16 | 16 | ||
| 17 | // ============================================================================ | 17 | // ============================================================================ |
| 18 | // STUB IMPLEMENTATION | 18 | // DMA INTEGRATION |
| 19 | // ============================================================================ | 19 | // ============================================================================ |
| 20 | 20 | ||
| 21 | // Stub implementation for LIB (Peripherals), GPIO, DMA and CLOCK until stable API | 21 | use crate::dma::{Channel as DmaChannelTrait, DmaChannel, EnableInterrupt}; |
| 22 | // Pin and Clock initialization is currently done at the examples level. | ||
| 23 | |||
| 24 | // --- START DMA --- | ||
| 25 | mod dma { | ||
| 26 | pub struct Channel<'d> { | ||
| 27 | pub(super) _lifetime: core::marker::PhantomData<&'d ()>, | ||
| 28 | } | ||
| 29 | } | ||
| 30 | |||
| 31 | use dma::Channel; | ||
| 32 | |||
| 33 | // --- END DMA --- | ||
| 34 | 22 | ||
| 35 | // ============================================================================ | 23 | // ============================================================================ |
| 36 | // MISC | 24 | // MISC |
| @@ -694,7 +682,6 @@ pub struct Lpuart<'a, M: Mode> { | |||
| 694 | pub struct LpuartTx<'a, M: Mode> { | 682 | pub struct LpuartTx<'a, M: Mode> { |
| 695 | info: Info, | 683 | info: Info, |
| 696 | _tx_pin: Peri<'a, AnyPin>, | 684 | _tx_pin: Peri<'a, AnyPin>, |
| 697 | _tx_dma: Option<Channel<'a>>, | ||
| 698 | mode: PhantomData<(&'a (), M)>, | 685 | mode: PhantomData<(&'a (), M)>, |
| 699 | } | 686 | } |
| 700 | 687 | ||
| @@ -702,10 +689,31 @@ pub struct LpuartTx<'a, M: Mode> { | |||
| 702 | pub struct LpuartRx<'a, M: Mode> { | 689 | pub struct LpuartRx<'a, M: Mode> { |
| 703 | info: Info, | 690 | info: Info, |
| 704 | _rx_pin: Peri<'a, AnyPin>, | 691 | _rx_pin: Peri<'a, AnyPin>, |
| 705 | _rx_dma: Option<Channel<'a>>, | ||
| 706 | mode: PhantomData<(&'a (), M)>, | 692 | mode: PhantomData<(&'a (), M)>, |
| 707 | } | 693 | } |
| 708 | 694 | ||
| 695 | /// Lpuart TX driver with DMA support. | ||
| 696 | pub struct LpuartTxDma<'a, C: DmaChannelTrait> { | ||
| 697 | info: Info, | ||
| 698 | _tx_pin: Peri<'a, AnyPin>, | ||
| 699 | tx_dma: DmaChannel<C>, | ||
| 700 | } | ||
| 701 | |||
| 702 | /// Lpuart RX driver with DMA support. | ||
| 703 | pub struct LpuartRxDma<'a, C: DmaChannelTrait> { | ||
| 704 | info: Info, | ||
| 705 | _rx_pin: Peri<'a, AnyPin>, | ||
| 706 | rx_dma: DmaChannel<C>, | ||
| 707 | } | ||
| 708 | |||
| 709 | /// Lpuart driver with DMA support for both TX and RX. | ||
| 710 | pub struct LpuartDma<'a, TxC: DmaChannelTrait, RxC: DmaChannelTrait> { | ||
| 711 | #[allow(dead_code)] | ||
| 712 | info: Info, | ||
| 713 | tx: LpuartTxDma<'a, TxC>, | ||
| 714 | rx: LpuartRxDma<'a, RxC>, | ||
| 715 | } | ||
| 716 | |||
| 709 | // ============================================================================ | 717 | // ============================================================================ |
| 710 | // LPUART CORE IMPLEMENTATION | 718 | // LPUART CORE IMPLEMENTATION |
| 711 | // ============================================================================ | 719 | // ============================================================================ |
| @@ -796,8 +804,8 @@ impl<'a> Lpuart<'a, Blocking> { | |||
| 796 | 804 | ||
| 797 | Ok(Self { | 805 | Ok(Self { |
| 798 | info: T::info(), | 806 | info: T::info(), |
| 799 | tx: LpuartTx::new_inner(T::info(), tx_pin, None), | 807 | tx: LpuartTx::new_inner(T::info(), tx_pin), |
| 800 | rx: LpuartRx::new_inner(T::info(), rx_pin, None), | 808 | rx: LpuartRx::new_inner(T::info(), rx_pin), |
| 801 | }) | 809 | }) |
| 802 | } | 810 | } |
| 803 | } | 811 | } |
| @@ -807,11 +815,10 @@ impl<'a> Lpuart<'a, Blocking> { | |||
| 807 | // ---------------------------------------------------------------------------- | 815 | // ---------------------------------------------------------------------------- |
| 808 | 816 | ||
| 809 | impl<'a, M: Mode> LpuartTx<'a, M> { | 817 | impl<'a, M: Mode> LpuartTx<'a, M> { |
| 810 | fn new_inner(info: Info, tx_pin: Peri<'a, AnyPin>, tx_dma: Option<Channel<'a>>) -> Self { | 818 | fn new_inner(info: Info, tx_pin: Peri<'a, AnyPin>) -> Self { |
| 811 | Self { | 819 | Self { |
| 812 | info, | 820 | info, |
| 813 | _tx_pin: tx_pin, | 821 | _tx_pin: tx_pin, |
| 814 | _tx_dma: tx_dma, | ||
| 815 | mode: PhantomData, | 822 | mode: PhantomData, |
| 816 | } | 823 | } |
| 817 | } | 824 | } |
| @@ -830,7 +837,7 @@ impl<'a> LpuartTx<'a, Blocking> { | |||
| 830 | 837 | ||
| 831 | Lpuart::<Blocking>::init::<T>(Some(&tx_pin), None, None, None, config)?; | 838 | Lpuart::<Blocking>::init::<T>(Some(&tx_pin), None, None, None, config)?; |
| 832 | 839 | ||
| 833 | Ok(Self::new_inner(T::info(), tx_pin, None)) | 840 | Ok(Self::new_inner(T::info(), tx_pin)) |
| 834 | } | 841 | } |
| 835 | 842 | ||
| 836 | fn write_byte_internal(&mut self, byte: u8) -> Result<()> { | 843 | fn write_byte_internal(&mut self, byte: u8) -> Result<()> { |
| @@ -909,11 +916,10 @@ impl<'a> LpuartTx<'a, Blocking> { | |||
| 909 | // ---------------------------------------------------------------------------- | 916 | // ---------------------------------------------------------------------------- |
| 910 | 917 | ||
| 911 | impl<'a, M: Mode> LpuartRx<'a, M> { | 918 | impl<'a, M: Mode> LpuartRx<'a, M> { |
| 912 | fn new_inner(info: Info, rx_pin: Peri<'a, AnyPin>, rx_dma: Option<Channel<'a>>) -> Self { | 919 | fn new_inner(info: Info, rx_pin: Peri<'a, AnyPin>) -> Self { |
| 913 | Self { | 920 | Self { |
| 914 | info, | 921 | info, |
| 915 | _rx_pin: rx_pin, | 922 | _rx_pin: rx_pin, |
| 916 | _rx_dma: rx_dma, | ||
| 917 | mode: PhantomData, | 923 | mode: PhantomData, |
| 918 | } | 924 | } |
| 919 | } | 925 | } |
| @@ -932,7 +938,7 @@ impl<'a> LpuartRx<'a, Blocking> { | |||
| 932 | 938 | ||
| 933 | Lpuart::<Blocking>::init::<T>(None, Some(&rx_pin), None, None, config)?; | 939 | Lpuart::<Blocking>::init::<T>(None, Some(&rx_pin), None, None, config)?; |
| 934 | 940 | ||
| 935 | Ok(Self::new_inner(T::info(), rx_pin, None)) | 941 | Ok(Self::new_inner(T::info(), rx_pin)) |
| 936 | } | 942 | } |
| 937 | 943 | ||
| 938 | fn read_byte_internal(&mut self) -> Result<u8> { | 944 | fn read_byte_internal(&mut self) -> Result<u8> { |
| @@ -1027,10 +1033,373 @@ impl<'a> Lpuart<'a, Blocking> { | |||
| 1027 | } | 1033 | } |
| 1028 | 1034 | ||
| 1029 | // ============================================================================ | 1035 | // ============================================================================ |
| 1030 | // ASYNC MODE IMPLEMENTATIONS | 1036 | // ASYNC MODE IMPLEMENTATIONS (DMA-based) |
| 1037 | // ============================================================================ | ||
| 1038 | |||
| 1039 | /// Maximum bytes per DMA transfer (eDMA CITER/BITER are 15-bit fields). | ||
| 1040 | const DMA_MAX_TRANSFER_SIZE: usize = 0x7FFF; | ||
| 1041 | |||
| 1042 | /// Guard struct that ensures DMA is stopped if the async future is cancelled. | ||
| 1043 | /// | ||
| 1044 | /// This implements the RAII pattern: if the future is dropped before completion | ||
| 1045 | /// (e.g., due to a timeout), the DMA transfer is automatically aborted to prevent | ||
| 1046 | /// use-after-free when the buffer goes out of scope. | ||
| 1047 | struct TxDmaGuard<'a, C: DmaChannelTrait> { | ||
| 1048 | dma: &'a DmaChannel<C>, | ||
| 1049 | regs: Regs, | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | impl<'a, C: DmaChannelTrait> TxDmaGuard<'a, C> { | ||
| 1053 | fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self { | ||
| 1054 | Self { dma, regs } | ||
| 1055 | } | ||
| 1056 | |||
| 1057 | /// Complete the transfer normally (don't abort on drop). | ||
| 1058 | fn complete(self) { | ||
| 1059 | // Cleanup | ||
| 1060 | self.regs.baud().modify(|_, w| w.tdmae().disabled()); | ||
| 1061 | unsafe { | ||
| 1062 | self.dma.disable_request(); | ||
| 1063 | self.dma.clear_done(); | ||
| 1064 | } | ||
| 1065 | // Don't run drop since we've cleaned up | ||
| 1066 | core::mem::forget(self); | ||
| 1067 | } | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | impl<C: DmaChannelTrait> Drop for TxDmaGuard<'_, C> { | ||
| 1071 | fn drop(&mut self) { | ||
| 1072 | // Abort the DMA transfer if still running | ||
| 1073 | unsafe { | ||
| 1074 | self.dma.disable_request(); | ||
| 1075 | self.dma.clear_done(); | ||
| 1076 | self.dma.clear_interrupt(); | ||
| 1077 | } | ||
| 1078 | // Disable UART TX DMA request | ||
| 1079 | self.regs.baud().modify(|_, w| w.tdmae().disabled()); | ||
| 1080 | } | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | /// Guard struct for RX DMA transfers. | ||
| 1084 | struct RxDmaGuard<'a, C: DmaChannelTrait> { | ||
| 1085 | dma: &'a DmaChannel<C>, | ||
| 1086 | regs: Regs, | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | impl<'a, C: DmaChannelTrait> RxDmaGuard<'a, C> { | ||
| 1090 | fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self { | ||
| 1091 | Self { dma, regs } | ||
| 1092 | } | ||
| 1093 | |||
| 1094 | /// Complete the transfer normally (don't abort on drop). | ||
| 1095 | fn complete(self) { | ||
| 1096 | // Ensure DMA writes are visible to CPU | ||
| 1097 | cortex_m::asm::dsb(); | ||
| 1098 | // Cleanup | ||
| 1099 | self.regs.baud().modify(|_, w| w.rdmae().disabled()); | ||
| 1100 | unsafe { | ||
| 1101 | self.dma.disable_request(); | ||
| 1102 | self.dma.clear_done(); | ||
| 1103 | } | ||
| 1104 | // Don't run drop since we've cleaned up | ||
| 1105 | core::mem::forget(self); | ||
| 1106 | } | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | impl<C: DmaChannelTrait> Drop for RxDmaGuard<'_, C> { | ||
| 1110 | fn drop(&mut self) { | ||
| 1111 | // Abort the DMA transfer if still running | ||
| 1112 | unsafe { | ||
| 1113 | self.dma.disable_request(); | ||
| 1114 | self.dma.clear_done(); | ||
| 1115 | self.dma.clear_interrupt(); | ||
| 1116 | } | ||
| 1117 | // Disable UART RX DMA request | ||
| 1118 | self.regs.baud().modify(|_, w| w.rdmae().disabled()); | ||
| 1119 | } | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | impl<'a, C: DmaChannelTrait> LpuartTxDma<'a, C> { | ||
| 1123 | /// Create a new LPUART TX driver with DMA support. | ||
| 1124 | pub fn new<T: Instance>( | ||
| 1125 | _inner: Peri<'a, T>, | ||
| 1126 | tx_pin: Peri<'a, impl TxPin<T>>, | ||
| 1127 | tx_dma_ch: Peri<'a, C>, | ||
| 1128 | config: Config, | ||
| 1129 | ) -> Result<Self> { | ||
| 1130 | tx_pin.as_tx(); | ||
| 1131 | let tx_pin: Peri<'a, AnyPin> = tx_pin.into(); | ||
| 1132 | |||
| 1133 | Lpuart::<Blocking>::init::<T>(Some(&tx_pin), None, None, None, config)?; | ||
| 1134 | |||
| 1135 | Ok(Self { | ||
| 1136 | info: T::info(), | ||
| 1137 | _tx_pin: tx_pin, | ||
| 1138 | tx_dma: DmaChannel::new(tx_dma_ch), | ||
| 1139 | }) | ||
| 1140 | } | ||
| 1141 | |||
| 1142 | /// Write data using DMA. | ||
| 1143 | /// | ||
| 1144 | /// This configures the DMA channel for a memory-to-peripheral transfer | ||
| 1145 | /// and waits for completion asynchronously. Large buffers are automatically | ||
| 1146 | /// split into chunks that fit within the DMA transfer limit. | ||
| 1147 | /// | ||
| 1148 | /// # Safety | ||
| 1149 | /// | ||
| 1150 | /// If the returned future is dropped before completion (e.g., due to a timeout), | ||
| 1151 | /// the DMA transfer is automatically aborted to prevent use-after-free. | ||
| 1152 | /// | ||
| 1153 | /// # Arguments | ||
| 1154 | /// * `edma` - Reference to the EDMA TCD register block | ||
| 1155 | /// * `request_source` - DMA request source number (e.g., `dma::DMA_REQ_LPUART2_TX`) | ||
| 1156 | /// * `buf` - Data buffer to transmit | ||
| 1157 | pub async fn write_dma(&mut self, request_source: u8, buf: &[u8]) -> Result<usize> { | ||
| 1158 | if buf.is_empty() { | ||
| 1159 | return Ok(0); | ||
| 1160 | } | ||
| 1161 | |||
| 1162 | let mut total = 0; | ||
| 1163 | for chunk in buf.chunks(DMA_MAX_TRANSFER_SIZE) { | ||
| 1164 | total += self.write_dma_inner(request_source, chunk).await?; | ||
| 1165 | } | ||
| 1166 | |||
| 1167 | Ok(total) | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | /// Internal helper to write a single chunk (max 0x7FFF bytes) using DMA. | ||
| 1171 | async fn write_dma_inner(&mut self, request_source: u8, buf: &[u8]) -> Result<usize> { | ||
| 1172 | let len = buf.len(); | ||
| 1173 | let peri_addr = self.info.regs.data().as_ptr() as *mut u8; | ||
| 1174 | |||
| 1175 | unsafe { | ||
| 1176 | // Clean up channel state | ||
| 1177 | self.tx_dma.disable_request(); | ||
| 1178 | self.tx_dma.clear_done(); | ||
| 1179 | self.tx_dma.clear_interrupt(); | ||
| 1180 | |||
| 1181 | // Set DMA request source | ||
| 1182 | self.tx_dma.set_request_source(request_source); | ||
| 1183 | |||
| 1184 | // Configure TCD for memory-to-peripheral transfer | ||
| 1185 | self.tx_dma | ||
| 1186 | .setup_write_to_peripheral(buf, peri_addr, EnableInterrupt::Yes); | ||
| 1187 | |||
| 1188 | // Enable UART TX DMA request | ||
| 1189 | self.info.regs.baud().modify(|_, w| w.tdmae().enabled()); | ||
| 1190 | |||
| 1191 | // Enable DMA channel request | ||
| 1192 | self.tx_dma.enable_request(); | ||
| 1193 | } | ||
| 1194 | |||
| 1195 | // Create guard that will abort DMA if this future is dropped | ||
| 1196 | let guard = TxDmaGuard::new(&self.tx_dma, self.info.regs); | ||
| 1197 | |||
| 1198 | // Wait for completion asynchronously | ||
| 1199 | core::future::poll_fn(|cx| { | ||
| 1200 | self.tx_dma.waker().register(cx.waker()); | ||
| 1201 | if self.tx_dma.is_done() { | ||
| 1202 | core::task::Poll::Ready(()) | ||
| 1203 | } else { | ||
| 1204 | core::task::Poll::Pending | ||
| 1205 | } | ||
| 1206 | }) | ||
| 1207 | .await; | ||
| 1208 | |||
| 1209 | // Transfer completed successfully - clean up without aborting | ||
| 1210 | guard.complete(); | ||
| 1211 | |||
| 1212 | Ok(len) | ||
| 1213 | } | ||
| 1214 | |||
| 1215 | /// Blocking write (fallback when DMA is not needed) | ||
| 1216 | pub fn blocking_write(&mut self, buf: &[u8]) -> Result<()> { | ||
| 1217 | for &byte in buf { | ||
| 1218 | while self.info.regs.stat().read().tdre().is_txdata() {} | ||
| 1219 | self.info.regs.data().modify(|_, w| unsafe { w.bits(u32::from(byte)) }); | ||
| 1220 | } | ||
| 1221 | Ok(()) | ||
| 1222 | } | ||
| 1223 | |||
| 1224 | /// Flush TX blocking | ||
| 1225 | pub fn blocking_flush(&mut self) -> Result<()> { | ||
| 1226 | while self.info.regs.water().read().txcount().bits() != 0 {} | ||
| 1227 | while self.info.regs.stat().read().tc().is_active() {} | ||
| 1228 | Ok(()) | ||
| 1229 | } | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | impl<'a, C: DmaChannelTrait> LpuartRxDma<'a, C> { | ||
| 1233 | /// Create a new LPUART RX driver with DMA support. | ||
| 1234 | pub fn new<T: Instance>( | ||
| 1235 | _inner: Peri<'a, T>, | ||
| 1236 | rx_pin: Peri<'a, impl RxPin<T>>, | ||
| 1237 | rx_dma_ch: Peri<'a, C>, | ||
| 1238 | config: Config, | ||
| 1239 | ) -> Result<Self> { | ||
| 1240 | rx_pin.as_rx(); | ||
| 1241 | let rx_pin: Peri<'a, AnyPin> = rx_pin.into(); | ||
| 1242 | |||
| 1243 | Lpuart::<Blocking>::init::<T>(None, Some(&rx_pin), None, None, config)?; | ||
| 1244 | |||
| 1245 | Ok(Self { | ||
| 1246 | info: T::info(), | ||
| 1247 | _rx_pin: rx_pin, | ||
| 1248 | rx_dma: DmaChannel::new(rx_dma_ch), | ||
| 1249 | }) | ||
| 1250 | } | ||
| 1251 | |||
| 1252 | /// Read data using DMA. | ||
| 1253 | /// | ||
| 1254 | /// This configures the DMA channel for a peripheral-to-memory transfer | ||
| 1255 | /// and waits for completion asynchronously. Large buffers are automatically | ||
| 1256 | /// split into chunks that fit within the DMA transfer limit. | ||
| 1257 | /// | ||
| 1258 | /// # Safety | ||
| 1259 | /// | ||
| 1260 | /// If the returned future is dropped before completion (e.g., due to a timeout), | ||
| 1261 | /// the DMA transfer is automatically aborted to prevent use-after-free. | ||
| 1262 | /// | ||
| 1263 | /// # Arguments | ||
| 1264 | /// * `request_source` - DMA request source number (e.g., `dma::DMA_REQ_LPUART2_RX`) | ||
| 1265 | /// * `buf` - Buffer to receive data into | ||
| 1266 | pub async fn read_dma(&mut self, request_source: u8, buf: &mut [u8]) -> Result<usize> { | ||
| 1267 | if buf.is_empty() { | ||
| 1268 | return Ok(0); | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | let mut total = 0; | ||
| 1272 | for chunk in buf.chunks_mut(DMA_MAX_TRANSFER_SIZE) { | ||
| 1273 | total += self.read_dma_inner(request_source, chunk).await?; | ||
| 1274 | } | ||
| 1275 | |||
| 1276 | Ok(total) | ||
| 1277 | } | ||
| 1278 | |||
| 1279 | /// Internal helper to read a single chunk (max 0x7FFF bytes) using DMA. | ||
| 1280 | async fn read_dma_inner(&mut self, request_source: u8, buf: &mut [u8]) -> Result<usize> { | ||
| 1281 | let len = buf.len(); | ||
| 1282 | let peri_addr = self.info.regs.data().as_ptr() as *const u8; | ||
| 1283 | |||
| 1284 | unsafe { | ||
| 1285 | // Clean up channel state | ||
| 1286 | self.rx_dma.disable_request(); | ||
| 1287 | self.rx_dma.clear_done(); | ||
| 1288 | self.rx_dma.clear_interrupt(); | ||
| 1289 | |||
| 1290 | // Set DMA request source | ||
| 1291 | self.rx_dma.set_request_source(request_source); | ||
| 1292 | |||
| 1293 | // Configure TCD for peripheral-to-memory transfer | ||
| 1294 | self.rx_dma | ||
| 1295 | .setup_read_from_peripheral(peri_addr, buf, EnableInterrupt::Yes); | ||
| 1296 | |||
| 1297 | // Enable UART RX DMA request | ||
| 1298 | self.info.regs.baud().modify(|_, w| w.rdmae().enabled()); | ||
| 1299 | |||
| 1300 | // Enable DMA channel request | ||
| 1301 | self.rx_dma.enable_request(); | ||
| 1302 | } | ||
| 1303 | |||
| 1304 | // Create guard that will abort DMA if this future is dropped | ||
| 1305 | let guard = RxDmaGuard::new(&self.rx_dma, self.info.regs); | ||
| 1306 | |||
| 1307 | // Wait for completion asynchronously | ||
| 1308 | core::future::poll_fn(|cx| { | ||
| 1309 | self.rx_dma.waker().register(cx.waker()); | ||
| 1310 | if self.rx_dma.is_done() { | ||
| 1311 | core::task::Poll::Ready(()) | ||
| 1312 | } else { | ||
| 1313 | core::task::Poll::Pending | ||
| 1314 | } | ||
| 1315 | }) | ||
| 1316 | .await; | ||
| 1317 | |||
| 1318 | // Transfer completed successfully - clean up without aborting | ||
| 1319 | guard.complete(); | ||
| 1320 | |||
| 1321 | Ok(len) | ||
| 1322 | } | ||
| 1323 | |||
| 1324 | /// Blocking read (fallback when DMA is not needed) | ||
| 1325 | pub fn blocking_read(&mut self, buf: &mut [u8]) -> Result<()> { | ||
| 1326 | for byte in buf.iter_mut() { | ||
| 1327 | loop { | ||
| 1328 | if has_data(self.info.regs) { | ||
| 1329 | *byte = (self.info.regs.data().read().bits() & 0xFF) as u8; | ||
| 1330 | break; | ||
| 1331 | } | ||
| 1332 | check_and_clear_rx_errors(self.info.regs)?; | ||
| 1333 | } | ||
| 1334 | } | ||
| 1335 | Ok(()) | ||
| 1336 | } | ||
| 1337 | } | ||
| 1338 | |||
| 1339 | impl<'a, TxC: DmaChannelTrait, RxC: DmaChannelTrait> LpuartDma<'a, TxC, RxC> { | ||
| 1340 | /// Create a new LPUART driver with DMA support for both TX and RX. | ||
| 1341 | pub fn new<T: Instance>( | ||
| 1342 | _inner: Peri<'a, T>, | ||
| 1343 | tx_pin: Peri<'a, impl TxPin<T>>, | ||
| 1344 | rx_pin: Peri<'a, impl RxPin<T>>, | ||
| 1345 | tx_dma_ch: Peri<'a, TxC>, | ||
| 1346 | rx_dma_ch: Peri<'a, RxC>, | ||
| 1347 | config: Config, | ||
| 1348 | ) -> Result<Self> { | ||
| 1349 | tx_pin.as_tx(); | ||
| 1350 | rx_pin.as_rx(); | ||
| 1351 | |||
| 1352 | let tx_pin: Peri<'a, AnyPin> = tx_pin.into(); | ||
| 1353 | let rx_pin: Peri<'a, AnyPin> = rx_pin.into(); | ||
| 1354 | |||
| 1355 | Lpuart::<Blocking>::init::<T>(Some(&tx_pin), Some(&rx_pin), None, None, config)?; | ||
| 1356 | |||
| 1357 | Ok(Self { | ||
| 1358 | info: T::info(), | ||
| 1359 | tx: LpuartTxDma { | ||
| 1360 | info: T::info(), | ||
| 1361 | _tx_pin: tx_pin, | ||
| 1362 | tx_dma: DmaChannel::new(tx_dma_ch), | ||
| 1363 | }, | ||
| 1364 | rx: LpuartRxDma { | ||
| 1365 | info: T::info(), | ||
| 1366 | _rx_pin: rx_pin, | ||
| 1367 | rx_dma: DmaChannel::new(rx_dma_ch), | ||
| 1368 | }, | ||
| 1369 | }) | ||
| 1370 | } | ||
| 1371 | |||
| 1372 | /// Split into separate TX and RX drivers | ||
| 1373 | pub fn split(self) -> (LpuartTxDma<'a, TxC>, LpuartRxDma<'a, RxC>) { | ||
| 1374 | (self.tx, self.rx) | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | /// Write data using DMA | ||
| 1378 | pub async fn write_dma(&mut self, request_source: u8, buf: &[u8]) -> Result<usize> { | ||
| 1379 | self.tx.write_dma(request_source, buf).await | ||
| 1380 | } | ||
| 1381 | |||
| 1382 | /// Read data using DMA | ||
| 1383 | pub async fn read_dma(&mut self, request_source: u8, buf: &mut [u8]) -> Result<usize> { | ||
| 1384 | self.rx.read_dma(request_source, buf).await | ||
| 1385 | } | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | // ============================================================================ | ||
| 1389 | // EMBEDDED-IO-ASYNC TRAIT IMPLEMENTATIONS | ||
| 1031 | // ============================================================================ | 1390 | // ============================================================================ |
| 1032 | 1391 | ||
| 1033 | // TODO: Implement async mode for LPUART | 1392 | impl<C: DmaChannelTrait> embedded_io::ErrorType for LpuartTxDma<'_, C> { |
| 1393 | type Error = Error; | ||
| 1394 | } | ||
| 1395 | |||
| 1396 | impl<C: DmaChannelTrait> embedded_io::ErrorType for LpuartRxDma<'_, C> { | ||
| 1397 | type Error = Error; | ||
| 1398 | } | ||
| 1399 | |||
| 1400 | impl<TxC: DmaChannelTrait, RxC: DmaChannelTrait> embedded_io::ErrorType for LpuartDma<'_, TxC, RxC> { | ||
| 1401 | type Error = Error; | ||
| 1402 | } | ||
| 1034 | 1403 | ||
| 1035 | // ============================================================================ | 1404 | // ============================================================================ |
| 1036 | // EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS | 1405 | // EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS |
diff --git a/src/pins.rs b/src/pins.rs index fdf1b0a86..9adbe64c8 100644 --- a/src/pins.rs +++ b/src/pins.rs | |||
| @@ -1,6 +1,11 @@ | |||
| 1 | //! Pin configuration helpers (separate from peripheral drivers). | 1 | //! Pin configuration helpers (separate from peripheral drivers). |
| 2 | use crate::pac; | 2 | use crate::pac; |
| 3 | 3 | ||
| 4 | /// Configure pins for ADC usage. | ||
| 5 | /// | ||
| 6 | /// # Safety | ||
| 7 | /// | ||
| 8 | /// Must be called after PORT clocks are enabled. | ||
| 4 | pub unsafe fn configure_adc_pins() { | 9 | pub unsafe fn configure_adc_pins() { |
| 5 | // P1_10 = ADC1_A8 | 10 | // P1_10 = ADC1_A8 |
| 6 | let port1 = &*pac::Port1::ptr(); | 11 | let port1 = &*pac::Port1::ptr(); |
