diff options
Diffstat (limited to 'embassy-mcxa')
| -rw-r--r-- | embassy-mcxa/src/clocks/mod.rs | 7 | ||||
| -rw-r--r-- | embassy-mcxa/src/dma.rs | 2602 | ||||
| -rw-r--r-- | embassy-mcxa/src/interrupt.rs | 8 | ||||
| -rw-r--r-- | embassy-mcxa/src/lib.rs | 47 | ||||
| -rw-r--r-- | embassy-mcxa/src/lpuart/mod.rs | 575 | ||||
| -rw-r--r-- | embassy-mcxa/src/pins.rs | 5 |
6 files changed, 3167 insertions, 77 deletions
diff --git a/embassy-mcxa/src/clocks/mod.rs b/embassy-mcxa/src/clocks/mod.rs index 1b23a9d9f..014a12519 100644 --- a/embassy-mcxa/src/clocks/mod.rs +++ b/embassy-mcxa/src/clocks/mod.rs | |||
| @@ -399,6 +399,10 @@ pub unsafe fn assert_reset<G: Gate>() { | |||
| 399 | } | 399 | } |
| 400 | 400 | ||
| 401 | /// Check whether the peripheral is held in reset. | 401 | /// Check whether the peripheral is held in reset. |
| 402 | /// | ||
| 403 | /// # Safety | ||
| 404 | /// | ||
| 405 | /// Must be called with a valid peripheral gate type. | ||
| 402 | #[inline] | 406 | #[inline] |
| 403 | pub unsafe fn is_reset_released<G: Gate>() -> bool { | 407 | pub unsafe fn is_reset_released<G: Gate>() -> bool { |
| 404 | G::is_reset_released() | 408 | G::is_reset_released() |
| @@ -942,4 +946,7 @@ pub(crate) mod gate { | |||
| 942 | impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig); | 946 | impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig); |
| 943 | impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig); | 947 | impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig); |
| 944 | impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig); | 948 | impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig); |
| 949 | |||
| 950 | // DMA0 peripheral - uses NoConfig since it has no selectable clock source | ||
| 951 | impl_cc_gate!(DMA0, mrcc_glb_cc0, mrcc_glb_rst0, dma0, NoConfig); | ||
| 945 | } | 952 | } |
diff --git a/embassy-mcxa/src/dma.rs b/embassy-mcxa/src/dma.rs new file mode 100644 index 000000000..8d519d99b --- /dev/null +++ b/embassy-mcxa/src/dma.rs | |||
| @@ -0,0 +1,2602 @@ | |||
| 1 | //! DMA driver for MCXA276. | ||
| 2 | //! | ||
| 3 | //! This module provides a typed channel abstraction over the EDMA_0_TCD0 array | ||
| 4 | //! and helpers for configuring the channel MUX. The driver supports both | ||
| 5 | //! low-level TCD configuration and higher-level async transfer APIs. | ||
| 6 | //! | ||
| 7 | //! # Architecture | ||
| 8 | //! | ||
| 9 | //! The MCXA276 has 8 DMA channels (0-7), each with its own interrupt vector. | ||
| 10 | //! Each channel has a Transfer Control Descriptor (TCD) that defines the | ||
| 11 | //! transfer parameters. | ||
| 12 | //! | ||
| 13 | //! # Choosing the Right API | ||
| 14 | //! | ||
| 15 | //! This module provides several API levels to match different use cases: | ||
| 16 | //! | ||
| 17 | //! ## High-Level Async API (Recommended for Most Users) | ||
| 18 | //! | ||
| 19 | //! Use the async methods when you want simple, safe DMA transfers: | ||
| 20 | //! | ||
| 21 | //! | Method | Description | | ||
| 22 | //! |--------|-------------| | ||
| 23 | //! | [`DmaChannel::mem_to_mem()`] | Memory-to-memory copy | | ||
| 24 | //! | [`DmaChannel::memset()`] | Fill memory with a pattern | | ||
| 25 | //! | [`DmaChannel::write()`] | Memory-to-peripheral (TX) | | ||
| 26 | //! | [`DmaChannel::read()`] | Peripheral-to-memory (RX) | | ||
| 27 | //! | ||
| 28 | //! These return a [`Transfer`] future that can be `.await`ed: | ||
| 29 | //! | ||
| 30 | //! ```no_run | ||
| 31 | //! # use embassy_mcxa::dma::{DmaChannel, TransferOptions}; | ||
| 32 | //! # let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 33 | //! # let src = [0u32; 4]; | ||
| 34 | //! # let mut dst = [0u32; 4]; | ||
| 35 | //! // Simple memory-to-memory transfer | ||
| 36 | //! unsafe { | ||
| 37 | //! dma_ch.mem_to_mem(&src, &mut dst, TransferOptions::default()).await; | ||
| 38 | //! } | ||
| 39 | //! ``` | ||
| 40 | //! | ||
| 41 | //! ## Setup Methods (For Peripheral Drivers) | ||
| 42 | //! | ||
| 43 | //! Use setup methods when you need manual lifecycle control: | ||
| 44 | //! | ||
| 45 | //! | Method | Description | | ||
| 46 | //! |--------|-------------| | ||
| 47 | //! | [`DmaChannel::setup_write()`] | Configure TX without starting | | ||
| 48 | //! | [`DmaChannel::setup_read()`] | Configure RX without starting | | ||
| 49 | //! | ||
| 50 | //! These configure the TCD but don't start the transfer. You control: | ||
| 51 | //! 1. When to call [`DmaChannel::enable_request()`] | ||
| 52 | //! 2. How to detect completion (polling or interrupts) | ||
| 53 | //! 3. When to clean up with [`DmaChannel::clear_done()`] | ||
| 54 | //! | ||
| 55 | //! ## Circular/Ring Buffer API (For Continuous Reception) | ||
| 56 | //! | ||
| 57 | //! Use [`DmaChannel::setup_circular_read()`] for continuous data reception: | ||
| 58 | //! | ||
| 59 | //! ```no_run | ||
| 60 | //! # use embassy_mcxa::dma::DmaChannel; | ||
| 61 | //! # let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 62 | //! # let uart_rx_addr = 0x4000_0000 as *const u8; | ||
| 63 | //! static mut RX_BUF: [u8; 64] = [0; 64]; | ||
| 64 | //! | ||
| 65 | //! let ring_buf = unsafe { | ||
| 66 | //! dma_ch.setup_circular_read(uart_rx_addr, &mut RX_BUF) | ||
| 67 | //! }; | ||
| 68 | //! | ||
| 69 | //! // Read data as it arrives | ||
| 70 | //! let mut buf = [0u8; 16]; | ||
| 71 | //! let n = ring_buf.read(&mut buf).await.unwrap(); | ||
| 72 | //! ``` | ||
| 73 | //! | ||
| 74 | //! ## Scatter-Gather Builder (For Chained Transfers) | ||
| 75 | //! | ||
| 76 | //! Use [`ScatterGatherBuilder`] for complex multi-segment transfers: | ||
| 77 | //! | ||
| 78 | //! ```no_run | ||
| 79 | //! # use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder}; | ||
| 80 | //! # let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
//! # let (src1, src2) = ([0u32; 4], [0u32; 4]);
//! # let (mut dst1, mut dst2) = ([0u32; 4], [0u32; 4]);
//! let mut builder = ScatterGatherBuilder::<u32>::new();
| 82 | //! builder.add_transfer(&src1, &mut dst1); | ||
| 83 | //! builder.add_transfer(&src2, &mut dst2); | ||
| 84 | //! | ||
| 85 | //! let transfer = unsafe { builder.build(&dma_ch).unwrap() }; | ||
| 86 | //! transfer.await; | ||
| 87 | //! ``` | ||
| 88 | //! | ||
| 89 | //! ## Direct TCD Access (For Advanced Use Cases) | ||
| 90 | //! | ||
| 91 | //! For full control, use the channel's `tcd()` method to access TCD registers directly. | ||
| 92 | //! See the `dma_*` examples for patterns. | ||
| 93 | //! | ||
| 94 | //! # Example | ||
| 95 | //! | ||
| 96 | //! ```no_run | ||
| 97 | //! use embassy_mcxa::dma::{DmaChannel, TransferOptions, Direction}; | ||
| 98 | //! | ||
| 99 | //! let dma_ch = DmaChannel::new(p.DMA_CH0); | ||
| 100 | //! // Configure and trigger a transfer... | ||
| 101 | //! ``` | ||
| 102 | |||
| 103 | use core::future::Future; | ||
| 104 | use core::marker::PhantomData; | ||
| 105 | use core::pin::Pin; | ||
| 106 | use core::ptr::NonNull; | ||
| 107 | use core::sync::atomic::{AtomicUsize, Ordering, fence}; | ||
| 108 | use core::task::{Context, Poll}; | ||
| 109 | |||
| 110 | use embassy_hal_internal::PeripheralType; | ||
| 111 | use embassy_sync::waitqueue::AtomicWaker; | ||
| 112 | |||
| 113 | use crate::clocks::Gate; | ||
| 114 | use crate::pac; | ||
| 115 | use crate::pac::Interrupt; | ||
| 116 | use crate::peripherals::DMA0; | ||
| 117 | |||
/// Initialize DMA controller (clock enabled, reset released, controller configured).
///
/// This function is intended to be called ONCE during HAL initialization (`hal::init()`).
///
/// The function enables the DMA0 clock, releases reset, and configures the controller
/// for normal operation with round-robin arbitration.
pub(crate) fn init() {
    // SAFETY: called once during HAL init, before any `DmaChannel` exists, so
    // there can be no concurrent access to the DMA0 clock gate or MP_CSR.
    unsafe {
        // Enable DMA0 clock and release reset
        DMA0::enable_clock();
        DMA0::release_reset();

        // Configure DMA controller
        let dma = &(*pac::Dma0::ptr());
        // ERCA = round-robin channel arbitration; HALT cleared so the
        // controller runs. EDBG/GCLC/GMRC: presumably debug-enable and the
        // global channel-link / master-replication capabilities — confirm
        // field semantics against the MCXA276 reference manual.
        dma.mp_csr().modify(|_, w| {
            w.edbg()
                .enable()
                .erca()
                .enable()
                .halt()
                .normal_operation()
                .gclc()
                .available()
                .gmrc()
                .available()
        });
    }
}
| 146 | |||
| 147 | // ============================================================================ | ||
| 148 | // Phase 1: Foundation Types (Embassy-aligned) | ||
| 149 | // ============================================================================ | ||
| 150 | |||
/// DMA transfer direction.
///
/// The direction determines which side of the transfer uses a fixed address
/// and which side increments through memory (see the `set_*_fixed` /
/// `set_*_increment` TCD helpers).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Direction {
    /// Transfer from memory to memory.
    MemoryToMemory,
    /// Transfer from memory to a peripheral register.
    MemoryToPeripheral,
    /// Transfer from a peripheral register to memory.
    PeripheralToMemory,
}
| 162 | |||
/// DMA transfer priority.
///
/// Hardware channel priority is inverted relative to this enum: 0 is the
/// highest hardware priority and 7 the lowest (see [`Priority::to_hw_priority`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
    /// Low priority (channel priority 7).
    Low,
    /// Medium priority (channel priority 4).
    Medium,
    /// High priority (channel priority 1).
    #[default]
    High,
    /// Highest priority (channel priority 0).
    Highest,
}

impl Priority {
    /// Convert to hardware priority value (0 = highest, 7 = lowest).
    ///
    /// `const` for consistency with the `WordSize` conversion helpers, so the
    /// hardware value can be computed in const contexts as well.
    pub const fn to_hw_priority(self) -> u8 {
        match self {
            Priority::Low => 7,
            Priority::Medium => 4,
            Priority::High => 1,
            Priority::Highest => 0,
        }
    }
}
| 189 | |||
/// DMA transfer data width.
///
/// The eDMA ATTR register encodes the element width as a power of two, and
/// the conversion helpers below mirror that encoding.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum WordSize {
    /// 8-bit (1 byte) transfers.
    OneByte,
    /// 16-bit (2 byte) transfers.
    TwoBytes,
    /// 32-bit (4 byte) transfers.
    #[default]
    FourBytes,
}

impl WordSize {
    /// Size in bytes.
    pub const fn bytes(self) -> usize {
        // The byte width is 2^SSIZE, so derive it from the hardware encoding.
        1usize << self.to_hw_size()
    }

    /// Convert to hardware SSIZE/DSIZE field value.
    pub const fn to_hw_size(self) -> u8 {
        match self {
            WordSize::OneByte => 0,
            WordSize::TwoBytes => 1,
            WordSize::FourBytes => 2,
        }
    }

    /// Create from byte width (1, 2, or 4).
    ///
    /// Returns `None` for any width the hardware cannot transfer natively.
    pub const fn from_bytes(bytes: u8) -> Option<Self> {
        if bytes == 1 {
            Some(WordSize::OneByte)
        } else if bytes == 2 {
            Some(WordSize::TwoBytes)
        } else if bytes == 4 {
            Some(WordSize::FourBytes)
        } else {
            None
        }
    }
}
| 232 | |||
/// Trait for types that can be transferred via DMA.
///
/// This provides compile-time type safety for DMA transfers.
pub trait Word: Copy + 'static {
    /// The word size for this type.
    fn size() -> WordSize;
}

// One impl per element width the eDMA ATTR field supports
// (see `WordSize::to_hw_size`).
impl Word for u8 {
    fn size() -> WordSize {
        WordSize::OneByte
    }
}

impl Word for u16 {
    fn size() -> WordSize {
        WordSize::TwoBytes
    }
}

impl Word for u32 {
    fn size() -> WordSize {
        WordSize::FourBytes
    }
}
| 258 | |||
/// DMA transfer options.
///
/// This struct configures various aspects of a DMA transfer.
///
/// Marked `#[non_exhaustive]`, so construct it via `TransferOptions::default()`
/// and override individual fields.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Transfer priority.
    pub priority: Priority,
    /// Enable circular (continuous) mode.
    ///
    /// When enabled, the transfer repeats automatically after completing.
    pub circular: bool,
    /// Enable interrupt on half transfer complete.
    pub half_transfer_interrupt: bool,
    /// Enable interrupt on transfer complete.
    pub complete_transfer_interrupt: bool,
}
| 277 | |||
| 278 | impl Default for TransferOptions { | ||
| 279 | fn default() -> Self { | ||
| 280 | Self { | ||
| 281 | priority: Priority::High, | ||
| 282 | circular: false, | ||
| 283 | half_transfer_interrupt: false, | ||
| 284 | complete_transfer_interrupt: true, | ||
| 285 | } | ||
| 286 | } | ||
| 287 | } | ||
| 288 | |||
/// DMA error types.
///
/// Returned by the higher-level transfer and ring-buffer APIs
/// (NOTE(review): exact producers are defined later in this module — verify).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA controller reported a bus error.
    BusError,
    /// The transfer was aborted.
    Aborted,
    /// Configuration error (e.g., invalid parameters).
    Configuration,
    /// Buffer overrun (for ring buffers).
    Overrun,
}
| 302 | |||
/// Whether to enable the major loop completion interrupt.
///
/// This enum provides better readability than a boolean parameter
/// for functions that configure DMA interrupt behavior.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum EnableInterrupt {
    /// Enable the interrupt on major loop completion.
    Yes,
    /// Do not enable the interrupt.
    No,
}
| 315 | |||
| 316 | // ============================================================================ | ||
| 317 | // DMA Constants | ||
| 318 | // ============================================================================ | ||
| 319 | |||
/// Maximum bytes per DMA transfer (eDMA4 CITER/BITER are 15-bit fields).
///
/// This is a hardware limitation of the eDMA4 controller. Transfers larger
/// than this must be split into multiple DMA operations.
///
/// 0x7FFF == 32 767 == 2^15 - 1.
pub const DMA_MAX_TRANSFER_SIZE: usize = 0x7FFF;
| 325 | |||
| 326 | // ============================================================================ | ||
| 327 | // DMA Request Source Types (Type-Safe API) | ||
| 328 | // ============================================================================ | ||
| 329 | |||
/// Trait for type-safe DMA request sources.
///
/// Each peripheral that can trigger DMA requests implements this trait
/// with marker types that encode the correct request source number at
/// compile time. This prevents using the wrong request source for a
/// peripheral.
///
/// # Example
///
/// ```ignore
/// // The LPUART2 RX request source is automatically derived from the type:
/// channel.set_request_source::<Lpuart2RxRequest>();
/// ```
///
/// This trait is sealed and cannot be implemented outside this crate.
#[allow(private_bounds)]
pub trait DmaRequest: sealed::SealedDmaRequest {
    /// The hardware request source number for the DMA mux.
    const REQUEST_NUMBER: u8;
}
| 350 | |||
/// Macro to define a DMA request type.
///
/// Creates a zero-sized marker type that implements `DmaRequest` with
/// the specified request number. The sealed-trait impl keeps the set of
/// request sources closed to this crate.
macro_rules! define_dma_request {
    ($(#[$meta:meta])* $name:ident = $num:expr) => {
        $(#[$meta])*
        #[derive(Debug, Copy, Clone)]
        pub struct $name;

        impl sealed::SealedDmaRequest for $name {}

        impl DmaRequest for $name {
            const REQUEST_NUMBER: u8 = $num;
        }
    };
}
| 368 | |||
// LPUART DMA request sources (from MCXA276 reference manual Table 4-8)
// NOTE(review): numbers 21..=32 (RX/TX interleaved per instance) transcribed
// from the RM — verify against the silicon revision in use.
define_dma_request!(
    /// DMA request source for LPUART0 RX.
    Lpuart0RxRequest = 21
);
define_dma_request!(
    /// DMA request source for LPUART0 TX.
    Lpuart0TxRequest = 22
);
define_dma_request!(
    /// DMA request source for LPUART1 RX.
    Lpuart1RxRequest = 23
);
define_dma_request!(
    /// DMA request source for LPUART1 TX.
    Lpuart1TxRequest = 24
);
define_dma_request!(
    /// DMA request source for LPUART2 RX.
    Lpuart2RxRequest = 25
);
define_dma_request!(
    /// DMA request source for LPUART2 TX.
    Lpuart2TxRequest = 26
);
define_dma_request!(
    /// DMA request source for LPUART3 RX.
    Lpuart3RxRequest = 27
);
define_dma_request!(
    /// DMA request source for LPUART3 TX.
    Lpuart3TxRequest = 28
);
define_dma_request!(
    /// DMA request source for LPUART4 RX.
    Lpuart4RxRequest = 29
);
define_dma_request!(
    /// DMA request source for LPUART4 TX.
    Lpuart4TxRequest = 30
);
define_dma_request!(
    /// DMA request source for LPUART5 RX.
    Lpuart5RxRequest = 31
);
define_dma_request!(
    /// DMA request source for LPUART5 TX.
    Lpuart5TxRequest = 32
);
| 418 | |||
| 419 | // ============================================================================ | ||
| 420 | // Channel Trait (Sealed Pattern) | ||
| 421 | // ============================================================================ | ||
| 422 | |||
mod sealed {
    use crate::pac::Interrupt;

    /// Sealed trait for DMA channels.
    ///
    /// Private module keeps these impls restricted to this crate.
    pub trait SealedChannel {
        /// Zero-based channel index into the TCD array.
        fn index(&self) -> usize;
        /// Interrupt vector for this channel.
        fn interrupt(&self) -> Interrupt;
    }

    /// Sealed trait for DMA request sources.
    pub trait SealedDmaRequest {}
}
| 437 | |||
/// Marker trait implemented by HAL peripheral tokens that map to a DMA0
/// channel backed by one EDMA_0_TCD0 TCD slot.
///
/// The associated constants mirror the sealed-trait methods so the values
/// are also available at compile time.
///
/// This trait is sealed and cannot be implemented outside this crate.
#[allow(private_bounds)]
pub trait Channel: sealed::SealedChannel + PeripheralType + Into<AnyChannel> + 'static {
    /// Zero-based channel index into the TCD array.
    const INDEX: usize;
    /// Interrupt vector for this channel.
    const INTERRUPT: Interrupt;
}
| 449 | |||
/// Type-erased DMA channel.
///
/// This allows storing DMA channels in a uniform way regardless of their
/// concrete type, useful for async transfer futures and runtime channel selection.
///
/// The fields cache the typed channel's `INDEX`/`INTERRUPT` constants.
#[derive(Debug, Clone, Copy)]
pub struct AnyChannel {
    // Zero-based TCD slot index of the erased channel.
    index: usize,
    // The channel's dedicated interrupt vector.
    interrupt: Interrupt,
}
| 459 | |||
impl AnyChannel {
    /// Get the channel index.
    #[inline]
    pub const fn index(&self) -> usize {
        self.index
    }

    /// Get the channel interrupt.
    #[inline]
    pub const fn interrupt(&self) -> Interrupt {
        self.interrupt
    }

    /// Get a reference to the TCD register block for this channel.
    ///
    /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
    #[inline]
    fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
        // Safety: MCXA276 has a single eDMA instance, and we're only accessing
        // the TCD for this specific channel
        let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
        edma.tcd(self.index)
    }

    /// Check if the channel's DONE flag is set.
    pub fn is_done(&self) -> bool {
        self.tcd().ch_csr().read().done().bit_is_set()
    }

    /// Get the waker for this channel.
    ///
    /// `STATES` is the per-channel driver state table defined elsewhere in
    /// this module; each entry holds an `AtomicWaker` signalled from the
    /// channel's ISR.
    pub fn waker(&self) -> &'static AtomicWaker {
        &STATES[self.index].waker
    }
}
| 494 | |||
// `AnyChannel` satisfies the sealed channel contract by returning the
// index/interrupt it cached when the typed channel was erased.
impl sealed::SealedChannel for AnyChannel {
    fn index(&self) -> usize {
        self.index
    }

    fn interrupt(&self) -> Interrupt {
        self.interrupt
    }
}
| 504 | |||
/// Macro to implement Channel trait for a peripheral.
///
/// For a given peripheral token it emits:
/// - the sealed-trait methods (runtime index/interrupt),
/// - the `Channel` constants (compile-time index/interrupt),
/// - an infallible conversion into the type-erased `AnyChannel`.
macro_rules! impl_channel {
    ($peri:ident, $index:expr, $irq:ident) => {
        impl sealed::SealedChannel for crate::peripherals::$peri {
            fn index(&self) -> usize {
                $index
            }

            fn interrupt(&self) -> Interrupt {
                Interrupt::$irq
            }
        }

        impl Channel for crate::peripherals::$peri {
            const INDEX: usize = $index;
            const INTERRUPT: Interrupt = Interrupt::$irq;
        }

        impl From<crate::peripherals::$peri> for AnyChannel {
            fn from(_: crate::peripherals::$peri) -> Self {
                AnyChannel {
                    index: $index,
                    interrupt: Interrupt::$irq,
                }
            }
        }
    };
}
| 533 | |||
// Map each DMA_CHx peripheral token to TCD slot x and its dedicated
// interrupt vector (each channel has its own vector on MCXA276).
impl_channel!(DMA_CH0, 0, DMA_CH0);
impl_channel!(DMA_CH1, 1, DMA_CH1);
impl_channel!(DMA_CH2, 2, DMA_CH2);
impl_channel!(DMA_CH3, 3, DMA_CH3);
impl_channel!(DMA_CH4, 4, DMA_CH4);
impl_channel!(DMA_CH5, 5, DMA_CH5);
impl_channel!(DMA_CH6, 6, DMA_CH6);
impl_channel!(DMA_CH7, 7, DMA_CH7);
| 542 | |||
/// Strongly-typed handle to a DMA0 channel.
///
/// The lifetime of this value is tied to the unique peripheral token
/// supplied by `embassy_hal_internal::peripherals!`, so safe code cannot
/// create two `DmaChannel` instances for the same hardware channel.
pub struct DmaChannel<C: Channel> {
    // Zero-sized: the channel identity lives entirely in the type parameter.
    _ch: core::marker::PhantomData<C>,
}
| 551 | |||
| 552 | // ============================================================================ | ||
| 553 | // DMA Transfer Methods - API Overview | ||
| 554 | // ============================================================================ | ||
| 555 | // | ||
| 556 | // The DMA API provides two categories of methods for configuring transfers: | ||
| 557 | // | ||
| 558 | // ## 1. Async Methods (Return `Transfer` Future) | ||
| 559 | // | ||
| 560 | // These methods return a [`Transfer`] Future that must be `.await`ed: | ||
| 561 | // | ||
| 562 | // - [`write()`](DmaChannel::write) - Memory-to-peripheral using default eDMA TCD block | ||
| 563 | // - [`read()`](DmaChannel::read) - Peripheral-to-memory using default eDMA TCD block | ||
| 564 | // - [`write_to_peripheral()`](DmaChannel::write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block | ||
| 565 | // - [`read_from_peripheral()`](DmaChannel::read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block | ||
| 566 | // - [`mem_to_mem()`](DmaChannel::mem_to_mem) - Memory-to-memory using default eDMA TCD block | ||
| 567 | // | ||
| 568 | // The `Transfer` manages the DMA lifecycle automatically: | ||
| 569 | // - Enables channel request | ||
| 570 | // - Waits for completion via async/await | ||
| 571 | // - Cleans up on completion | ||
| 572 | // | ||
| 573 | // **Important:** `Transfer::Drop` aborts the transfer if dropped before completion. | ||
| 574 | // This means you MUST `.await` the Transfer or it will be aborted when it goes out of scope. | ||
| 575 | // | ||
| 576 | // **Use case:** When you want to use async/await and let the Transfer handle lifecycle management. | ||
| 577 | // | ||
| 578 | // ## 2. Setup Methods (Configure TCD Only) | ||
| 579 | // | ||
| 580 | // These methods configure the TCD but do NOT return a `Transfer`: | ||
| 581 | // | ||
| 582 | // - [`setup_write()`](DmaChannel::setup_write) - Memory-to-peripheral using default eDMA TCD block | ||
| 583 | // - [`setup_read()`](DmaChannel::setup_read) - Peripheral-to-memory using default eDMA TCD block | ||
| 584 | // - [`setup_write_to_peripheral()`](DmaChannel::setup_write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block | ||
| 585 | // - [`setup_read_from_peripheral()`](DmaChannel::setup_read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block | ||
| 586 | // | ||
| 587 | // The caller is responsible for the complete DMA lifecycle: | ||
| 588 | // 1. Call [`enable_request()`](DmaChannel::enable_request) to start the transfer | ||
| 589 | // 2. Poll [`is_done()`](DmaChannel::is_done) or use interrupts to detect completion | ||
| 590 | // 3. Call [`disable_request()`](DmaChannel::disable_request), [`clear_done()`](DmaChannel::clear_done), | ||
| 591 | // [`clear_interrupt()`](DmaChannel::clear_interrupt) for cleanup | ||
| 592 | // | ||
| 593 | // **Use case:** Peripheral drivers (like LPUART) that need fine-grained control over | ||
| 594 | // DMA setup before starting a `Transfer`. | ||
| 595 | // | ||
| 596 | // ============================================================================ | ||
| 597 | |||
| 598 | impl<C: Channel> DmaChannel<C> { | ||
    /// Wrap a DMA channel token (takes ownership of the Peri wrapper).
    ///
    /// Note: DMA is initialized during `hal::init()` via `dma::init()`.
    ///
    /// Side effect: unmasks this channel's dedicated NVIC interrupt so
    /// completion events can be delivered (the ISR is defined elsewhere in
    /// this module — not visible here).
    #[inline]
    pub fn new(_ch: embassy_hal_internal::Peri<'_, C>) -> Self {
        // SAFETY: unmasking an interrupt; NOTE(review): assumes no
        // priority-based critical section relies on this IRQ staying masked.
        unsafe {
            cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
        }
        Self {
            _ch: core::marker::PhantomData,
        }
    }
| 611 | |||
    /// Channel index in the EDMA_0_TCD0 array.
    ///
    /// Always the compile-time constant `C::INDEX`.
    #[inline]
    pub const fn index(&self) -> usize {
        C::INDEX
    }
| 617 | |||
| 618 | /// Convert this typed channel into a type-erased `AnyChannel`. | ||
| 619 | #[inline] | ||
| 620 | pub fn into_any(self) -> AnyChannel { | ||
| 621 | AnyChannel { | ||
| 622 | index: C::INDEX, | ||
| 623 | interrupt: C::INTERRUPT, | ||
| 624 | } | ||
| 625 | } | ||
| 626 | |||
    /// Get a reference to the type-erased channel info.
    ///
    /// Cheap copy; does not consume the typed handle.
    #[inline]
    pub fn as_any(&self) -> AnyChannel {
        AnyChannel {
            index: C::INDEX,
            interrupt: C::INTERRUPT,
        }
    }
| 635 | |||
    /// Return a reference to the underlying TCD register block.
    ///
    /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
    ///
    /// # Note
    ///
    /// This is exposed for advanced use cases that need direct TCD access.
    /// For most use cases, prefer the higher-level transfer methods.
    #[inline]
    pub fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
        // Safety: MCXA276 has a single eDMA instance
        let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
        edma.tcd(C::INDEX)
    }
| 650 | |||
    /// Zero every TCD register of `t`, wiping any stale transfer descriptor.
    fn clear_tcd(t: &'static pac::edma_0_tcd0::Tcd) {
        // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
        // Reset ALL TCD registers to 0 to clear any stale configuration from
        // previous transfers. This is critical when reusing a channel.
        t.tcd_saddr().write(|w| unsafe { w.saddr().bits(0) });
        t.tcd_soff().write(|w| unsafe { w.soff().bits(0) });
        t.tcd_attr().write(|w| unsafe { w.bits(0) });
        t.tcd_nbytes_mloffno().write(|w| unsafe { w.nbytes().bits(0) });
        t.tcd_slast_sda().write(|w| unsafe { w.slast_sda().bits(0) });
        t.tcd_daddr().write(|w| unsafe { w.daddr().bits(0) });
        t.tcd_doff().write(|w| unsafe { w.doff().bits(0) });
        t.tcd_citer_elinkno().write(|w| unsafe { w.bits(0) });
        t.tcd_dlast_sga().write(|w| unsafe { w.dlast_sga().bits(0) });
        t.tcd_csr().write(|w| unsafe { w.bits(0) }); // Clear CSR completely
        t.tcd_biter_elinkno().write(|w| unsafe { w.bits(0) });
    }
| 667 | |||
    /// Program the major-loop iteration count (BITER and CITER must match at
    /// transfer start). The `elinkno` register variants keep channel linking
    /// disabled.
    #[inline]
    fn set_major_loop_ct_elinkno(t: &'static pac::edma_0_tcd0::Tcd, count: u16) {
        t.tcd_biter_elinkno().write(|w| unsafe { w.biter().bits(count) });
        t.tcd_citer_elinkno().write(|w| unsafe { w.citer().bits(count) });
    }
| 673 | |||
    /// Program NBYTES (bytes per minor loop) using the `mloffno` variant,
    /// i.e. with minor-loop offsets disabled.
    #[inline]
    fn set_minor_loop_ct_no_offsets(t: &'static pac::edma_0_tcd0::Tcd, count: u32) {
        t.tcd_nbytes_mloffno().write(|w| unsafe { w.nbytes().bits(count) });
    }
| 678 | |||
    /// Clear SLAST/DLAST so neither address is rewound after the major loop.
    #[inline]
    fn set_no_final_adjustments(t: &'static pac::edma_0_tcd0::Tcd) {
        // No source/dest adjustment after major loop
        t.tcd_slast_sda().write(|w| unsafe { w.slast_sda().bits(0) });
        t.tcd_dlast_sga().write(|w| unsafe { w.dlast_sga().bits(0) });
    }
| 685 | |||
    /// Program SADDR with the transfer's source address.
    #[inline]
    fn set_source_ptr<T>(t: &'static pac::edma_0_tcd0::Tcd, p: *const T) {
        t.tcd_saddr().write(|w| unsafe { w.saddr().bits(p as u32) });
    }
| 690 | |||
    /// SOFF = element size: the source address advances one element per read
    /// (memory-side of a transfer).
    #[inline]
    fn set_source_increment(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) {
        t.tcd_soff().write(|w| unsafe { w.soff().bits(sz.bytes() as u16) });
    }
| 695 | |||
    /// SOFF = 0: the source address stays fixed (e.g. a peripheral data
    /// register).
    #[inline]
    fn set_source_fixed(t: &'static pac::edma_0_tcd0::Tcd) {
        t.tcd_soff().write(|w| unsafe { w.soff().bits(0) });
    }
| 700 | |||
    /// Program DADDR with the transfer's destination address.
    #[inline]
    fn set_dest_ptr<T>(t: &'static pac::edma_0_tcd0::Tcd, p: *mut T) {
        t.tcd_daddr().write(|w| unsafe { w.daddr().bits(p as u32) });
    }
| 705 | |||
    /// DOFF = element size: the destination address advances one element per
    /// write (memory-side of a transfer).
    #[inline]
    fn set_dest_increment(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) {
        t.tcd_doff().write(|w| unsafe { w.doff().bits(sz.bytes() as u16) });
    }
| 710 | |||
    /// DOFF = 0: the destination address stays fixed (e.g. a peripheral data
    /// register).
    #[inline]
    fn set_dest_fixed(t: &'static pac::edma_0_tcd0::Tcd) {
        t.tcd_doff().write(|w| unsafe { w.doff().bits(0) });
    }
| 715 | |||
| 716 | #[inline] | ||
| 717 | fn set_even_transfer_size(t: &'static pac::edma_0_tcd0::Tcd, sz: WordSize) { | ||
| 718 | let hw_size = sz.to_hw_size(); | ||
| 719 | t.tcd_attr() | ||
| 720 | .write(|w| unsafe { w.ssize().bits(hw_size).dsize().bits(hw_size) }); | ||
| 721 | } | ||
| 722 | |||
| 723 | #[inline] | ||
| 724 | fn reset_channel_state(t: &'static pac::edma_0_tcd0::Tcd) { | ||
| 725 | // CSR: Resets to all zeroes (disabled), "done" is cleared by writing 1 | ||
| 726 | t.ch_csr().write(|w| w.done().clear_bit_by_one()); | ||
| 727 | // ES: Resets to all zeroes (disabled), "err" is cleared by writing 1 | ||
| 728 | t.ch_es().write(|w| w.err().clear_bit_by_one()); | ||
| 729 | // INT: Resets to all zeroes (disabled), "int" is cleared by writing 1 | ||
| 730 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 731 | } | ||
| 732 | |||
| 733 | /// Start an async transfer. | ||
| 734 | /// | ||
| 735 | /// The channel must already be configured. This enables the channel | ||
| 736 | /// request and returns a `Transfer` future that resolves when the | ||
| 737 | /// DMA transfer completes. | ||
| 738 | /// | ||
| 739 | /// # Safety | ||
| 740 | /// | ||
| 741 | /// The caller must ensure the DMA channel has been properly configured | ||
| 742 | /// and that source/destination buffers remain valid for the duration | ||
| 743 | /// of the transfer. | ||
| 744 | pub unsafe fn start_transfer(&self) -> Transfer<'_> { | ||
| 745 | // Clear any previous DONE/INT flags | ||
| 746 | let t = self.tcd(); | ||
| 747 | t.ch_csr().modify(|_, w| w.done().clear_bit_by_one()); | ||
| 748 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 749 | |||
| 750 | // Enable the channel request | ||
| 751 | t.ch_csr().modify(|_, w| w.erq().enable()); | ||
| 752 | |||
| 753 | Transfer::new(self.as_any()) | ||
| 754 | } | ||
| 755 | |||
| 756 | // ======================================================================== | ||
| 757 | // Type-Safe Transfer Methods (Embassy-style API) | ||
| 758 | // ======================================================================== | ||
| 759 | |||
| 760 | /// Perform a memory-to-memory DMA transfer (simplified API). | ||
| 761 | /// | ||
| 762 | /// This is a type-safe wrapper that uses the `Word` trait to determine | ||
| 763 | /// the correct transfer width automatically. Uses the global eDMA TCD | ||
| 764 | /// register accessor internally. | ||
| 765 | /// | ||
| 766 | /// # Arguments | ||
| 767 | /// | ||
| 768 | /// * `src` - Source buffer | ||
| 769 | /// * `dst` - Destination buffer (must be at least as large as src) | ||
| 770 | /// * `options` - Transfer configuration options | ||
| 771 | /// | ||
| 772 | /// # Safety | ||
| 773 | /// | ||
| 774 | /// The source and destination buffers must remain valid for the | ||
| 775 | /// duration of the transfer. | ||
| 776 | pub fn mem_to_mem<W: Word>( | ||
| 777 | &self, | ||
| 778 | src: &[W], | ||
| 779 | dst: &mut [W], | ||
| 780 | options: TransferOptions, | ||
| 781 | ) -> Result<Transfer<'_>, Error> { | ||
| 782 | let mut invalid = false; | ||
| 783 | invalid |= src.is_empty(); | ||
| 784 | invalid |= src.len() > dst.len(); | ||
| 785 | invalid |= src.len() > 0x7fff; | ||
| 786 | if invalid { | ||
| 787 | return Err(Error::Configuration); | ||
| 788 | } | ||
| 789 | |||
| 790 | let size = W::size(); | ||
| 791 | let byte_count = (src.len() * size.bytes()) as u32; | ||
| 792 | |||
| 793 | let t = self.tcd(); | ||
| 794 | |||
| 795 | // Reset channel state - clear DONE, disable requests, clear errors | ||
| 796 | Self::reset_channel_state(t); | ||
| 797 | |||
| 798 | // Memory barrier to ensure channel state is fully reset before touching TCD | ||
| 799 | cortex_m::asm::dsb(); | ||
| 800 | |||
| 801 | Self::clear_tcd(t); | ||
| 802 | |||
| 803 | // Memory barrier after TCD reset | ||
| 804 | cortex_m::asm::dsb(); | ||
| 805 | |||
| 806 | // Note: Priority is managed by round-robin arbitration (set in init()) | ||
| 807 | // Per-channel priority can be configured via ch_pri() if needed | ||
| 808 | |||
| 809 | // Now configure the new transfer | ||
| 810 | |||
| 811 | // Source address and increment | ||
| 812 | Self::set_source_ptr(t, src.as_ptr()); | ||
| 813 | Self::set_source_increment(t, size); | ||
| 814 | |||
| 815 | // Destination address and increment | ||
| 816 | Self::set_dest_ptr(t, dst.as_mut_ptr()); | ||
| 817 | Self::set_dest_increment(t, size); | ||
| 818 | |||
| 819 | // Transfer attributes (size) | ||
| 820 | Self::set_even_transfer_size(t, size); | ||
| 821 | |||
| 822 | // Minor loop: transfer all bytes in one minor loop | ||
| 823 | Self::set_minor_loop_ct_no_offsets(t, byte_count); | ||
| 824 | |||
| 825 | // No source/dest adjustment after major loop | ||
| 826 | Self::set_no_final_adjustments(t); | ||
| 827 | |||
| 828 | // Major loop count = 1 (single major loop) | ||
| 829 | // Write BITER first, then CITER (CITER must match BITER at start) | ||
| 830 | Self::set_major_loop_ct_elinkno(t, 1); | ||
| 831 | |||
| 832 | // Memory barrier before setting START | ||
| 833 | cortex_m::asm::dsb(); | ||
| 834 | |||
| 835 | // Control/status: interrupt on major complete, start | ||
| 836 | // Write this last after all other TCD registers are configured | ||
| 837 | let int_major = options.complete_transfer_interrupt; | ||
| 838 | t.tcd_csr().write(|w| { | ||
| 839 | w.intmajor() | ||
| 840 | .bit(int_major) | ||
| 841 | .inthalf() | ||
| 842 | .bit(options.half_transfer_interrupt) | ||
| 843 | .dreq() | ||
| 844 | .set_bit() // Auto-disable request after major loop | ||
| 845 | .start() | ||
| 846 | .set_bit() // Start the channel | ||
| 847 | }); | ||
| 848 | |||
| 849 | Ok(Transfer::new(self.as_any())) | ||
| 850 | } | ||
| 851 | |||
    /// Fill a memory buffer with a pattern value (memset).
    ///
    /// This performs a DMA transfer where the source address remains fixed
    /// (pattern value) while the destination address increments through the buffer.
    /// It's useful for quickly filling large memory regions with a constant value.
    ///
    /// # Arguments
    ///
    /// * `pattern` - Reference to the pattern value (will be read repeatedly)
    /// * `dst` - Destination buffer to fill
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `dst` is empty or longer than `0x7fff` elements.
    ///
    /// NOTE(review): the returned [`Transfer`] borrows `self` but not
    /// `pattern`/`dst`; both must stay alive until the transfer completes.
    ///
    /// # Example
    ///
    /// ```ignore
    /// use embassy_mcxa::dma::{DmaChannel, TransferOptions};
    ///
    /// let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// let pattern: u32 = 0xDEADBEEF;
    /// let mut buffer = [0u32; 256];
    ///
    /// dma_ch.memset(&pattern, &mut buffer, TransferOptions::default()).await;
    /// // buffer is now filled with 0xDEADBEEF
    /// ```
    ///
    pub fn memset<W: Word>(&self, pattern: &W, dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
        assert!(!dst.is_empty());
        assert!(dst.len() <= 0x7fff);

        // Transfer width (and byte size) derived from the element type.
        let size = W::size();
        let byte_size = size.bytes();
        // Total bytes to transfer - all in one minor loop for software-triggered transfers
        let total_bytes = (dst.len() * byte_size) as u32;

        let t = self.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        Self::reset_channel_state(t);

        // Memory barrier to ensure channel state is fully reset before touching TCD
        cortex_m::asm::dsb();

        Self::clear_tcd(t);

        // Memory barrier after TCD reset
        cortex_m::asm::dsb();

        // Now configure the new transfer
        //
        // For software-triggered memset, we use a SINGLE minor loop that transfers
        // all bytes at once. The source address stays fixed (SOFF=0) while the
        // destination increments (DOFF=byte_size). The eDMA will read from the
        // same source address for each destination word.
        //
        // This is necessary because the START bit only triggers ONE minor loop
        // iteration. Using CITER>1 with software trigger would require multiple
        // START triggers.

        // Source: pattern address, fixed (soff=0)
        Self::set_source_ptr(t, pattern);
        Self::set_source_fixed(t);

        // Destination: memory buffer, incrementing by word size
        Self::set_dest_ptr(t, dst.as_mut_ptr());
        Self::set_dest_increment(t, size);

        // Transfer attributes - source and dest are same word size
        Self::set_even_transfer_size(t, size);

        // Minor loop: transfer ALL bytes in one minor loop (like mem_to_mem)
        // This allows the entire transfer to complete with a single START trigger
        Self::set_minor_loop_ct_no_offsets(t, total_bytes);

        // No address adjustment after major loop
        Self::set_no_final_adjustments(t);

        // Major loop count = 1 (single major loop, all data in minor loop)
        // Write BITER first, then CITER (CITER must match BITER at start)
        Self::set_major_loop_ct_elinkno(t, 1);

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Control/status: interrupt on major complete, start immediately
        // Write this last after all other TCD registers are configured
        let int_major = options.complete_transfer_interrupt;
        t.tcd_csr().write(|w| {
            w.intmajor()
                .bit(int_major)
                .inthalf()
                .bit(options.half_transfer_interrupt)
                .dreq()
                .set_bit() // Auto-disable request after major loop
                .start()
                .set_bit() // Start the channel
        });

        Transfer::new(self.as_any())
    }
| 953 | |||
    /// Write data from memory to a peripheral register.
    ///
    /// The destination address remains fixed (peripheral register) while
    /// the source address increments through the buffer.
    ///
    /// Convenience alias that delegates to
    /// [`write_to_peripheral()`](Self::write_to_peripheral).
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn write<W: Word>(&self, buf: &[W], peri_addr: *mut W, options: TransferOptions) -> Transfer<'_> {
        self.write_to_peripheral(buf, peri_addr, options)
    }
| 972 | |||
    /// Configure a memory-to-peripheral DMA transfer without starting it.
    ///
    /// This is a convenience wrapper around [`setup_write_to_peripheral()`](Self::setup_write_to_peripheral)
    /// that uses the default eDMA TCD register block.
    ///
    /// This method configures the TCD but does NOT return a `Transfer`. The caller
    /// is responsible for the complete DMA lifecycle:
    /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
    /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
    /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
    ///    [`clear_interrupt()`](Self::clear_interrupt) for cleanup
    ///
    /// # Example
    ///
    /// ```ignore
    /// use embassy_mcxa::dma::{DmaChannel, EnableInterrupt};
    ///
    /// let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// let uart_tx_addr = 0x4000_0000 as *mut u8;
    /// let data = [0x48, 0x65, 0x6c, 0x6c, 0x6f]; // "Hello"
    ///
    /// unsafe {
    ///     // Configure the transfer
    ///     dma_ch.setup_write(&data, uart_tx_addr, EnableInterrupt::Yes);
    ///
    ///     // Start when peripheral is ready
    ///     dma_ch.enable_request();
    ///
    ///     // Wait for completion (or use interrupt)
    ///     while !dma_ch.is_done() {}
    ///
    ///     // Clean up
    ///     dma_ch.clear_done();
    ///     dma_ch.clear_interrupt();
    /// }
    /// ```
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn setup_write<W: Word>(&self, buf: &[W], peri_addr: *mut W, enable_interrupt: EnableInterrupt) {
        self.setup_write_to_peripheral(buf, peri_addr, enable_interrupt)
    }
| 1022 | |||
| 1023 | /// Write data from memory to a peripheral register. | ||
| 1024 | /// | ||
| 1025 | /// The destination address remains fixed (peripheral register) while | ||
| 1026 | /// the source address increments through the buffer. | ||
| 1027 | /// | ||
| 1028 | /// # Arguments | ||
| 1029 | /// | ||
| 1030 | /// * `buf` - Source buffer to write from | ||
| 1031 | /// * `peri_addr` - Peripheral register address | ||
| 1032 | /// * `options` - Transfer configuration options | ||
| 1033 | /// | ||
| 1034 | /// # Safety | ||
| 1035 | /// | ||
| 1036 | /// - The buffer must remain valid for the duration of the transfer. | ||
| 1037 | /// - The peripheral address must be valid for writes. | ||
| 1038 | pub unsafe fn write_to_peripheral<W: Word>( | ||
| 1039 | &self, | ||
| 1040 | buf: &[W], | ||
| 1041 | peri_addr: *mut W, | ||
| 1042 | options: TransferOptions, | ||
| 1043 | ) -> Transfer<'_> { | ||
| 1044 | assert!(!buf.is_empty()); | ||
| 1045 | assert!(buf.len() <= 0x7fff); | ||
| 1046 | |||
| 1047 | let size = W::size(); | ||
| 1048 | let byte_size = size.bytes(); | ||
| 1049 | |||
| 1050 | let t = self.tcd(); | ||
| 1051 | |||
| 1052 | // Reset channel state | ||
| 1053 | Self::reset_channel_state(t); | ||
| 1054 | |||
| 1055 | // Addresses | ||
| 1056 | Self::set_source_ptr(t, buf.as_ptr()); | ||
| 1057 | Self::set_dest_ptr(t, peri_addr); | ||
| 1058 | |||
| 1059 | // Offsets: Source increments, Dest fixed | ||
| 1060 | Self::set_source_increment(t, size); | ||
| 1061 | Self::set_dest_fixed(t); | ||
| 1062 | |||
| 1063 | // Attributes: set size and explicitly disable modulo | ||
| 1064 | Self::set_even_transfer_size(t, size); | ||
| 1065 | |||
| 1066 | // Minor loop: transfer one word per request (match old: only set nbytes) | ||
| 1067 | Self::set_minor_loop_ct_no_offsets(t, byte_size as u32); | ||
| 1068 | |||
| 1069 | // No final adjustments | ||
| 1070 | Self::set_no_final_adjustments(t); | ||
| 1071 | |||
| 1072 | // Major loop count = number of words | ||
| 1073 | let count = buf.len() as u16; | ||
| 1074 | Self::set_major_loop_ct_elinkno(t, count); | ||
| 1075 | |||
| 1076 | // CSR: interrupt on major loop complete and auto-clear ERQ | ||
| 1077 | t.tcd_csr().write(|w| { | ||
| 1078 | let w = if options.complete_transfer_interrupt { | ||
| 1079 | w.intmajor().enable() | ||
| 1080 | } else { | ||
| 1081 | w.intmajor().disable() | ||
| 1082 | }; | ||
| 1083 | w.inthalf() | ||
| 1084 | .disable() | ||
| 1085 | .dreq() | ||
| 1086 | .erq_field_clear() // Disable request when done | ||
| 1087 | .esg() | ||
| 1088 | .normal_format() | ||
| 1089 | .majorelink() | ||
| 1090 | .disable() | ||
| 1091 | .eeop() | ||
| 1092 | .disable() | ||
| 1093 | .esda() | ||
| 1094 | .disable() | ||
| 1095 | .bwc() | ||
| 1096 | .no_stall() | ||
| 1097 | }); | ||
| 1098 | |||
| 1099 | // Ensure all TCD writes have completed before DMA engine reads them | ||
| 1100 | cortex_m::asm::dsb(); | ||
| 1101 | |||
| 1102 | Transfer::new(self.as_any()) | ||
| 1103 | } | ||
| 1104 | |||
    /// Read data from a peripheral register to memory.
    ///
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer.
    ///
    /// Convenience alias that delegates to
    /// [`read_from_peripheral()`](Self::read_from_peripheral).
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], options: TransferOptions) -> Transfer<'_> {
        self.read_from_peripheral(peri_addr, buf, options)
    }
| 1123 | |||
    /// Configure a peripheral-to-memory DMA transfer without starting it.
    ///
    /// This is a convenience wrapper around [`setup_read_from_peripheral()`](Self::setup_read_from_peripheral)
    /// that uses the default eDMA TCD register block.
    ///
    /// This method configures the TCD but does NOT return a `Transfer`. The caller
    /// is responsible for the complete DMA lifecycle:
    /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
    /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
    /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
    ///    [`clear_interrupt()`](Self::clear_interrupt) for cleanup
    ///
    /// # Example
    ///
    /// ```ignore
    /// use embassy_mcxa::dma::{DmaChannel, EnableInterrupt};
    ///
    /// let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// let uart_rx_addr = 0x4000_0000 as *const u8;
    /// let mut buf = [0u8; 32];
    ///
    /// unsafe {
    ///     // Configure the transfer
    ///     dma_ch.setup_read(uart_rx_addr, &mut buf, EnableInterrupt::Yes);
    ///
    ///     // Start when peripheral is ready
    ///     dma_ch.enable_request();
    ///
    ///     // Wait for completion (or use interrupt)
    ///     while !dma_ch.is_done() {}
    ///
    ///     // Clean up
    ///     dma_ch.clear_done();
    ///     dma_ch.clear_interrupt();
    /// }
    /// // buf now contains received data
    /// ```
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], enable_interrupt: EnableInterrupt) {
        self.setup_read_from_peripheral(peri_addr, buf, enable_interrupt)
    }
| 1174 | |||
    /// Read data from a peripheral register to memory.
    ///
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer. Each hardware
    /// request moves one word (one minor loop); the major loop count is the
    /// number of words, and ERQ is auto-cleared (DREQ) when done.
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        options: TransferOptions,
    ) -> Transfer<'_> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        // Transfer width (and byte size) derived from the element type.
        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        Self::reset_channel_state(t);

        // Source: peripheral register, fixed
        Self::set_source_ptr(t, peri_addr);
        Self::set_source_fixed(t);

        // Destination: memory buffer, incrementing
        Self::set_dest_ptr(t, buf.as_mut_ptr());
        Self::set_dest_increment(t, size);

        // Transfer attributes: set size and explicitly disable modulo
        Self::set_even_transfer_size(t, size);

        // Minor loop: transfer one word per request, no offsets
        Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);

        // Major loop count = number of words
        let count = buf.len() as u16;
        Self::set_major_loop_ct_elinkno(t, count);

        // No address adjustment after major loop
        Self::set_no_final_adjustments(t);

        // Control/status: interrupt on major complete, auto-clear ERQ when done
        t.tcd_csr().write(|w| {
            let w = if options.complete_transfer_interrupt {
                w.intmajor().enable()
            } else {
                w.intmajor().disable()
            };
            let w = if options.half_transfer_interrupt {
                w.inthalf().enable()
            } else {
                w.inthalf().disable()
            };
            w.dreq()
                .erq_field_clear() // Disable request when done (important for peripheral DMA)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();

        Transfer::new(self.as_any())
    }
| 1259 | |||
    /// Configure a memory-to-peripheral DMA transfer without starting it.
    ///
    /// This configures the TCD for a memory-to-peripheral transfer but does NOT
    /// return a Transfer object. The caller is responsible for:
    /// 1. Enabling the peripheral's DMA request
    /// 2. Calling `enable_request()` to start the transfer
    /// 3. Polling `is_done()` or using interrupts to detect completion
    /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
    ///
    /// Use this when you need manual control over the DMA lifecycle (e.g., in
    /// peripheral drivers that have their own completion polling).
    ///
    /// # Arguments
    ///
    /// * `buf` - Source buffer to write from
    /// * `peri_addr` - Peripheral register address
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for writes.
    pub unsafe fn setup_write_to_peripheral<W: Word>(
        &self,
        buf: &[W],
        peri_addr: *mut W,
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        // Transfer width (and byte size) derived from the element type.
        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state
        Self::reset_channel_state(t);

        // Addresses
        Self::set_source_ptr(t, buf.as_ptr());
        Self::set_dest_ptr(t, peri_addr);

        // Offsets: Source increments, Dest fixed
        Self::set_source_increment(t, size);
        Self::set_dest_fixed(t);

        // Attributes: set size and explicitly disable modulo
        Self::set_even_transfer_size(t, size);

        // Minor loop: transfer one word per request
        Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);

        // No final adjustments
        Self::set_no_final_adjustments(t);

        // Major loop count = number of words
        let count = buf.len() as u16;
        Self::set_major_loop_ct_elinkno(t, count);

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear()
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
| 1345 | |||
    /// Configure a peripheral-to-memory DMA transfer without starting it.
    ///
    /// This configures the TCD for a peripheral-to-memory transfer but does NOT
    /// return a Transfer object. The caller is responsible for:
    /// 1. Enabling the peripheral's DMA request
    /// 2. Calling `enable_request()` to start the transfer
    /// 3. Polling `is_done()` or using interrupts to detect completion
    /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
    ///
    /// Use this when you need manual control over the DMA lifecycle (e.g., in
    /// peripheral drivers that have their own completion polling).
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `enable_interrupt` - Whether to enable interrupt on completion
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        // Transfer width (and byte size) derived from the element type.
        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        Self::reset_channel_state(t);

        // Source: peripheral register, fixed
        Self::set_source_ptr(t, peri_addr);
        Self::set_source_fixed(t);

        // Destination: memory buffer, incrementing
        Self::set_dest_ptr(t, buf.as_mut_ptr());
        Self::set_dest_increment(t, size);

        // Attributes: set size and explicitly disable modulo
        Self::set_even_transfer_size(t, size);

        // Minor loop: transfer one word per request
        Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);

        // No final adjustments
        Self::set_no_final_adjustments(t);

        // Major loop count = number of words
        let count = buf.len() as u16;
        Self::set_major_loop_ct_elinkno(t, count);

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear()
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
| 1431 | |||
    /// Configure the integrated channel MUX to use the given typed
    /// DMA request source (e.g., [`Lpuart2TxRequest`] or [`Lpuart2RxRequest`]).
    ///
    /// This is the type-safe version that uses marker types to ensure
    /// compile-time verification of request source validity.
    ///
    /// # Safety
    ///
    /// The channel must be properly configured before enabling requests.
    /// The caller must ensure the DMA request source matches the peripheral
    /// that will drive this channel.
    ///
    /// # Note
    ///
    /// The NXP SDK requires a two-step write sequence: first clear
    /// the mux to 0, then set the actual source. This is a hardware
    /// requirement on eDMA4 for the mux to properly latch.
    ///
    /// # Example
    ///
    /// ```ignore
    /// use embassy_mcxa::dma::{DmaChannel, Lpuart2RxRequest};
    ///
    /// // Type-safe: compiler verifies this is a valid DMA request type
    /// unsafe {
    ///     channel.set_request_source::<Lpuart2RxRequest>();
    /// }
    /// ```
    #[inline]
    pub unsafe fn set_request_source<R: DmaRequest>(&self) {
        // Two-step write per NXP SDK: clear to 0, then set actual source.
        self.tcd().ch_mux().write(|w| w.src().bits(0));
        cortex_m::asm::dsb(); // Ensure the clear completes before setting new source
        self.tcd().ch_mux().write(|w| w.src().bits(R::REQUEST_NUMBER));
    }
| 1467 | |||
| 1468 | /// Enable hardware requests for this channel (ERQ=1). | ||
| 1469 | /// | ||
| 1470 | /// # Safety | ||
| 1471 | /// | ||
| 1472 | /// The channel must be properly configured before enabling requests. | ||
| 1473 | pub unsafe fn enable_request(&self) { | ||
| 1474 | let t = self.tcd(); | ||
| 1475 | t.ch_csr().modify(|_, w| w.erq().enable()); | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | /// Disable hardware requests for this channel (ERQ=0). | ||
| 1479 | /// | ||
| 1480 | /// # Safety | ||
| 1481 | /// | ||
| 1482 | /// Disabling requests on an active transfer may leave the transfer incomplete. | ||
| 1483 | pub unsafe fn disable_request(&self) { | ||
| 1484 | let t = self.tcd(); | ||
| 1485 | t.ch_csr().modify(|_, w| w.erq().disable()); | ||
| 1486 | } | ||
| 1487 | |||
| 1488 | /// Return true if the channel's DONE flag is set. | ||
| 1489 | pub fn is_done(&self) -> bool { | ||
| 1490 | let t = self.tcd(); | ||
| 1491 | t.ch_csr().read().done().bit_is_set() | ||
| 1492 | } | ||
| 1493 | |||
| 1494 | /// Clear the DONE flag for this channel. | ||
| 1495 | /// | ||
| 1496 | /// Uses modify to preserve other bits (especially ERQ) unlike write | ||
| 1497 | /// which would clear ERQ and halt an active transfer. | ||
| 1498 | /// | ||
| 1499 | /// # Safety | ||
| 1500 | /// | ||
| 1501 | /// Clearing DONE while a transfer is in progress may cause undefined behavior. | ||
| 1502 | pub unsafe fn clear_done(&self) { | ||
| 1503 | let t = self.tcd(); | ||
| 1504 | t.ch_csr().modify(|_, w| w.done().clear_bit_by_one()); | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | /// Clear the channel interrupt flag (CH_INT.INT). | ||
| 1508 | /// | ||
| 1509 | /// # Safety | ||
| 1510 | /// | ||
| 1511 | /// Must be called from the correct interrupt context or with interrupts disabled. | ||
| 1512 | pub unsafe fn clear_interrupt(&self) { | ||
| 1513 | let t = self.tcd(); | ||
| 1514 | t.ch_int().write(|w| w.int().clear_bit_by_one()); | ||
| 1515 | } | ||
| 1516 | |||
| 1517 | /// Trigger a software start for this channel. | ||
| 1518 | /// | ||
| 1519 | /// # Safety | ||
| 1520 | /// | ||
| 1521 | /// The channel must be properly configured with a valid TCD before triggering. | ||
| 1522 | pub unsafe fn trigger_start(&self) { | ||
| 1523 | let t = self.tcd(); | ||
| 1524 | t.tcd_csr().modify(|_, w| w.start().channel_started()); | ||
| 1525 | } | ||
| 1526 | |||
| 1527 | /// Get the waker for this channel | ||
| 1528 | pub fn waker(&self) -> &'static AtomicWaker { | ||
| 1529 | &STATES[C::INDEX].waker | ||
| 1530 | } | ||
| 1531 | |||
| 1532 | /// Enable the interrupt for this channel in the NVIC. | ||
| 1533 | pub fn enable_interrupt(&self) { | ||
| 1534 | unsafe { | ||
| 1535 | cortex_m::peripheral::NVIC::unmask(C::INTERRUPT); | ||
| 1536 | } | ||
| 1537 | } | ||
| 1538 | |||
| 1539 | /// Enable Major Loop Linking. | ||
| 1540 | /// | ||
| 1541 | /// When the major loop completes, the hardware will trigger a service request | ||
| 1542 | /// on `link_ch`. | ||
| 1543 | /// | ||
| 1544 | /// # Arguments | ||
| 1545 | /// | ||
| 1546 | /// * `link_ch` - Target channel index (0-7) to link to | ||
| 1547 | /// | ||
| 1548 | /// # Safety | ||
| 1549 | /// | ||
| 1550 | /// The channel must be properly configured before setting up linking. | ||
| 1551 | pub unsafe fn set_major_link(&self, link_ch: usize) { | ||
| 1552 | let t = self.tcd(); | ||
| 1553 | t.tcd_csr() | ||
| 1554 | .modify(|_, w| w.majorelink().enable().majorlinkch().bits(link_ch as u8)); | ||
| 1555 | } | ||
| 1556 | |||
| 1557 | /// Disable Major Loop Linking. | ||
| 1558 | /// | ||
| 1559 | /// Removes any major loop channel linking previously configured. | ||
| 1560 | /// | ||
| 1561 | /// # Safety | ||
| 1562 | /// | ||
| 1563 | /// The caller must ensure this doesn't disrupt an active transfer that | ||
| 1564 | /// depends on the linking. | ||
| 1565 | pub unsafe fn clear_major_link(&self) { | ||
| 1566 | let t = self.tcd(); | ||
| 1567 | t.tcd_csr().modify(|_, w| w.majorelink().disable()); | ||
| 1568 | } | ||
| 1569 | |||
/// Enable Minor Loop Linking.
///
/// After each minor loop, the hardware will trigger a service request
/// on `link_ch`.
///
/// # Arguments
///
/// * `link_ch` - Target channel index (0-7) to link to
///
/// # Note
///
/// This rewrites CITER and BITER registers to the ELINKYES format.
/// It preserves the current loop count.
///
/// # Safety
///
/// The channel must be properly configured before setting up linking.
pub unsafe fn set_minor_link(&self, link_ch: usize) {
    let t = self.tcd();

    // Read current CITER (assuming ELINKNO format initially)
    let current_citer = t.tcd_citer_elinkno().read().citer().bits();
    let current_biter = t.tcd_biter_elinkno().read().biter().bits();

    // NOTE(review): the ELINKYES layout has a narrower CITER/BITER count
    // field than ELINKNO (see the 9-bit vs 15-bit note in
    // clear_minor_link), so loop counts that don't fit are truncated by
    // the writes below — keep the major loop count small enough when
    // linking. Confirm the exact field width against the reference manual.
    //
    // Write back using ELINKYES format
    t.tcd_citer_elinkyes().write(|w| {
        w.citer()
            .bits(current_citer)
            .elink()
            .enable()
            .linkch()
            .bits(link_ch as u8)
    });

    t.tcd_biter_elinkyes().write(|w| {
        w.biter()
            .bits(current_biter)
            .elink()
            .enable()
            .linkch()
            .bits(link_ch as u8)
    });
}
| 1613 | |||
/// Disable Minor Loop Linking.
///
/// Removes any minor loop channel linking previously configured.
/// This rewrites CITER and BITER registers to the ELINKNO format,
/// preserving the current loop count.
///
/// Both CITER and BITER are rewritten so their ELINK bits stay
/// consistent — a CITER[ELINK]/BITER[ELINK] mismatch is reported by the
/// hardware as an NBYTES/CITER configuration error.
///
/// # Safety
///
/// The caller must ensure this doesn't disrupt an active transfer that
/// depends on the linking.
pub unsafe fn clear_minor_link(&self) {
    let t = self.tcd();

    // Read current CITER (could be in either format, but we only need the count)
    // Note: In ELINKYES format, citer is 9 bits; in ELINKNO, it's 15 bits.
    // We read from ELINKNO which will give us the combined value.
    let current_citer = t.tcd_citer_elinkno().read().citer().bits();
    let current_biter = t.tcd_biter_elinkno().read().biter().bits();

    // NOTE(review): when the register is currently in ELINKYES format,
    // reading through the ELINKNO view folds the link-channel bits into
    // the count, so the value written back may differ from the original
    // loop count — confirm this is acceptable for in-flight transfers.
    //
    // Write back using ELINKNO format (disabling link)
    t.tcd_citer_elinkno()
        .write(|w| w.citer().bits(current_citer).elink().disable());

    t.tcd_biter_elinkno()
        .write(|w| w.biter().bits(current_biter).elink().disable());
}
| 1640 | |||
/// Load a TCD from memory into the hardware channel registers.
///
/// This is useful for scatter/gather and ping-pong transfers where
/// TCDs are prepared in RAM and then loaded into the hardware.
///
/// Signed fields (`soff`, `doff`, `slast`, `dlast_sga`) are written with
/// `as` casts, which preserve the two's-complement bit pattern the
/// hardware expects. CITER/BITER are written through the ELINKNO view,
/// so any ELINK/link-channel information in `tcd.citer`/`tcd.biter` is
/// written as part of the raw count.
///
/// # Safety
///
/// - The TCD must be properly initialized.
/// - The caller must ensure no concurrent access to the same channel.
pub unsafe fn load_tcd(&self, tcd: &Tcd) {
    let t = self.tcd();
    t.tcd_saddr().write(|w| w.saddr().bits(tcd.saddr));
    t.tcd_soff().write(|w| w.soff().bits(tcd.soff as u16));
    // ATTR and CSR are stored as raw register bits in `Tcd`.
    t.tcd_attr().write(|w| w.bits(tcd.attr));
    t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(tcd.nbytes));
    t.tcd_slast_sda().write(|w| w.slast_sda().bits(tcd.slast as u32));
    t.tcd_daddr().write(|w| w.daddr().bits(tcd.daddr));
    t.tcd_doff().write(|w| w.doff().bits(tcd.doff as u16));
    t.tcd_citer_elinkno().write(|w| w.citer().bits(tcd.citer));
    t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(tcd.dlast_sga as u32));
    // NOTE(review): CSR is written before BITER; if `tcd.csr` has START
    // set, the engine could begin before BITER lands — confirm the
    // intended write ordering against the eDMA reference manual.
    t.tcd_csr().write(|w| w.bits(tcd.csr));
    t.tcd_biter_elinkno().write(|w| w.biter().bits(tcd.biter));
}
| 1664 | } | ||
| 1665 | |||
/// In-memory representation of a Transfer Control Descriptor (TCD).
///
/// This matches the hardware layout (32 bytes). The 32-byte alignment is
/// required so a `Tcd` can be the target of a scatter/gather load —
/// the hardware flags an error when `DLAST_SGA` is not 32-byte aligned.
#[repr(C, align(32))]
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Tcd {
    /// Source address (TCD_SADDR).
    pub saddr: u32,
    /// Signed source address offset applied after each read (TCD_SOFF).
    pub soff: i16,
    /// Transfer attributes (TCD_ATTR) as raw register bits.
    pub attr: u16,
    /// Byte count per minor loop (TCD_NBYTES).
    pub nbytes: u32,
    /// Signed source-address adjustment applied after the major loop (TCD_SLAST_SDA).
    pub slast: i32,
    /// Destination address (TCD_DADDR).
    pub daddr: u32,
    /// Signed destination address offset applied after each write (TCD_DOFF).
    pub doff: i16,
    /// Current major loop count (TCD_CITER).
    pub citer: u16,
    /// Signed destination adjustment, or next-TCD address when
    /// scatter/gather is enabled (TCD_DLAST_SGA).
    pub dlast_sga: i32,
    /// Control/status bits (TCD_CSR) as raw register bits.
    pub csr: u16,
    /// Beginning (reload) major loop count (TCD_BITER).
    pub biter: u16,
}
| 1685 | |||
/// Per-channel async state shared between user tasks and the channel's
/// interrupt handler.
struct State {
    /// Waker for transfer complete interrupt
    waker: AtomicWaker,
    /// Waker for half-transfer interrupt
    half_waker: AtomicWaker,
}
| 1692 | |||
impl State {
    /// Create an empty state with no registered wakers.
    ///
    /// `const` so it can initialize the `static` channel table.
    const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
            half_waker: AtomicWaker::new(),
        }
    }
}
| 1701 | |||
/// One waker pair per DMA channel; this driver exposes 8 channels.
static STATES: [State; 8] = [
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
];
| 1712 | |||
/// Look up the transfer-complete waker for channel `idx` (used by the
/// channel interrupt handlers).
pub(crate) fn waker(idx: usize) -> &'static AtomicWaker {
    &STATES[idx].waker
}
| 1716 | |||
/// Look up the half-transfer waker for channel `idx` (used by the
/// channel interrupt handlers).
pub(crate) fn half_waker(idx: usize) -> &'static AtomicWaker {
    &STATES[idx].half_waker
}
| 1720 | |||
| 1721 | // ============================================================================ | ||
| 1722 | // Async Transfer Future | ||
| 1723 | // ============================================================================ | ||
| 1724 | |||
/// An in-progress DMA transfer.
///
/// This type implements `Future` and can be `.await`ed to wait for the
/// transfer to complete. Dropping the transfer will abort it.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    /// Type-erased handle to the channel this transfer runs on.
    channel: AnyChannel,
    /// Lifetime marker — presumably ties the transfer to the borrowed
    /// DMA buffers set up by the caller; confirm against the setup code.
    _phantom: core::marker::PhantomData<&'a ()>,
}
| 1734 | |||
impl<'a> Transfer<'a> {
    /// Create a new transfer for the given channel.
    ///
    /// The caller must have already configured and started the DMA channel.
    pub(crate) fn new(channel: AnyChannel) -> Self {
        Self {
            channel,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Check if the transfer is still running (i.e. DONE is not set).
    pub fn is_running(&self) -> bool {
        !self.channel.is_done()
    }

    /// Get the remaining major-loop transfer count (current CITER value).
    pub fn remaining(&self) -> u16 {
        let t = self.channel.tcd();
        t.tcd_citer_elinkno().read().citer().bits()
    }

    /// Block until the transfer completes.
    ///
    /// NOTE(review): unlike the `Future` path, this does not inspect
    /// CH_ES, so bus/configuration errors are silently dropped here.
    pub fn blocking_wait(self) {
        while self.is_running() {
            core::hint::spin_loop();
        }

        // Ensure all DMA writes are visible
        fence(Ordering::SeqCst);

        // Don't run drop (which would abort)
        core::mem::forget(self);
    }

    /// Wait for the half-transfer interrupt asynchronously.
    ///
    /// This is useful for double-buffering scenarios where you want to process
    /// the first half of the buffer while the second half is being filled.
    ///
    /// Returns `true` if the half-transfer occurred, `false` if the transfer
    /// completed before the half-transfer interrupt.
    ///
    /// # Note
    ///
    /// The transfer must be configured with `TransferOptions::half_transfer_interrupt = true`
    /// for this method to work correctly.
    pub async fn wait_half(&mut self) -> Result<bool, TransferErrorRaw> {
        use core::future::poll_fn;

        poll_fn(|cx| {
            let state = &STATES[self.channel.index];

            // Register the half-transfer waker before checking progress so
            // an interrupt firing mid-poll still wakes the next poll.
            state.half_waker.register(cx.waker());

            // Check if there's an error
            let t = self.channel.tcd();
            let es = t.ch_es().read();
            if es.err().is_error() {
                // Currently, all error fields are in the lowest 8 bits, as-casting truncates
                let errs = es.bits() as u8;
                return Poll::Ready(Err(TransferErrorRaw(errs)));
            }

            // Check if we're past the half-way point. CITER counts down
            // toward zero as the major loop progresses, so a value at or
            // below BITER/2 means at least half the buffer has been moved.
            let biter = t.tcd_biter_elinkno().read().biter().bits();
            let citer = t.tcd_citer_elinkno().read().citer().bits();
            let half_point = biter / 2;

            if self.channel.is_done() {
                // Transfer completed before half-transfer
                Poll::Ready(Ok(false))
            } else if citer <= half_point {
                // We're past the half-way point
                fence(Ordering::SeqCst);
                Poll::Ready(Ok(true))
            } else {
                Poll::Pending
            }
        })
        .await
    }

    /// Abort the transfer: stop hardware requests and clear the
    /// channel's interrupt and DONE flags.
    fn abort(&mut self) {
        let t = self.channel.tcd();

        // Disable channel requests
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear any pending interrupt
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Clear DONE flag
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);
    }
}
| 1835 | |||
/// Raw transfer error bits. Can be queried or all errors can be iterated over.
///
/// The wrapped byte holds the low 8 bits of the channel error status
/// (CH_ES) register; each bit flags one error cause (see [`TransferError`]).
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[derive(Copy, Clone, Debug)]
pub struct TransferErrorRaw(u8);

/// Iterator over the individual [`TransferError`]s contained in a
/// [`TransferErrorRaw`]. Created by [`TransferErrorRaw::err_iter`].
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[derive(Copy, Clone, Debug)]
pub struct TransferErrorRawIter(u8);

impl TransferErrorRaw {
    /// Mapping from error-status bit mask to the decoded error variant.
    const MAP: &[(u8, TransferError)] = &[
        (1 << 0, TransferError::DestinationBus),
        (1 << 1, TransferError::SourceBus),
        (1 << 2, TransferError::ScatterGatherConfiguration),
        (1 << 3, TransferError::NbytesCiterConfiguration),
        (1 << 4, TransferError::DestinationOffset),
        (1 << 5, TransferError::DestinationAddress),
        (1 << 6, TransferError::SourceOffset),
        (1 << 7, TransferError::SourceAddress),
    ];

    /// Convert to an iterator of contained errors
    pub fn err_iter(self) -> TransferErrorRawIter {
        TransferErrorRawIter(self.0)
    }

    /// Destination Bus Error
    #[inline]
    pub fn has_destination_bus_err(&self) -> bool {
        (self.0 & (1 << 0)) != 0
    }

    /// Source Bus Error
    #[inline]
    pub fn has_source_bus_err(&self) -> bool {
        (self.0 & (1 << 1)) != 0
    }

    /// Indicates that `TCDn_DLAST_SGA` is not on a 32-byte boundary. This field is
    /// checked at the beginning of a scatter/gather operation after major loop completion
    /// if `TCDn_CSR[ESG]` is enabled.
    #[inline]
    pub fn has_scatter_gather_configuration_err(&self) -> bool {
        (self.0 & (1 << 2)) != 0
    }

    /// This error indicates that one of the following has occurred:
    ///
    /// * `TCDn_NBYTES` is not a multiple of `TCDn_ATTR[SSIZE]` and `TCDn_ATTR[DSIZE]`
    /// * `TCDn_CITER[CITER]` is equal to zero
    /// * `TCDn_CITER[ELINK]` is not equal to `TCDn_BITER[ELINK]`
    #[inline]
    pub fn has_nbytes_citer_configuration_err(&self) -> bool {
        (self.0 & (1 << 3)) != 0
    }

    /// `TCDn_DOFF` is inconsistent with `TCDn_ATTR[DSIZE]`.
    #[inline]
    pub fn has_destination_offset_err(&self) -> bool {
        (self.0 & (1 << 4)) != 0
    }

    /// `TCDn_DADDR` is inconsistent with `TCDn_ATTR[DSIZE]`.
    #[inline]
    pub fn has_destination_address_err(&self) -> bool {
        (self.0 & (1 << 5)) != 0
    }

    /// `TCDn_SOFF` is inconsistent with `TCDn_ATTR[SSIZE]`.
    #[inline]
    pub fn has_source_offset_err(&self) -> bool {
        (self.0 & (1 << 6)) != 0
    }

    /// `TCDn_SADDR` is inconsistent with `TCDn_ATTR[SSIZE]`
    #[inline]
    pub fn has_source_address_err(&self) -> bool {
        (self.0 & (1 << 7)) != 0
    }
}

impl Iterator for TransferErrorRawIter {
    type Item = TransferError;

    fn next(&mut self) -> Option<Self::Item> {
        if self.0 == 0 {
            return None;
        }

        for (mask, var) in TransferErrorRaw::MAP {
            // Bug fix: this previously tested `self.0 | mask != 0`, which is
            // true for the very first entry whenever any bit is set, so the
            // iterator yielded `DestinationBus` forever without clearing
            // anything. Test the individual bit with AND instead.
            if self.0 & mask != 0 {
                // Clear the bit so each error is yielded exactly once.
                self.0 &= !mask;
                return Some(*var);
            }
        }

        // Unreachable in practice: `self.0 != 0` implies a mapped bit is set
        // (MAP covers all 8 bits), but fall through safely regardless.
        None
    }
}

/// A single decoded DMA transfer error cause, as reported in CH_ES.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TransferError {
    /// `TCDn_SADDR` is inconsistent with `TCDn_ATTR[SSIZE]`
    SourceAddress,
    /// `TCDn_SOFF` is inconsistent with `TCDn_ATTR[SSIZE]`.
    SourceOffset,
    /// `TCDn_DADDR` is inconsistent with `TCDn_ATTR[DSIZE]`.
    DestinationAddress,
    /// `TCDn_DOFF` is inconsistent with `TCDn_ATTR[DSIZE]`.
    DestinationOffset,
    /// This error indicates that one of the following has occurred:
    ///
    /// * `TCDn_NBYTES` is not a multiple of `TCDn_ATTR[SSIZE]` and `TCDn_ATTR[DSIZE]`
    /// * `TCDn_CITER[CITER]` is equal to zero
    /// * `TCDn_CITER[ELINK]` is not equal to `TCDn_BITER[ELINK]`
    NbytesCiterConfiguration,
    /// Indicates that `TCDn_DLAST_SGA` is not on a 32-byte boundary. This field is
    /// checked at the beginning of a scatter/gather operation after major loop completion
    /// if `TCDn_CSR[ESG]` is enabled.
    ScatterGatherConfiguration,
    /// Source Bus Error
    SourceBus,
    /// Destination Bus Error
    DestinationBus,
}
| 1966 | |||
// `Transfer` holds no self-referential data (just a channel handle and a
// lifetime marker), so it can be moved freely even while being polled.
impl<'a> Unpin for Transfer<'a> {}
| 1968 | |||
impl<'a> Future for Transfer<'a> {
    type Output = Result<(), TransferErrorRaw>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state = &STATES[self.channel.index];

        // Register waker first, then check DONE: if completion races with
        // this poll, the interrupt still fires the waker and the next
        // poll observes DONE (no lost wakeup).
        state.waker.register(cx.waker());

        let done = self.channel.is_done();

        if done {
            // Ensure all DMA writes are visible before returning
            fence(Ordering::SeqCst);

            // Completion with error bits set is surfaced as Err.
            let es = self.channel.tcd().ch_es().read();
            if es.err().is_error() {
                // Currently, all error fields are in the lowest 8 bits, as-casting truncates
                let errs = es.bits() as u8;
                Poll::Ready(Err(TransferErrorRaw(errs)))
            } else {
                Poll::Ready(Ok(()))
            }
        } else {
            Poll::Pending
        }
    }
}
| 1997 | |||
impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        // Only abort if the transfer is still running
        // If already complete, no need to abort
        if self.is_running() {
            self.abort();

            // Wait for abort to complete.
            //
            // NOTE(review): `abort()` clears the DONE flag, and
            // `is_running()` is `!is_done()` — if the hardware does not
            // re-assert DONE after requests are disabled, this loop may
            // never exit. Consider polling an activity bit instead;
            // verify against the eDMA reference manual.
            while self.is_running() {
                core::hint::spin_loop();
            }
        }

        fence(Ordering::SeqCst);
    }
}
| 2014 | |||
| 2015 | // ============================================================================ | ||
| 2016 | // Ring Buffer for Circular DMA | ||
| 2017 | // ============================================================================ | ||
| 2018 | |||
/// A ring buffer for continuous DMA reception.
///
/// This structure manages a circular DMA transfer, allowing continuous
/// reception of data without losing bytes between reads. It uses both
/// half-transfer and complete-transfer interrupts to track available data.
///
/// # Example
///
/// ```no_run
/// use embassy_mcxa::dma::{DmaChannel, RingBuffer, TransferOptions};
///
/// static mut RX_BUF: [u8; 64] = [0; 64];
///
/// let dma_ch = DmaChannel::new(p.DMA_CH0);
/// let ring_buf = unsafe {
///     dma_ch.setup_circular_read(
///         uart_rx_addr,
///         &mut RX_BUF,
///     )
/// };
///
/// // Read data as it arrives
/// let mut buf = [0u8; 16];
/// let n = ring_buf.read(&mut buf).await?;
/// ```
pub struct RingBuffer<'a, W: Word> {
    /// Type-erased handle to the channel performing the circular transfer.
    channel: AnyChannel,
    /// Buffer pointer. We use NonNull instead of &mut because DMA acts like
    /// a separate thread writing to this buffer, and &mut claims exclusive
    /// access which the compiler could optimize incorrectly.
    buf: NonNull<[W]>,
    /// Buffer length cached for convenience
    buf_len: usize,
    /// Read position in the buffer (consumer side).
    // NOTE(review): updated with a plain store (no CAS), so this type
    // assumes a single consumer — confirm before sharing across tasks.
    read_pos: AtomicUsize,
    /// Phantom data to tie the lifetime to the original buffer
    _lt: PhantomData<&'a mut [W]>,
}
| 2057 | |||
impl<'a, W: Word> RingBuffer<'a, W> {
    /// Create a new ring buffer for the given channel and buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure:
    /// - The DMA channel has been configured for circular transfer
    /// - The buffer remains valid for the lifetime of the ring buffer
    /// - Only one RingBuffer exists per DMA channel at a time
    pub(crate) unsafe fn new(channel: AnyChannel, buf: &'a mut [W]) -> Self {
        let buf_len = buf.len();
        Self {
            channel,
            buf: NonNull::from(buf),
            buf_len,
            read_pos: AtomicUsize::new(0),
            _lt: PhantomData,
        }
    }

    /// Get a slice reference to the buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that DMA is not actively writing to the
    /// portion of the buffer being accessed, or that the access is
    /// appropriately synchronized.
    #[inline]
    unsafe fn buf_slice(&self) -> &[W] {
        self.buf.as_ref()
    }

    /// Get the current DMA write position in the buffer.
    ///
    /// This reads the current destination address from the DMA controller
    /// and calculates the buffer offset.
    fn dma_write_pos(&self) -> usize {
        let t = self.channel.tcd();
        let daddr = t.tcd_daddr().read().daddr().bits() as usize;
        let buf_start = self.buf.as_ptr() as *const W as usize;

        // Calculate offset from buffer start
        let offset = daddr.wrapping_sub(buf_start) / core::mem::size_of::<W>();

        // Ensure we're within bounds (DMA wraps around)
        offset % self.buf_len
    }

    /// Returns the number of bytes available to read.
    ///
    /// Note: when the DMA write position equals the read position this
    /// reports 0, i.e. the "completely full" case is indistinguishable
    /// from empty here (see `is_overrun` for the near-full check).
    pub fn available(&self) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            // Write side has wrapped past the end of the buffer.
            self.buf_len - read_pos + write_pos
        }
    }

    /// Check if the buffer has overrun (data was lost).
    ///
    /// This happens when DMA writes faster than the application reads.
    pub fn is_overrun(&self) -> bool {
        // In a true overrun, the DMA would have wrapped around and caught up
        // to our read position. We can detect this by checking if available()
        // equals the full buffer size (minus 1 to distinguish from empty).
        self.available() >= self.buf_len - 1
    }

    /// Read data from the ring buffer into the provided slice.
    ///
    /// Returns the number of elements read, which may be less than
    /// `dst.len()` if not enough data is available.
    ///
    /// This method does not block; use `read_async()` for async waiting.
    pub fn read_immediate(&self, dst: &mut [W]) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Calculate available bytes
        let available = if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        };

        let to_read = dst.len().min(available);
        if to_read == 0 {
            return 0;
        }

        // Safety: We only read from portions of the buffer that DMA has
        // already written to (between read_pos and write_pos).
        let buf = unsafe { self.buf_slice() };

        // Read data, handling wrap-around: first the run up to the end of
        // the buffer, then (if needed) the remainder from the start.
        let first_chunk = (self.buf_len - read_pos).min(to_read);
        dst[..first_chunk].copy_from_slice(&buf[read_pos..read_pos + first_chunk]);

        if to_read > first_chunk {
            let second_chunk = to_read - first_chunk;
            dst[first_chunk..to_read].copy_from_slice(&buf[..second_chunk]);
        }

        // Update read position (Release pairs with the Acquire loads above).
        let new_read_pos = (read_pos + to_read) % self.buf_len;
        self.read_pos.store(new_read_pos, Ordering::Release);

        to_read
    }

    /// Read data from the ring buffer asynchronously.
    ///
    /// This waits until at least one byte is available, then reads as much
    /// as possible into the destination buffer.
    ///
    /// Returns the number of elements read.
    pub async fn read(&self, dst: &mut [W]) -> Result<usize, Error> {
        use core::future::poll_fn;

        if dst.is_empty() {
            return Ok(0);
        }

        poll_fn(|cx| {
            // Check for overrun
            if self.is_overrun() {
                return Poll::Ready(Err(Error::Overrun));
            }

            // Try to read immediately
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            // Register wakers for both half and complete interrupts
            let state = &STATES[self.channel.index()];
            state.waker.register(cx.waker());
            state.half_waker.register(cx.waker());

            // Check again after registering waker (avoid race)
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            Poll::Pending
        })
        .await
    }

    /// Clear the ring buffer, discarding all unread data.
    pub fn clear(&self) {
        // Jump the consumer position to wherever DMA currently is.
        let write_pos = self.dma_write_pos();
        self.read_pos.store(write_pos, Ordering::Release);
    }

    /// Stop the DMA transfer and consume the ring buffer.
    ///
    /// Returns any remaining unread data count.
    pub fn stop(mut self) -> usize {
        let res = self.teardown();
        // NOTE(review): `drop(self)` runs the Drop impl, which calls
        // teardown() a second time. The second call is redundant but
        // harmless (it merely re-clears already-cleared flags).
        drop(self);
        res
    }

    /// Stop the DMA transfer. Intended to be called by `stop()` or `Drop`.
    fn teardown(&mut self) -> usize {
        // Snapshot unread count before quiescing the channel.
        let available = self.available();

        // Disable the channel
        let t = self.channel.tcd();
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear flags
        t.ch_int().write(|w| w.int().clear_bit_by_one());
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);

        available
    }
}
| 2243 | |||
impl<'a, W: Word> Drop for RingBuffer<'a, W> {
    /// Stop the underlying DMA channel when the ring buffer goes away so
    /// the hardware cannot keep writing into a buffer we no longer borrow.
    fn drop(&mut self) {
        self.teardown();
    }
}
| 2249 | |||
impl<C: Channel> DmaChannel<C> {
    /// Set up a circular DMA transfer for continuous peripheral-to-memory reception.
    ///
    /// This configures the DMA channel for circular operation with both half-transfer
    /// and complete-transfer interrupts enabled. The transfer runs continuously until
    /// stopped via [`RingBuffer::stop()`].
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address to read from
    /// * `buf` - Destination buffer (should be power-of-2 size for best efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to read received data.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements (the major-loop
    /// counter used here is the 15-bit ELINKNO form).
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - The peripheral address must be valid for reads.
    /// - The peripheral's DMA request must be configured to trigger this channel.
    pub unsafe fn setup_circular_read<'a, W: Word>(&self, peri_addr: *const W, buf: &'a mut [W]) -> RingBuffer<'a, W> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);
        // For circular mode, buffer size should ideally be power of 2
        // but we don't enforce it

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state so stale DONE/error/request state from a
        // previous transfer cannot corrupt this one.
        Self::reset_channel_state(t);

        // Source: peripheral data register, address fixed (no increment).
        Self::set_source_ptr(t, peri_addr);
        Self::set_source_fixed(t);

        // Destination: memory buffer, incrementing one element per transfer.
        Self::set_dest_ptr(t, buf.as_mut_ptr());
        Self::set_dest_increment(t, size);

        // Transfer attributes: same element width on source and destination.
        Self::set_even_transfer_size(t, size);

        // Minor loop: transfer one word per peripheral request.
        Self::set_minor_loop_ct_no_offsets(t, byte_size as u32);

        // Major loop count = buffer size (one major iteration per element).
        let count = buf.len() as u16;
        Self::set_major_loop_ct_elinkno(t, count);

        // After major loop: wind destination back to the buffer start so the
        // channel wraps around (circular). The source address never moves.
        let buf_bytes = (buf.len() * byte_size) as i32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); // Source doesn't change
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits((-buf_bytes) as u32));

        // Control/status: enable both half and complete interrupts, NO DREQ so
        // ERQ stays set across major-loop completions (continuous operation).
        t.tcd_csr().write(|w| {
            w.intmajor()
                .enable()
                .inthalf()
                .enable()
                .dreq()
                .channel_not_affected() // Don't clear ERQ on complete (circular)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure the TCD is fully written before the hardware can act on it.
        cortex_m::asm::dsb();

        // Enable the channel request so peripheral DMA requests are serviced.
        t.ch_csr().modify(|_, w| w.erq().enable());

        // Enable NVIC interrupt for this channel so async wakeups work
        self.enable_interrupt();

        RingBuffer::new(self.as_any(), buf)
    }
}
| 2339 | |||
| 2340 | // ============================================================================ | ||
| 2341 | // Scatter-Gather Builder | ||
| 2342 | // ============================================================================ | ||
| 2343 | |||
| 2344 | /// Maximum number of TCDs in a scatter-gather chain. | ||
| 2345 | pub const MAX_SCATTER_GATHER_TCDS: usize = 16; | ||
| 2346 | |||
| 2347 | /// A builder for constructing scatter-gather DMA transfer chains. | ||
| 2348 | /// | ||
| 2349 | /// This provides a type-safe way to build TCD chains for scatter-gather | ||
| 2350 | /// transfers without manual TCD manipulation. | ||
| 2351 | /// | ||
| 2352 | /// # Example | ||
| 2353 | /// | ||
| 2354 | /// ```no_run | ||
| 2355 | /// use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder}; | ||
| 2356 | /// | ||
| 2357 | /// let mut builder = ScatterGatherBuilder::<u32>::new(); | ||
| 2358 | /// | ||
| 2359 | /// // Add transfer segments | ||
| 2360 | /// builder.add_transfer(&src1, &mut dst1); | ||
| 2361 | /// builder.add_transfer(&src2, &mut dst2); | ||
| 2362 | /// builder.add_transfer(&src3, &mut dst3); | ||
| 2363 | /// | ||
| 2364 | /// // Build and execute | ||
| 2365 | /// let transfer = unsafe { builder.build(&dma_ch).unwrap() }; | ||
| 2366 | /// transfer.await; | ||
| 2367 | /// ``` | ||
pub struct ScatterGatherBuilder<'a, W: Word> {
    /// TCD pool (must be 32-byte aligned)
    tcds: [Tcd; MAX_SCATTER_GATHER_TCDS],
    /// Number of TCDs configured
    count: usize,
    /// Phantom marker for word type
    // NOTE(review): redundant — `_plt` below already involves `W`. Removing
    // it would also require changing `new()`, so it is left in place for now.
    _phantom: core::marker::PhantomData<W>,

    // Ties the builder to the lifetime of the buffers handed to
    // `add_transfer`, so they cannot be dropped while a built chain may
    // still reference them.
    _plt: core::marker::PhantomData<&'a mut W>,
}
| 2378 | |||
impl<'a, W: Word> ScatterGatherBuilder<'a, W> {
    /// Create a new scatter-gather builder with an empty chain.
    pub fn new() -> Self {
        ScatterGatherBuilder {
            tcds: [Tcd::default(); MAX_SCATTER_GATHER_TCDS],
            count: 0,
            _phantom: core::marker::PhantomData,
            _plt: core::marker::PhantomData,
        }
    }

    /// Add a memory-to-memory transfer segment to the chain.
    ///
    /// Each segment is described by one software TCD; linking between
    /// segments is deferred to [`Self::build`].
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer for this segment
    /// * `dst` - Destination buffer for this segment (must be at least as long as `src`)
    ///
    /// # Panics
    ///
    /// Panics if the maximum number of segments (16) is exceeded, if `src`
    /// is empty, or if `dst` is shorter than `src`.
    pub fn add_transfer<'b: 'a>(&mut self, src: &'b [W], dst: &'b mut [W]) -> &mut Self {
        assert!(self.count < MAX_SCATTER_GATHER_TCDS, "Too many scatter-gather segments");
        assert!(!src.is_empty());
        assert!(dst.len() >= src.len());

        let size = W::size();
        let byte_size = size.bytes();
        let hw_size = size.to_hw_size();
        // The whole segment is moved in a single minor loop (CITER/BITER = 1).
        let nbytes = (src.len() * byte_size) as u32;

        // Build the TCD for this segment
        self.tcds[self.count] = Tcd {
            saddr: src.as_ptr() as u32,
            soff: byte_size as i16,
            attr: ((hw_size as u16) << 8) | (hw_size as u16), // SSIZE | DSIZE
            nbytes,
            slast: 0,
            daddr: dst.as_mut_ptr() as u32,
            doff: byte_size as i16,
            citer: 1,
            dlast_sga: 0, // Will be filled in by build()
            csr: 0x0002,  // INTMAJOR only (ESG will be set for non-last TCDs)
            biter: 1,
        };

        self.count += 1;
        self
    }

    /// Get the number of transfer segments added.
    pub fn segment_count(&self) -> usize {
        self.count
    }

    /// Build the scatter-gather chain and start the transfer.
    ///
    /// # Arguments
    ///
    /// * `channel` - The DMA channel to use for the transfer
    ///
    /// # Returns
    ///
    /// A `Transfer` future that completes when the entire chain has executed.
    ///
    /// # Errors
    ///
    /// Returns [`Error::Configuration`] if no segments have been added.
    ///
    /// NOTE(review): the hardware chain holds raw pointers into `self.tcds`,
    /// but the returned `Transfer<'a>` does not borrow `self`. Moving or
    /// dropping the builder while the transfer is in flight would leave the
    /// eDMA engine reading freed/relocated memory — confirm callers keep the
    /// builder alive and in place until the future resolves, or consider
    /// tying the `self` borrow to the returned lifetime.
    pub fn build<C: Channel>(&mut self, channel: &DmaChannel<C>) -> Result<Transfer<'a>, Error> {
        if self.count == 0 {
            return Err(Error::Configuration);
        }

        // Link TCDs together
        //
        // CSR bit definitions:
        //   - START    = bit 0 = 0x0001 (triggers transfer when set)
        //   - INTMAJOR = bit 1 = 0x0002 (interrupt on major loop complete)
        //   - ESG      = bit 4 = 0x0010 (enable scatter-gather, loads next TCD on complete)
        //
        // When hardware loads a TCD via scatter-gather (ESG), it copies the TCD's
        // CSR directly into the hardware register. If START is not set in that CSR,
        // the hardware will NOT auto-execute the loaded TCD.
        //
        // Strategy:
        //   - First TCD:  ESG | INTMAJOR         (no START - we add it manually after loading)
        //   - Middle TCDs: ESG | INTMAJOR | START (auto-execute when loaded via S/G)
        //   - Last TCD:   INTMAJOR | START        (auto-execute, no further linking)
        for i in 0..self.count {
            let is_first = i == 0;
            let is_last = i == self.count - 1;

            if is_first {
                if is_last {
                    // Only one TCD - no ESG, no START (we add START manually)
                    self.tcds[i].dlast_sga = 0;
                    self.tcds[i].csr = 0x0002; // INTMAJOR only
                } else {
                    // First of multiple - ESG to link, no START (we add START manually)
                    self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                    self.tcds[i].csr = 0x0012; // ESG | INTMAJOR
                }
            } else if is_last {
                // Last TCD (not first) - no ESG, but START so it auto-executes
                self.tcds[i].dlast_sga = 0;
                self.tcds[i].csr = 0x0003; // INTMAJOR | START
            } else {
                // Middle TCD - ESG to link, and START so it auto-executes
                self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                self.tcds[i].csr = 0x0013; // ESG | INTMAJOR | START
            }
        }

        let t = channel.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        // This ensures the channel is in a clean state before loading the TCD
        DmaChannel::<C>::reset_channel_state(t);

        // Memory barrier to ensure channel state is reset before loading TCD
        cortex_m::asm::dsb();

        // Load first TCD into hardware
        unsafe {
            channel.load_tcd(&self.tcds[0]);
        }

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Start the transfer
        t.tcd_csr().modify(|_, w| w.start().channel_started());

        Ok(Transfer::new(channel.as_any()))
    }

    /// Reset the builder for reuse (discards all configured segments).
    pub fn clear(&mut self) {
        self.count = 0;
    }
}
| 2516 | |||
| 2517 | impl<W: Word> Default for ScatterGatherBuilder<'_, W> { | ||
| 2518 | fn default() -> Self { | ||
| 2519 | Self::new() | ||
| 2520 | } | ||
| 2521 | } | ||
| 2522 | |||
/// A completed scatter-gather transfer result.
///
/// This type is returned after a scatter-gather transfer completes,
/// providing access to any error information.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ScatterGatherResult {
    /// Number of segments successfully transferred
    pub segments_completed: usize,
    /// Error if any occurred; `None` means the whole chain completed cleanly
    pub error: Option<Error>,
}
| 2534 | |||
| 2535 | // ============================================================================ | ||
| 2536 | // Interrupt Handler | ||
| 2537 | // ============================================================================ | ||
| 2538 | |||
/// Interrupt handler helper.
///
/// Call this from your interrupt handler to clear the interrupt flag and wake the waker.
/// This handles both half-transfer and complete-transfer interrupts.
///
/// # Safety
/// Must be called from the correct DMA channel interrupt context.
pub unsafe fn on_interrupt(ch_index: usize) {
    let p = pac::Peripherals::steal();
    let edma = &p.edma_0_tcd0;
    let t = edma.tcd(ch_index);

    // Read TCD CSR to determine interrupt source
    let csr = t.tcd_csr().read();

    // NOTE(review): `inthalf` is the half-interrupt *enable* bit of the TCD,
    // not a status flag, so the actual position within the major loop is
    // inferred from CITER below — confirm against the eDMA reference manual.
    if csr.inthalf().bit_is_set() {
        // CITER counts down from BITER; at or below the midpoint we are in
        // the second half of the major loop.
        let biter = t.tcd_biter_elinkno().read().biter().bits();
        let citer = t.tcd_citer_elinkno().read().citer().bits();
        let half_point = biter / 2;

        if citer <= half_point && citer > 0 {
            // Half-transfer interrupt - wake half_waker
            half_waker(ch_index).wake();
        }
    }

    // Clear INT flag (write-1-to-clear)
    t.ch_int().write(|w| w.int().clear_bit_by_one());

    // If DONE is set, this is a complete-transfer interrupt
    // Only wake the full-transfer waker when the transfer is actually complete
    if t.ch_csr().read().done().bit_is_set() {
        waker(ch_index).wake();
    }
}
| 2576 | |||
| 2577 | // ============================================================================ | ||
| 2578 | // Type-level Interrupt Handlers | ||
| 2579 | // ============================================================================ | ||
| 2580 | |||
/// Macro to generate DMA channel interrupt handlers.
///
/// Expands to a `#[interrupt]` handler named `$irq` that forwards its fixed
/// channel index `$ch` to the shared [`on_interrupt`] dispatcher.
macro_rules! impl_dma_interrupt_handler {
    ($irq:ident, $ch:expr) => {
        #[interrupt]
        fn $irq() {
            unsafe {
                // SAFETY: this handler only runs in its own channel's
                // interrupt context, which is what `on_interrupt` requires.
                on_interrupt($ch);
            }
        }
    };
}
| 2592 | |||
use crate::pac::interrupt;

// One handler per eDMA channel (0..=7); each simply forwards its fixed
// channel index to the shared dispatcher above.
impl_dma_interrupt_handler!(DMA_CH0, 0);
impl_dma_interrupt_handler!(DMA_CH1, 1);
impl_dma_interrupt_handler!(DMA_CH2, 2);
impl_dma_interrupt_handler!(DMA_CH3, 3);
impl_dma_interrupt_handler!(DMA_CH4, 4);
impl_dma_interrupt_handler!(DMA_CH5, 5);
impl_dma_interrupt_handler!(DMA_CH6, 6);
impl_dma_interrupt_handler!(DMA_CH7, 7);
diff --git a/embassy-mcxa/src/interrupt.rs b/embassy-mcxa/src/interrupt.rs index c1f7e55a0..c960af7a2 100644 --- a/embassy-mcxa/src/interrupt.rs +++ b/embassy-mcxa/src/interrupt.rs | |||
| @@ -10,6 +10,14 @@ mod generated { | |||
| 10 | #[rustfmt::skip] | 10 | #[rustfmt::skip] |
| 11 | embassy_hal_internal::interrupt_mod!( | 11 | embassy_hal_internal::interrupt_mod!( |
| 12 | ADC1, | 12 | ADC1, |
| 13 | DMA_CH0, | ||
| 14 | DMA_CH1, | ||
| 15 | DMA_CH2, | ||
| 16 | DMA_CH3, | ||
| 17 | DMA_CH4, | ||
| 18 | DMA_CH5, | ||
| 19 | DMA_CH6, | ||
| 20 | DMA_CH7, | ||
| 13 | GPIO0, | 21 | GPIO0, |
| 14 | GPIO1, | 22 | GPIO1, |
| 15 | GPIO2, | 23 | GPIO2, |
diff --git a/embassy-mcxa/src/lib.rs b/embassy-mcxa/src/lib.rs index 64eeb4012..1bbdffa06 100644 --- a/embassy-mcxa/src/lib.rs +++ b/embassy-mcxa/src/lib.rs | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | // #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)] | 6 | // #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)] |
| 7 | 7 | ||
| 8 | pub mod clocks; // still provide clock helpers | 8 | pub mod clocks; // still provide clock helpers |
| 9 | pub mod dma; | ||
| 9 | pub mod gpio; | 10 | pub mod gpio; |
| 10 | pub mod pins; // pin mux helpers | 11 | pub mod pins; // pin mux helpers |
| 11 | 12 | ||
| @@ -52,6 +53,14 @@ embassy_hal_internal::peripherals!( | |||
| 52 | 53 | ||
| 53 | DBGMAILBOX, | 54 | DBGMAILBOX, |
| 54 | DMA0, | 55 | DMA0, |
| 56 | DMA_CH0, | ||
| 57 | DMA_CH1, | ||
| 58 | DMA_CH2, | ||
| 59 | DMA_CH3, | ||
| 60 | DMA_CH4, | ||
| 61 | DMA_CH5, | ||
| 62 | DMA_CH6, | ||
| 63 | DMA_CH7, | ||
| 55 | EDMA0_TCD0, | 64 | EDMA0_TCD0, |
| 56 | EIM0, | 65 | EIM0, |
| 57 | EQDC0, | 66 | EQDC0, |
| @@ -364,6 +373,9 @@ pub fn init(cfg: crate::config::Config) -> Peripherals { | |||
| 364 | crate::gpio::init(); | 373 | crate::gpio::init(); |
| 365 | } | 374 | } |
| 366 | 375 | ||
| 376 | // Initialize DMA controller (clock, reset, configuration) | ||
| 377 | crate::dma::init(); | ||
| 378 | |||
| 367 | // Initialize embassy-time global driver backed by OSTIMER0 | 379 | // Initialize embassy-time global driver backed by OSTIMER0 |
| 368 | #[cfg(feature = "time")] | 380 | #[cfg(feature = "time")] |
| 369 | crate::ostimer::time_driver::init(crate::config::Config::default().time_interrupt_priority, 1_000_000); | 381 | crate::ostimer::time_driver::init(crate::config::Config::default().time_interrupt_priority, 1_000_000); |
| @@ -389,41 +401,6 @@ pub fn init(cfg: crate::config::Config) -> Peripherals { | |||
| 389 | peripherals | 401 | peripherals |
| 390 | } | 402 | } |
| 391 | 403 | ||
| 392 | // /// Optional hook called by cortex-m-rt before RAM init. | ||
| 393 | // /// We proactively mask and clear all NVIC IRQs to avoid wedges from stale state | ||
| 394 | // /// left by soft resets/debug sessions. | ||
| 395 | // /// | ||
| 396 | // /// NOTE: Manual VTOR setup is required for RAM execution. The cortex-m-rt 'set-vtor' | ||
| 397 | // /// feature is incompatible with our setup because it expects __vector_table to be | ||
| 398 | // /// defined differently than how our RAM-based linker script arranges it. | ||
| 399 | // #[no_mangle] | ||
| 400 | // pub unsafe extern "C" fn __pre_init() { | ||
| 401 | // // Set the VTOR to point to the interrupt vector table in RAM | ||
| 402 | // // This is required since code runs from RAM on this MCU | ||
| 403 | // crate::interrupt::vtor_set_ram_vector_base(0x2000_0000 as *const u32); | ||
| 404 | |||
| 405 | // // Mask and clear pending for all NVIC lines (0..127) to avoid stale state across runs. | ||
| 406 | // let nvic = &*cortex_m::peripheral::NVIC::PTR; | ||
| 407 | // for i in 0..4 { | ||
| 408 | // // 4 words x 32 = 128 IRQs | ||
| 409 | // nvic.icer[i].write(0xFFFF_FFFF); | ||
| 410 | // nvic.icpr[i].write(0xFFFF_FFFF); | ||
| 411 | // } | ||
| 412 | // // Do NOT touch peripheral registers here: clocks may be off and accesses can fault. | ||
| 413 | // crate::interrupt::clear_default_handler_snapshot(); | ||
| 414 | // } | ||
| 415 | |||
| 416 | /// Internal helper to dispatch a type-level interrupt handler. | ||
| 417 | #[inline(always)] | ||
| 418 | #[doc(hidden)] | ||
| 419 | pub unsafe fn __handle_interrupt<T, H>() | ||
| 420 | where | ||
| 421 | T: crate::interrupt::typelevel::Interrupt, | ||
| 422 | H: crate::interrupt::typelevel::Handler<T>, | ||
| 423 | { | ||
| 424 | H::on_interrupt(); | ||
| 425 | } | ||
| 426 | |||
| 427 | /// Macro to bind interrupts to handlers, similar to embassy-imxrt. | 404 | /// Macro to bind interrupts to handlers, similar to embassy-imxrt. |
| 428 | /// | 405 | /// |
| 429 | /// Example: | 406 | /// Example: |
diff --git a/embassy-mcxa/src/lpuart/mod.rs b/embassy-mcxa/src/lpuart/mod.rs index b8a2d5172..e59ce8140 100644 --- a/embassy-mcxa/src/lpuart/mod.rs +++ b/embassy-mcxa/src/lpuart/mod.rs | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | use core::future::Future; | ||
| 1 | use core::marker::PhantomData; | 2 | use core::marker::PhantomData; |
| 2 | 3 | ||
| 3 | use embassy_hal_internal::{Peri, PeripheralType}; | 4 | use embassy_hal_internal::{Peri, PeripheralType}; |
| @@ -15,22 +16,12 @@ use crate::{AnyPin, interrupt, pac}; | |||
| 15 | pub mod buffered; | 16 | pub mod buffered; |
| 16 | 17 | ||
| 17 | // ============================================================================ | 18 | // ============================================================================ |
| 18 | // STUB IMPLEMENTATION | 19 | // DMA INTEGRATION |
| 19 | // ============================================================================ | 20 | // ============================================================================ |
| 20 | 21 | ||
| 21 | // Stub implementation for LIB (Peripherals), GPIO, DMA and CLOCK until stable API | 22 | use crate::dma::{ |
| 22 | // Pin and Clock initialization is currently done at the examples level. | 23 | Channel as DmaChannelTrait, DMA_MAX_TRANSFER_SIZE, DmaChannel, DmaRequest, EnableInterrupt, RingBuffer, |
| 23 | 24 | }; | |
| 24 | // --- START DMA --- | ||
| 25 | mod dma { | ||
| 26 | pub struct Channel<'d> { | ||
| 27 | pub(super) _lifetime: core::marker::PhantomData<&'d ()>, | ||
| 28 | } | ||
| 29 | } | ||
| 30 | |||
| 31 | use dma::Channel; | ||
| 32 | |||
| 33 | // --- END DMA --- | ||
| 34 | 25 | ||
| 35 | // ============================================================================ | 26 | // ============================================================================ |
| 36 | // MISC | 27 | // MISC |
| @@ -62,10 +53,14 @@ pub struct Info { | |||
| 62 | pub trait Instance: SealedInstance + PeripheralType + 'static + Send + Gate<MrccPeriphConfig = LpuartConfig> { | 53 | pub trait Instance: SealedInstance + PeripheralType + 'static + Send + Gate<MrccPeriphConfig = LpuartConfig> { |
| 63 | const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance; | 54 | const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance; |
| 64 | type Interrupt: interrupt::typelevel::Interrupt; | 55 | type Interrupt: interrupt::typelevel::Interrupt; |
| 56 | /// Type-safe DMA request source for TX | ||
| 57 | type TxDmaRequest: DmaRequest; | ||
| 58 | /// Type-safe DMA request source for RX | ||
| 59 | type RxDmaRequest: DmaRequest; | ||
| 65 | } | 60 | } |
| 66 | 61 | ||
| 67 | macro_rules! impl_instance { | 62 | macro_rules! impl_instance { |
| 68 | ($($n:expr),*) => { | 63 | ($($n:expr);* $(;)?) => { |
| 69 | $( | 64 | $( |
| 70 | paste!{ | 65 | paste!{ |
| 71 | impl SealedInstance for crate::peripherals::[<LPUART $n>] { | 66 | impl SealedInstance for crate::peripherals::[<LPUART $n>] { |
| @@ -90,13 +85,23 @@ macro_rules! impl_instance { | |||
| 90 | const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance | 85 | const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance |
| 91 | = crate::clocks::periph_helpers::LpuartInstance::[<Lpuart $n>]; | 86 | = crate::clocks::periph_helpers::LpuartInstance::[<Lpuart $n>]; |
| 92 | type Interrupt = crate::interrupt::typelevel::[<LPUART $n>]; | 87 | type Interrupt = crate::interrupt::typelevel::[<LPUART $n>]; |
| 88 | type TxDmaRequest = crate::dma::[<Lpuart $n TxRequest>]; | ||
| 89 | type RxDmaRequest = crate::dma::[<Lpuart $n RxRequest>]; | ||
| 93 | } | 90 | } |
| 94 | } | 91 | } |
| 95 | )* | 92 | )* |
| 96 | }; | 93 | }; |
| 97 | } | 94 | } |
| 98 | 95 | ||
| 99 | impl_instance!(0, 1, 2, 3, 4, 5); | 96 | // DMA request sources are now type-safe via associated types. |
| 97 | // The request source numbers are defined in src/dma.rs: | ||
| 98 | // LPUART0: RX=21, TX=22 -> Lpuart0RxRequest, Lpuart0TxRequest | ||
| 99 | // LPUART1: RX=23, TX=24 -> Lpuart1RxRequest, Lpuart1TxRequest | ||
| 100 | // LPUART2: RX=25, TX=26 -> Lpuart2RxRequest, Lpuart2TxRequest | ||
| 101 | // LPUART3: RX=27, TX=28 -> Lpuart3RxRequest, Lpuart3TxRequest | ||
| 102 | // LPUART4: RX=29, TX=30 -> Lpuart4RxRequest, Lpuart4TxRequest | ||
| 103 | // LPUART5: RX=31, TX=32 -> Lpuart5RxRequest, Lpuart5TxRequest | ||
| 104 | impl_instance!(0; 1; 2; 3; 4; 5); | ||
| 100 | 105 | ||
| 101 | // ============================================================================ | 106 | // ============================================================================ |
| 102 | // INSTANCE HELPER FUNCTIONS | 107 | // INSTANCE HELPER FUNCTIONS |
| @@ -683,7 +688,6 @@ pub struct LpuartTx<'a, M: Mode> { | |||
| 683 | info: Info, | 688 | info: Info, |
| 684 | _tx_pin: Peri<'a, AnyPin>, | 689 | _tx_pin: Peri<'a, AnyPin>, |
| 685 | _cts_pin: Option<Peri<'a, AnyPin>>, | 690 | _cts_pin: Option<Peri<'a, AnyPin>>, |
| 686 | _tx_dma: Option<Channel<'a>>, | ||
| 687 | mode: PhantomData<(&'a (), M)>, | 691 | mode: PhantomData<(&'a (), M)>, |
| 688 | } | 692 | } |
| 689 | 693 | ||
| @@ -692,10 +696,37 @@ pub struct LpuartRx<'a, M: Mode> { | |||
| 692 | info: Info, | 696 | info: Info, |
| 693 | _rx_pin: Peri<'a, AnyPin>, | 697 | _rx_pin: Peri<'a, AnyPin>, |
| 694 | _rts_pin: Option<Peri<'a, AnyPin>>, | 698 | _rts_pin: Option<Peri<'a, AnyPin>>, |
| 695 | _rx_dma: Option<Channel<'a>>, | ||
| 696 | mode: PhantomData<(&'a (), M)>, | 699 | mode: PhantomData<(&'a (), M)>, |
| 697 | } | 700 | } |
| 698 | 701 | ||
/// Lpuart TX driver with DMA support.
pub struct LpuartTxDma<'a, T: Instance, C: DmaChannelTrait> {
    info: Info,
    // Held only to keep the pin reserved while the driver exists.
    _tx_pin: Peri<'a, AnyPin>,
    // DMA channel used for TX transfers.
    tx_dma: DmaChannel<C>,
    // Pins the driver to one LPUART instance at the type level.
    _instance: core::marker::PhantomData<T>,
}

/// Lpuart RX driver with DMA support.
pub struct LpuartRxDma<'a, T: Instance, C: DmaChannelTrait> {
    info: Info,
    // Held only to keep the pin reserved while the driver exists.
    _rx_pin: Peri<'a, AnyPin>,
    // DMA channel used for RX transfers.
    rx_dma: DmaChannel<C>,
    // Pins the driver to one LPUART instance at the type level.
    _instance: core::marker::PhantomData<T>,
}

/// Lpuart driver with DMA support for both TX and RX.
pub struct LpuartDma<'a, T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> {
    tx: LpuartTxDma<'a, T, TxC>,
    rx: LpuartRxDma<'a, T, RxC>,
}

/// Lpuart RX driver with ring-buffered DMA support.
///
/// `'peri` borrows the peripheral/pins; `'ring` borrows the ring storage.
pub struct LpuartRxRingDma<'peri, 'ring, T: Instance, C: DmaChannelTrait> {
    _inner: LpuartRxDma<'peri, T, C>,
    ring: RingBuffer<'ring, u8>,
}
| 729 | |||
| 699 | // ============================================================================ | 730 | // ============================================================================ |
| 700 | // LPUART CORE IMPLEMENTATION | 731 | // LPUART CORE IMPLEMENTATION |
| 701 | // ============================================================================ | 732 | // ============================================================================ |
| @@ -782,8 +813,8 @@ impl<'a> Lpuart<'a, Blocking> { | |||
| 782 | 813 | ||
| 783 | Ok(Self { | 814 | Ok(Self { |
| 784 | info: T::info(), | 815 | info: T::info(), |
| 785 | tx: LpuartTx::new_inner(T::info(), tx_pin.into(), None, None), | 816 | tx: LpuartTx::new_inner(T::info(), tx_pin.into(), None), |
| 786 | rx: LpuartRx::new_inner(T::info(), rx_pin.into(), None, None), | 817 | rx: LpuartRx::new_inner(T::info(), rx_pin.into(), None), |
| 787 | }) | 818 | }) |
| 788 | } | 819 | } |
| 789 | 820 | ||
| @@ -807,8 +838,8 @@ impl<'a> Lpuart<'a, Blocking> { | |||
| 807 | 838 | ||
| 808 | Ok(Self { | 839 | Ok(Self { |
| 809 | info: T::info(), | 840 | info: T::info(), |
| 810 | rx: LpuartRx::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into()), None), | 841 | rx: LpuartRx::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into())), |
| 811 | tx: LpuartTx::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into()), None), | 842 | tx: LpuartTx::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into())), |
| 812 | }) | 843 | }) |
| 813 | } | 844 | } |
| 814 | } | 845 | } |
| @@ -818,17 +849,11 @@ impl<'a> Lpuart<'a, Blocking> { | |||
| 818 | // ---------------------------------------------------------------------------- | 849 | // ---------------------------------------------------------------------------- |
| 819 | 850 | ||
| 820 | impl<'a, M: Mode> LpuartTx<'a, M> { | 851 | impl<'a, M: Mode> LpuartTx<'a, M> { |
| 821 | fn new_inner( | 852 | fn new_inner(info: Info, tx_pin: Peri<'a, AnyPin>, cts_pin: Option<Peri<'a, AnyPin>>) -> Self { |
| 822 | info: Info, | ||
| 823 | tx_pin: Peri<'a, AnyPin>, | ||
| 824 | cts_pin: Option<Peri<'a, AnyPin>>, | ||
| 825 | tx_dma: Option<Channel<'a>>, | ||
| 826 | ) -> Self { | ||
| 827 | Self { | 853 | Self { |
| 828 | info, | 854 | info, |
| 829 | _tx_pin: tx_pin, | 855 | _tx_pin: tx_pin, |
| 830 | _cts_pin: cts_pin, | 856 | _cts_pin: cts_pin, |
| 831 | _tx_dma: tx_dma, | ||
| 832 | mode: PhantomData, | 857 | mode: PhantomData, |
| 833 | } | 858 | } |
| 834 | } | 859 | } |
| @@ -847,7 +872,7 @@ impl<'a> LpuartTx<'a, Blocking> { | |||
| 847 | // Initialize the peripheral | 872 | // Initialize the peripheral |
| 848 | Lpuart::<Blocking>::init::<T>(true, false, false, false, config)?; | 873 | Lpuart::<Blocking>::init::<T>(true, false, false, false, config)?; |
| 849 | 874 | ||
| 850 | Ok(Self::new_inner(T::info(), tx_pin.into(), None, None)) | 875 | Ok(Self::new_inner(T::info(), tx_pin.into(), None)) |
| 851 | } | 876 | } |
| 852 | 877 | ||
| 853 | /// Create a new blocking LPUART transmitter instance with CTS flow control | 878 | /// Create a new blocking LPUART transmitter instance with CTS flow control |
| @@ -862,7 +887,7 @@ impl<'a> LpuartTx<'a, Blocking> { | |||
| 862 | 887 | ||
| 863 | Lpuart::<Blocking>::init::<T>(true, false, true, false, config)?; | 888 | Lpuart::<Blocking>::init::<T>(true, false, true, false, config)?; |
| 864 | 889 | ||
| 865 | Ok(Self::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into()), None)) | 890 | Ok(Self::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into()))) |
| 866 | } | 891 | } |
| 867 | 892 | ||
| 868 | fn write_byte_internal(&mut self, byte: u8) -> Result<()> { | 893 | fn write_byte_internal(&mut self, byte: u8) -> Result<()> { |
| @@ -941,17 +966,11 @@ impl<'a> LpuartTx<'a, Blocking> { | |||
| 941 | // ---------------------------------------------------------------------------- | 966 | // ---------------------------------------------------------------------------- |
| 942 | 967 | ||
| 943 | impl<'a, M: Mode> LpuartRx<'a, M> { | 968 | impl<'a, M: Mode> LpuartRx<'a, M> { |
| 944 | fn new_inner( | 969 | fn new_inner(info: Info, rx_pin: Peri<'a, AnyPin>, rts_pin: Option<Peri<'a, AnyPin>>) -> Self { |
| 945 | info: Info, | ||
| 946 | rx_pin: Peri<'a, AnyPin>, | ||
| 947 | rts_pin: Option<Peri<'a, AnyPin>>, | ||
| 948 | rx_dma: Option<Channel<'a>>, | ||
| 949 | ) -> Self { | ||
| 950 | Self { | 970 | Self { |
| 951 | info, | 971 | info, |
| 952 | _rx_pin: rx_pin, | 972 | _rx_pin: rx_pin, |
| 953 | _rts_pin: rts_pin, | 973 | _rts_pin: rts_pin, |
| 954 | _rx_dma: rx_dma, | ||
| 955 | mode: PhantomData, | 974 | mode: PhantomData, |
| 956 | } | 975 | } |
| 957 | } | 976 | } |
| @@ -968,7 +987,7 @@ impl<'a> LpuartRx<'a, Blocking> { | |||
| 968 | 987 | ||
| 969 | Lpuart::<Blocking>::init::<T>(false, true, false, false, config)?; | 988 | Lpuart::<Blocking>::init::<T>(false, true, false, false, config)?; |
| 970 | 989 | ||
| 971 | Ok(Self::new_inner(T::info(), rx_pin.into(), None, None)) | 990 | Ok(Self::new_inner(T::info(), rx_pin.into(), None)) |
| 972 | } | 991 | } |
| 973 | 992 | ||
| 974 | /// Create a new blocking LPUART Receiver instance with RTS flow control | 993 | /// Create a new blocking LPUART Receiver instance with RTS flow control |
| @@ -983,7 +1002,7 @@ impl<'a> LpuartRx<'a, Blocking> { | |||
| 983 | 1002 | ||
| 984 | Lpuart::<Blocking>::init::<T>(false, true, false, true, config)?; | 1003 | Lpuart::<Blocking>::init::<T>(false, true, false, true, config)?; |
| 985 | 1004 | ||
| 986 | Ok(Self::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into()), None)) | 1005 | Ok(Self::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into()))) |
| 987 | } | 1006 | } |
| 988 | 1007 | ||
| 989 | fn read_byte_internal(&mut self) -> Result<u8> { | 1008 | fn read_byte_internal(&mut self) -> Result<u8> { |
| @@ -1078,10 +1097,476 @@ impl<'a> Lpuart<'a, Blocking> { | |||
| 1078 | } | 1097 | } |
| 1079 | 1098 | ||
| 1080 | // ============================================================================ | 1099 | // ============================================================================ |
| 1081 | // ASYNC MODE IMPLEMENTATIONS | 1100 | // ASYNC MODE IMPLEMENTATIONS (DMA-based) |
| 1082 | // ============================================================================ | 1101 | // ============================================================================ |
| 1083 | 1102 | ||
| 1084 | // TODO: Implement async mode for LPUART | 1103 | /// Guard struct that ensures DMA is stopped if the async future is cancelled. |
| 1104 | /// | ||
| 1105 | /// This implements the RAII pattern: if the future is dropped before completion | ||
| 1106 | /// (e.g., due to a timeout), the DMA transfer is automatically aborted to prevent | ||
| 1107 | /// use-after-free when the buffer goes out of scope. | ||
| 1108 | struct TxDmaGuard<'a, C: DmaChannelTrait> { | ||
| 1109 | dma: &'a DmaChannel<C>, | ||
| 1110 | regs: Regs, | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | impl<'a, C: DmaChannelTrait> TxDmaGuard<'a, C> { | ||
| 1114 | fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self { | ||
| 1115 | Self { dma, regs } | ||
| 1116 | } | ||
| 1117 | |||
| 1118 | /// Complete the transfer normally (don't abort on drop). | ||
| 1119 | fn complete(self) { | ||
| 1120 | // Cleanup | ||
| 1121 | self.regs.baud().modify(|_, w| w.tdmae().disabled()); | ||
| 1122 | unsafe { | ||
| 1123 | self.dma.disable_request(); | ||
| 1124 | self.dma.clear_done(); | ||
| 1125 | } | ||
| 1126 | // Don't run drop since we've cleaned up | ||
| 1127 | core::mem::forget(self); | ||
| 1128 | } | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | impl<C: DmaChannelTrait> Drop for TxDmaGuard<'_, C> { | ||
| 1132 | fn drop(&mut self) { | ||
| 1133 | // Abort the DMA transfer if still running | ||
| 1134 | unsafe { | ||
| 1135 | self.dma.disable_request(); | ||
| 1136 | self.dma.clear_done(); | ||
| 1137 | self.dma.clear_interrupt(); | ||
| 1138 | } | ||
| 1139 | // Disable UART TX DMA request | ||
| 1140 | self.regs.baud().modify(|_, w| w.tdmae().disabled()); | ||
| 1141 | } | ||
| 1142 | } | ||
| 1143 | |||
| 1144 | /// Guard struct for RX DMA transfers. | ||
| 1145 | struct RxDmaGuard<'a, C: DmaChannelTrait> { | ||
| 1146 | dma: &'a DmaChannel<C>, | ||
| 1147 | regs: Regs, | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | impl<'a, C: DmaChannelTrait> RxDmaGuard<'a, C> { | ||
| 1151 | fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self { | ||
| 1152 | Self { dma, regs } | ||
| 1153 | } | ||
| 1154 | |||
| 1155 | /// Complete the transfer normally (don't abort on drop). | ||
| 1156 | fn complete(self) { | ||
| 1157 | // Ensure DMA writes are visible to CPU | ||
| 1158 | cortex_m::asm::dsb(); | ||
| 1159 | // Cleanup | ||
| 1160 | self.regs.baud().modify(|_, w| w.rdmae().disabled()); | ||
| 1161 | unsafe { | ||
| 1162 | self.dma.disable_request(); | ||
| 1163 | self.dma.clear_done(); | ||
| 1164 | } | ||
| 1165 | // Don't run drop since we've cleaned up | ||
| 1166 | core::mem::forget(self); | ||
| 1167 | } | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | impl<C: DmaChannelTrait> Drop for RxDmaGuard<'_, C> { | ||
| 1171 | fn drop(&mut self) { | ||
| 1172 | // Abort the DMA transfer if still running | ||
| 1173 | unsafe { | ||
| 1174 | self.dma.disable_request(); | ||
| 1175 | self.dma.clear_done(); | ||
| 1176 | self.dma.clear_interrupt(); | ||
| 1177 | } | ||
| 1178 | // Disable UART RX DMA request | ||
| 1179 | self.regs.baud().modify(|_, w| w.rdmae().disabled()); | ||
| 1180 | } | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | impl<'a, T: Instance, C: DmaChannelTrait> LpuartTxDma<'a, T, C> { | ||
| 1184 | /// Create a new LPUART TX driver with DMA support. | ||
| 1185 | pub fn new( | ||
| 1186 | _inner: Peri<'a, T>, | ||
| 1187 | tx_pin: Peri<'a, impl TxPin<T>>, | ||
| 1188 | tx_dma_ch: Peri<'a, C>, | ||
| 1189 | config: Config, | ||
| 1190 | ) -> Result<Self> { | ||
| 1191 | tx_pin.as_tx(); | ||
| 1192 | let tx_pin: Peri<'a, AnyPin> = tx_pin.into(); | ||
| 1193 | |||
| 1194 | // Initialize LPUART with TX enabled, RX disabled, no flow control | ||
| 1195 | Lpuart::<Async>::init::<T>(true, false, false, false, config)?; | ||
| 1196 | |||
| 1197 | // Enable interrupt | ||
| 1198 | let tx_dma = DmaChannel::new(tx_dma_ch); | ||
| 1199 | tx_dma.enable_interrupt(); | ||
| 1200 | |||
| 1201 | Ok(Self { | ||
| 1202 | info: T::info(), | ||
| 1203 | _tx_pin: tx_pin, | ||
| 1204 | tx_dma, | ||
| 1205 | _instance: core::marker::PhantomData, | ||
| 1206 | }) | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | /// Write data using DMA. | ||
| 1210 | /// | ||
| 1211 | /// This configures the DMA channel for a memory-to-peripheral transfer | ||
| 1212 | /// and waits for completion asynchronously. Large buffers are automatically | ||
| 1213 | /// split into chunks that fit within the DMA transfer limit. | ||
| 1214 | /// | ||
| 1215 | /// The DMA request source is automatically derived from the LPUART instance type. | ||
| 1216 | /// | ||
| 1217 | /// # Safety | ||
| 1218 | /// | ||
| 1219 | /// If the returned future is dropped before completion (e.g., due to a timeout), | ||
| 1220 | /// the DMA transfer is automatically aborted to prevent use-after-free. | ||
| 1221 | /// | ||
| 1222 | /// # Arguments | ||
| 1223 | /// * `buf` - Data buffer to transmit | ||
| 1224 | pub async fn write_dma(&mut self, buf: &[u8]) -> Result<usize> { | ||
| 1225 | if buf.is_empty() { | ||
| 1226 | return Ok(0); | ||
| 1227 | } | ||
| 1228 | |||
| 1229 | let mut total = 0; | ||
| 1230 | for chunk in buf.chunks(DMA_MAX_TRANSFER_SIZE) { | ||
| 1231 | total += self.write_dma_inner(chunk).await?; | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | Ok(total) | ||
| 1235 | } | ||
| 1236 | |||
| 1237 | /// Internal helper to write a single chunk (max 0x7FFF bytes) using DMA. | ||
| 1238 | async fn write_dma_inner(&mut self, buf: &[u8]) -> Result<usize> { | ||
| 1239 | let len = buf.len(); | ||
| 1240 | let peri_addr = self.info.regs.data().as_ptr() as *mut u8; | ||
| 1241 | |||
| 1242 | unsafe { | ||
| 1243 | // Clean up channel state | ||
| 1244 | self.tx_dma.disable_request(); | ||
| 1245 | self.tx_dma.clear_done(); | ||
| 1246 | self.tx_dma.clear_interrupt(); | ||
| 1247 | |||
| 1248 | // Set DMA request source from instance type (type-safe) | ||
| 1249 | self.tx_dma.set_request_source::<T::TxDmaRequest>(); | ||
| 1250 | |||
| 1251 | // Configure TCD for memory-to-peripheral transfer | ||
| 1252 | self.tx_dma | ||
| 1253 | .setup_write_to_peripheral(buf, peri_addr, EnableInterrupt::Yes); | ||
| 1254 | |||
| 1255 | // Enable UART TX DMA request | ||
| 1256 | self.info.regs.baud().modify(|_, w| w.tdmae().enabled()); | ||
| 1257 | |||
| 1258 | // Enable DMA channel request | ||
| 1259 | self.tx_dma.enable_request(); | ||
| 1260 | } | ||
| 1261 | |||
| 1262 | // Create guard that will abort DMA if this future is dropped | ||
| 1263 | let guard = TxDmaGuard::new(&self.tx_dma, self.info.regs); | ||
| 1264 | |||
| 1265 | // Wait for completion asynchronously | ||
| 1266 | core::future::poll_fn(|cx| { | ||
| 1267 | self.tx_dma.waker().register(cx.waker()); | ||
| 1268 | if self.tx_dma.is_done() { | ||
| 1269 | core::task::Poll::Ready(()) | ||
| 1270 | } else { | ||
| 1271 | core::task::Poll::Pending | ||
| 1272 | } | ||
| 1273 | }) | ||
| 1274 | .await; | ||
| 1275 | |||
| 1276 | // Transfer completed successfully - clean up without aborting | ||
| 1277 | guard.complete(); | ||
| 1278 | |||
| 1279 | Ok(len) | ||
| 1280 | } | ||
| 1281 | |||
| 1282 | /// Blocking write (fallback when DMA is not needed) | ||
| 1283 | pub fn blocking_write(&mut self, buf: &[u8]) -> Result<()> { | ||
| 1284 | for &byte in buf { | ||
| 1285 | while self.info.regs.stat().read().tdre().is_txdata() {} | ||
| 1286 | self.info.regs.data().modify(|_, w| unsafe { w.bits(u32::from(byte)) }); | ||
| 1287 | } | ||
| 1288 | Ok(()) | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | /// Flush TX blocking | ||
| 1292 | pub fn blocking_flush(&mut self) -> Result<()> { | ||
| 1293 | while self.info.regs.water().read().txcount().bits() != 0 {} | ||
| 1294 | while self.info.regs.stat().read().tc().is_active() {} | ||
| 1295 | Ok(()) | ||
| 1296 | } | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | impl<'a, T: Instance, C: DmaChannelTrait> LpuartRxDma<'a, T, C> { | ||
| 1300 | /// Create a new LPUART RX driver with DMA support. | ||
| 1301 | pub fn new( | ||
| 1302 | _inner: Peri<'a, T>, | ||
| 1303 | rx_pin: Peri<'a, impl RxPin<T>>, | ||
| 1304 | rx_dma_ch: Peri<'a, C>, | ||
| 1305 | config: Config, | ||
| 1306 | ) -> Result<Self> { | ||
| 1307 | rx_pin.as_rx(); | ||
| 1308 | let rx_pin: Peri<'a, AnyPin> = rx_pin.into(); | ||
| 1309 | |||
| 1310 | // Initialize LPUART with TX disabled, RX enabled, no flow control | ||
| 1311 | Lpuart::<Async>::init::<T>(false, true, false, false, config)?; | ||
| 1312 | |||
| 1313 | // Enable dma interrupt | ||
| 1314 | let rx_dma = DmaChannel::new(rx_dma_ch); | ||
| 1315 | rx_dma.enable_interrupt(); | ||
| 1316 | |||
| 1317 | Ok(Self { | ||
| 1318 | info: T::info(), | ||
| 1319 | _rx_pin: rx_pin, | ||
| 1320 | rx_dma, | ||
| 1321 | _instance: core::marker::PhantomData, | ||
| 1322 | }) | ||
| 1323 | } | ||
| 1324 | |||
| 1325 | /// Read data using DMA. | ||
| 1326 | /// | ||
| 1327 | /// This configures the DMA channel for a peripheral-to-memory transfer | ||
| 1328 | /// and waits for completion asynchronously. Large buffers are automatically | ||
| 1329 | /// split into chunks that fit within the DMA transfer limit. | ||
| 1330 | /// | ||
| 1331 | /// The DMA request source is automatically derived from the LPUART instance type. | ||
| 1332 | /// | ||
| 1333 | /// # Safety | ||
| 1334 | /// | ||
| 1335 | /// If the returned future is dropped before completion (e.g., due to a timeout), | ||
| 1336 | /// the DMA transfer is automatically aborted to prevent use-after-free. | ||
| 1337 | /// | ||
| 1338 | /// # Arguments | ||
| 1339 | /// * `buf` - Buffer to receive data into | ||
| 1340 | pub async fn read_dma(&mut self, buf: &mut [u8]) -> Result<usize> { | ||
| 1341 | if buf.is_empty() { | ||
| 1342 | return Ok(0); | ||
| 1343 | } | ||
| 1344 | |||
| 1345 | let mut total = 0; | ||
| 1346 | for chunk in buf.chunks_mut(DMA_MAX_TRANSFER_SIZE) { | ||
| 1347 | total += self.read_dma_inner(chunk).await?; | ||
| 1348 | } | ||
| 1349 | |||
| 1350 | Ok(total) | ||
| 1351 | } | ||
| 1352 | |||
| 1353 | /// Internal helper to read a single chunk (max 0x7FFF bytes) using DMA. | ||
| 1354 | async fn read_dma_inner(&mut self, buf: &mut [u8]) -> Result<usize> { | ||
| 1355 | let len = buf.len(); | ||
| 1356 | let peri_addr = self.info.regs.data().as_ptr() as *const u8; | ||
| 1357 | |||
| 1358 | unsafe { | ||
| 1359 | // Clean up channel state | ||
| 1360 | self.rx_dma.disable_request(); | ||
| 1361 | self.rx_dma.clear_done(); | ||
| 1362 | self.rx_dma.clear_interrupt(); | ||
| 1363 | |||
| 1364 | // Set DMA request source from instance type (type-safe) | ||
| 1365 | self.rx_dma.set_request_source::<T::RxDmaRequest>(); | ||
| 1366 | |||
| 1367 | // Configure TCD for peripheral-to-memory transfer | ||
| 1368 | self.rx_dma | ||
| 1369 | .setup_read_from_peripheral(peri_addr, buf, EnableInterrupt::Yes); | ||
| 1370 | |||
| 1371 | // Enable UART RX DMA request | ||
| 1372 | self.info.regs.baud().modify(|_, w| w.rdmae().enabled()); | ||
| 1373 | |||
| 1374 | // Enable DMA channel request | ||
| 1375 | self.rx_dma.enable_request(); | ||
| 1376 | } | ||
| 1377 | |||
| 1378 | // Create guard that will abort DMA if this future is dropped | ||
| 1379 | let guard = RxDmaGuard::new(&self.rx_dma, self.info.regs); | ||
| 1380 | |||
| 1381 | // Wait for completion asynchronously | ||
| 1382 | core::future::poll_fn(|cx| { | ||
| 1383 | self.rx_dma.waker().register(cx.waker()); | ||
| 1384 | if self.rx_dma.is_done() { | ||
| 1385 | core::task::Poll::Ready(()) | ||
| 1386 | } else { | ||
| 1387 | core::task::Poll::Pending | ||
| 1388 | } | ||
| 1389 | }) | ||
| 1390 | .await; | ||
| 1391 | |||
| 1392 | // Transfer completed successfully - clean up without aborting | ||
| 1393 | guard.complete(); | ||
| 1394 | |||
| 1395 | Ok(len) | ||
| 1396 | } | ||
| 1397 | |||
| 1398 | /// Blocking read (fallback when DMA is not needed) | ||
| 1399 | pub fn blocking_read(&mut self, buf: &mut [u8]) -> Result<()> { | ||
| 1400 | for byte in buf.iter_mut() { | ||
| 1401 | loop { | ||
| 1402 | if has_data(self.info.regs) { | ||
| 1403 | *byte = (self.info.regs.data().read().bits() & 0xFF) as u8; | ||
| 1404 | break; | ||
| 1405 | } | ||
| 1406 | check_and_clear_rx_errors(self.info.regs)?; | ||
| 1407 | } | ||
| 1408 | } | ||
| 1409 | Ok(()) | ||
| 1410 | } | ||
| 1411 | |||
| 1412 | pub fn into_ring_dma_rx<'buf>(self, buf: &'buf mut [u8]) -> LpuartRxRingDma<'a, 'buf, T, C> { | ||
| 1413 | unsafe { | ||
| 1414 | let ring = self.setup_ring_buffer(buf); | ||
| 1415 | self.enable_dma_request(); | ||
| 1416 | LpuartRxRingDma { _inner: self, ring } | ||
| 1417 | } | ||
| 1418 | } | ||
| 1419 | |||
| 1420 | /// Set up a ring buffer for continuous DMA reception. | ||
| 1421 | /// | ||
| 1422 | /// This configures the DMA channel for circular operation, enabling continuous | ||
| 1423 | /// reception of data without gaps. The DMA will continuously write received | ||
| 1424 | /// bytes into the buffer, wrapping around when it reaches the end. | ||
| 1425 | /// | ||
| 1426 | /// This method encapsulates all the low-level setup: | ||
| 1427 | /// - Configures the DMA request source for this LPUART instance | ||
| 1428 | /// - Enables the RX DMA request in the LPUART peripheral | ||
| 1429 | /// - Sets up the circular DMA transfer | ||
| 1430 | /// - Enables the NVIC interrupt for async wakeups | ||
| 1431 | /// | ||
| 1432 | /// # Arguments | ||
| 1433 | /// | ||
| 1434 | /// * `buf` - Destination buffer for received data (power-of-2 size is ideal for efficiency) | ||
| 1435 | /// | ||
| 1436 | /// # Returns | ||
| 1437 | /// | ||
| 1438 | /// A [`RingBuffer`] that can be used to asynchronously read received data. | ||
| 1439 | /// | ||
| 1440 | /// # Example | ||
| 1441 | /// | ||
| 1442 | /// ```no_run | ||
| 1443 | /// static mut RX_BUF: [u8; 64] = [0; 64]; | ||
| 1444 | /// | ||
| 1445 | /// let rx = LpuartRxDma::new(p.LPUART2, p.P2_3, p.DMA_CH0, config).unwrap(); | ||
| 1446 | /// let ring_buf = unsafe { rx.setup_ring_buffer(&mut RX_BUF) }; | ||
| 1447 | /// | ||
| 1448 | /// // Read data as it arrives | ||
| 1449 | /// let mut buf = [0u8; 16]; | ||
| 1450 | /// let n = ring_buf.read(&mut buf).await.unwrap(); | ||
| 1451 | /// ``` | ||
| 1452 | /// | ||
| 1453 | /// # Safety | ||
| 1454 | /// | ||
| 1455 | /// - The buffer must remain valid for the lifetime of the returned RingBuffer. | ||
| 1456 | /// - Only one RingBuffer should exist per LPUART RX channel at a time. | ||
| 1457 | /// - The caller must ensure the static buffer is not accessed elsewhere while | ||
| 1458 | /// the ring buffer is active. | ||
| 1459 | unsafe fn setup_ring_buffer<'b>(&self, buf: &'b mut [u8]) -> RingBuffer<'b, u8> { | ||
| 1460 | // Get the peripheral data register address | ||
| 1461 | let peri_addr = self.info.regs.data().as_ptr() as *const u8; | ||
| 1462 | |||
| 1463 | // Configure DMA request source for this LPUART instance (type-safe) | ||
| 1464 | self.rx_dma.set_request_source::<T::RxDmaRequest>(); | ||
| 1465 | |||
| 1466 | // Enable RX DMA request in the LPUART peripheral | ||
| 1467 | self.info.regs.baud().modify(|_, w| w.rdmae().enabled()); | ||
| 1468 | |||
| 1469 | // Set up circular DMA transfer (this also enables NVIC interrupt) | ||
| 1470 | self.rx_dma.setup_circular_read(peri_addr, buf) | ||
| 1471 | } | ||
| 1472 | |||
| 1473 | /// Enable the DMA channel request. | ||
| 1474 | /// | ||
| 1475 | /// Call this after `setup_ring_buffer()` to start continuous reception. | ||
| 1476 | /// This is separated from setup to allow for any additional configuration | ||
| 1477 | /// before starting the transfer. | ||
| 1478 | unsafe fn enable_dma_request(&self) { | ||
| 1479 | self.rx_dma.enable_request(); | ||
| 1480 | } | ||
| 1481 | } | ||
| 1482 | |||
| 1483 | impl<'peri, 'buf, T: Instance, C: DmaChannelTrait> LpuartRxRingDma<'peri, 'buf, T, C> { | ||
| 1484 | /// Read from the ring buffer | ||
| 1485 | pub fn read<'d>( | ||
| 1486 | &mut self, | ||
| 1487 | dst: &'d mut [u8], | ||
| 1488 | ) -> impl Future<Output = core::result::Result<usize, crate::dma::Error>> + use<'_, 'buf, 'd, T, C> { | ||
| 1489 | self.ring.read(dst) | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | /// Clear the current contents of the ring buffer | ||
| 1493 | pub fn clear(&mut self) { | ||
| 1494 | self.ring.clear(); | ||
| 1495 | } | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | impl<'a, T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> LpuartDma<'a, T, TxC, RxC> { | ||
| 1499 | /// Create a new LPUART driver with DMA support for both TX and RX. | ||
| 1500 | pub fn new( | ||
| 1501 | _inner: Peri<'a, T>, | ||
| 1502 | tx_pin: Peri<'a, impl TxPin<T>>, | ||
| 1503 | rx_pin: Peri<'a, impl RxPin<T>>, | ||
| 1504 | tx_dma_ch: Peri<'a, TxC>, | ||
| 1505 | rx_dma_ch: Peri<'a, RxC>, | ||
| 1506 | config: Config, | ||
| 1507 | ) -> Result<Self> { | ||
| 1508 | tx_pin.as_tx(); | ||
| 1509 | rx_pin.as_rx(); | ||
| 1510 | |||
| 1511 | let tx_pin: Peri<'a, AnyPin> = tx_pin.into(); | ||
| 1512 | let rx_pin: Peri<'a, AnyPin> = rx_pin.into(); | ||
| 1513 | |||
| 1514 | // Initialize LPUART with both TX and RX enabled, no flow control | ||
| 1515 | Lpuart::<Async>::init::<T>(true, true, false, false, config)?; | ||
| 1516 | |||
| 1517 | // Enable DMA interrupts | ||
| 1518 | let tx_dma = DmaChannel::new(tx_dma_ch); | ||
| 1519 | let rx_dma = DmaChannel::new(rx_dma_ch); | ||
| 1520 | tx_dma.enable_interrupt(); | ||
| 1521 | rx_dma.enable_interrupt(); | ||
| 1522 | |||
| 1523 | Ok(Self { | ||
| 1524 | tx: LpuartTxDma { | ||
| 1525 | info: T::info(), | ||
| 1526 | _tx_pin: tx_pin, | ||
| 1527 | tx_dma, | ||
| 1528 | _instance: core::marker::PhantomData, | ||
| 1529 | }, | ||
| 1530 | rx: LpuartRxDma { | ||
| 1531 | info: T::info(), | ||
| 1532 | _rx_pin: rx_pin, | ||
| 1533 | rx_dma, | ||
| 1534 | _instance: core::marker::PhantomData, | ||
| 1535 | }, | ||
| 1536 | }) | ||
| 1537 | } | ||
| 1538 | |||
| 1539 | /// Split into separate TX and RX drivers | ||
| 1540 | pub fn split(self) -> (LpuartTxDma<'a, T, TxC>, LpuartRxDma<'a, T, RxC>) { | ||
| 1541 | (self.tx, self.rx) | ||
| 1542 | } | ||
| 1543 | |||
| 1544 | /// Write data using DMA | ||
| 1545 | pub async fn write_dma(&mut self, buf: &[u8]) -> Result<usize> { | ||
| 1546 | self.tx.write_dma(buf).await | ||
| 1547 | } | ||
| 1548 | |||
| 1549 | /// Read data using DMA | ||
| 1550 | pub async fn read_dma(&mut self, buf: &mut [u8]) -> Result<usize> { | ||
| 1551 | self.rx.read_dma(buf).await | ||
| 1552 | } | ||
| 1553 | } | ||
| 1554 | |||
| 1555 | // ============================================================================ | ||
| 1556 | // EMBEDDED-IO-ASYNC TRAIT IMPLEMENTATIONS | ||
| 1557 | // ============================================================================ | ||
| 1558 | |||
| 1559 | impl<T: Instance, C: DmaChannelTrait> embedded_io::ErrorType for LpuartTxDma<'_, T, C> { | ||
| 1560 | type Error = Error; | ||
| 1561 | } | ||
| 1562 | |||
| 1563 | impl<T: Instance, C: DmaChannelTrait> embedded_io::ErrorType for LpuartRxDma<'_, T, C> { | ||
| 1564 | type Error = Error; | ||
| 1565 | } | ||
| 1566 | |||
| 1567 | impl<T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> embedded_io::ErrorType for LpuartDma<'_, T, TxC, RxC> { | ||
| 1568 | type Error = Error; | ||
| 1569 | } | ||
| 1085 | 1570 | ||
| 1086 | // ============================================================================ | 1571 | // ============================================================================ |
| 1087 | // EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS | 1572 | // EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS |
| @@ -1221,6 +1706,12 @@ impl embedded_hal_nb::serial::Write for LpuartTx<'_, Blocking> { | |||
| 1221 | } | 1706 | } |
| 1222 | } | 1707 | } |
| 1223 | 1708 | ||
| 1709 | impl core::fmt::Write for LpuartTx<'_, Blocking> { | ||
| 1710 | fn write_str(&mut self, s: &str) -> core::fmt::Result { | ||
| 1711 | self.blocking_write(s.as_bytes()).map_err(|_| core::fmt::Error) | ||
| 1712 | } | ||
| 1713 | } | ||
| 1714 | |||
| 1224 | impl embedded_hal_nb::serial::Read for Lpuart<'_, Blocking> { | 1715 | impl embedded_hal_nb::serial::Read for Lpuart<'_, Blocking> { |
| 1225 | fn read(&mut self) -> nb::Result<u8, Self::Error> { | 1716 | fn read(&mut self) -> nb::Result<u8, Self::Error> { |
| 1226 | embedded_hal_nb::serial::Read::read(&mut self.rx) | 1717 | embedded_hal_nb::serial::Read::read(&mut self.rx) |
diff --git a/embassy-mcxa/src/pins.rs b/embassy-mcxa/src/pins.rs index fdf1b0a86..9adbe64c8 100644 --- a/embassy-mcxa/src/pins.rs +++ b/embassy-mcxa/src/pins.rs | |||
| @@ -1,6 +1,11 @@ | |||
| 1 | //! Pin configuration helpers (separate from peripheral drivers). | 1 | //! Pin configuration helpers (separate from peripheral drivers). |
| 2 | use crate::pac; | 2 | use crate::pac; |
| 3 | 3 | ||
| 4 | /// Configure pins for ADC usage. | ||
| 5 | /// | ||
| 6 | /// # Safety | ||
| 7 | /// | ||
| 8 | /// Must be called after PORT clocks are enabled. | ||
| 4 | pub unsafe fn configure_adc_pins() { | 9 | pub unsafe fn configure_adc_pins() { |
| 5 | // P1_10 = ADC1_A8 | 10 | // P1_10 = ADC1_A8 |
| 6 | let port1 = &*pac::Port1::ptr(); | 11 | let port1 = &*pac::Port1::ptr(); |
