aboutsummaryrefslogtreecommitdiff
path: root/embassy-mcxa/src
diff options
context:
space:
mode:
authorJames Munns <[email protected]>2025-12-05 14:28:47 +0100
committerJames Munns <[email protected]>2025-12-05 14:28:47 +0100
commitb252db845e19603faf528cf93fe0c44757a27430 (patch)
tree99e646d17bed747df244dd607a15f5a67baa530a /embassy-mcxa/src
parent6a1eed83b9df8ffa81b93860f530f5bb3252d996 (diff)
Move
Diffstat (limited to 'embassy-mcxa/src')
-rw-r--r--embassy-mcxa/src/adc.rs409
-rw-r--r--embassy-mcxa/src/clkout.rs169
-rw-r--r--embassy-mcxa/src/clocks/config.rs204
-rw-r--r--embassy-mcxa/src/clocks/mod.rs950
-rw-r--r--embassy-mcxa/src/clocks/periph_helpers.rs506
-rw-r--r--embassy-mcxa/src/config.rs27
-rw-r--r--embassy-mcxa/src/dma.rs2594
-rw-r--r--embassy-mcxa/src/gpio.rs1062
-rw-r--r--embassy-mcxa/src/i2c/controller.rs455
-rw-r--r--embassy-mcxa/src/i2c/mod.rs171
-rw-r--r--embassy-mcxa/src/interrupt.rs546
-rw-r--r--embassy-mcxa/src/lib.rs484
-rw-r--r--embassy-mcxa/src/lpuart/buffered.rs780
-rw-r--r--embassy-mcxa/src/lpuart/mod.rs1733
-rw-r--r--embassy-mcxa/src/ostimer.rs745
-rw-r--r--embassy-mcxa/src/pins.rs33
-rw-r--r--embassy-mcxa/src/rtc.rs299
17 files changed, 11167 insertions, 0 deletions
diff --git a/embassy-mcxa/src/adc.rs b/embassy-mcxa/src/adc.rs
new file mode 100644
index 000000000..b5ec5983f
--- /dev/null
+++ b/embassy-mcxa/src/adc.rs
@@ -0,0 +1,409 @@
1//! ADC driver
2use core::sync::atomic::{AtomicBool, Ordering};
3
4use embassy_hal_internal::{Peri, PeripheralType};
5
6use crate::clocks::periph_helpers::{AdcClockSel, AdcConfig, Div4};
7use crate::clocks::{enable_and_reset, Gate, PoweredClock};
8use crate::pac;
9use crate::pac::adc1::cfg::{HptExdi, Pwrsel, Refsel, Tcmdres, Tprictrl, Tres};
10use crate::pac::adc1::cmdh1::{Avgs, Cmpen, Next, Sts};
11use crate::pac::adc1::cmdl1::{Adch, Ctype, Mode};
12use crate::pac::adc1::ctrl::CalAvgs;
13use crate::pac::adc1::tctrl::{Tcmd, Tpri};
14
/// Shorthand for the ADC register block type shared by all instances.
type Regs = pac::adc1::RegisterBlock;

// Set by the interrupt handler when a valid conversion result was observed;
// cleared when interrupts are (re-)enabled in `Adc::enable_interrupt`.
static INTERRUPT_TRIGGERED: AtomicBool = AtomicBool::new(false);
// Token-based instance pattern like embassy-imxrt
pub trait Instance: Gate<MrccPeriphConfig = AdcConfig> + PeripheralType {
    /// Raw pointer to this instance's register block.
    fn ptr() -> *const Regs;
}
22
/// Token for ADC1
pub type Adc1 = crate::peripherals::ADC1;
impl Instance for crate::peripherals::ADC1 {
    #[inline(always)]
    fn ptr() -> *const Regs {
        // Delegate to the PAC's fixed register-block address for ADC1.
        pac::Adc1::ptr()
    }
}
31
32// Also implement Instance for the Peri wrapper type
33// impl Instance for embassy_hal_internal::Peri<'_, crate::peripherals::ADC1> {
34// #[inline(always)]
35// fn ptr() -> *const Regs {
36// pac::Adc1::ptr()
37// }
38// }
39
/// Trigger priority preemption policy.
///
/// Each variant encodes a combination of preemption behaviors; the numeric
/// value is decoded in `Adc::new` into the CFG register's TPRICTRL, TRES,
/// TCMDRES and HPT_EXDI fields (see the `match` arms there for the exact
/// mapping).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum TriggerPriorityPolicy {
    /// Abort the current conversion immediately; do not resume afterwards.
    ConvPreemptImmediatelyNotAutoResumed = 0,
    /// Finish the current conversion first; do not resume afterwards.
    ConvPreemptSoftlyNotAutoResumed = 1,
    /// Abort immediately; restart the preempted conversion afterwards.
    ConvPreemptImmediatelyAutoRestarted = 4,
    /// Finish the current conversion; restart the preempted one afterwards.
    ConvPreemptSoftlyAutoRestarted = 5,
    /// Abort immediately; resume the preempted conversion afterwards.
    ConvPreemptImmediatelyAutoResumed = 12,
    /// Finish the current conversion; resume the preempted one afterwards.
    ConvPreemptSoftlyAutoResumed = 13,
    /// Finish the current command sequence first; do not resume afterwards.
    ConvPreemptSubsequentlyNotAutoResumed = 2,
    /// Finish the current command sequence; restart afterwards.
    ConvPreemptSubsequentlyAutoRestarted = 6,
    /// Finish the current command sequence; resume afterwards.
    ConvPreemptSubsequentlyAutoResumed = 14,
    /// Disable trigger priority exceptions entirely (HPT_EXDI disabled).
    TriggerPriorityExceptionDisabled = 16,
}
54
/// Top-level LPADC configuration applied by [`Adc::new`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct LpadcConfig {
    /// Keep the ADC running in doze mode (CTRL.DOZEN).
    pub enable_in_doze_mode: bool,
    /// Averaging used during calibration (CTRL.CAL_AVGS).
    pub conversion_average_mode: CalAvgs,
    /// Pre-enable the analog circuitry (CFG.PWREN) ahead of conversions.
    pub enable_analog_preliminary: bool,
    /// Power-up delay written to CFG.PUDLY — units are ADC clock cycles per
    /// the register name; TODO confirm against the reference manual.
    pub power_up_delay: u8,
    /// Reference voltage selection (CFG.REFSEL).
    pub reference_voltage_source: Refsel,
    /// Power/speed trade-off level (CFG.PWRSEL).
    pub power_level_mode: Pwrsel,
    /// How higher-priority triggers preempt running conversions.
    pub trigger_priority_policy: TriggerPriorityPolicy,
    /// Enable the inter-conversion pause feature (PAUSE.PAUSEEN).
    pub enable_conv_pause: bool,
    /// Pause delay written to PAUSE.PAUSEDLY when pause is enabled.
    pub conv_pause_delay: u16,
    /// Result-FIFO watermark written to FCTRL0.FWMARK.
    pub fifo_watermark: u8,
    /// Clock-gate power policy for the peripheral clock.
    pub power: PoweredClock,
    /// MRCC clock source selection for the ADC.
    pub source: AdcClockSel,
    /// MRCC clock divider for the ADC.
    pub div: Div4,
}
71
72impl Default for LpadcConfig {
73 fn default() -> Self {
74 LpadcConfig {
75 enable_in_doze_mode: true,
76 conversion_average_mode: CalAvgs::NoAverage,
77 enable_analog_preliminary: false,
78 power_up_delay: 0x80,
79 reference_voltage_source: Refsel::Option1,
80 power_level_mode: Pwrsel::Lowest,
81 trigger_priority_policy: TriggerPriorityPolicy::ConvPreemptImmediatelyNotAutoResumed,
82 enable_conv_pause: false,
83 conv_pause_delay: 0,
84 fifo_watermark: 0,
85 power: PoweredClock::NormalEnabledDeepSleepDisabled,
86 source: AdcClockSel::FroLfDiv,
87 div: Div4::no_div(),
88 }
89 }
90}
91
/// Configuration for a single conversion command (CMDL/CMDH register pair).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ConvCommandConfig {
    /// Single-ended / differential sampling mode (CMDL.CTYPE).
    pub sample_channel_mode: Ctype,
    /// Input channel selection (CMDL.ADCH).
    pub channel_number: Adch,
    /// Command to chain to when this one completes (CMDH.NEXT).
    pub chained_next_command_number: Next,
    /// Auto channel increment per loop iteration (CMDH.LWI).
    pub enable_auto_channel_increment: bool,
    /// Number of extra loop iterations (CMDH.LOOP).
    pub loop_count: u8,
    /// Hardware averaging applied to results (CMDH.AVGS).
    pub hardware_average_mode: Avgs,
    /// Sample time selection (CMDH.STS).
    pub sample_time_mode: Sts,
    /// Hardware compare behavior (CMDH.CMPEN).
    pub hardware_compare_mode: Cmpen,
    // NOTE(review): the compare high/low values are declared here but not yet
    // written to any register in `set_conv_command_config` — verify intent.
    pub hardware_compare_value_high: u32,
    pub hardware_compare_value_low: u32,
    /// Conversion resolution (CMDL.MODE).
    pub conversion_resolution_mode: Mode,
    /// Wait for a trigger assertion before sampling (CMDH.WAIT_TRIG).
    pub enable_wait_trigger: bool,
}
107
/// Configuration for one conversion trigger (TCTRLn register).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ConvTriggerConfig {
    /// Command this trigger launches (TCTRL.TCMD).
    pub target_command_id: Tcmd,
    /// Trigger delay written to TCTRL.TDLY.
    pub delay_power: u8,
    /// Trigger priority (TCTRL.TPRI).
    pub priority: Tpri,
    /// Allow hardware (vs software-only) triggering (TCTRL.HTEN).
    pub enable_hardware_trigger: bool,
}
115
/// One decoded entry from the ADC result FIFO (see [`get_conv_result`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ConvResult {
    /// Which command produced this result (FIFO bits 27:24).
    pub command_id_source: u32,
    /// Loop iteration index (FIFO bits 23:20).
    pub loop_count_index: u32,
    /// Which trigger launched the conversion (FIFO bits 19:16).
    pub trigger_id_source: u32,
    /// Raw conversion value (FIFO bits 15:0).
    pub conv_value: u16,
}
123
/// ADC driver handle. Holds an exclusive borrow of the instance token for
/// `'a`, preventing a second driver from being constructed concurrently.
pub struct Adc<'a, I: Instance> {
    _inst: core::marker::PhantomData<&'a mut I>,
}
127
128impl<'a, I: Instance> Adc<'a, I> {
129 /// initialize ADC
130 pub fn new(_inst: Peri<'a, I>, config: LpadcConfig) -> Self {
131 let adc = unsafe { &*I::ptr() };
132
133 let _clock_freq = unsafe {
134 enable_and_reset::<I>(&AdcConfig {
135 power: config.power,
136 source: config.source,
137 div: config.div,
138 })
139 .expect("Adc Init should not fail")
140 };
141
142 /* Reset the module. */
143 adc.ctrl().modify(|_, w| w.rst().held_in_reset());
144 adc.ctrl().modify(|_, w| w.rst().released_from_reset());
145
146 adc.ctrl().modify(|_, w| w.rstfifo0().trigger_reset());
147
148 /* Disable the module before setting configuration. */
149 adc.ctrl().modify(|_, w| w.adcen().disabled());
150
151 /* Configure the module generally. */
152 if config.enable_in_doze_mode {
153 adc.ctrl().modify(|_, w| w.dozen().enabled());
154 } else {
155 adc.ctrl().modify(|_, w| w.dozen().disabled());
156 }
157
158 /* Set calibration average mode. */
159 adc.ctrl()
160 .modify(|_, w| w.cal_avgs().variant(config.conversion_average_mode));
161
162 adc.cfg().write(|w| unsafe {
163 let w = if config.enable_analog_preliminary {
164 w.pwren().pre_enabled()
165 } else {
166 w
167 };
168
169 w.pudly()
170 .bits(config.power_up_delay)
171 .refsel()
172 .variant(config.reference_voltage_source)
173 .pwrsel()
174 .variant(config.power_level_mode)
175 .tprictrl()
176 .variant(match config.trigger_priority_policy {
177 TriggerPriorityPolicy::ConvPreemptSoftlyNotAutoResumed
178 | TriggerPriorityPolicy::ConvPreemptSoftlyAutoRestarted
179 | TriggerPriorityPolicy::ConvPreemptSoftlyAutoResumed => Tprictrl::FinishCurrentOnPriority,
180 TriggerPriorityPolicy::ConvPreemptSubsequentlyNotAutoResumed
181 | TriggerPriorityPolicy::ConvPreemptSubsequentlyAutoRestarted
182 | TriggerPriorityPolicy::ConvPreemptSubsequentlyAutoResumed => Tprictrl::FinishSequenceOnPriority,
183 _ => Tprictrl::AbortCurrentOnPriority,
184 })
185 .tres()
186 .variant(match config.trigger_priority_policy {
187 TriggerPriorityPolicy::ConvPreemptImmediatelyAutoRestarted
188 | TriggerPriorityPolicy::ConvPreemptSoftlyAutoRestarted
189 | TriggerPriorityPolicy::ConvPreemptImmediatelyAutoResumed
190 | TriggerPriorityPolicy::ConvPreemptSoftlyAutoResumed
191 | TriggerPriorityPolicy::ConvPreemptSubsequentlyAutoRestarted
192 | TriggerPriorityPolicy::ConvPreemptSubsequentlyAutoResumed => Tres::Enabled,
193 _ => Tres::Disabled,
194 })
195 .tcmdres()
196 .variant(match config.trigger_priority_policy {
197 TriggerPriorityPolicy::ConvPreemptImmediatelyAutoResumed
198 | TriggerPriorityPolicy::ConvPreemptSoftlyAutoResumed
199 | TriggerPriorityPolicy::ConvPreemptSubsequentlyAutoResumed
200 | TriggerPriorityPolicy::TriggerPriorityExceptionDisabled => Tcmdres::Enabled,
201 _ => Tcmdres::Disabled,
202 })
203 .hpt_exdi()
204 .variant(match config.trigger_priority_policy {
205 TriggerPriorityPolicy::TriggerPriorityExceptionDisabled => HptExdi::Disabled,
206 _ => HptExdi::Enabled,
207 })
208 });
209
210 if config.enable_conv_pause {
211 adc.pause()
212 .modify(|_, w| unsafe { w.pauseen().enabled().pausedly().bits(config.conv_pause_delay) });
213 } else {
214 adc.pause().write(|w| unsafe { w.bits(0) });
215 }
216
217 adc.fctrl0()
218 .write(|w| unsafe { w.fwmark().bits(config.fifo_watermark) });
219
220 // Enable ADC
221 adc.ctrl().modify(|_, w| w.adcen().enabled());
222
223 Self {
224 _inst: core::marker::PhantomData,
225 }
226 }
227
228 pub fn deinit(&self) {
229 let adc = unsafe { &*I::ptr() };
230 adc.ctrl().modify(|_, w| w.adcen().disabled());
231 }
232
233 pub fn do_offset_calibration(&self) {
234 let adc = unsafe { &*I::ptr() };
235 // Enable calibration mode
236 adc.ctrl()
237 .modify(|_, w| w.calofs().offset_calibration_request_pending());
238
239 // Wait for calibration to complete (polling status register)
240 while adc.stat().read().cal_rdy().is_not_set() {}
241 }
242
243 pub fn get_gain_conv_result(&self, mut gain_adjustment: f32) -> u32 {
244 let mut gcra_array = [0u32; 17];
245 let mut gcalr: u32 = 0;
246
247 for i in (1..=17).rev() {
248 let shift = 16 - (i - 1);
249 let step = 1.0 / (1u32 << shift) as f32;
250 let tmp = (gain_adjustment / step) as u32;
251 gcra_array[i - 1] = tmp;
252 gain_adjustment -= tmp as f32 * step;
253 }
254
255 for i in (1..=17).rev() {
256 gcalr += gcra_array[i - 1] << (i - 1);
257 }
258 gcalr
259 }
260
261 pub fn do_auto_calibration(&self) {
262 let adc = unsafe { &*I::ptr() };
263 adc.ctrl().modify(|_, w| w.cal_req().calibration_request_pending());
264
265 while adc.gcc0().read().rdy().is_gain_cal_not_valid() {}
266
267 let mut gcca = adc.gcc0().read().gain_cal().bits() as u32;
268 if gcca & ((0xFFFF + 1) >> 1) != 0 {
269 gcca |= !0xFFFF;
270 }
271
272 let gcra = 131072.0 / (131072.0 - gcca as f32);
273
274 // Write to GCR0
275 adc.gcr0().write(|w| unsafe { w.bits(self.get_gain_conv_result(gcra)) });
276
277 adc.gcr0().modify(|_, w| w.rdy().set_bit());
278
279 // Wait for calibration to complete (polling status register)
280 while adc.stat().read().cal_rdy().is_not_set() {}
281 }
282
283 pub fn do_software_trigger(&self, trigger_id_mask: u32) {
284 let adc = unsafe { &*I::ptr() };
285 adc.swtrig().write(|w| unsafe { w.bits(trigger_id_mask) });
286 }
287
288 pub fn get_default_conv_command_config(&self) -> ConvCommandConfig {
289 ConvCommandConfig {
290 sample_channel_mode: Ctype::SingleEndedASideChannel,
291 channel_number: Adch::SelectCh0,
292 chained_next_command_number: Next::NoNextCmdTerminateOnFinish,
293 enable_auto_channel_increment: false,
294 loop_count: 0,
295 hardware_average_mode: Avgs::NoAverage,
296 sample_time_mode: Sts::Sample3p5,
297 hardware_compare_mode: Cmpen::DisabledAlwaysStoreResult,
298 hardware_compare_value_high: 0,
299 hardware_compare_value_low: 0,
300 conversion_resolution_mode: Mode::Data12Bits,
301 enable_wait_trigger: false,
302 }
303 }
304
305 //TBD Need to add cmdlx and cmdhx with x {2..7}
306 pub fn set_conv_command_config(&self, index: u32, config: &ConvCommandConfig) {
307 let adc = unsafe { &*I::ptr() };
308
309 match index {
310 1 => {
311 adc.cmdl1().write(|w| {
312 w.adch()
313 .variant(config.channel_number)
314 .mode()
315 .variant(config.conversion_resolution_mode)
316 });
317 adc.cmdh1().write(|w| unsafe {
318 w.next()
319 .variant(config.chained_next_command_number)
320 .loop_()
321 .bits(config.loop_count)
322 .avgs()
323 .variant(config.hardware_average_mode)
324 .sts()
325 .variant(config.sample_time_mode)
326 .cmpen()
327 .variant(config.hardware_compare_mode);
328 if config.enable_wait_trigger {
329 w.wait_trig().enabled();
330 }
331 if config.enable_auto_channel_increment {
332 w.lwi().enabled();
333 }
334 w
335 });
336 }
337 _ => panic!("Invalid command index: must be between 1 and 7"),
338 }
339 }
340
341 pub fn get_default_conv_trigger_config(&self) -> ConvTriggerConfig {
342 ConvTriggerConfig {
343 target_command_id: Tcmd::NotValid,
344 delay_power: 0,
345 priority: Tpri::HighestPriority,
346 enable_hardware_trigger: false,
347 }
348 }
349
350 pub fn set_conv_trigger_config(&self, trigger_id: usize, config: &ConvTriggerConfig) {
351 let adc = unsafe { &*I::ptr() };
352 let tctrl = &adc.tctrl(trigger_id);
353
354 tctrl.write(|w| unsafe {
355 let w = w.tcmd().variant(config.target_command_id);
356 let w = w.tdly().bits(config.delay_power);
357 w.tpri().variant(config.priority);
358 if config.enable_hardware_trigger {
359 w.hten().enabled()
360 } else {
361 w
362 }
363 });
364 }
365
366 pub fn do_reset_fifo(&self) {
367 let adc = unsafe { &*I::ptr() };
368 adc.ctrl().modify(|_, w| w.rstfifo0().trigger_reset());
369 }
370
371 pub fn enable_interrupt(&self, mask: u32) {
372 let adc = unsafe { &*I::ptr() };
373 adc.ie().modify(|r, w| unsafe { w.bits(r.bits() | mask) });
374 INTERRUPT_TRIGGERED.store(false, Ordering::SeqCst);
375 }
376
377 pub fn is_interrupt_triggered(&self) -> bool {
378 INTERRUPT_TRIGGERED.load(Ordering::Relaxed)
379 }
380}
381
/// Pop one entry from ADC1's result FIFO (RESFIFO0).
///
/// Returns `None` when the FIFO has no valid entry (bit 31 clear). The
/// remaining fields are decoded as: bits 27:24 command id, bits 23:20 loop
/// count, bits 19:16 trigger id, bits 15:0 conversion value.
///
/// Note: this is hard-wired to ADC1 rather than being generic over
/// [`Instance`], so it can be called from the interrupt handler.
pub fn get_conv_result() -> Option<ConvResult> {
    let adc = unsafe { &*pac::Adc1::ptr() };
    // Reading RESFIFO0 pops the entry from the hardware FIFO.
    let fifo = adc.resfifo0().read().bits();
    const VALID_MASK: u32 = 1 << 31;
    if fifo & VALID_MASK == 0 {
        return None;
    }

    Some(ConvResult {
        command_id_source: (fifo >> 24) & 0x0F,
        loop_count_index: (fifo >> 20) & 0x0F,
        trigger_id_source: (fifo >> 16) & 0x0F,
        conv_value: (fifo & 0xFFFF) as u16,
    })
}
397
398pub fn on_interrupt() {
399 if get_conv_result().is_some() {
400 INTERRUPT_TRIGGERED.store(true, Ordering::SeqCst);
401 }
402}
403
/// Type-level interrupt handler binding the ADC1 vector to [`on_interrupt`].
pub struct AdcHandler;
impl crate::interrupt::typelevel::Handler<crate::interrupt::typelevel::ADC1> for AdcHandler {
    unsafe fn on_interrupt() {
        // Delegate to the safe, free-standing handler body.
        on_interrupt();
    }
}
diff --git a/embassy-mcxa/src/clkout.rs b/embassy-mcxa/src/clkout.rs
new file mode 100644
index 000000000..88c731df1
--- /dev/null
+++ b/embassy-mcxa/src/clkout.rs
@@ -0,0 +1,169 @@
1//! CLKOUT pseudo-peripheral
2//!
3//! CLKOUT is a part of the clock generation subsystem, and can be used
4//! either to generate arbitrary waveforms, or to debug the state of
5//! internal oscillators.
6
7use core::marker::PhantomData;
8
9use embassy_hal_internal::Peri;
10
11pub use crate::clocks::periph_helpers::Div4;
12use crate::clocks::{with_clocks, ClockError, PoweredClock};
13use crate::pac::mrcc0::mrcc_clkout_clksel::Mux;
14use crate::peripherals::CLKOUT;
15
16/// A peripheral representing the CLKOUT pseudo-peripheral
pub struct ClockOut<'a> {
    // Exclusive borrow of the CLKOUT token for 'a; prevents a second
    // ClockOut from being constructed while this one is alive.
    _p: PhantomData<&'a mut CLKOUT>,
    // Output frequency: the selected source frequency divided by the
    // configured divider (same units as reported by the clock tree).
    freq: u32,
}
21
22/// Selected clock source to output
23pub enum ClockOutSel {
24 /// 12MHz Internal Oscillator
25 Fro12M,
26 /// FRO180M Internal Oscillator, via divisor
27 FroHfDiv,
28 /// External Oscillator
29 ClkIn,
30 /// 16KHz oscillator
31 Clk16K,
32 /// Output of PLL1
33 Pll1Clk,
34 /// Main System CPU clock, divided by 6
35 SlowClk,
36}
37
38/// Configuration for the ClockOut
39pub struct Config {
40 /// Selected Source Clock
41 pub sel: ClockOutSel,
42 /// Selected division level
43 pub div: Div4,
44 /// Selected power level
45 pub level: PoweredClock,
46}
47
48impl<'a> ClockOut<'a> {
49 /// Create a new ClockOut pin. On success, the clock signal will begin immediately
50 /// on the given pin.
51 pub fn new(
52 _peri: Peri<'a, CLKOUT>,
53 pin: Peri<'a, impl sealed::ClockOutPin>,
54 cfg: Config,
55 ) -> Result<Self, ClockError> {
56 // There's no MRCC enable bit, so we check the validity of the clocks here
57 //
58 // TODO: Should we check that the frequency is suitably low?
59 let (freq, mux) = check_sel(cfg.sel, cfg.level)?;
60
61 // All good! Apply requested config, starting with the pin.
62 pin.mux();
63
64 setup_clkout(mux, cfg.div);
65
66 Ok(Self {
67 _p: PhantomData,
68 freq: freq / cfg.div.into_divisor(),
69 })
70 }
71
72 /// Frequency of the clkout pin
73 #[inline]
74 pub fn frequency(&self) -> u32 {
75 self.freq
76 }
77}
78
impl Drop for ClockOut<'_> {
    fn drop(&mut self) {
        // Stop the output when the handle goes away so the pin doesn't keep
        // toggling after the driver is released.
        disable_clkout();
    }
}
84
85/// Check whether the given clock selection is valid
86fn check_sel(sel: ClockOutSel, level: PoweredClock) -> Result<(u32, Mux), ClockError> {
87 let res = with_clocks(|c| {
88 Ok(match sel {
89 ClockOutSel::Fro12M => (c.ensure_fro_hf_active(&level)?, Mux::Clkroot12m),
90 ClockOutSel::FroHfDiv => (c.ensure_fro_hf_div_active(&level)?, Mux::ClkrootFircDiv),
91 ClockOutSel::ClkIn => (c.ensure_clk_in_active(&level)?, Mux::ClkrootSosc),
92 ClockOutSel::Clk16K => (c.ensure_clk_16k_vdd_core_active(&level)?, Mux::Clkroot16k),
93 ClockOutSel::Pll1Clk => (c.ensure_pll1_clk_active(&level)?, Mux::ClkrootSpll),
94 ClockOutSel::SlowClk => (c.ensure_slow_clk_active(&level)?, Mux::ClkrootSlow),
95 })
96 });
97 let Some(res) = res else {
98 return Err(ClockError::NeverInitialized);
99 };
100 res
101}
102
103/// Set up the clkout pin using the given mux and div settings
104fn setup_clkout(mux: Mux, div: Div4) {
105 let mrcc = unsafe { crate::pac::Mrcc0::steal() };
106
107 mrcc.mrcc_clkout_clksel().write(|w| w.mux().variant(mux));
108
109 // Set up clkdiv
110 mrcc.mrcc_clkout_clkdiv().write(|w| {
111 w.halt().set_bit();
112 w.reset().set_bit();
113 unsafe { w.div().bits(div.into_bits()) };
114 w
115 });
116 mrcc.mrcc_clkout_clkdiv().write(|w| {
117 w.halt().clear_bit();
118 w.reset().clear_bit();
119 unsafe { w.div().bits(div.into_bits()) };
120 w
121 });
122
123 while mrcc.mrcc_clkout_clkdiv().read().unstab().bit_is_set() {}
124}
125
126/// Stop the clkout
127fn disable_clkout() {
128 // Stop the output by selecting the "none" clock
129 //
130 // TODO: restore the pin to hi-z or something?
131 let mrcc = unsafe { crate::pac::Mrcc0::steal() };
132 mrcc.mrcc_clkout_clkdiv().write(|w| {
133 w.reset().set_bit();
134 w.halt().set_bit();
135 unsafe {
136 w.div().bits(0);
137 }
138 w
139 });
140 mrcc.mrcc_clkout_clksel().write(|w| unsafe { w.bits(0b111) });
141}
142
mod sealed {
    use embassy_hal_internal::PeripheralType;

    use crate::gpio::{Pull, SealedPin};

    /// Sealed marker trait for clockout pins
    pub trait ClockOutPin: PeripheralType {
        /// Set the given pin to the correct muxing state
        fn mux(&self);
    }

    // Implements `ClockOutPin` for a GPIO pin: selects the given pinmux
    // alternate function and disables the pull resistor.
    macro_rules! impl_pin {
        ($pin:ident, $func:ident) => {
            impl ClockOutPin for crate::peripherals::$pin {
                fn mux(&self) {
                    self.set_function(crate::pac::port0::pcr0::Mux::$func);
                    self.set_pull(Pull::Disabled);
                }
            }
        };
    }

    // The set of pins that can carry the CLKOUT signal, each with its
    // required alternate-function number.
    impl_pin!(P0_6, Mux12);
    impl_pin!(P3_6, Mux1);
    impl_pin!(P3_8, Mux12);
    impl_pin!(P4_2, Mux1);
}
diff --git a/embassy-mcxa/src/clocks/config.rs b/embassy-mcxa/src/clocks/config.rs
new file mode 100644
index 000000000..0563b8917
--- /dev/null
+++ b/embassy-mcxa/src/clocks/config.rs
@@ -0,0 +1,204 @@
1//! Clock Configuration
2//!
3//! This module holds configuration types used for the system clocks. For
4//! configuration of individual peripherals, see [`super::periph_helpers`].
5
6use super::PoweredClock;
7
8/// This type represents a divider in the range 1..=256.
9///
10/// At a hardware level, this is an 8-bit register from 0..=255,
11/// which adds one.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Div8(pub(super) u8); // raw hardware value: divides the source by (value + 1)
14
15impl Div8 {
16 /// Store a "raw" divisor value that will divide the source by
17 /// `(n + 1)`, e.g. `Div8::from_raw(0)` will divide the source
18 /// by 1, and `Div8::from_raw(255)` will divide the source by
19 /// 256.
20 pub const fn from_raw(n: u8) -> Self {
21 Self(n)
22 }
23
24 /// Divide by one, or no division
25 pub const fn no_div() -> Self {
26 Self(0)
27 }
28
29 /// Store a specific divisor value that will divide the source
30 /// by `n`. e.g. `Div8::from_divisor(1)` will divide the source
31 /// by 1, and `Div8::from_divisor(256)` will divide the source
32 /// by 256.
33 ///
34 /// Will return `None` if `n` is not in the range `1..=256`.
35 /// Consider [`Self::from_raw`] for an infallible version.
36 pub const fn from_divisor(n: u16) -> Option<Self> {
37 let Some(n) = n.checked_sub(1) else {
38 return None;
39 };
40 if n > (u8::MAX as u16) {
41 return None;
42 }
43 Some(Self(n as u8))
44 }
45
46 /// Convert into "raw" bits form
47 #[inline(always)]
48 pub const fn into_bits(self) -> u8 {
49 self.0
50 }
51
52 /// Convert into "divisor" form, as a u32 for convenient frequency math
53 #[inline(always)]
54 pub const fn into_divisor(self) -> u32 {
55 self.0 as u32 + 1
56 }
57}
58
59/// ```text
60/// ┌─────────────────────────────────────────────────────────┐
61/// │ │
62/// │ ┌───────────┐ clk_out ┌─────────┐ │
63/// XTAL ──────┼──▷│ System │───────────▷│ │ clk_in │
64/// │ │ OSC │ clkout_byp │ MUX │──────────────────┼──────▷
65/// EXTAL ──────┼──▷│ │───────────▷│ │ │
66/// │ └───────────┘ └─────────┘ │
67/// │ │
68/// │ ┌───────────┐ fro_hf_root ┌────┐ fro_hf │
69/// │ │ FRO180 ├───────┬─────▷│ CG │─────────────────────┼──────▷
70/// │ │ │ │ ├────┤ clk_45m │
71/// │ │ │ └─────▷│ CG │─────────────────────┼──────▷
72/// │ └───────────┘ └────┘ │
73/// │ ┌───────────┐ fro_12m_root ┌────┐ fro_12m │
74/// │ │ FRO12M │────────┬─────▷│ CG │────────────────────┼──────▷
75/// │ │ │ │ ├────┤ clk_1m │
76/// │ │ │ └─────▷│1/12│────────────────────┼──────▷
77/// │ └───────────┘ └────┘ │
78/// │ │
79/// │ ┌──────────┐ │
80/// │ │000 │ │
81/// │ clk_in │ │ │
82/// │ ───────────────▷│001 │ │
83/// │ fro_12m │ │ │
84/// │ ───────────────▷│010 │ │
85/// │ fro_hf_root │ │ │
86/// │ ───────────────▷│011 │ main_clk │
87/// │ │ │───────────────────────────┼──────▷
88/// clk_16k ──────┼─────────────────▷│100 │ │
89/// │ none │ │ │
90/// │ ───────────────▷│101 │ │
91/// │ pll1_clk │ │ │
92/// │ ───────────────▷│110 │ │
93/// │ none │ │ │
94/// │ ───────────────▷│111 │ │
95/// │ └──────────┘ │
96/// │ ▲ │
97/// │ │ │
98/// │ SCG SCS │
99/// │ SCG-Lite │
100/// └─────────────────────────────────────────────────────────┘
101///
102///
103/// clk_in ┌─────┐
104/// ───────────────▷│00 │
105/// clk_45m │ │
106/// ───────────────▷│01 │ ┌───────────┐ pll1_clk
107/// none │ │─────▷│ SPLL │───────────────▷
108/// ───────────────▷│10 │ └───────────┘
109/// fro_12m │ │
110/// ───────────────▷│11 │
111/// └─────┘
112/// ```
/// Top-level system clock configuration, consumed at `init()` time.
/// See the module-level diagram above for how these sources relate.
#[non_exhaustive]
pub struct ClocksConfig {
    /// FIRC, FRO180, 45/60/90/180M clock source
    pub firc: Option<FircConfig>,
    /// SIRC, FRO12M, clk_12m clock source
    // NOTE: I don't think we *can* disable the SIRC?
    pub sirc: SircConfig,
    /// FRO16K clock source
    pub fro16k: Option<Fro16KConfig>,
}
123
124// FIRC/FRO180M
125
126/// ```text
127/// ┌───────────┐ fro_hf_root ┌────┐ fro_hf
128/// │ FRO180M ├───────┬─────▷│GATE│──────────▷
129/// │ │ │ ├────┤ clk_45m
130/// │ │ └─────▷│GATE│──────────▷
131/// └───────────┘ └────┘
132/// ```
#[non_exhaustive]
pub struct FircConfig {
    /// Selected clock frequency
    pub frequency: FircFreqSel,
    /// Selected power state of the clock
    pub power: PoweredClock,
    /// Is the "fro_hf" gated clock enabled?
    pub fro_hf_enabled: bool,
    /// Is the "clk_45m" gated clock enabled?
    pub clk_45m_enabled: bool,
    /// Is the "fro_hf_div" clock enabled? Requires `fro_hf`!
    /// `Some(div)` enables the divided output with the given divisor.
    pub fro_hf_div: Option<Div8>,
}
146
/// Selected FIRC frequency
pub enum FircFreqSel {
    /// 45MHz Output
    Mhz45,
    /// 60MHz Output
    Mhz60,
    /// 90MHz Output
    Mhz90,
    /// 180MHz Output
    Mhz180,
}
158
159// SIRC/FRO12M
160
161/// ```text
162/// ┌───────────┐ fro_12m_root ┌────┐ fro_12m
163/// │ FRO12M │────────┬─────▷│ CG │──────────▷
164/// │ │ │ ├────┤ clk_1m
165/// │ │ └─────▷│1/12│──────────▷
166/// └───────────┘ └────┘
167/// ```
#[non_exhaustive]
pub struct SircConfig {
    /// Selected power state of the FRO12M oscillator.
    pub power: PoweredClock,
    /// Is the "fro_12m" peripheral output (aka sirc_12mhz) enabled?
    pub fro_12m_enabled: bool,
    /// Is the "fro_lf_div" clock enabled? Requires `fro_12m`!
    pub fro_lf_div: Option<Div8>,
}
176
#[non_exhaustive]
pub struct Fro16KConfig {
    /// Drive the FRO16K output into the VSYS power domain.
    pub vsys_domain_active: bool,
    /// Drive the FRO16K output into the VDD_CORE power domain.
    pub vdd_core_domain_active: bool,
}
182
183impl Default for ClocksConfig {
184 fn default() -> Self {
185 Self {
186 firc: Some(FircConfig {
187 frequency: FircFreqSel::Mhz45,
188 power: PoweredClock::NormalEnabledDeepSleepDisabled,
189 fro_hf_enabled: true,
190 clk_45m_enabled: true,
191 fro_hf_div: None,
192 }),
193 sirc: SircConfig {
194 power: PoweredClock::AlwaysEnabled,
195 fro_12m_enabled: true,
196 fro_lf_div: None,
197 },
198 fro16k: Some(Fro16KConfig {
199 vsys_domain_active: true,
200 vdd_core_domain_active: true,
201 }),
202 }
203 }
204}
diff --git a/embassy-mcxa/src/clocks/mod.rs b/embassy-mcxa/src/clocks/mod.rs
new file mode 100644
index 000000000..ac30115f6
--- /dev/null
+++ b/embassy-mcxa/src/clocks/mod.rs
@@ -0,0 +1,950 @@
1//! # Clock Module
2//!
3//! For the MCX-A, we separate clock and peripheral control into two main stages:
4//!
5//! 1. At startup, e.g. when `embassy_mcxa::init()` is called, we configure the
6//! core system clocks, including external and internal oscillators. This
7//! configuration is then largely static for the duration of the program.
8//! 2. When HAL drivers are created, e.g. `Lpuart::new()` is called, the driver
9//! is responsible for two main things:
10//! * Ensuring that any required "upstream" core system clocks necessary for
11//! clocking the peripheral is active and configured to a reasonable value
12//! * Enabling the clock gates for that peripheral, and resetting the peripheral
13//!
14//! From a user perspective, only step 1 is visible. Step 2 is automatically handled
15//! by HAL drivers, using interfaces defined in this module.
16//!
17//! It is also possible to *view* the state of the clock configuration after [`init()`]
18//! has been called, using the [`with_clocks()`] function, which provides a view of the
19//! [`Clocks`] structure.
20//!
21//! ## For HAL driver implementors
22//!
23//! The majority of peripherals in the MCXA chip are fed from either a "hard-coded" or
//! configurable clock source, e.g. selecting the FRO12M or `clk_1m` as a source. This
25//! selection, as well as often any pre-scaler division from that source clock, is made
26//! through MRCC registers.
27//!
28//! Any peripheral that is controlled through the MRCC register can automatically implement
29//! the necessary APIs using the `impl_cc_gate!` macro in this module. You will also need
30//! to define the configuration surface and steps necessary to fully configure that peripheral
31//! from a clocks perspective by:
32//!
33//! 1. Defining a configuration type in the [`periph_helpers`] module that contains any selects
34//! or divisions available to the HAL driver
35//! 2. Implementing the [`periph_helpers::SPConfHelper`] trait, which should check that the
36//! necessary input clocks are reasonable
37
38use core::cell::RefCell;
39
40use config::{ClocksConfig, FircConfig, FircFreqSel, Fro16KConfig, SircConfig};
41use mcxa_pac::scg0::firccsr::{FircFclkPeriphEn, FircSclkPeriphEn, Fircsten};
42use mcxa_pac::scg0::sirccsr::{SircClkPeriphEn, Sircsten};
43use periph_helpers::SPConfHelper;
44
45use crate::pac;
46pub mod config;
47pub mod periph_helpers;
48
49//
50// Statics/Consts
51//
52
/// The state of system core clocks.
///
/// Initialized by [`init()`], and then unchanged for the remainder of the program.
/// Accessed only under a `critical_section` lock, via [`init()`], [`with_clocks()`],
/// and [`enable()`].
static CLOCKS: critical_section::Mutex<RefCell<Option<Clocks>>> = critical_section::Mutex::new(RefCell::new(None));
57
58//
59// Free functions
60//
61
62/// Initialize the core system clocks with the given [`ClocksConfig`].
63///
64/// This function should be called EXACTLY once at start-up, usually via a
65/// call to [`embassy_mcxa::init()`](crate::init()). Subsequent calls will
66/// return an error.
67pub fn init(settings: ClocksConfig) -> Result<(), ClockError> {
68 critical_section::with(|cs| {
69 if CLOCKS.borrow_ref(cs).is_some() {
70 Err(ClockError::AlreadyInitialized)
71 } else {
72 Ok(())
73 }
74 })?;
75
76 let mut clocks = Clocks::default();
77 let mut operator = ClockOperator {
78 clocks: &mut clocks,
79 config: &settings,
80
81 _mrcc0: unsafe { pac::Mrcc0::steal() },
82 scg0: unsafe { pac::Scg0::steal() },
83 syscon: unsafe { pac::Syscon::steal() },
84 vbat0: unsafe { pac::Vbat0::steal() },
85 };
86
87 operator.configure_firc_clocks()?;
88 operator.configure_sirc_clocks()?;
89 operator.configure_fro16k_clocks()?;
90
91 // For now, just use FIRC as the main/cpu clock, which should already be
92 // the case on reset
93 assert!(operator.scg0.rccr().read().scs().is_firc());
94 let input = operator.clocks.fro_hf_root.clone().unwrap();
95 operator.clocks.main_clk = Some(input.clone());
96 // We can also assume cpu/system clk == fro_hf because div is /1.
97 assert_eq!(operator.syscon.ahbclkdiv().read().div().bits(), 0);
98 operator.clocks.cpu_system_clk = Some(input);
99
100 critical_section::with(|cs| {
101 let mut clks = CLOCKS.borrow_ref_mut(cs);
102 assert!(clks.is_none(), "Clock setup race!");
103 *clks = Some(clocks);
104 });
105
106 Ok(())
107}
108
109/// Obtain the full clocks structure, calling the given closure in a critical section.
110///
111/// The given closure will be called with read-only access to the state of the system
112/// clocks. This can be used to query and return the state of a given clock.
113///
114/// As the caller's closure will be called in a critical section, care must be taken
115/// not to block or cause any other undue delays while accessing.
116///
117/// Calls to this function will not succeed until after a successful call to `init()`,
118/// and will always return None.
119pub fn with_clocks<R: 'static, F: FnOnce(&Clocks) -> R>(f: F) -> Option<R> {
120 critical_section::with(|cs| {
121 let c = CLOCKS.borrow_ref(cs);
122 let c = c.as_ref()?;
123 Some(f(c))
124 })
125}
126
127//
128// Structs/Enums
129//
130
/// The `Clocks` structure contains the initialized state of the core system clocks
///
/// These values are configured by providing [`config::ClocksConfig`] to the [`init()`] function
/// at boot time. A `None` field means the corresponding clock was not enabled.
#[derive(Default, Debug, Clone)]
#[non_exhaustive]
pub struct Clocks {
    /// The `clk_in` is a clock provided by an external oscillator
    pub clk_in: Option<Clock>,

    // FRO180M stuff
    //
    /// `fro_hf_root` is the direct output of the `FRO180M` internal oscillator
    ///
    /// It is used to feed downstream clocks, such as `fro_hf`, `clk_45m`,
    /// and `fro_hf_div`.
    pub fro_hf_root: Option<Clock>,

    /// `fro_hf` is the same frequency as `fro_hf_root`, but behind a gate.
    pub fro_hf: Option<Clock>,

    /// `clk_45m` is a 45MHz clock, sourced from `fro_hf`.
    pub clk_45m: Option<Clock>,

    /// `fro_hf_div` is a configurable frequency clock, sourced from `fro_hf`.
    pub fro_hf_div: Option<Clock>,

    //
    // End FRO180M

    // FRO12M stuff
    //
    /// `fro_12m_root` is the direct output of the `FRO12M` internal oscillator
    ///
    /// It is used to feed downstream clocks, such as `fro_12m`, `clk_1m`,
    /// and `fro_lf_div`.
    pub fro_12m_root: Option<Clock>,

    /// `fro_12m` is the same frequency as `fro_12m_root`, but behind a gate.
    pub fro_12m: Option<Clock>,

    /// `clk_1m` is a 1MHz clock, sourced from `fro_12m`
    pub clk_1m: Option<Clock>,

    /// `fro_lf_div` is a configurable frequency clock, sourced from `fro_12m`
    pub fro_lf_div: Option<Clock>,
    //
    // End FRO12M stuff
    /// `clk_16k_vsys` is one of two outputs of the `FRO16K` internal oscillator.
    ///
    /// Also referred to as `clk_16k[0]` in the datasheet, it feeds peripherals in
    /// the system domain, such as the CMP and RTC.
    pub clk_16k_vsys: Option<Clock>,

    /// `clk_16k_vdd_core` is one of two outputs of the `FRO16K` internal oscillator.
    ///
    /// Also referred to as `clk_16k[1]` in the datasheet, it feeds peripherals in
    /// the VDD Core domain, such as the OSTimer or LPUarts.
    pub clk_16k_vdd_core: Option<Clock>,

    /// `main_clk` is the main clock used by the CPU, AHB, APB, IPS bus, and some
    /// peripherals.
    pub main_clk: Option<Clock>,

    /// `CPU_CLK` or `SYSTEM_CLK` is the output of `main_clk`, run through the `AHBCLKDIV`
    /// divider.
    pub cpu_system_clk: Option<Clock>,

    /// `pll1_clk` is the output of the main system PLL, `pll1`.
    pub pll1_clk: Option<Clock>,
}
201
/// `ClockError` is the main error returned when configuring or checking clock state
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum ClockError {
    /// The system clocks were never initialized by calling [`init()`]
    NeverInitialized,
    /// The [`init()`] function was called more than once
    AlreadyInitialized,
    /// The requested configuration was not possible to fulfill, as the system clocks
    /// were not configured in a compatible way
    ///
    /// `clock` names the offending clock; `reason` is a short human-readable cause.
    BadConfig { clock: &'static str, reason: &'static str },
    /// The requested configuration was not possible to fulfill, as the required system
    /// clocks have not yet been implemented.
    NotImplemented { clock: &'static str },
    /// The requested peripheral could not be configured, as the steps necessary to
    /// enable it have not yet been implemented.
    UnimplementedConfig,
}
221
/// Information regarding a system clock
///
/// One of these is stored in the [`Clocks`] tree for every clock that was
/// enabled at [`init()`] time.
#[derive(Debug, Clone)]
pub struct Clock {
    /// The frequency, in Hz, of the given clock
    pub frequency: u32,
    /// The power state of the clock, e.g. whether it is active in deep sleep mode
    /// or not.
    pub power: PoweredClock,
}
231
/// The power state of a given clock.
///
/// On the MCX-A, when Deep-Sleep is entered, any clock not configured for Deep Sleep
/// mode will be stopped. This means that any downstream usage, e.g. by peripherals,
/// will also stop.
///
/// In the future, we will provide an API for entering Deep Sleep, and if there are
/// any peripherals that are NOT using an `AlwaysEnabled` clock active, entry into
/// Deep Sleep will be prevented, in order to avoid misbehaving peripherals.
///
/// See [`PoweredClock::meets_requirement_of()`] for comparing two power states.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PoweredClock {
    /// The given clock will NOT continue running in Deep Sleep mode
    NormalEnabledDeepSleepDisabled,
    /// The given clock WILL continue running in Deep Sleep mode
    AlwaysEnabled,
}
248
/// The ClockOperator is a private helper type that contains the methods used
/// during system clock initialization.
///
/// # SAFETY
///
/// Concurrent access to clock-relevant peripheral registers, such as `MRCC`, `SCG`,
/// `SYSCON`, and `VBAT` should not be allowed for the duration of the [`init()`] function.
struct ClockOperator<'a> {
    /// A mutable reference to the current state of system clocks
    clocks: &'a mut Clocks,
    /// A reference to the requested configuration provided by the caller of [`init()`]
    config: &'a ClocksConfig,

    // We hold on to stolen peripherals for the duration of init. `_mrcc0` is
    // not accessed directly (hence the leading underscore), but is held to
    // document that init claims the MRCC alongside the other clock registers.
    _mrcc0: pac::Mrcc0,
    scg0: pac::Scg0,
    syscon: pac::Syscon,
    vbat0: pac::Vbat0,
}
268
/// Trait describing an AHB clock gate that can be toggled through MRCC.
///
/// Implemented on peripheral singleton types, usually via the `impl_cc_gate!`
/// macro. HAL drivers should not call these hooks directly; use the free
/// functions in this module ([`enable_and_reset`], [`enable`], etc.) instead.
pub trait Gate {
    /// The clocking configuration type for this peripheral; its
    /// [`SPConfHelper::post_enable_config()`] is run after the gate opens.
    type MrccPeriphConfig: SPConfHelper;

    /// Enable the clock gate.
    ///
    /// # SAFETY
    ///
    /// The current peripheral must be disabled prior to calling this method
    unsafe fn enable_clock();

    /// Disable the clock gate.
    ///
    /// # SAFETY
    ///
    /// There must be no active user of this peripheral when calling this method
    unsafe fn disable_clock();

    /// Drive the peripheral into reset.
    ///
    /// # SAFETY
    ///
    /// There must be no active user of this peripheral when calling this method
    unsafe fn assert_reset();

    /// Drive the peripheral out of reset.
    ///
    /// # SAFETY
    ///
    /// There must be no active user of this peripheral when calling this method
    unsafe fn release_reset();

    /// Return whether the clock gate for this peripheral is currently enabled.
    fn is_clock_enabled() -> bool;

    /// Return whether the peripheral is currently held in reset.
    fn is_reset_released() -> bool;
}
307
308/// This is the primary helper method HAL drivers are expected to call when creating
309/// an instance of the peripheral.
310///
311/// This method:
312///
313/// 1. Enables the MRCC clock gate for this peripheral
314/// 2. Calls the `G::MrccPeriphConfig::post_enable_config()` method, returning an error
315/// and re-disabling the peripheral if this fails.
316/// 3. Pulses the MRCC reset line, to reset the peripheral to the default state
317/// 4. Returns the frequency, in Hz that is fed into the peripheral, taking into account
318/// the selected upstream clock, as well as any division specified by `cfg`.
319///
320/// NOTE: if a clock is disabled, sourced from an "ambient" clock source, this method
321/// may return `Ok(0)`. In the future, this might be updated to return the correct
322/// "ambient" clock, e.g. the AHB/APB frequency.
323///
324/// # SAFETY
325///
326/// This peripheral must not yet be in use prior to calling `enable_and_reset`.
327#[inline]
328pub unsafe fn enable_and_reset<G: Gate>(cfg: &G::MrccPeriphConfig) -> Result<u32, ClockError> {
329 let freq = enable::<G>(cfg).inspect_err(|_| disable::<G>())?;
330 pulse_reset::<G>();
331 Ok(freq)
332}
333
334/// Enable the clock gate for the given peripheral.
335///
336/// Prefer [`enable_and_reset`] unless you are specifically avoiding a pulse of the reset, or need
337/// to control the duration of the pulse more directly.
338///
339/// # SAFETY
340///
341/// This peripheral must not yet be in use prior to calling `enable`.
342#[inline]
343pub unsafe fn enable<G: Gate>(cfg: &G::MrccPeriphConfig) -> Result<u32, ClockError> {
344 G::enable_clock();
345 while !G::is_clock_enabled() {}
346 core::arch::asm!("dsb sy; isb sy", options(nomem, nostack, preserves_flags));
347
348 let freq = critical_section::with(|cs| {
349 let clocks = CLOCKS.borrow_ref(cs);
350 let clocks = clocks.as_ref().ok_or(ClockError::NeverInitialized)?;
351 cfg.post_enable_config(clocks)
352 });
353
354 freq.inspect_err(|_e| {
355 G::disable_clock();
356 })
357}
358
/// Disable the clock gate for the given peripheral.
///
/// # SAFETY
///
/// This peripheral must no longer be in use prior to calling `disable`.
#[allow(dead_code)]
#[inline]
pub unsafe fn disable<G: Gate>() {
    G::disable_clock();
}
369
/// Check whether a gate is currently enabled.
///
/// This is a read-only query of the MRCC gate state; it is safe to call at any time.
#[allow(dead_code)]
#[inline]
pub fn is_clock_enabled<G: Gate>() -> bool {
    G::is_clock_enabled()
}
376
/// Release a reset line for the given peripheral set, taking the peripheral
/// out of reset.
///
/// Prefer [`enable_and_reset`].
///
/// # SAFETY
///
/// This peripheral must not yet be in use prior to calling `release_reset`.
#[inline]
pub unsafe fn release_reset<G: Gate>() {
    G::release_reset();
}
388
/// Assert a reset line for the given peripheral set, driving the peripheral
/// into reset.
///
/// Prefer [`enable_and_reset`].
///
/// # SAFETY
///
/// This peripheral must not yet be in use prior to calling `assert_reset`.
#[inline]
pub unsafe fn assert_reset<G: Gate>() {
    G::assert_reset();
}
400
/// Check whether the peripheral is held in reset.
///
/// Returns `true` when the reset line has been released (the peripheral is
/// out of reset).
///
/// # Safety
///
/// Must be called with a valid peripheral gate type.
// NOTE(review): this is a read-only query like `is_clock_enabled`; it is not
// obvious why it needs to be `unsafe` — confirm whether it can be made safe.
#[inline]
pub unsafe fn is_reset_released<G: Gate>() -> bool {
    G::is_reset_released()
}
410
/// Pulse a reset line (assert then release) with a short delay.
///
/// Prefer [`enable_and_reset`].
///
/// # SAFETY
///
/// This peripheral must not yet be in use prior to calling `pulse_reset`.
#[inline]
pub unsafe fn pulse_reset<G: Gate>() {
    G::assert_reset();
    // Two NOPs provide a brief delay so the reset assertion can be observed
    // before the line is released again.
    cortex_m::asm::nop();
    cortex_m::asm::nop();
    G::release_reset();
}
425
426//
427// `impl`s for structs/enums
428//
429
430/// The [`Clocks`] type's methods generally take the form of "ensure X clock is active".
431///
432/// These methods are intended to be used by HAL peripheral implementors to ensure that their
433/// selected clocks are active at a suitable level at time of construction. These methods
434/// return the frequency of the requested clock, in Hertz, or a [`ClockError`].
435impl Clocks {
436 /// Ensure the `fro_lf_div` clock is active and valid at the given power state.
437 pub fn ensure_fro_lf_div_active(&self, at_level: &PoweredClock) -> Result<u32, ClockError> {
438 let Some(clk) = self.fro_lf_div.as_ref() else {
439 return Err(ClockError::BadConfig {
440 clock: "fro_lf_div",
441 reason: "required but not active",
442 });
443 };
444 if !clk.power.meets_requirement_of(at_level) {
445 return Err(ClockError::BadConfig {
446 clock: "fro_lf_div",
447 reason: "not low power active",
448 });
449 }
450 Ok(clk.frequency)
451 }
452
453 /// Ensure the `fro_hf` clock is active and valid at the given power state.
454 pub fn ensure_fro_hf_active(&self, at_level: &PoweredClock) -> Result<u32, ClockError> {
455 let Some(clk) = self.fro_hf.as_ref() else {
456 return Err(ClockError::BadConfig {
457 clock: "fro_hf",
458 reason: "required but not active",
459 });
460 };
461 if !clk.power.meets_requirement_of(at_level) {
462 return Err(ClockError::BadConfig {
463 clock: "fro_hf",
464 reason: "not low power active",
465 });
466 }
467 Ok(clk.frequency)
468 }
469
470 /// Ensure the `fro_hf_div` clock is active and valid at the given power state.
471 pub fn ensure_fro_hf_div_active(&self, at_level: &PoweredClock) -> Result<u32, ClockError> {
472 let Some(clk) = self.fro_hf_div.as_ref() else {
473 return Err(ClockError::BadConfig {
474 clock: "fro_hf_div",
475 reason: "required but not active",
476 });
477 };
478 if !clk.power.meets_requirement_of(at_level) {
479 return Err(ClockError::BadConfig {
480 clock: "fro_hf_div",
481 reason: "not low power active",
482 });
483 }
484 Ok(clk.frequency)
485 }
486
487 /// Ensure the `clk_in` clock is active and valid at the given power state.
488 pub fn ensure_clk_in_active(&self, _at_level: &PoweredClock) -> Result<u32, ClockError> {
489 Err(ClockError::NotImplemented { clock: "clk_in" })
490 }
491
492 /// Ensure the `clk_16k_vsys` clock is active and valid at the given power state.
493 pub fn ensure_clk_16k_vsys_active(&self, _at_level: &PoweredClock) -> Result<u32, ClockError> {
494 // NOTE: clk_16k is always active in low power mode
495 Ok(self
496 .clk_16k_vsys
497 .as_ref()
498 .ok_or(ClockError::BadConfig {
499 clock: "clk_16k_vsys",
500 reason: "required but not active",
501 })?
502 .frequency)
503 }
504
505 /// Ensure the `clk_16k_vdd_core` clock is active and valid at the given power state.
506 pub fn ensure_clk_16k_vdd_core_active(&self, _at_level: &PoweredClock) -> Result<u32, ClockError> {
507 // NOTE: clk_16k is always active in low power mode
508 Ok(self
509 .clk_16k_vdd_core
510 .as_ref()
511 .ok_or(ClockError::BadConfig {
512 clock: "clk_16k_vdd_core",
513 reason: "required but not active",
514 })?
515 .frequency)
516 }
517
518 /// Ensure the `clk_1m` clock is active and valid at the given power state.
519 pub fn ensure_clk_1m_active(&self, at_level: &PoweredClock) -> Result<u32, ClockError> {
520 let Some(clk) = self.clk_1m.as_ref() else {
521 return Err(ClockError::BadConfig {
522 clock: "clk_1m",
523 reason: "required but not active",
524 });
525 };
526 if !clk.power.meets_requirement_of(at_level) {
527 return Err(ClockError::BadConfig {
528 clock: "clk_1m",
529 reason: "not low power active",
530 });
531 }
532 Ok(clk.frequency)
533 }
534
535 /// Ensure the `pll1_clk` clock is active and valid at the given power state.
536 pub fn ensure_pll1_clk_active(&self, _at_level: &PoweredClock) -> Result<u32, ClockError> {
537 Err(ClockError::NotImplemented { clock: "pll1_clk" })
538 }
539
540 /// Ensure the `pll1_clk_div` clock is active and valid at the given power state.
541 pub fn ensure_pll1_clk_div_active(&self, _at_level: &PoweredClock) -> Result<u32, ClockError> {
542 Err(ClockError::NotImplemented { clock: "pll1_clk_div" })
543 }
544
545 /// Ensure the `CPU_CLK` or `SYSTEM_CLK` is active
546 pub fn ensure_cpu_system_clk_active(&self, at_level: &PoweredClock) -> Result<u32, ClockError> {
547 let Some(clk) = self.cpu_system_clk.as_ref() else {
548 return Err(ClockError::BadConfig {
549 clock: "cpu_system_clk",
550 reason: "required but not active",
551 });
552 };
553 // Can the main_clk ever be active in deep sleep? I think it is gated?
554 match at_level {
555 PoweredClock::NormalEnabledDeepSleepDisabled => {}
556 PoweredClock::AlwaysEnabled => {
557 return Err(ClockError::BadConfig {
558 clock: "main_clk",
559 reason: "not low power active",
560 });
561 }
562 }
563
564 Ok(clk.frequency)
565 }
566
567 pub fn ensure_slow_clk_active(&self, at_level: &PoweredClock) -> Result<u32, ClockError> {
568 let freq = self.ensure_cpu_system_clk_active(at_level)?;
569
570 Ok(freq / 6)
571 }
572}
573
574impl PoweredClock {
575 /// Does THIS clock meet the power requirements of the OTHER clock?
576 pub fn meets_requirement_of(&self, other: &Self) -> bool {
577 match (self, other) {
578 (PoweredClock::NormalEnabledDeepSleepDisabled, PoweredClock::AlwaysEnabled) => false,
579 (PoweredClock::NormalEnabledDeepSleepDisabled, PoweredClock::NormalEnabledDeepSleepDisabled) => true,
580 (PoweredClock::AlwaysEnabled, PoweredClock::NormalEnabledDeepSleepDisabled) => true,
581 (PoweredClock::AlwaysEnabled, PoweredClock::AlwaysEnabled) => true,
582 }
583 }
584}
585
586impl ClockOperator<'_> {
587 /// Configure the FIRC/FRO180M clock family
588 ///
589 /// NOTE: Currently we require this to be a fairly hardcoded value, as this clock is used
590 /// as the main clock used for the CPU, AHB, APB, etc.
591 fn configure_firc_clocks(&mut self) -> Result<(), ClockError> {
592 const HARDCODED_ERR: Result<(), ClockError> = Err(ClockError::BadConfig {
593 clock: "firc",
594 reason: "For now, FIRC must be enabled and in default state!",
595 });
596
597 // Did the user give us a FIRC config?
598 let Some(firc) = self.config.firc.as_ref() else {
599 return HARDCODED_ERR;
600 };
601 // Is the FIRC set to 45MHz (should be reset default)
602 if !matches!(firc.frequency, FircFreqSel::Mhz45) {
603 return HARDCODED_ERR;
604 }
605 let base_freq = 45_000_000;
606
607 // Now, check if the FIRC as expected for our hardcoded value
608 let mut firc_ok = true;
609
610 // Is the hardware currently set to the default 45MHz?
611 //
612 // NOTE: the SVD currently has the wrong(?) values for these:
613 // 45 -> 48
614 // 60 -> 64
615 // 90 -> 96
616 // 180 -> 192
617 // Probably correct-ish, but for a different trim value?
618 firc_ok &= self.scg0.firccfg().read().freq_sel().is_firc_48mhz_192s();
619
620 // Check some values in the CSR
621 let csr = self.scg0.firccsr().read();
622 // Is it enabled?
623 firc_ok &= csr.fircen().is_enabled();
624 // Is it accurate?
625 firc_ok &= csr.fircacc().is_enabled_and_valid();
626 // Is there no error?
627 firc_ok &= csr.fircerr().is_error_not_detected();
628 // Is the FIRC the system clock?
629 firc_ok &= csr.fircsel().is_firc();
630 // Is it valid?
631 firc_ok &= csr.fircvld().is_enabled_and_valid();
632
633 // Are we happy with the current (hardcoded) state?
634 if !firc_ok {
635 return HARDCODED_ERR;
636 }
637
638 // Note that the fro_hf_root is active
639 self.clocks.fro_hf_root = Some(Clock {
640 frequency: base_freq,
641 power: firc.power,
642 });
643
644 // Okay! Now we're past that, let's enable all the downstream clocks.
645 let FircConfig {
646 frequency: _,
647 power,
648 fro_hf_enabled,
649 clk_45m_enabled,
650 fro_hf_div,
651 } = firc;
652
653 // When is the FRO enabled?
654 let pow_set = match power {
655 PoweredClock::NormalEnabledDeepSleepDisabled => Fircsten::DisabledInStopModes,
656 PoweredClock::AlwaysEnabled => Fircsten::EnabledInStopModes,
657 };
658
659 // Do we enable the `fro_hf` output?
660 let fro_hf_set = if *fro_hf_enabled {
661 self.clocks.fro_hf = Some(Clock {
662 frequency: base_freq,
663 power: *power,
664 });
665 FircFclkPeriphEn::Enabled
666 } else {
667 FircFclkPeriphEn::Disabled
668 };
669
670 // Do we enable the `clk_45m` output?
671 let clk_45m_set = if *clk_45m_enabled {
672 self.clocks.clk_45m = Some(Clock {
673 frequency: 45_000_000,
674 power: *power,
675 });
676 FircSclkPeriphEn::Enabled
677 } else {
678 FircSclkPeriphEn::Disabled
679 };
680
681 self.scg0.firccsr().modify(|_r, w| {
682 w.fircsten().variant(pow_set);
683 w.firc_fclk_periph_en().variant(fro_hf_set);
684 w.firc_sclk_periph_en().variant(clk_45m_set);
685 w
686 });
687
688 // Do we enable the `fro_hf_div` output?
689 if let Some(d) = fro_hf_div.as_ref() {
690 // We need `fro_hf` to be enabled
691 if !*fro_hf_enabled {
692 return Err(ClockError::BadConfig {
693 clock: "fro_hf_div",
694 reason: "fro_hf not enabled",
695 });
696 }
697
698 // Halt and reset the div; then set our desired div.
699 self.syscon.frohfdiv().write(|w| {
700 w.halt().halt();
701 w.reset().asserted();
702 unsafe { w.div().bits(d.into_bits()) };
703 w
704 });
705 // Then unhalt it, and reset it
706 self.syscon.frohfdiv().write(|w| {
707 w.halt().run();
708 w.reset().released();
709 w
710 });
711
712 // Wait for clock to stabilize
713 while self.syscon.frohfdiv().read().unstab().is_ongoing() {}
714
715 // Store off the clock info
716 self.clocks.fro_hf_div = Some(Clock {
717 frequency: base_freq / d.into_divisor(),
718 power: *power,
719 });
720 }
721
722 Ok(())
723 }
724
725 /// Configure the SIRC/FRO12M clock family
726 fn configure_sirc_clocks(&mut self) -> Result<(), ClockError> {
727 let SircConfig {
728 power,
729 fro_12m_enabled,
730 fro_lf_div,
731 } = &self.config.sirc;
732 let base_freq = 12_000_000;
733
734 // Allow writes
735 self.scg0.sirccsr().modify(|_r, w| w.lk().write_enabled());
736 self.clocks.fro_12m_root = Some(Clock {
737 frequency: base_freq,
738 power: *power,
739 });
740
741 let deep = match power {
742 PoweredClock::NormalEnabledDeepSleepDisabled => Sircsten::Disabled,
743 PoweredClock::AlwaysEnabled => Sircsten::Enabled,
744 };
745 let pclk = if *fro_12m_enabled {
746 self.clocks.fro_12m = Some(Clock {
747 frequency: base_freq,
748 power: *power,
749 });
750 self.clocks.clk_1m = Some(Clock {
751 frequency: base_freq / 12,
752 power: *power,
753 });
754 SircClkPeriphEn::Enabled
755 } else {
756 SircClkPeriphEn::Disabled
757 };
758
759 // Set sleep/peripheral usage
760 self.scg0.sirccsr().modify(|_r, w| {
761 w.sircsten().variant(deep);
762 w.sirc_clk_periph_en().variant(pclk);
763 w
764 });
765
766 while self.scg0.sirccsr().read().sircvld().is_disabled_or_not_valid() {}
767 if self.scg0.sirccsr().read().sircerr().is_error_detected() {
768 return Err(ClockError::BadConfig {
769 clock: "sirc",
770 reason: "error set",
771 });
772 }
773
774 // reset lock
775 self.scg0.sirccsr().modify(|_r, w| w.lk().write_disabled());
776
777 // Do we enable the `fro_lf_div` output?
778 if let Some(d) = fro_lf_div.as_ref() {
779 // We need `fro_lf` to be enabled
780 if !*fro_12m_enabled {
781 return Err(ClockError::BadConfig {
782 clock: "fro_lf_div",
783 reason: "fro_12m not enabled",
784 });
785 }
786
787 // Halt and reset the div; then set our desired div.
788 self.syscon.frolfdiv().write(|w| {
789 w.halt().halt();
790 w.reset().asserted();
791 unsafe { w.div().bits(d.into_bits()) };
792 w
793 });
794 // Then unhalt it, and reset it
795 self.syscon.frolfdiv().modify(|_r, w| {
796 w.halt().run();
797 w.reset().released();
798 w
799 });
800
801 // Wait for clock to stabilize
802 while self.syscon.frolfdiv().read().unstab().is_ongoing() {}
803
804 // Store off the clock info
805 self.clocks.fro_lf_div = Some(Clock {
806 frequency: base_freq / d.into_divisor(),
807 power: *power,
808 });
809 }
810
811 Ok(())
812 }
813
814 /// Configure the FRO16K/clk_16k clock family
815 fn configure_fro16k_clocks(&mut self) -> Result<(), ClockError> {
816 let Some(fro16k) = self.config.fro16k.as_ref() else {
817 return Ok(());
818 };
819 // Enable FRO16K oscillator
820 self.vbat0.froctla().modify(|_, w| w.fro_en().set_bit());
821
822 // Lock the control register
823 self.vbat0.frolcka().modify(|_, w| w.lock().set_bit());
824
825 let Fro16KConfig {
826 vsys_domain_active,
827 vdd_core_domain_active,
828 } = fro16k;
829
830 // Enable clock outputs to both VSYS and VDD_CORE domains
831 // Bit 0: clk_16k0 to VSYS domain
832 // Bit 1: clk_16k1 to VDD_CORE domain
833 //
834 // TODO: Define sub-fields for this register with a PAC patch?
835 let mut bits = 0;
836 if *vsys_domain_active {
837 bits |= 0b01;
838 self.clocks.clk_16k_vsys = Some(Clock {
839 frequency: 16_384,
840 power: PoweredClock::AlwaysEnabled,
841 });
842 }
843 if *vdd_core_domain_active {
844 bits |= 0b10;
845 self.clocks.clk_16k_vdd_core = Some(Clock {
846 frequency: 16_384,
847 power: PoweredClock::AlwaysEnabled,
848 });
849 }
850 self.vbat0.froclke().modify(|_r, w| unsafe { w.clke().bits(bits) });
851
852 Ok(())
853 }
854}
855
856//
857// Macros/macro impls
858//
859
/// This macro is used to implement the [`Gate`] trait for a given peripheral
/// that is controlled by the MRCC peripheral.
///
/// Arguments:
/// * `$name`: the peripheral singleton type in `crate::peripherals`
/// * `$clk_reg`: the MRCC clock-gate register accessor (e.g. `mrcc_glb_cc0`)
/// * `$rst_reg`: the matching MRCC reset register accessor (e.g. `mrcc_glb_rst0`)
/// * `$field`: the per-peripheral bitfield, same name in both registers
/// * `$config`: the [`SPConfHelper`] configuration type for the peripheral
///
/// NOTE: in the reset registers, `enabled()` means the reset is RELEASED
/// (peripheral running) and `disabled()` means the reset is ASSERTED.
macro_rules! impl_cc_gate {
    ($name:ident, $clk_reg:ident, $rst_reg:ident, $field:ident, $config:ty) => {
        impl Gate for crate::peripherals::$name {
            type MrccPeriphConfig = $config;

            #[inline]
            unsafe fn enable_clock() {
                let mrcc = unsafe { pac::Mrcc0::steal() };
                mrcc.$clk_reg().modify(|_, w| w.$field().enabled());
            }

            #[inline]
            unsafe fn disable_clock() {
                let mrcc = unsafe { pac::Mrcc0::steal() };
                mrcc.$clk_reg().modify(|_r, w| w.$field().disabled());
            }

            #[inline]
            fn is_clock_enabled() -> bool {
                let mrcc = unsafe { pac::Mrcc0::steal() };
                mrcc.$clk_reg().read().$field().is_enabled()
            }

            #[inline]
            unsafe fn release_reset() {
                let mrcc = unsafe { pac::Mrcc0::steal() };
                mrcc.$rst_reg().modify(|_, w| w.$field().enabled());
            }

            #[inline]
            unsafe fn assert_reset() {
                let mrcc = unsafe { pac::Mrcc0::steal() };
                mrcc.$rst_reg().modify(|_, w| w.$field().disabled());
            }

            #[inline]
            fn is_reset_released() -> bool {
                let mrcc = unsafe { pac::Mrcc0::steal() };
                mrcc.$rst_reg().read().$field().is_enabled()
            }
        }
    };
}
905
/// This module contains implementations of MRCC APIs, specifically of the [`Gate`] trait,
/// for various low level peripherals.
pub(crate) mod gate {
    #[cfg(not(feature = "time"))]
    use super::periph_helpers::OsTimerConfig;
    use super::periph_helpers::{AdcConfig, Lpi2cConfig, LpuartConfig, NoConfig};
    use super::*;

    // These peripherals have no additional upstream clocks or configuration required
    // other than enabling through the MRCC gate. Currently, these peripherals will
    // ALWAYS return `Ok(0)` when calling [`enable_and_reset()`] and/or
    // [`SPConfHelper::post_enable_config()`].
    impl_cc_gate!(PORT0, mrcc_glb_cc1, mrcc_glb_rst1, port0, NoConfig);
    impl_cc_gate!(PORT1, mrcc_glb_cc1, mrcc_glb_rst1, port1, NoConfig);
    impl_cc_gate!(PORT2, mrcc_glb_cc1, mrcc_glb_rst1, port2, NoConfig);
    impl_cc_gate!(PORT3, mrcc_glb_cc1, mrcc_glb_rst1, port3, NoConfig);
    impl_cc_gate!(PORT4, mrcc_glb_cc1, mrcc_glb_rst1, port4, NoConfig);

    impl_cc_gate!(GPIO0, mrcc_glb_cc2, mrcc_glb_rst2, gpio0, NoConfig);
    impl_cc_gate!(GPIO1, mrcc_glb_cc2, mrcc_glb_rst2, gpio1, NoConfig);
    impl_cc_gate!(GPIO2, mrcc_glb_cc2, mrcc_glb_rst2, gpio2, NoConfig);
    impl_cc_gate!(GPIO3, mrcc_glb_cc2, mrcc_glb_rst2, gpio3, NoConfig);
    impl_cc_gate!(GPIO4, mrcc_glb_cc2, mrcc_glb_rst2, gpio4, NoConfig);

    // These peripherals DO have meaningful configuration, and could fail if the system
    // clocks do not match their needs.
    //
    // NOTE: OSTIMER0's gate is only provided here when the `time` feature is off;
    // presumably the time driver owns it otherwise — confirm against the time driver.
    #[cfg(not(feature = "time"))]
    impl_cc_gate!(OSTIMER0, mrcc_glb_cc1, mrcc_glb_rst1, ostimer0, OsTimerConfig);

    impl_cc_gate!(LPI2C0, mrcc_glb_cc0, mrcc_glb_rst0, lpi2c0, Lpi2cConfig);
    impl_cc_gate!(LPI2C1, mrcc_glb_cc0, mrcc_glb_rst0, lpi2c1, Lpi2cConfig);
    impl_cc_gate!(LPI2C2, mrcc_glb_cc1, mrcc_glb_rst1, lpi2c2, Lpi2cConfig);
    impl_cc_gate!(LPI2C3, mrcc_glb_cc1, mrcc_glb_rst1, lpi2c3, Lpi2cConfig);

    impl_cc_gate!(LPUART0, mrcc_glb_cc0, mrcc_glb_rst0, lpuart0, LpuartConfig);
    impl_cc_gate!(LPUART1, mrcc_glb_cc0, mrcc_glb_rst0, lpuart1, LpuartConfig);
    impl_cc_gate!(LPUART2, mrcc_glb_cc0, mrcc_glb_rst0, lpuart2, LpuartConfig);
    impl_cc_gate!(LPUART3, mrcc_glb_cc0, mrcc_glb_rst0, lpuart3, LpuartConfig);
    impl_cc_gate!(LPUART4, mrcc_glb_cc0, mrcc_glb_rst0, lpuart4, LpuartConfig);
    impl_cc_gate!(LPUART5, mrcc_glb_cc1, mrcc_glb_rst1, lpuart5, LpuartConfig);
    impl_cc_gate!(ADC1, mrcc_glb_cc1, mrcc_glb_rst1, adc1, AdcConfig);

    // DMA0 peripheral - uses NoConfig since it has no selectable clock source
    impl_cc_gate!(DMA0, mrcc_glb_cc0, mrcc_glb_rst0, dma0, NoConfig);
}
diff --git a/embassy-mcxa/src/clocks/periph_helpers.rs b/embassy-mcxa/src/clocks/periph_helpers.rs
new file mode 100644
index 000000000..24d074e8a
--- /dev/null
+++ b/embassy-mcxa/src/clocks/periph_helpers.rs
@@ -0,0 +1,506 @@
1//! Peripheral Helpers
2//!
3//! The purpose of this module is to define the per-peripheral special handling
4//! required from a clocking perspective. Different peripherals have different
5//! selectable source clocks, and some peripherals have additional pre-dividers
6//! that can be used.
7//!
8//! See the docs of [`SPConfHelper`] for more details.
9
10use super::{ClockError, Clocks, PoweredClock};
11use crate::pac;
12
/// Sealed Peripheral Configuration Helper
///
/// NOTE: the name "sealed" doesn't *totally* make sense because it's not sealed yet in the
/// embassy-mcxa project, but it derives from embassy-imxrt where it is. We should
/// fix the name, or actually do the sealing of peripherals.
///
/// This trait serves to act as a per-peripheral customization for clocking behavior.
///
/// This trait should be implemented on a configuration type for a given peripheral, and
/// provide the methods that will be called by the higher level operations like
/// `embassy_mcxa::clocks::enable_and_reset()`.
pub trait SPConfHelper {
    /// This method is called AFTER a given MRCC peripheral has been enabled (e.g. un-gated),
    /// but BEFORE the peripheral reset line is reset.
    ///
    /// This function should check that any relevant upstream clocks are enabled, are in a
    /// reasonable power state, and that the requested configuration can be made. If any of
    /// these checks fail, an `Err(ClockError)` should be returned, likely `ClockError::BadConfig`.
    ///
    /// This function SHOULD NOT make any changes to the system clock configuration, even
    /// unsafely, as this should remain static for the duration of the program.
    ///
    /// This function WILL be called in a critical section, care should be taken not to delay
    /// for an unreasonable amount of time.
    ///
    /// On success, this function MUST return an `Ok(freq)`, where `freq` is the frequency
    /// fed into the peripheral, taking into account the selected source clock, as well as
    /// any pre-divisors.
    fn post_enable_config(&self, clocks: &Clocks) -> Result<u32, ClockError>;
}
43
44// config types
45
/// This type represents a divider in the range 1..=16.
///
/// At a hardware level, this is an 8-bit register from 0..=15,
/// which adds one.
///
/// While the *clock* domain seems to use 8-bit dividers, the *peripheral* domain
/// seems to use 4 bit dividers!
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Div4(pub(super) u8);

impl Div4 {
    /// Divide by one, or no division
    pub const fn no_div() -> Self {
        Self(0)
    }

    /// Store a "raw" divisor value that will divide the source by
    /// `(n + 1)`, e.g. `Div4::from_raw(0)` will divide the source
    /// by 1, and `Div4::from_raw(15)` will divide the source by
    /// 16.
    ///
    /// Returns `None` if `n` does not fit in the 4-bit register field.
    pub const fn from_raw(n: u8) -> Option<Self> {
        if n > 0b1111 {
            None
        } else {
            Some(Self(n))
        }
    }

    /// Store a specific divisor value that will divide the source
    /// by `n`. e.g. `Div4::from_divisor(1)` will divide the source
    /// by 1, and `Div4::from_divisor(16)` will divide the source
    /// by 16.
    ///
    /// Will return `None` if `n` is not in the range `1..=16`.
    /// Consider [`Self::from_raw`] if you already hold the raw
    /// (`n - 1` encoded) register value. Note that both
    /// constructors are fallible.
    pub const fn from_divisor(n: u8) -> Option<Self> {
        // `n == 0` underflows the subtraction and is rejected here.
        let Some(n) = n.checked_sub(1) else {
            return None;
        };
        if n > 0b1111 {
            return None;
        }
        Some(Self(n))
    }

    /// Convert into "raw" bits form
    #[inline(always)]
    pub const fn into_bits(self) -> u8 {
        self.0
    }

    /// Convert into "divisor" form, as a u32 for convenient frequency math
    #[inline(always)]
    pub const fn into_divisor(self) -> u32 {
        self.0 as u32 + 1
    }
}
103
/// A basic type that always returns an error when `post_enable_config` is called.
///
/// Should only be used as a placeholder.
pub struct UnimplementedConfig;

impl SPConfHelper for UnimplementedConfig {
    // Always fails: the clock configuration for this peripheral has not been
    // written yet, so enabling it would leave it in an unknown state.
    fn post_enable_config(&self, _clocks: &Clocks) -> Result<u32, ClockError> {
        Err(ClockError::UnimplementedConfig)
    }
}

/// A basic type that always returns `Ok(0)` when `post_enable_config` is called.
///
/// This should only be used for peripherals that are "ambiently" clocked, like `PORTn`
/// peripherals, which have no selectable/configurable source clock.
pub struct NoConfig;
impl SPConfHelper for NoConfig {
    // Reports a frequency of 0 because there is no meaningful input clock
    // frequency for an ambiently-clocked peripheral.
    fn post_enable_config(&self, _clocks: &Clocks) -> Result<u32, ClockError> {
        Ok(0)
    }
}
125
//
// LPI2c
//

/// Selectable clocks for `Lpi2c` peripherals
#[derive(Debug, Clone, Copy)]
pub enum Lpi2cClockSel {
    /// FRO12M/FRO_LF/SIRC clock source, passed through divider
    /// "fro_lf_div"
    FroLfDiv,
    /// FRO180M/FRO_HF/FIRC clock source, passed through divider
    /// "fro_hf_div"
    FroHfDiv,
    /// SOSC/XTAL/EXTAL clock source
    ClkIn,
    /// clk_1m/FRO_LF divided by 12
    Clk1M,
    /// Output of PLL1, passed through clock divider,
    /// "pll1_clk_div", maybe "pll1_lf_div"?
    Pll1ClkDiv,
    /// Disabled
    None,
}

/// Which instance of the `Lpi2c` is this?
///
/// Should not be directly selectable by end-users.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Lpi2cInstance {
    /// Instance 0
    Lpi2c0,
    /// Instance 1
    Lpi2c1,
    /// Instance 2
    Lpi2c2,
    /// Instance 3
    Lpi2c3,
}

/// Top level configuration for `Lpi2c` instances.
pub struct Lpi2cConfig {
    /// Power state required for this peripheral
    pub power: PoweredClock,
    /// Clock source
    pub source: Lpi2cClockSel,
    /// Clock divisor
    pub div: Div4,
    /// Which instance is this?
    // NOTE: should not be user settable, hence `pub(crate)` while the rest
    // of the fields are `pub`.
    pub(crate) instance: Lpi2cInstance,
}
177
178impl SPConfHelper for Lpi2cConfig {
179 fn post_enable_config(&self, clocks: &Clocks) -> Result<u32, ClockError> {
180 // check that source is suitable
181 let mrcc0 = unsafe { pac::Mrcc0::steal() };
182 use mcxa_pac::mrcc0::mrcc_lpi2c0_clksel::Mux;
183
184 let (clkdiv, clksel) = match self.instance {
185 Lpi2cInstance::Lpi2c0 => (mrcc0.mrcc_lpi2c0_clkdiv(), mrcc0.mrcc_lpi2c0_clksel()),
186 Lpi2cInstance::Lpi2c1 => (mrcc0.mrcc_lpi2c1_clkdiv(), mrcc0.mrcc_lpi2c1_clksel()),
187 Lpi2cInstance::Lpi2c2 => (mrcc0.mrcc_lpi2c2_clkdiv(), mrcc0.mrcc_lpi2c2_clksel()),
188 Lpi2cInstance::Lpi2c3 => (mrcc0.mrcc_lpi2c3_clkdiv(), mrcc0.mrcc_lpi2c3_clksel()),
189 };
190
191 let (freq, variant) = match self.source {
192 Lpi2cClockSel::FroLfDiv => {
193 let freq = clocks.ensure_fro_lf_div_active(&self.power)?;
194 (freq, Mux::ClkrootFunc0)
195 }
196 Lpi2cClockSel::FroHfDiv => {
197 let freq = clocks.ensure_fro_hf_div_active(&self.power)?;
198 (freq, Mux::ClkrootFunc2)
199 }
200 Lpi2cClockSel::ClkIn => {
201 let freq = clocks.ensure_clk_in_active(&self.power)?;
202 (freq, Mux::ClkrootFunc3)
203 }
204 Lpi2cClockSel::Clk1M => {
205 let freq = clocks.ensure_clk_1m_active(&self.power)?;
206 (freq, Mux::ClkrootFunc5)
207 }
208 Lpi2cClockSel::Pll1ClkDiv => {
209 let freq = clocks.ensure_pll1_clk_div_active(&self.power)?;
210 (freq, Mux::ClkrootFunc6)
211 }
212 Lpi2cClockSel::None => unsafe {
213 // no ClkrootFunc7, just write manually for now
214 clksel.write(|w| w.bits(0b111));
215 clkdiv.modify(|_r, w| w.reset().asserted().halt().asserted());
216 return Ok(0);
217 },
218 };
219
220 // set clksel
221 clksel.modify(|_r, w| w.mux().variant(variant));
222
223 // Set up clkdiv
224 clkdiv.modify(|_r, w| {
225 unsafe { w.div().bits(self.div.into_bits()) }
226 .halt()
227 .asserted()
228 .reset()
229 .asserted()
230 });
231 clkdiv.modify(|_r, w| w.halt().deasserted().reset().deasserted());
232
233 while clkdiv.read().unstab().is_unstable() {}
234
235 Ok(freq / self.div.into_divisor())
236 }
237}
238
//
// LPUart
//

/// Selectable clocks for Lpuart peripherals
#[derive(Debug, Clone, Copy)]
pub enum LpuartClockSel {
    /// FRO12M/FRO_LF/SIRC clock source, passed through divider
    /// "fro_lf_div"
    FroLfDiv,
    /// FRO180M/FRO_HF/FIRC clock source, passed through divider
    /// "fro_hf_div"
    FroHfDiv,
    /// SOSC/XTAL/EXTAL clock source
    ClkIn,
    /// FRO16K/clk_16k source
    Clk16K,
    /// clk_1m/FRO_LF divided by 12
    Clk1M,
    /// Output of PLL1, passed through clock divider,
    /// "pll1_clk_div", maybe "pll1_lf_div"?
    Pll1ClkDiv,
    /// Disabled
    None,
}

/// Which instance of the Lpuart is this?
///
/// Should not be directly selectable by end-users.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LpuartInstance {
    /// Instance 0
    Lpuart0,
    /// Instance 1
    Lpuart1,
    /// Instance 2
    Lpuart2,
    /// Instance 3
    Lpuart3,
    /// Instance 4
    Lpuart4,
    /// Instance 5
    Lpuart5,
}

/// Top level configuration for `Lpuart` instances.
pub struct LpuartConfig {
    /// Power state required for this peripheral
    pub power: PoweredClock,
    /// Clock source
    pub source: LpuartClockSel,
    /// Clock divisor
    pub div: Div4,
    /// Which instance is this?
    // NOTE: should not be user settable, hence `pub(crate)` while the rest
    // of the fields are `pub`.
    pub(crate) instance: LpuartInstance,
}
296
297impl SPConfHelper for LpuartConfig {
298 fn post_enable_config(&self, clocks: &Clocks) -> Result<u32, ClockError> {
299 // check that source is suitable
300 let mrcc0 = unsafe { pac::Mrcc0::steal() };
301 use mcxa_pac::mrcc0::mrcc_lpuart0_clksel::Mux;
302
303 let (clkdiv, clksel) = match self.instance {
304 LpuartInstance::Lpuart0 => (mrcc0.mrcc_lpuart0_clkdiv(), mrcc0.mrcc_lpuart0_clksel()),
305 LpuartInstance::Lpuart1 => (mrcc0.mrcc_lpuart1_clkdiv(), mrcc0.mrcc_lpuart1_clksel()),
306 LpuartInstance::Lpuart2 => (mrcc0.mrcc_lpuart2_clkdiv(), mrcc0.mrcc_lpuart2_clksel()),
307 LpuartInstance::Lpuart3 => (mrcc0.mrcc_lpuart3_clkdiv(), mrcc0.mrcc_lpuart3_clksel()),
308 LpuartInstance::Lpuart4 => (mrcc0.mrcc_lpuart4_clkdiv(), mrcc0.mrcc_lpuart4_clksel()),
309 LpuartInstance::Lpuart5 => (mrcc0.mrcc_lpuart5_clkdiv(), mrcc0.mrcc_lpuart5_clksel()),
310 };
311
312 let (freq, variant) = match self.source {
313 LpuartClockSel::FroLfDiv => {
314 let freq = clocks.ensure_fro_lf_div_active(&self.power)?;
315 (freq, Mux::ClkrootFunc0)
316 }
317 LpuartClockSel::FroHfDiv => {
318 let freq = clocks.ensure_fro_hf_div_active(&self.power)?;
319 (freq, Mux::ClkrootFunc2)
320 }
321 LpuartClockSel::ClkIn => {
322 let freq = clocks.ensure_clk_in_active(&self.power)?;
323 (freq, Mux::ClkrootFunc3)
324 }
325 LpuartClockSel::Clk16K => {
326 let freq = clocks.ensure_clk_16k_vdd_core_active(&self.power)?;
327 (freq, Mux::ClkrootFunc4)
328 }
329 LpuartClockSel::Clk1M => {
330 let freq = clocks.ensure_clk_1m_active(&self.power)?;
331 (freq, Mux::ClkrootFunc5)
332 }
333 LpuartClockSel::Pll1ClkDiv => {
334 let freq = clocks.ensure_pll1_clk_div_active(&self.power)?;
335 (freq, Mux::ClkrootFunc6)
336 }
337 LpuartClockSel::None => unsafe {
338 // no ClkrootFunc7, just write manually for now
339 clksel.write(|w| w.bits(0b111));
340 clkdiv.modify(|_r, w| {
341 w.reset().asserted();
342 w.halt().asserted();
343 w
344 });
345 return Ok(0);
346 },
347 };
348
349 // set clksel
350 clksel.modify(|_r, w| w.mux().variant(variant));
351
352 // Set up clkdiv
353 clkdiv.modify(|_r, w| {
354 w.halt().asserted();
355 w.reset().asserted();
356 unsafe { w.div().bits(self.div.into_bits()) };
357 w
358 });
359 clkdiv.modify(|_r, w| {
360 w.halt().deasserted();
361 w.reset().deasserted();
362 w
363 });
364
365 while clkdiv.read().unstab().is_unstable() {}
366
367 Ok(freq / self.div.into_divisor())
368 }
369}
370
//
// OSTimer
//

/// Selectable clocks for the OSTimer peripheral
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OstimerClockSel {
    /// 16k clock, sourced from FRO16K (Vdd Core)
    Clk16kVddCore,
    /// 1 MHz Clock sourced from FRO12M
    Clk1M,
    /// Disabled
    None,
}

/// Top level configuration for the `OSTimer` peripheral
///
/// Unlike the LPI2C/LPUART/ADC configs, the OSTimer has no pre-divider
/// (`Div4`) field — its sources feed the peripheral directly.
pub struct OsTimerConfig {
    /// Power state required for this peripheral
    pub power: PoweredClock,
    /// Selected clock source for this peripheral
    pub source: OstimerClockSel,
}
393
394impl SPConfHelper for OsTimerConfig {
395 fn post_enable_config(&self, clocks: &Clocks) -> Result<u32, ClockError> {
396 let mrcc0 = unsafe { pac::Mrcc0::steal() };
397 Ok(match self.source {
398 OstimerClockSel::Clk16kVddCore => {
399 let freq = clocks.ensure_clk_16k_vdd_core_active(&self.power)?;
400 mrcc0.mrcc_ostimer0_clksel().write(|w| w.mux().clkroot_16k());
401 freq
402 }
403 OstimerClockSel::Clk1M => {
404 let freq = clocks.ensure_clk_1m_active(&self.power)?;
405 mrcc0.mrcc_ostimer0_clksel().write(|w| w.mux().clkroot_1m());
406 freq
407 }
408 OstimerClockSel::None => {
409 mrcc0.mrcc_ostimer0_clksel().write(|w| unsafe { w.mux().bits(0b11) });
410 0
411 }
412 })
413 }
414}
415
//
// Adc
//

/// Selectable clocks for the ADC peripheral
///
/// Note: unlike LPI2C/LPUART, the ADC can select the *undivided* `fro_hf`
/// output rather than `fro_hf_div`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum AdcClockSel {
    /// Divided `fro_lf`/`clk_12m`/FRO12M source
    FroLfDiv,
    /// Gated `fro_hf`/`FRO180M` source
    FroHf,
    /// External Clock Source
    ClkIn,
    /// 1MHz clock sourced by a divided `fro_lf`/`clk_12m`
    Clk1M,
    /// Internal PLL output, with configurable divisor
    Pll1ClkDiv,
    /// No clock/disabled
    None,
}

/// Top level configuration for the ADC peripheral
///
/// There is a single set of `mrcc_adc_*` clock registers, so no instance
/// field is needed here.
pub struct AdcConfig {
    /// Power state required for this peripheral
    pub power: PoweredClock,
    /// Selected clock-source for this peripheral
    pub source: AdcClockSel,
    /// Pre-divisor, applied to the upstream clock output
    pub div: Div4,
}
446
447impl SPConfHelper for AdcConfig {
448 fn post_enable_config(&self, clocks: &Clocks) -> Result<u32, ClockError> {
449 use mcxa_pac::mrcc0::mrcc_adc_clksel::Mux;
450 let mrcc0 = unsafe { pac::Mrcc0::steal() };
451 let (freq, variant) = match self.source {
452 AdcClockSel::FroLfDiv => {
453 let freq = clocks.ensure_fro_lf_div_active(&self.power)?;
454 (freq, Mux::ClkrootFunc0)
455 }
456 AdcClockSel::FroHf => {
457 let freq = clocks.ensure_fro_hf_active(&self.power)?;
458 (freq, Mux::ClkrootFunc1)
459 }
460 AdcClockSel::ClkIn => {
461 let freq = clocks.ensure_clk_in_active(&self.power)?;
462 (freq, Mux::ClkrootFunc3)
463 }
464 AdcClockSel::Clk1M => {
465 let freq = clocks.ensure_clk_1m_active(&self.power)?;
466 (freq, Mux::ClkrootFunc5)
467 }
468 AdcClockSel::Pll1ClkDiv => {
469 let freq = clocks.ensure_pll1_clk_div_active(&self.power)?;
470 (freq, Mux::ClkrootFunc6)
471 }
472 AdcClockSel::None => {
473 mrcc0.mrcc_adc_clksel().write(|w| unsafe {
474 // no ClkrootFunc7, just write manually for now
475 w.mux().bits(0b111)
476 });
477 mrcc0.mrcc_adc_clkdiv().modify(|_r, w| {
478 w.reset().asserted();
479 w.halt().asserted();
480 w
481 });
482 return Ok(0);
483 }
484 };
485
486 // set clksel
487 mrcc0.mrcc_adc_clksel().modify(|_r, w| w.mux().variant(variant));
488
489 // Set up clkdiv
490 mrcc0.mrcc_adc_clkdiv().modify(|_r, w| {
491 w.halt().asserted();
492 w.reset().asserted();
493 unsafe { w.div().bits(self.div.into_bits()) };
494 w
495 });
496 mrcc0.mrcc_adc_clkdiv().modify(|_r, w| {
497 w.halt().deasserted();
498 w.reset().deasserted();
499 w
500 });
501
502 while mrcc0.mrcc_adc_clkdiv().read().unstab().is_unstable() {}
503
504 Ok(freq / self.div.into_divisor())
505 }
506}
diff --git a/embassy-mcxa/src/config.rs b/embassy-mcxa/src/config.rs
new file mode 100644
index 000000000..8d52a1d44
--- /dev/null
+++ b/embassy-mcxa/src/config.rs
@@ -0,0 +1,27 @@
1// HAL configuration (minimal), mirroring embassy-imxrt style
2
3use crate::clocks::config::ClocksConfig;
4use crate::interrupt::Priority;
5
/// Top-level HAL configuration (mirroring embassy-imxrt style).
#[non_exhaustive]
pub struct Config {
    /// Interrupt priority for the time driver (only present with the
    /// `time` feature).
    #[cfg(feature = "time")]
    pub time_interrupt_priority: Priority,
    /// Interrupt priority for the RTC interrupt.
    pub rtc_interrupt_priority: Priority,
    /// Interrupt priority for the ADC interrupt.
    pub adc_interrupt_priority: Priority,
    /// Interrupt priority for GPIO interrupts.
    pub gpio_interrupt_priority: Priority,
    /// Clock tree configuration.
    pub clock_cfg: ClocksConfig,
}
15
16impl Default for Config {
17 fn default() -> Self {
18 Self {
19 #[cfg(feature = "time")]
20 time_interrupt_priority: Priority::from(0),
21 rtc_interrupt_priority: Priority::from(0),
22 adc_interrupt_priority: Priority::from(0),
23 gpio_interrupt_priority: Priority::from(0),
24 clock_cfg: ClocksConfig::default(),
25 }
26 }
27}
diff --git a/embassy-mcxa/src/dma.rs b/embassy-mcxa/src/dma.rs
new file mode 100644
index 000000000..7d1588516
--- /dev/null
+++ b/embassy-mcxa/src/dma.rs
@@ -0,0 +1,2594 @@
1//! DMA driver for MCXA276.
2//!
3//! This module provides a typed channel abstraction over the EDMA_0_TCD0 array
4//! and helpers for configuring the channel MUX. The driver supports both
5//! low-level TCD configuration and higher-level async transfer APIs.
6//!
7//! # Architecture
8//!
9//! The MCXA276 has 8 DMA channels (0-7), each with its own interrupt vector.
10//! Each channel has a Transfer Control Descriptor (TCD) that defines the
11//! transfer parameters.
12//!
13//! # Choosing the Right API
14//!
15//! This module provides several API levels to match different use cases:
16//!
17//! ## High-Level Async API (Recommended for Most Users)
18//!
19//! Use the async methods when you want simple, safe DMA transfers:
20//!
21//! | Method | Description |
22//! |--------|-------------|
23//! | [`DmaChannel::mem_to_mem()`] | Memory-to-memory copy |
24//! | [`DmaChannel::memset()`] | Fill memory with a pattern |
25//! | [`DmaChannel::write()`] | Memory-to-peripheral (TX) |
26//! | [`DmaChannel::read()`] | Peripheral-to-memory (RX) |
27//!
28//! These return a [`Transfer`] future that can be `.await`ed:
29//!
30//! ```no_run
31//! # use embassy_mcxa::dma::{DmaChannel, TransferOptions};
32//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
33//! # let src = [0u32; 4];
34//! # let mut dst = [0u32; 4];
35//! // Simple memory-to-memory transfer
36//! unsafe {
37//! dma_ch.mem_to_mem(&src, &mut dst, TransferOptions::default()).await;
38//! }
39//! ```
40//!
41//! ## Setup Methods (For Peripheral Drivers)
42//!
43//! Use setup methods when you need manual lifecycle control:
44//!
45//! | Method | Description |
46//! |--------|-------------|
47//! | [`DmaChannel::setup_write()`] | Configure TX without starting |
48//! | [`DmaChannel::setup_read()`] | Configure RX without starting |
49//!
50//! These configure the TCD but don't start the transfer. You control:
51//! 1. When to call [`DmaChannel::enable_request()`]
52//! 2. How to detect completion (polling or interrupts)
53//! 3. When to clean up with [`DmaChannel::clear_done()`]
54//!
55//! ## Circular/Ring Buffer API (For Continuous Reception)
56//!
57//! Use [`DmaChannel::setup_circular_read()`] for continuous data reception:
58//!
59//! ```no_run
60//! # use embassy_mcxa::dma::DmaChannel;
61//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
62//! # let uart_rx_addr = 0x4000_0000 as *const u8;
63//! static mut RX_BUF: [u8; 64] = [0; 64];
64//!
65//! let ring_buf = unsafe {
66//! dma_ch.setup_circular_read(uart_rx_addr, &mut RX_BUF)
67//! };
68//!
69//! // Read data as it arrives
70//! let mut buf = [0u8; 16];
71//! let n = ring_buf.read(&mut buf).await.unwrap();
72//! ```
73//!
74//! ## Scatter-Gather Builder (For Chained Transfers)
75//!
76//! Use [`ScatterGatherBuilder`] for complex multi-segment transfers:
77//!
78//! ```no_run
79//! # use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
80//! # let dma_ch = DmaChannel::new(p.DMA_CH0);
81//! let mut builder = ScatterGatherBuilder::<u32>::new();
82//! builder.add_transfer(&src1, &mut dst1);
83//! builder.add_transfer(&src2, &mut dst2);
84//!
85//! let transfer = unsafe { builder.build(&dma_ch).unwrap() };
86//! transfer.await;
87//! ```
88//!
89//! ## Direct TCD Access (For Advanced Use Cases)
90//!
91//! For full control, use the channel's `tcd()` method to access TCD registers directly.
92//! See the `dma_*` examples for patterns.
93//!
94//! # Example
95//!
96//! ```no_run
97//! use embassy_mcxa::dma::{DmaChannel, TransferOptions, Direction};
98//!
99//! let dma_ch = DmaChannel::new(p.DMA_CH0);
100//! // Configure and trigger a transfer...
101//! ```
102
103use core::future::Future;
104use core::marker::PhantomData;
105use core::pin::Pin;
106use core::ptr::NonNull;
107use core::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
108use core::task::{Context, Poll};
109
110use embassy_hal_internal::PeripheralType;
111use embassy_sync::waitqueue::AtomicWaker;
112
113use crate::clocks::Gate;
114use crate::pac;
115use crate::pac::Interrupt;
116use crate::peripherals::DMA0;
117
118/// Static flag to track whether DMA has been initialized.
119static DMA_INITIALIZED: AtomicBool = AtomicBool::new(false);
120
121/// Initialize DMA controller (clock enabled, reset released, controller configured).
122///
123/// This function is intended to be called during HAL initialization (`hal::init()`).
124/// It is idempotent - it will only initialize DMA once, even if called multiple times.
125///
126/// The function enables the DMA0 clock, releases reset, and configures the controller
127/// for normal operation with round-robin arbitration.
128pub fn init() {
129 // Fast path: already initialized
130 if DMA_INITIALIZED.load(Ordering::Acquire) {
131 return;
132 }
133
134 // Slow path: initialize DMA
135 // Use compare_exchange to ensure only one caller initializes
136 if DMA_INITIALIZED
137 .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
138 .is_ok()
139 {
140 // We won the race - initialize DMA
141 unsafe {
142 // Enable DMA0 clock and release reset
143 DMA0::enable_clock();
144 DMA0::release_reset();
145
146 // Configure DMA controller
147 let dma = &(*pac::Dma0::ptr());
148 dma.mp_csr().modify(|_, w| {
149 w.edbg()
150 .enable()
151 .erca()
152 .enable()
153 .halt()
154 .normal_operation()
155 .gclc()
156 .available()
157 .gmrc()
158 .available()
159 });
160 }
161 }
162}
163
// ============================================================================
// Phase 1: Foundation Types (Embassy-aligned)
// ============================================================================

/// DMA transfer direction.
///
/// NOTE(review): presumably this selects which side of the transfer keeps a
/// fixed (peripheral-register) address — confirm against the TCD setup code.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Direction {
    /// Transfer from memory to memory.
    MemoryToMemory,
    /// Transfer from memory to a peripheral register.
    MemoryToPeripheral,
    /// Transfer from a peripheral register to memory.
    PeripheralToMemory,
}
179
/// DMA transfer priority.
///
/// The hardware encodes priority as a value where 0 is the *highest*
/// priority and 7 the lowest; see [`Priority::to_hw_priority`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
    /// Low priority (channel priority 7).
    Low,
    /// Medium priority (channel priority 4).
    Medium,
    /// High priority (channel priority 1).
    #[default]
    High,
    /// Highest priority (channel priority 0).
    Highest,
}

impl Priority {
    /// Convert to hardware priority value (0 = highest, 7 = lowest).
    ///
    /// `const fn` for consistency with [`WordSize::to_hw_size`] and so the
    /// mapping can be used in const contexts.
    pub const fn to_hw_priority(self) -> u8 {
        match self {
            Priority::Low => 7,
            Priority::Medium => 4,
            Priority::High => 1,
            Priority::Highest => 0,
        }
    }
}
206
/// DMA transfer data width.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum WordSize {
    /// 8-bit (1 byte) transfers.
    OneByte,
    /// 16-bit (2 byte) transfers.
    TwoBytes,
    /// 32-bit (4 byte) transfers.
    #[default]
    FourBytes,
}

impl WordSize {
    /// Size in bytes.
    pub const fn bytes(self) -> usize {
        // The hardware encoding is log2(bytes), so width == 2^encoding.
        1 << self.to_hw_size()
    }

    /// Convert to hardware SSIZE/DSIZE field value.
    pub const fn to_hw_size(self) -> u8 {
        match self {
            WordSize::OneByte => 0,
            WordSize::TwoBytes => 1,
            WordSize::FourBytes => 2,
        }
    }

    /// Create from byte width (1, 2, or 4).
    ///
    /// Any other width yields `None`.
    pub const fn from_bytes(bytes: u8) -> Option<Self> {
        match bytes {
            1 => Some(WordSize::OneByte),
            2 => Some(WordSize::TwoBytes),
            4 => Some(WordSize::FourBytes),
            _ => None,
        }
    }
}
249
/// Trait for types that can be transferred via DMA.
///
/// This provides compile-time type safety for DMA transfers.
/// Implemented below for `u8`, `u16`, and `u32`.
pub trait Word: Copy + 'static {
    /// The word size for this type.
    fn size() -> WordSize;
}

impl Word for u8 {
    // 8-bit transfers.
    fn size() -> WordSize {
        WordSize::OneByte
    }
}

impl Word for u16 {
    // 16-bit transfers.
    fn size() -> WordSize {
        WordSize::TwoBytes
    }
}

impl Word for u32 {
    // 32-bit transfers.
    fn size() -> WordSize {
        WordSize::FourBytes
    }
}
275
/// DMA transfer options.
///
/// This struct configures various aspects of a DMA transfer.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Transfer priority.
    pub priority: Priority,
    /// Enable circular (continuous) mode.
    ///
    /// When enabled, the transfer repeats automatically after completing.
    pub circular: bool,
    /// Enable interrupt on half transfer complete.
    pub half_transfer_interrupt: bool,
    /// Enable interrupt on transfer complete.
    pub complete_transfer_interrupt: bool,
}

impl Default for TransferOptions {
    // Defaults: high priority, one-shot (non-circular) transfer, and an
    // interrupt only on full completion.
    fn default() -> Self {
        Self {
            priority: Priority::High,
            circular: false,
            half_transfer_interrupt: false,
            complete_transfer_interrupt: true,
        }
    }
}
305
/// DMA error types.
///
/// Returned by fallible DMA operations (e.g. ring-buffer reads).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA controller reported a bus error.
    BusError,
    /// The transfer was aborted.
    Aborted,
    /// Configuration error (e.g., invalid parameters).
    Configuration,
    /// Buffer overrun (for ring buffers).
    Overrun,
}

/// Whether to enable the major loop completion interrupt.
///
/// This enum provides better readability than a boolean parameter
/// for functions that configure DMA interrupt behavior.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum EnableInterrupt {
    /// Enable the interrupt on major loop completion.
    Yes,
    /// Do not enable the interrupt.
    No,
}
332
// ============================================================================
// DMA Constants
// ============================================================================

/// Maximum bytes per DMA transfer (eDMA4 CITER/BITER are 15-bit fields).
///
/// This is a hardware limitation of the eDMA4 controller. Transfers larger
/// than this must be split into multiple DMA operations.
///
/// NOTE(review): CITER/BITER count major-loop *iterations*, so `0x7FFF`
/// (32767) is a byte limit only when each minor loop moves one byte —
/// confirm against the transfer setup helpers.
pub const DMA_MAX_TRANSFER_SIZE: usize = 0x7FFF;
342
// ============================================================================
// DMA Request Source Types (Type-Safe API)
// ============================================================================

/// Trait for type-safe DMA request sources.
///
/// Each peripheral that can trigger DMA requests implements this trait
/// with marker types that encode the correct request source number at
/// compile time. This prevents using the wrong request source for a
/// peripheral.
///
/// # Example
///
/// ```ignore
/// // The LPUART2 RX request source is automatically derived from the type:
/// channel.set_request_source::<Lpuart2RxRequest>();
/// ```
///
/// This trait is sealed and cannot be implemented outside this crate.
// `private_bounds` is allowed so this public trait can require the private
// sealing trait as a supertrait.
#[allow(private_bounds)]
pub trait DmaRequest: sealed::SealedDmaRequest {
    /// The hardware request source number for the DMA mux.
    const REQUEST_NUMBER: u8;
}
367
/// Macro to define a DMA request type.
///
/// Creates a zero-sized marker type that implements `DmaRequest` with
/// the specified request number.
macro_rules! define_dma_request {
    ($(#[$meta:meta])* $name:ident = $num:expr) => {
        $(#[$meta])*
        #[derive(Debug, Copy, Clone)]
        pub struct $name;

        // Sealing impl: prevents downstream crates from defining request types.
        impl sealed::SealedDmaRequest for $name {}

        impl DmaRequest for $name {
            const REQUEST_NUMBER: u8 = $num;
        }
    };
}
385
// LPUART DMA request sources (from MCXA276 reference manual Table 4-8)
//
// The numbering below follows a regular pattern: LPUARTn RX = 21 + 2n,
// LPUARTn TX = 22 + 2n.
define_dma_request!(
    /// DMA request source for LPUART0 RX.
    Lpuart0RxRequest = 21
);
define_dma_request!(
    /// DMA request source for LPUART0 TX.
    Lpuart0TxRequest = 22
);
define_dma_request!(
    /// DMA request source for LPUART1 RX.
    Lpuart1RxRequest = 23
);
define_dma_request!(
    /// DMA request source for LPUART1 TX.
    Lpuart1TxRequest = 24
);
define_dma_request!(
    /// DMA request source for LPUART2 RX.
    Lpuart2RxRequest = 25
);
define_dma_request!(
    /// DMA request source for LPUART2 TX.
    Lpuart2TxRequest = 26
);
define_dma_request!(
    /// DMA request source for LPUART3 RX.
    Lpuart3RxRequest = 27
);
define_dma_request!(
    /// DMA request source for LPUART3 TX.
    Lpuart3TxRequest = 28
);
define_dma_request!(
    /// DMA request source for LPUART4 RX.
    Lpuart4RxRequest = 29
);
define_dma_request!(
    /// DMA request source for LPUART4 TX.
    Lpuart4TxRequest = 30
);
define_dma_request!(
    /// DMA request source for LPUART5 RX.
    Lpuart5RxRequest = 31
);
define_dma_request!(
    /// DMA request source for LPUART5 TX.
    Lpuart5TxRequest = 32
);
435
436// ============================================================================
437// Channel Trait (Sealed Pattern)
438// ============================================================================
439
mod sealed {
    use crate::pac::Interrupt;

    /// Sealed trait for DMA channels.
    ///
    /// Object-safe runtime accessors; the public `Channel` trait also
    /// exposes the same information as associated constants.
    pub trait SealedChannel {
        /// Zero-based channel index into the TCD array.
        fn index(&self) -> usize;
        /// Interrupt vector for this channel.
        fn interrupt(&self) -> Interrupt;
    }

    /// Sealed trait for DMA request sources.
    pub trait SealedDmaRequest {}
}
454
/// Marker trait implemented by HAL peripheral tokens that map to a DMA0
/// channel backed by one EDMA_0_TCD0 TCD slot.
///
/// This trait is sealed and cannot be implemented outside this crate.
// `private_bounds` is allowed so this public trait can require the private
// sealing trait as a supertrait.
#[allow(private_bounds)]
pub trait Channel: sealed::SealedChannel + PeripheralType + Into<AnyChannel> + 'static {
    /// Zero-based channel index into the TCD array.
    ///
    /// Const mirror of the sealed runtime accessor.
    const INDEX: usize;
    /// Interrupt vector for this channel.
    const INTERRUPT: Interrupt;
}
466
/// Type-erased DMA channel.
///
/// This allows storing DMA channels in a uniform way regardless of their
/// concrete type, useful for async transfer futures and runtime channel selection.
///
/// Constructed via `From<peripheral token>` (see `impl_channel!` below).
#[derive(Debug, Clone, Copy)]
pub struct AnyChannel {
    // Zero-based index into the TCD array.
    index: usize,
    // Interrupt vector servicing this channel.
    interrupt: Interrupt,
}
476
477impl AnyChannel {
478 /// Get the channel index.
479 #[inline]
480 pub const fn index(&self) -> usize {
481 self.index
482 }
483
484 /// Get the channel interrupt.
485 #[inline]
486 pub const fn interrupt(&self) -> Interrupt {
487 self.interrupt
488 }
489
490 /// Get a reference to the TCD register block for this channel.
491 ///
492 /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
493 #[inline]
494 fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
495 // Safety: MCXA276 has a single eDMA instance, and we're only accessing
496 // the TCD for this specific channel
497 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
498 edma.tcd(self.index)
499 }
500
501 /// Check if the channel's DONE flag is set.
502 pub fn is_done(&self) -> bool {
503 self.tcd().ch_csr().read().done().bit_is_set()
504 }
505
506 /// Get the waker for this channel.
507 pub fn waker(&self) -> &'static AtomicWaker {
508 &STATES[self.index].waker
509 }
510}
511
impl sealed::SealedChannel for AnyChannel {
    // Simply forwards to the stored fields; mirrors the const inherent
    // accessors on `AnyChannel`.
    fn index(&self) -> usize {
        self.index
    }

    fn interrupt(&self) -> Interrupt {
        self.interrupt
    }
}
521
/// Macro to implement the channel traits for a DMA peripheral token.
///
/// For each `(peripheral, index, irq)` triple this generates:
/// - `sealed::SealedChannel` — index/interrupt as runtime accessors,
/// - `Channel` — the same values as associated constants,
/// - `From<peripheral> for AnyChannel` — type erasure.
macro_rules! impl_channel {
    ($peri:ident, $index:expr, $irq:ident) => {
        impl sealed::SealedChannel for crate::peripherals::$peri {
            fn index(&self) -> usize {
                $index
            }

            fn interrupt(&self) -> Interrupt {
                Interrupt::$irq
            }
        }

        impl Channel for crate::peripherals::$peri {
            const INDEX: usize = $index;
            const INTERRUPT: Interrupt = Interrupt::$irq;
        }

        impl From<crate::peripherals::$peri> for AnyChannel {
            fn from(_: crate::peripherals::$peri) -> Self {
                AnyChannel {
                    index: $index,
                    interrupt: Interrupt::$irq,
                }
            }
        }
    };
}
550
// DMA0 exposes eight channels; each maps 1:1 onto its own interrupt vector.
impl_channel!(DMA_CH0, 0, DMA_CH0);
impl_channel!(DMA_CH1, 1, DMA_CH1);
impl_channel!(DMA_CH2, 2, DMA_CH2);
impl_channel!(DMA_CH3, 3, DMA_CH3);
impl_channel!(DMA_CH4, 4, DMA_CH4);
impl_channel!(DMA_CH5, 5, DMA_CH5);
impl_channel!(DMA_CH6, 6, DMA_CH6);
impl_channel!(DMA_CH7, 7, DMA_CH7);
/// Strongly-typed handle to a DMA0 channel.
///
/// The lifetime of this value is tied to the unique peripheral token
/// supplied by `embassy_hal_internal::peripherals!`, so safe code cannot
/// create two `DmaChannel` instances for the same hardware channel.
pub struct DmaChannel<C: Channel> {
    // Zero-sized marker: the channel identity lives entirely in the type
    // parameter `C` (see `Channel::INDEX` / `Channel::INTERRUPT`).
    _ch: core::marker::PhantomData<C>,
}
568
569// ============================================================================
570// DMA Transfer Methods - API Overview
571// ============================================================================
572//
573// The DMA API provides two categories of methods for configuring transfers:
574//
575// ## 1. Async Methods (Return `Transfer` Future)
576//
577// These methods return a [`Transfer`] Future that must be `.await`ed:
578//
579// - [`write()`](DmaChannel::write) - Memory-to-peripheral using default eDMA TCD block
580// - [`read()`](DmaChannel::read) - Peripheral-to-memory using default eDMA TCD block
581// - [`write_to_peripheral()`](DmaChannel::write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
582// - [`read_from_peripheral()`](DmaChannel::read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
583// - [`mem_to_mem()`](DmaChannel::mem_to_mem) - Memory-to-memory using default eDMA TCD block
584// - [`transfer_mem_to_mem()`](DmaChannel::transfer_mem_to_mem) - Memory-to-memory with custom eDMA TCD block
585//
586// The `Transfer` manages the DMA lifecycle automatically:
587// - Enables channel request
588// - Waits for completion via async/await
589// - Cleans up on completion
590//
591// **Important:** `Transfer::Drop` aborts the transfer if dropped before completion.
592// This means you MUST `.await` the Transfer or it will be aborted when it goes out of scope.
593//
594// **Use case:** When you want to use async/await and let the Transfer handle lifecycle management.
595//
596// ## 2. Setup Methods (Configure TCD Only)
597//
598// These methods configure the TCD but do NOT return a `Transfer`:
599//
600// - [`setup_write()`](DmaChannel::setup_write) - Memory-to-peripheral using default eDMA TCD block
601// - [`setup_read()`](DmaChannel::setup_read) - Peripheral-to-memory using default eDMA TCD block
602// - [`setup_write_to_peripheral()`](DmaChannel::setup_write_to_peripheral) - Memory-to-peripheral with custom eDMA TCD block
603// - [`setup_read_from_peripheral()`](DmaChannel::setup_read_from_peripheral) - Peripheral-to-memory with custom eDMA TCD block
604//
605// The caller is responsible for the complete DMA lifecycle:
606// 1. Call [`enable_request()`](DmaChannel::enable_request) to start the transfer
607// 2. Poll [`is_done()`](DmaChannel::is_done) or use interrupts to detect completion
608// 3. Call [`disable_request()`](DmaChannel::disable_request), [`clear_done()`](DmaChannel::clear_done),
609// [`clear_interrupt()`](DmaChannel::clear_interrupt) for cleanup
610//
611// **Use case:** Peripheral drivers (like LPUART) that need fine-grained control over
612// DMA setup before starting a `Transfer`.
613//
614// ============================================================================
615
616impl<C: Channel> DmaChannel<C> {
617 /// Wrap a DMA channel token (takes ownership of the Peri wrapper).
618 ///
619 /// Note: DMA is initialized during `hal::init()` via `dma::init()`.
620 #[inline]
621 pub fn new(_ch: embassy_hal_internal::Peri<'_, C>) -> Self {
622 Self {
623 _ch: core::marker::PhantomData,
624 }
625 }
626
627 /// Wrap a DMA channel token directly (for internal use).
628 ///
629 /// Note: DMA is initialized during `hal::init()` via `dma::init()`.
630 #[inline]
631 pub fn from_token(_ch: C) -> Self {
632 Self {
633 _ch: core::marker::PhantomData,
634 }
635 }
636
637 /// Channel index in the EDMA_0_TCD0 array.
638 #[inline]
639 pub const fn index(&self) -> usize {
640 C::INDEX
641 }
642
643 /// Convert this typed channel into a type-erased `AnyChannel`.
644 #[inline]
645 pub fn into_any(self) -> AnyChannel {
646 AnyChannel {
647 index: C::INDEX,
648 interrupt: C::INTERRUPT,
649 }
650 }
651
652 /// Get a reference to the type-erased channel info.
653 #[inline]
654 pub fn as_any(&self) -> AnyChannel {
655 AnyChannel {
656 index: C::INDEX,
657 interrupt: C::INTERRUPT,
658 }
659 }
660
661 /// Return a reference to the underlying TCD register block.
662 ///
663 /// This steals the eDMA pointer internally since MCXA276 has only one eDMA instance.
664 ///
665 /// # Note
666 ///
667 /// This is exposed for advanced use cases that need direct TCD access.
668 /// For most use cases, prefer the higher-level transfer methods.
669 #[inline]
670 pub fn tcd(&self) -> &'static pac::edma_0_tcd0::Tcd {
671 // Safety: MCXA276 has a single eDMA instance
672 let edma = unsafe { &*pac::Edma0Tcd0::ptr() };
673 edma.tcd(C::INDEX)
674 }
675
676 /// Start an async transfer.
677 ///
678 /// The channel must already be configured. This enables the channel
679 /// request and returns a `Transfer` future that resolves when the
680 /// DMA transfer completes.
681 ///
682 /// # Safety
683 ///
684 /// The caller must ensure the DMA channel has been properly configured
685 /// and that source/destination buffers remain valid for the duration
686 /// of the transfer.
687 pub unsafe fn start_transfer(&self) -> Transfer<'_> {
688 // Clear any previous DONE/INT flags
689 let t = self.tcd();
690 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
691 t.ch_int().write(|w| w.int().clear_bit_by_one());
692
693 // Enable the channel request
694 t.ch_csr().modify(|_, w| w.erq().enable());
695
696 Transfer::new(self.as_any())
697 }
698
699 // ========================================================================
700 // Type-Safe Transfer Methods (Embassy-style API)
701 // ========================================================================
702
703 /// Perform a memory-to-memory DMA transfer (simplified API).
704 ///
705 /// This is a type-safe wrapper that uses the `Word` trait to determine
706 /// the correct transfer width automatically. Uses the global eDMA TCD
707 /// register accessor internally.
708 ///
709 /// # Arguments
710 ///
711 /// * `src` - Source buffer
712 /// * `dst` - Destination buffer (must be at least as large as src)
713 /// * `options` - Transfer configuration options
714 ///
715 /// # Safety
716 ///
717 /// The source and destination buffers must remain valid for the
718 /// duration of the transfer.
719 pub unsafe fn mem_to_mem<W: Word>(&self, src: &[W], dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
720 self.transfer_mem_to_mem(src, dst, options)
721 }
722
    /// Perform a memory-to-memory DMA transfer.
    ///
    /// This is a type-safe wrapper that uses the `Word` trait to determine
    /// the correct transfer width automatically. All bytes are moved in a
    /// single software-triggered minor loop (major loop count = 1).
    ///
    /// # Arguments
    ///
    /// * `src` - Source buffer
    /// * `dst` - Destination buffer (must be at least as large as src)
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `src` is empty, if `dst` is shorter than `src`, or if
    /// `src.len()` exceeds `0x7fff` (the 15-bit CITER/BITER limit).
    ///
    /// # Safety
    ///
    /// The source and destination buffers must remain valid for the
    /// duration of the transfer.
    pub unsafe fn transfer_mem_to_mem<W: Word>(
        &self,
        src: &[W],
        dst: &mut [W],
        options: TransferOptions,
    ) -> Transfer<'_> {
        assert!(!src.is_empty());
        assert!(dst.len() >= src.len());
        assert!(src.len() <= 0x7fff);

        let size = W::size();
        let byte_count = (src.len() * size.bytes()) as u32;

        let t = self.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is fully reset before touching TCD
        cortex_m::asm::dsb();

        // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
        // Reset ALL TCD registers to 0 to clear any stale configuration from
        // previous transfers. This is critical when reusing a channel.
        t.tcd_saddr().write(|w| w.saddr().bits(0));
        t.tcd_soff().write(|w| w.soff().bits(0));
        t.tcd_attr().write(|w| w.bits(0));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_daddr().write(|w| w.daddr().bits(0));
        t.tcd_doff().write(|w| w.doff().bits(0));
        t.tcd_citer_elinkno().write(|w| w.bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
        t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
        t.tcd_biter_elinkno().write(|w| w.bits(0));

        // Memory barrier after TCD reset
        cortex_m::asm::dsb();

        // Note: Priority is managed by round-robin arbitration (set in init())
        // Per-channel priority can be configured via ch_pri() if needed

        // Now configure the new transfer

        // Source address and increment (one word per read)
        t.tcd_saddr().write(|w| w.saddr().bits(src.as_ptr() as u32));
        t.tcd_soff().write(|w| w.soff().bits(size.bytes() as u16));

        // Destination address and increment (one word per write)
        t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(size.bytes() as u16));

        // Transfer attributes (size)
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));

        // Minor loop: transfer all bytes in one minor loop
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_count));

        // No source/dest adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = 1 (single major loop)
        // Write BITER first, then CITER (CITER must match BITER at start)
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Control/status: interrupt on major complete, start
        // Write this last after all other TCD registers are configured
        let int_major = options.complete_transfer_interrupt;
        t.tcd_csr().write(|w| {
            w.intmajor()
                .bit(int_major)
                .inthalf()
                .bit(options.half_transfer_interrupt)
                .dreq()
                .set_bit() // Auto-disable request after major loop
                .start()
                .set_bit() // Start the channel
        });

        Transfer::new(self.as_any())
    }
837
    /// Fill a memory buffer with a pattern value (memset).
    ///
    /// This performs a DMA transfer where the source address remains fixed
    /// (pattern value) while the destination address increments through the buffer.
    /// It's useful for quickly filling large memory regions with a constant value.
    ///
    /// # Arguments
    ///
    /// * `pattern` - Reference to the pattern value (will be read repeatedly)
    /// * `dst` - Destination buffer to fill
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `dst` is empty or longer than `0x7fff` elements.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use embassy_mcxa::dma::{DmaChannel, TransferOptions};
    ///
    /// let dma_ch = DmaChannel::new(p.DMA_CH0);
    /// let pattern: u32 = 0xDEADBEEF;
    /// let mut buffer = [0u32; 256];
    ///
    /// unsafe {
    ///     dma_ch.memset(&pattern, &mut buffer, TransferOptions::default()).await;
    /// }
    /// // buffer is now filled with 0xDEADBEEF
    /// ```
    ///
    /// # Safety
    ///
    /// - The pattern and destination buffer must remain valid for the duration of the transfer.
    pub unsafe fn memset<W: Word>(&self, pattern: &W, dst: &mut [W], options: TransferOptions) -> Transfer<'_> {
        assert!(!dst.is_empty());
        assert!(dst.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();
        // Total bytes to transfer - all in one minor loop for software-triggered transfers
        let total_bytes = (dst.len() * byte_size) as u32;

        let t = self.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is fully reset before touching TCD
        cortex_m::asm::dsb();

        // Full TCD reset following NXP SDK pattern (EDMA_TcdResetExt).
        // Reset ALL TCD registers to 0 to clear any stale configuration from
        // previous transfers. This is critical when reusing a channel.
        t.tcd_saddr().write(|w| w.saddr().bits(0));
        t.tcd_soff().write(|w| w.soff().bits(0));
        t.tcd_attr().write(|w| w.bits(0));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(0));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_daddr().write(|w| w.daddr().bits(0));
        t.tcd_doff().write(|w| w.doff().bits(0));
        t.tcd_citer_elinkno().write(|w| w.bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
        t.tcd_csr().write(|w| w.bits(0)); // Clear CSR completely
        t.tcd_biter_elinkno().write(|w| w.bits(0));

        // Memory barrier after TCD reset
        cortex_m::asm::dsb();

        // Now configure the new transfer
        //
        // For software-triggered memset, we use a SINGLE minor loop that transfers
        // all bytes at once. The source address stays fixed (SOFF=0) while the
        // destination increments (DOFF=byte_size). The eDMA will read from the
        // same source address for each destination word.
        //
        // This is necessary because the START bit only triggers ONE minor loop
        // iteration. Using CITER>1 with software trigger would require multiple
        // START triggers.

        // Source: pattern address, fixed (soff=0)
        t.tcd_saddr().write(|w| w.saddr().bits(pattern as *const W as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // Fixed source - reads pattern repeatedly

        // Destination: memory buffer, incrementing by word size
        t.tcd_daddr().write(|w| w.daddr().bits(dst.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes - source and dest are same word size
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| w.ssize().bits(hw_size).dsize().bits(hw_size));

        // Minor loop: transfer ALL bytes in one minor loop (like mem_to_mem)
        // This allows the entire transfer to complete with a single START trigger
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(total_bytes));

        // No address adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = 1 (single major loop, all data in minor loop)
        // Write BITER first, then CITER (CITER must match BITER at start)
        t.tcd_biter_elinkno().write(|w| w.biter().bits(1));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(1));

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Control/status: interrupt on major complete, start immediately
        // Write this last after all other TCD registers are configured
        let int_major = options.complete_transfer_interrupt;
        t.tcd_csr().write(|w| {
            w.intmajor()
                .bit(int_major)
                .inthalf()
                .bit(options.half_transfer_interrupt)
                .dreq()
                .set_bit() // Auto-disable request after major loop
                .start()
                .set_bit() // Start the channel
        });

        Transfer::new(self.as_any())
    }
969
970 /// Write data from memory to a peripheral register.
971 ///
972 /// The destination address remains fixed (peripheral register) while
973 /// the source address increments through the buffer.
974 ///
975 /// # Arguments
976 ///
977 /// * `buf` - Source buffer to write from
978 /// * `peri_addr` - Peripheral register address
979 /// * `options` - Transfer configuration options
980 ///
981 /// # Safety
982 ///
983 /// - The buffer must remain valid for the duration of the transfer.
984 /// - The peripheral address must be valid for writes.
985 pub unsafe fn write<W: Word>(&self, buf: &[W], peri_addr: *mut W, options: TransferOptions) -> Transfer<'_> {
986 self.write_to_peripheral(buf, peri_addr, options)
987 }
988
989 /// Configure a memory-to-peripheral DMA transfer without starting it.
990 ///
991 /// This is a convenience wrapper around [`setup_write_to_peripheral()`](Self::setup_write_to_peripheral)
992 /// that uses the default eDMA TCD register block.
993 ///
994 /// This method configures the TCD but does NOT return a `Transfer`. The caller
995 /// is responsible for the complete DMA lifecycle:
996 /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
997 /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
998 /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
999 /// [`clear_interrupt()`](Self::clear_interrupt) for cleanup
1000 ///
1001 /// # Example
1002 ///
1003 /// ```no_run
1004 /// # use embassy_mcxa::dma::DmaChannel;
1005 /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
1006 /// # let uart_tx_addr = 0x4000_0000 as *mut u8;
1007 /// let data = [0x48, 0x65, 0x6c, 0x6c, 0x6f]; // "Hello"
1008 ///
1009 /// unsafe {
1010 /// // Configure the transfer
1011 /// dma_ch.setup_write(&data, uart_tx_addr, EnableInterrupt::Yes);
1012 ///
1013 /// // Start when peripheral is ready
1014 /// dma_ch.enable_request();
1015 ///
1016 /// // Wait for completion (or use interrupt)
1017 /// while !dma_ch.is_done() {}
1018 ///
1019 /// // Clean up
1020 /// dma_ch.clear_done();
1021 /// dma_ch.clear_interrupt();
1022 /// }
1023 /// ```
1024 ///
1025 /// # Arguments
1026 ///
1027 /// * `buf` - Source buffer to write from
1028 /// * `peri_addr` - Peripheral register address
1029 /// * `enable_interrupt` - Whether to enable interrupt on completion
1030 ///
1031 /// # Safety
1032 ///
1033 /// - The buffer must remain valid for the duration of the transfer.
1034 /// - The peripheral address must be valid for writes.
1035 pub unsafe fn setup_write<W: Word>(&self, buf: &[W], peri_addr: *mut W, enable_interrupt: EnableInterrupt) {
1036 self.setup_write_to_peripheral(buf, peri_addr, enable_interrupt)
1037 }
1038
1039 /// Write data from memory to a peripheral register.
1040 ///
1041 /// The destination address remains fixed (peripheral register) while
1042 /// the source address increments through the buffer.
1043 ///
1044 /// # Arguments
1045 ///
1046 /// * `buf` - Source buffer to write from
1047 /// * `peri_addr` - Peripheral register address
1048 /// * `options` - Transfer configuration options
1049 ///
1050 /// # Safety
1051 ///
1052 /// - The buffer must remain valid for the duration of the transfer.
1053 /// - The peripheral address must be valid for writes.
1054 pub unsafe fn write_to_peripheral<W: Word>(
1055 &self,
1056 buf: &[W],
1057 peri_addr: *mut W,
1058 options: TransferOptions,
1059 ) -> Transfer<'_> {
1060 assert!(!buf.is_empty());
1061 assert!(buf.len() <= 0x7fff);
1062
1063 let size = W::size();
1064 let byte_size = size.bytes();
1065
1066 let t = self.tcd();
1067
1068 // Reset channel state
1069 t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
1070 t.ch_es().write(|w| w.bits(0));
1071 t.ch_int().write(|w| w.int().clear_bit_by_one());
1072
1073 // Addresses
1074 t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
1075 t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));
1076
1077 // Offsets: Source increments, Dest fixed
1078 t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
1079 t.tcd_doff().write(|w| w.doff().bits(0));
1080
1081 // Attributes: set size and explicitly disable modulo
1082 let hw_size = size.to_hw_size();
1083 t.tcd_attr().write(|w| {
1084 w.ssize()
1085 .bits(hw_size)
1086 .dsize()
1087 .bits(hw_size)
1088 .smod()
1089 .disable()
1090 .dmod()
1091 .bits(0)
1092 });
1093
1094 // Minor loop: transfer one word per request (match old: only set nbytes)
1095 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));
1096
1097 // No final adjustments
1098 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
1099 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
1100
1101 // Major loop count = number of words
1102 let count = buf.len() as u16;
1103 t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
1104 t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());
1105
1106 // CSR: interrupt on major loop complete and auto-clear ERQ
1107 t.tcd_csr().write(|w| {
1108 let w = if options.complete_transfer_interrupt {
1109 w.intmajor().enable()
1110 } else {
1111 w.intmajor().disable()
1112 };
1113 w.inthalf()
1114 .disable()
1115 .dreq()
1116 .erq_field_clear() // Disable request when done
1117 .esg()
1118 .normal_format()
1119 .majorelink()
1120 .disable()
1121 .eeop()
1122 .disable()
1123 .esda()
1124 .disable()
1125 .bwc()
1126 .no_stall()
1127 });
1128
1129 // Ensure all TCD writes have completed before DMA engine reads them
1130 cortex_m::asm::dsb();
1131
1132 Transfer::new(self.as_any())
1133 }
1134
1135 /// Read data from a peripheral register to memory.
1136 ///
1137 /// The source address remains fixed (peripheral register) while
1138 /// the destination address increments through the buffer.
1139 ///
1140 /// # Arguments
1141 ///
1142 /// * `peri_addr` - Peripheral register address
1143 /// * `buf` - Destination buffer to read into
1144 /// * `options` - Transfer configuration options
1145 ///
1146 /// # Safety
1147 ///
1148 /// - The buffer must remain valid for the duration of the transfer.
1149 /// - The peripheral address must be valid for reads.
1150 pub unsafe fn read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], options: TransferOptions) -> Transfer<'_> {
1151 self.read_from_peripheral(peri_addr, buf, options)
1152 }
1153
1154 /// Configure a peripheral-to-memory DMA transfer without starting it.
1155 ///
1156 /// This is a convenience wrapper around [`setup_read_from_peripheral()`](Self::setup_read_from_peripheral)
1157 /// that uses the default eDMA TCD register block.
1158 ///
1159 /// This method configures the TCD but does NOT return a `Transfer`. The caller
1160 /// is responsible for the complete DMA lifecycle:
1161 /// 1. Call [`enable_request()`](Self::enable_request) to start the transfer
1162 /// 2. Poll [`is_done()`](Self::is_done) or use interrupts to detect completion
1163 /// 3. Call [`disable_request()`](Self::disable_request), [`clear_done()`](Self::clear_done),
1164 /// [`clear_interrupt()`](Self::clear_interrupt) for cleanup
1165 ///
1166 /// # Example
1167 ///
1168 /// ```no_run
1169 /// # use embassy_mcxa::dma::DmaChannel;
1170 /// # let dma_ch = DmaChannel::new(p.DMA_CH0);
1171 /// # let uart_rx_addr = 0x4000_0000 as *const u8;
1172 /// let mut buf = [0u8; 32];
1173 ///
1174 /// unsafe {
1175 /// // Configure the transfer
1176 /// dma_ch.setup_read(uart_rx_addr, &mut buf, EnableInterrupt::Yes);
1177 ///
1178 /// // Start when peripheral is ready
1179 /// dma_ch.enable_request();
1180 ///
1181 /// // Wait for completion (or use interrupt)
1182 /// while !dma_ch.is_done() {}
1183 ///
1184 /// // Clean up
1185 /// dma_ch.clear_done();
1186 /// dma_ch.clear_interrupt();
1187 /// }
1188 /// // buf now contains received data
1189 /// ```
1190 ///
1191 /// # Arguments
1192 ///
1193 /// * `peri_addr` - Peripheral register address
1194 /// * `buf` - Destination buffer to read into
1195 /// * `enable_interrupt` - Whether to enable interrupt on completion
1196 ///
1197 /// # Safety
1198 ///
1199 /// - The buffer must remain valid for the duration of the transfer.
1200 /// - The peripheral address must be valid for reads.
1201 pub unsafe fn setup_read<W: Word>(&self, peri_addr: *const W, buf: &mut [W], enable_interrupt: EnableInterrupt) {
1202 self.setup_read_from_peripheral(peri_addr, buf, enable_interrupt)
1203 }
1204
    /// Read data from a peripheral register to memory.
    ///
    /// The source address remains fixed (peripheral register) while
    /// the destination address increments through the buffer. One word is
    /// moved per peripheral request (minor loop = one word, major loop
    /// count = `buf.len()`), and the request is auto-disabled (DREQ) once
    /// the major loop completes.
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address
    /// * `buf` - Destination buffer to read into
    /// * `options` - Transfer configuration options
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or longer than `0x7fff` elements.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the duration of the transfer.
    /// - The peripheral address must be valid for reads.
    pub unsafe fn read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        options: TransferOptions,
    ) -> Transfer<'_> {
        assert!(!buf.is_empty());
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request, no offsets
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // No address adjustment after major loop
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Control/status: interrupt on major complete, auto-clear ERQ when done
        t.tcd_csr().write(|w| {
            let w = if options.complete_transfer_interrupt {
                w.intmajor().enable()
            } else {
                w.intmajor().disable()
            };
            let w = if options.half_transfer_interrupt {
                w.inthalf().enable()
            } else {
                w.inthalf().disable()
            };
            w.dreq()
                .erq_field_clear() // Disable request when done (important for peripheral DMA)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();

        Transfer::new(self.as_any())
    }
1321
1322 /// Configure a memory-to-peripheral DMA transfer without starting it.
1323 ///
1324 /// This configures the TCD for a memory-to-peripheral transfer but does NOT
1325 /// return a Transfer object. The caller is responsible for:
1326 /// 1. Enabling the peripheral's DMA request
1327 /// 2. Calling `enable_request()` to start the transfer
1328 /// 3. Polling `is_done()` or using interrupts to detect completion
1329 /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
1330 ///
1331 /// Use this when you need manual control over the DMA lifecycle (e.g., in
1332 /// peripheral drivers that have their own completion polling).
1333 ///
1334 /// # Arguments
1335 ///
1336 /// * `buf` - Source buffer to write from
1337 /// * `peri_addr` - Peripheral register address
1338 /// * `enable_interrupt` - Whether to enable interrupt on completion
1339 ///
1340 /// # Safety
1341 ///
1342 /// - The buffer must remain valid for the duration of the transfer.
1343 /// - The peripheral address must be valid for writes.
1344 pub unsafe fn setup_write_to_peripheral<W: Word>(
1345 &self,
1346 buf: &[W],
1347 peri_addr: *mut W,
1348 enable_interrupt: EnableInterrupt,
1349 ) {
1350 assert!(!buf.is_empty());
1351 assert!(buf.len() <= 0x7fff);
1352
1353 let size = W::size();
1354 let byte_size = size.bytes();
1355
1356 let t = self.tcd();
1357
1358 // Reset channel state
1359 t.ch_csr().write(|w| w.erq().disable().done().clear_bit_by_one());
1360 t.ch_es().write(|w| w.bits(0));
1361 t.ch_int().write(|w| w.int().clear_bit_by_one());
1362
1363 // Addresses
1364 t.tcd_saddr().write(|w| w.saddr().bits(buf.as_ptr() as u32));
1365 t.tcd_daddr().write(|w| w.daddr().bits(peri_addr as u32));
1366
1367 // Offsets: Source increments, Dest fixed
1368 t.tcd_soff().write(|w| w.soff().bits(byte_size as u16));
1369 t.tcd_doff().write(|w| w.doff().bits(0));
1370
1371 // Attributes: set size and explicitly disable modulo
1372 let hw_size = size.to_hw_size();
1373 t.tcd_attr().write(|w| {
1374 w.ssize()
1375 .bits(hw_size)
1376 .dsize()
1377 .bits(hw_size)
1378 .smod()
1379 .disable()
1380 .dmod()
1381 .bits(0)
1382 });
1383
1384 // Minor loop: transfer one word per request
1385 t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));
1386
1387 // No final adjustments
1388 t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
1389 t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));
1390
1391 // Major loop count = number of words
1392 let count = buf.len() as u16;
1393 t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
1394 t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());
1395
1396 // CSR: optional interrupt on major loop complete and auto-clear ERQ
1397 t.tcd_csr().write(|w| {
1398 let w = match enable_interrupt {
1399 EnableInterrupt::Yes => w.intmajor().enable(),
1400 EnableInterrupt::No => w.intmajor().disable(),
1401 };
1402 w.inthalf()
1403 .disable()
1404 .dreq()
1405 .erq_field_clear()
1406 .esg()
1407 .normal_format()
1408 .majorelink()
1409 .disable()
1410 .eeop()
1411 .disable()
1412 .esda()
1413 .disable()
1414 .bwc()
1415 .no_stall()
1416 });
1417
1418 // Ensure all TCD writes have completed before DMA engine reads them
1419 cortex_m::asm::dsb();
1420 }
1421
1422 /// Configure a peripheral-to-memory DMA transfer without starting it.
1423 ///
1424 /// This configures the TCD for a peripheral-to-memory transfer but does NOT
1425 /// return a Transfer object. The caller is responsible for:
1426 /// 1. Enabling the peripheral's DMA request
1427 /// 2. Calling `enable_request()` to start the transfer
1428 /// 3. Polling `is_done()` or using interrupts to detect completion
1429 /// 4. Calling `disable_request()`, `clear_done()`, `clear_interrupt()` for cleanup
1430 ///
1431 /// Use this when you need manual control over the DMA lifecycle (e.g., in
1432 /// peripheral drivers that have their own completion polling).
1433 ///
1434 /// # Arguments
1435 ///
1436 /// * `peri_addr` - Peripheral register address
1437 /// * `buf` - Destination buffer to read into
1438 /// * `enable_interrupt` - Whether to enable interrupt on completion
1439 ///
1440 /// # Safety
1441 ///
1442 /// - The buffer must remain valid for the duration of the transfer.
1443 /// - The peripheral address must be valid for reads.
    pub unsafe fn setup_read_from_peripheral<W: Word>(
        &self,
        peri_addr: *const W,
        buf: &mut [W],
        enable_interrupt: EnableInterrupt,
    ) {
        assert!(!buf.is_empty());
        // CITER/BITER are written in ELINKNO format below, which holds a
        // 15-bit count; reject buffers that would not fit.
        assert!(buf.len() <= 0x7fff);

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel control/error/interrupt state.
        // ERQ is disabled first so the engine cannot start while the TCD is
        // being rewritten; DONE and INT are write-1-to-clear.
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed (SOFF = 0 → address never moves)
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0));

        // Destination: memory buffer, incrementing by one element per read
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Attributes: set size and explicitly disable modulo
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(byte_size as u32));

        // No final adjustments (one-shot transfer; addresses are not rewound)
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(0));

        // Major loop count = number of words
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // CSR: optional interrupt on major loop complete and auto-clear ERQ
        // (DREQ) so the channel stops itself after the final minor loop.
        t.tcd_csr().write(|w| {
            let w = match enable_interrupt {
                EnableInterrupt::Yes => w.intmajor().enable(),
                EnableInterrupt::No => w.intmajor().disable(),
            };
            w.inthalf()
                .disable()
                .dreq()
                .erq_field_clear()
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have completed before DMA engine reads them
        cortex_m::asm::dsb();
    }
1532
1533 /// Configure the integrated channel MUX to use the given typed
1534 /// DMA request source (e.g., [`Lpuart2TxRequest`] or [`Lpuart2RxRequest`]).
1535 ///
1536 /// This is the type-safe version that uses marker types to ensure
1537 /// compile-time verification of request source validity.
1538 ///
1539 /// # Safety
1540 ///
1541 /// The channel must be properly configured before enabling requests.
1542 /// The caller must ensure the DMA request source matches the peripheral
1543 /// that will drive this channel.
1544 ///
1545 /// # Note
1546 ///
1547 /// The NXP SDK requires a two-step write sequence: first clear
1548 /// the mux to 0, then set the actual source. This is a hardware
1549 /// requirement on eDMA4 for the mux to properly latch.
1550 ///
1551 /// # Example
1552 ///
1553 /// ```ignore
1554 /// use embassy_mcxa::dma::{DmaChannel, Lpuart2RxRequest};
1555 ///
1556 /// // Type-safe: compiler verifies this is a valid DMA request type
1557 /// unsafe {
1558 /// channel.set_request_source::<Lpuart2RxRequest>();
1559 /// }
1560 /// ```
    #[inline]
    pub unsafe fn set_request_source<R: DmaRequest>(&self) {
        // Two-step write per NXP SDK: clear to 0, then set actual source.
        // The intermediate dsb guarantees the clear reaches the peripheral
        // before the new source value is latched.
        self.tcd().ch_mux().write(|w| w.src().bits(0));
        cortex_m::asm::dsb(); // Ensure the clear completes before setting new source
        self.tcd().ch_mux().write(|w| w.src().bits(R::REQUEST_NUMBER));
    }
1568
1569 /// Enable hardware requests for this channel (ERQ=1).
1570 ///
1571 /// # Safety
1572 ///
1573 /// The channel must be properly configured before enabling requests.
1574 pub unsafe fn enable_request(&self) {
1575 let t = self.tcd();
1576 t.ch_csr().modify(|_, w| w.erq().enable());
1577 }
1578
1579 /// Disable hardware requests for this channel (ERQ=0).
1580 ///
1581 /// # Safety
1582 ///
1583 /// Disabling requests on an active transfer may leave the transfer incomplete.
1584 pub unsafe fn disable_request(&self) {
1585 let t = self.tcd();
1586 t.ch_csr().modify(|_, w| w.erq().disable());
1587 }
1588
1589 /// Return true if the channel's DONE flag is set.
1590 pub fn is_done(&self) -> bool {
1591 let t = self.tcd();
1592 t.ch_csr().read().done().bit_is_set()
1593 }
1594
1595 /// Clear the DONE flag for this channel.
1596 ///
1597 /// Uses modify to preserve other bits (especially ERQ) unlike write
1598 /// which would clear ERQ and halt an active transfer.
1599 ///
1600 /// # Safety
1601 ///
1602 /// Clearing DONE while a transfer is in progress may cause undefined behavior.
1603 pub unsafe fn clear_done(&self) {
1604 let t = self.tcd();
1605 t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());
1606 }
1607
1608 /// Clear the channel interrupt flag (CH_INT.INT).
1609 ///
1610 /// # Safety
1611 ///
1612 /// Must be called from the correct interrupt context or with interrupts disabled.
1613 pub unsafe fn clear_interrupt(&self) {
1614 let t = self.tcd();
1615 t.ch_int().write(|w| w.int().clear_bit_by_one());
1616 }
1617
1618 /// Trigger a software start for this channel.
1619 ///
1620 /// # Safety
1621 ///
1622 /// The channel must be properly configured with a valid TCD before triggering.
1623 pub unsafe fn trigger_start(&self) {
1624 let t = self.tcd();
1625 t.tcd_csr().modify(|_, w| w.start().channel_started());
1626 }
1627
1628 /// Get the waker for this channel
1629 pub fn waker(&self) -> &'static AtomicWaker {
1630 &STATES[C::INDEX].waker
1631 }
1632
1633 /// Enable the interrupt for this channel in the NVIC.
1634 pub fn enable_interrupt(&self) {
1635 unsafe {
1636 cortex_m::peripheral::NVIC::unmask(C::INTERRUPT);
1637 }
1638 }
1639
1640 /// Enable Major Loop Linking.
1641 ///
1642 /// When the major loop completes, the hardware will trigger a service request
1643 /// on `link_ch`.
1644 ///
1645 /// # Arguments
1646 ///
1647 /// * `link_ch` - Target channel index (0-7) to link to
1648 ///
1649 /// # Safety
1650 ///
1651 /// The channel must be properly configured before setting up linking.
1652 pub unsafe fn set_major_link(&self, link_ch: usize) {
1653 let t = self.tcd();
1654 t.tcd_csr()
1655 .modify(|_, w| w.majorelink().enable().majorlinkch().bits(link_ch as u8));
1656 }
1657
1658 /// Disable Major Loop Linking.
1659 ///
1660 /// Removes any major loop channel linking previously configured.
1661 ///
1662 /// # Safety
1663 ///
1664 /// The caller must ensure this doesn't disrupt an active transfer that
1665 /// depends on the linking.
1666 pub unsafe fn clear_major_link(&self) {
1667 let t = self.tcd();
1668 t.tcd_csr().modify(|_, w| w.majorelink().disable());
1669 }
1670
    /// Enable Minor Loop Linking.
    ///
    /// After each minor loop, the hardware will trigger a service request
    /// on `link_ch`.
    ///
    /// # Arguments
    ///
    /// * `link_ch` - Target channel index (0-7) to link to
    ///
    /// # Note
    ///
    /// This rewrites CITER and BITER registers to the ELINKYES format.
    /// It preserves the current loop count.
    ///
    /// # Safety
    ///
    /// The channel must be properly configured before setting up linking.
    pub unsafe fn set_minor_link(&self, link_ch: usize) {
        let t = self.tcd();

        // Read current CITER (assuming ELINKNO format initially)
        // NOTE(review): the ELINKNO count is 15 bits but the ELINKYES count
        // field is narrower; counts too large for the ELINKYES field would be
        // truncated/rejected by the `.bits()` write below — confirm callers
        // only use minor linking with small loop counts.
        let current_citer = t.tcd_citer_elinkno().read().citer().bits();
        let current_biter = t.tcd_biter_elinkno().read().biter().bits();

        // Write back using ELINKYES format: same count, link enabled, and the
        // target channel encoded in LINKCH.
        t.tcd_citer_elinkyes().write(|w| {
            w.citer()
                .bits(current_citer)
                .elink()
                .enable()
                .linkch()
                .bits(link_ch as u8)
        });

        t.tcd_biter_elinkyes().write(|w| {
            w.biter()
                .bits(current_biter)
                .elink()
                .enable()
                .linkch()
                .bits(link_ch as u8)
        });
    }
1714
    /// Disable Minor Loop Linking.
    ///
    /// Removes any minor loop channel linking previously configured.
    /// This rewrites CITER and BITER registers to the ELINKNO format,
    /// preserving the current loop count.
    ///
    /// # Safety
    ///
    /// The caller must ensure this doesn't disrupt an active transfer that
    /// depends on the linking.
    pub unsafe fn clear_minor_link(&self) {
        let t = self.tcd();

        // Read current CITER (could be in either format, but we only need the count)
        // Note: In ELINKYES format, citer is 9 bits; in ELINKNO, it's 15 bits.
        // We read from ELINKNO which will give us the combined value.
        // NOTE(review): if a minor link was active, this 15-bit read also
        // includes the LINKCH bits above the 9-bit count, so the restored
        // count may be wrong when link_ch != 0 — confirm against hardware.
        let current_citer = t.tcd_citer_elinkno().read().citer().bits();
        let current_biter = t.tcd_biter_elinkno().read().biter().bits();

        // Write back using ELINKNO format (disabling link)
        t.tcd_citer_elinkno()
            .write(|w| w.citer().bits(current_citer).elink().disable());

        t.tcd_biter_elinkno()
            .write(|w| w.biter().bits(current_biter).elink().disable());
    }
1741
    /// Load a TCD from memory into the hardware channel registers.
    ///
    /// This is useful for scatter/gather and ping-pong transfers where
    /// TCDs are prepared in RAM and then loaded into the hardware.
    ///
    /// CITER/BITER are written via the ELINKNO accessors, so the raw `citer`/
    /// `biter` values are interpreted as plain counts; `tcd.attr` and
    /// `tcd.csr` are written as raw bit patterns.
    ///
    /// # Safety
    ///
    /// - The TCD must be properly initialized.
    /// - The caller must ensure no concurrent access to the same channel.
    pub unsafe fn load_tcd(&self, tcd: &Tcd) {
        let t = self.tcd();
        t.tcd_saddr().write(|w| w.saddr().bits(tcd.saddr));
        // Signed offsets/adjustments are reinterpreted bit-for-bit as the
        // unsigned register types expected by the PAC.
        t.tcd_soff().write(|w| w.soff().bits(tcd.soff as u16));
        t.tcd_attr().write(|w| w.bits(tcd.attr));
        t.tcd_nbytes_mloffno().write(|w| w.nbytes().bits(tcd.nbytes));
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(tcd.slast as u32));
        t.tcd_daddr().write(|w| w.daddr().bits(tcd.daddr));
        t.tcd_doff().write(|w| w.doff().bits(tcd.doff as u16));
        t.tcd_citer_elinkno().write(|w| w.citer().bits(tcd.citer));
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits(tcd.dlast_sga as u32));
        t.tcd_csr().write(|w| w.bits(tcd.csr));
        t.tcd_biter_elinkno().write(|w| w.biter().bits(tcd.biter));
    }
1765}
1766
/// In-memory representation of a Transfer Control Descriptor (TCD).
///
/// This matches the hardware layout (32 bytes).
#[repr(C, align(32))]
#[derive(Clone, Copy, Debug, Default)]
pub struct Tcd {
    /// Source address.
    pub saddr: u32,
    /// Signed source address offset applied after each read.
    pub soff: i16,
    /// Raw transfer attribute bits (SSIZE/DSIZE/SMOD/DMOD).
    pub attr: u16,
    /// Bytes transferred per minor loop (MLOFFNO interpretation).
    pub nbytes: u32,
    /// Signed source address adjustment applied after the major loop.
    pub slast: i32,
    /// Destination address.
    pub daddr: u32,
    /// Signed destination address offset applied after each write.
    pub doff: i16,
    /// Current major loop count (ELINKNO interpretation).
    pub citer: u16,
    /// Destination adjustment after the major loop — or, when ESG is set in
    /// `csr`, the address of the next TCD in a scatter-gather chain.
    pub dlast_sga: i32,
    /// Raw control/status bits (START, INTMAJOR, ESG, ...).
    pub csr: u16,
    /// Beginning major loop count (ELINKNO interpretation).
    pub biter: u16,
}
1785
/// Per-channel wakeup state. Futures register wakers here; they are expected
/// to be woken from the channel's interrupt handler (not shown in this file).
struct State {
    /// Waker for transfer complete interrupt
    waker: AtomicWaker,
    /// Waker for half-transfer interrupt
    half_waker: AtomicWaker,
}
1792
1793impl State {
1794 const fn new() -> Self {
1795 Self {
1796 waker: AtomicWaker::new(),
1797 half_waker: AtomicWaker::new(),
1798 }
1799 }
1800}
1801
// One wakeup-state slot per eDMA channel (8 channels on this part).
static STATES: [State; 8] = [
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
    State::new(),
];
1812
1813pub(crate) fn waker(idx: usize) -> &'static AtomicWaker {
1814 &STATES[idx].waker
1815}
1816
1817pub(crate) fn half_waker(idx: usize) -> &'static AtomicWaker {
1818 &STATES[idx].half_waker
1819}
1820
1821// ============================================================================
1822// Async Transfer Future
1823// ============================================================================
1824
/// An in-progress DMA transfer.
///
/// This type implements `Future` and can be `.await`ed to wait for the
/// transfer to complete. Dropping the transfer will abort it.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    /// Type-erased handle to the channel driving this transfer.
    channel: AnyChannel,
    /// Ties `'a` to the transfer — presumably the lifetime of the buffers the
    /// DMA is reading/writing; confirm against the constructors' callers.
    _phantom: core::marker::PhantomData<&'a ()>,
}
1834
impl<'a> Transfer<'a> {
    /// Create a new transfer for the given channel.
    ///
    /// The caller must have already configured and started the DMA channel.
    pub(crate) fn new(channel: AnyChannel) -> Self {
        Self {
            channel,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Check if the transfer is still running (i.e. DONE is not yet set).
    pub fn is_running(&self) -> bool {
        !self.channel.is_done()
    }

    /// Get the remaining transfer count (current CITER, in major-loop
    /// iterations).
    pub fn remaining(&self) -> u16 {
        let t = self.channel.tcd();
        t.tcd_citer_elinkno().read().citer().bits()
    }

    /// Block until the transfer completes.
    pub fn blocking_wait(self) {
        while self.is_running() {
            core::hint::spin_loop();
        }

        // Ensure all DMA writes are visible
        fence(Ordering::SeqCst);

        // Don't run drop (which would abort)
        core::mem::forget(self);
    }

    /// Wait for the half-transfer interrupt asynchronously.
    ///
    /// This is useful for double-buffering scenarios where you want to process
    /// the first half of the buffer while the second half is being filled.
    ///
    /// Returns `true` if the half-transfer occurred, `false` if the transfer
    /// completed before the half-transfer interrupt.
    ///
    /// # Note
    ///
    /// The transfer must be configured with `TransferOptions::half_transfer_interrupt = true`
    /// for this method to work correctly.
    pub async fn wait_half(&mut self) -> bool {
        use core::future::poll_fn;

        poll_fn(|cx| {
            let state = &STATES[self.channel.index];

            // Register the half-transfer waker BEFORE reading progress, so a
            // half-transfer interrupt arriving between the check and Pending
            // still wakes us.
            state.half_waker.register(cx.waker());

            // Check if we're past the half-way point: CITER counts down from
            // BITER toward zero as major-loop iterations complete.
            let t = self.channel.tcd();
            let biter = t.tcd_biter_elinkno().read().biter().bits();
            let citer = t.tcd_citer_elinkno().read().citer().bits();
            let half_point = biter / 2;

            if self.channel.is_done() {
                // Transfer completed before half-transfer
                Poll::Ready(false)
            } else if citer <= half_point {
                // We're past the half-way point
                fence(Ordering::SeqCst);
                Poll::Ready(true)
            } else {
                Poll::Pending
            }
        })
        .await
    }

    /// Abort the transfer: stop further requests and clear INT/DONE state.
    fn abort(&mut self) {
        let t = self.channel.tcd();

        // Disable channel requests
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear any pending interrupt
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Clear DONE flag
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);
    }
}
1927
1928impl<'a> Unpin for Transfer<'a> {}
1929
impl<'a> Future for Transfer<'a> {
    type Output = ();

    /// Resolves once the channel's DONE flag is set.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state = &STATES[self.channel.index];

        // Register waker first: if the completion interrupt fires between the
        // DONE check below and returning Pending, the wake is not lost.
        state.waker.register(cx.waker());

        let done = self.channel.is_done();

        if done {
            // Ensure all DMA writes are visible before returning
            fence(Ordering::SeqCst);
            Poll::Ready(())
        } else {
            Poll::Pending
        }
    }
}
1950
1951impl<'a> Drop for Transfer<'a> {
1952 fn drop(&mut self) {
1953 // Only abort if the transfer is still running
1954 // If already complete, no need to abort
1955 if self.is_running() {
1956 self.abort();
1957
1958 // Wait for abort to complete
1959 while self.is_running() {
1960 core::hint::spin_loop();
1961 }
1962 }
1963
1964 fence(Ordering::SeqCst);
1965 }
1966}
1967
1968// ============================================================================
1969// Ring Buffer for Circular DMA
1970// ============================================================================
1971
1972/// A ring buffer for continuous DMA reception.
1973///
1974/// This structure manages a circular DMA transfer, allowing continuous
1975/// reception of data without losing bytes between reads. It uses both
1976/// half-transfer and complete-transfer interrupts to track available data.
1977///
1978/// # Example
1979///
1980/// ```no_run
1981/// use embassy_mcxa::dma::{DmaChannel, RingBuffer, TransferOptions};
1982///
1983/// static mut RX_BUF: [u8; 64] = [0; 64];
1984///
1985/// let dma_ch = DmaChannel::new(p.DMA_CH0);
1986/// let ring_buf = unsafe {
1987/// dma_ch.setup_circular_read(
1988/// uart_rx_addr,
1989/// &mut RX_BUF,
1990/// )
1991/// };
1992///
1993/// // Read data as it arrives
1994/// let mut buf = [0u8; 16];
1995/// let n = ring_buf.read(&mut buf).await?;
1996/// ```
pub struct RingBuffer<'a, W: Word> {
    /// Type-erased handle to the circular DMA channel feeding this buffer.
    channel: AnyChannel,
    /// Buffer pointer. We use NonNull instead of &mut because DMA acts like
    /// a separate thread writing to this buffer, and &mut claims exclusive
    /// access which the compiler could optimize incorrectly.
    buf: NonNull<[W]>,
    /// Buffer length cached for convenience (in elements of `W`, not bytes)
    buf_len: usize,
    /// Read position in the buffer (consumer side)
    read_pos: AtomicUsize,
    /// Phantom data to tie the lifetime to the original buffer
    _lt: PhantomData<&'a mut [W]>,
}
2010
impl<'a, W: Word> RingBuffer<'a, W> {
    /// Create a new ring buffer for the given channel and buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure:
    /// - The DMA channel has been configured for circular transfer
    /// - The buffer remains valid for the lifetime of the ring buffer
    /// - Only one RingBuffer exists per DMA channel at a time
    pub(crate) unsafe fn new(channel: AnyChannel, buf: &'a mut [W]) -> Self {
        let buf_len = buf.len();
        Self {
            channel,
            buf: NonNull::from(buf),
            buf_len,
            read_pos: AtomicUsize::new(0),
            _lt: PhantomData,
        }
    }

    /// Get a slice reference to the buffer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that DMA is not actively writing to the
    /// portion of the buffer being accessed, or that the access is
    /// appropriately synchronized.
    #[inline]
    unsafe fn buf_slice(&self) -> &[W] {
        self.buf.as_ref()
    }

    /// Get the current DMA write position in the buffer.
    ///
    /// This reads the current destination address from the DMA controller
    /// and calculates the buffer offset.
    /// NOTE(review): DADDR is sampled while the engine may be mid-transfer;
    /// the value is assumed to only ever point at element boundaries within
    /// the buffer — confirm against the eDMA manual.
    fn dma_write_pos(&self) -> usize {
        let t = self.channel.tcd();
        let daddr = t.tcd_daddr().read().daddr().bits() as usize;
        let buf_start = self.buf.as_ptr() as *const W as usize;

        // Calculate offset from buffer start
        let offset = daddr.wrapping_sub(buf_start) / core::mem::size_of::<W>();

        // Ensure we're within bounds (DMA wraps around)
        offset % self.buf_len
    }

    /// Returns the number of elements of `W` available to read.
    pub fn available(&self) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Standard circular-buffer distance, accounting for wrap-around.
        if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        }
    }

    /// Check if the buffer has overrun (data was lost).
    ///
    /// This happens when DMA writes faster than the application reads.
    pub fn is_overrun(&self) -> bool {
        // In a true overrun, the DMA would have wrapped around and caught up
        // to our read position. We can detect this by checking if available()
        // equals the full buffer size (minus 1 to distinguish from empty).
        self.available() >= self.buf_len - 1
    }

    /// Read data from the ring buffer into the provided slice.
    ///
    /// Returns the number of elements read, which may be less than
    /// `dst.len()` if not enough data is available.
    ///
    /// This method does not block; use `read_async()` for async waiting.
    pub fn read_immediate(&self, dst: &mut [W]) -> usize {
        let write_pos = self.dma_write_pos();
        let read_pos = self.read_pos.load(Ordering::Acquire);

        // Calculate available elements (same computation as `available()`,
        // but using a single consistent write_pos sample)
        let available = if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.buf_len - read_pos + write_pos
        };

        let to_read = dst.len().min(available);
        if to_read == 0 {
            return 0;
        }

        // Safety: We only read from portions of the buffer that DMA has
        // already written to (between read_pos and write_pos).
        let buf = unsafe { self.buf_slice() };

        // Read data, handling wrap-around: first the run up to the end of the
        // buffer, then (if needed) the wrapped portion from the start.
        let first_chunk = (self.buf_len - read_pos).min(to_read);
        dst[..first_chunk].copy_from_slice(&buf[read_pos..read_pos + first_chunk]);

        if to_read > first_chunk {
            let second_chunk = to_read - first_chunk;
            dst[first_chunk..to_read].copy_from_slice(&buf[..second_chunk]);
        }

        // Update read position
        let new_read_pos = (read_pos + to_read) % self.buf_len;
        self.read_pos.store(new_read_pos, Ordering::Release);

        to_read
    }

    /// Read data from the ring buffer asynchronously.
    ///
    /// This waits until at least one element is available, then reads as much
    /// as possible into the destination buffer.
    ///
    /// Returns the number of elements read.
    pub async fn read(&self, dst: &mut [W]) -> Result<usize, Error> {
        use core::future::poll_fn;

        if dst.is_empty() {
            return Ok(0);
        }

        poll_fn(|cx| {
            // Check for overrun
            if self.is_overrun() {
                return Poll::Ready(Err(Error::Overrun));
            }

            // Try to read immediately
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            // Register wakers for both half and complete interrupts
            let state = &STATES[self.channel.index()];
            state.waker.register(cx.waker());
            state.half_waker.register(cx.waker());

            // Check again after registering waker (avoid race): data that
            // arrived between the first check and registration is caught here.
            let n = self.read_immediate(dst);
            if n > 0 {
                return Poll::Ready(Ok(n));
            }

            Poll::Pending
        })
        .await
    }

    /// Clear the ring buffer, discarding all unread data.
    pub fn clear(&self) {
        let write_pos = self.dma_write_pos();
        self.read_pos.store(write_pos, Ordering::Release);
    }

    /// Stop the DMA transfer and consume the ring buffer.
    ///
    /// Returns any remaining unread data count.
    pub fn stop(self) -> usize {
        let available = self.available();

        // Disable the channel
        let t = self.channel.tcd();
        t.ch_csr().modify(|_, w| w.erq().disable());

        // Clear flags
        t.ch_int().write(|w| w.int().clear_bit_by_one());
        t.ch_csr().modify(|_, w| w.done().clear_bit_by_one());

        fence(Ordering::SeqCst);

        available
    }
}
2189
impl<C: Channel> DmaChannel<C> {
    /// Set up a circular DMA transfer for continuous peripheral-to-memory reception.
    ///
    /// This configures the DMA channel for circular operation with both half-transfer
    /// and complete-transfer interrupts enabled. The transfer runs continuously until
    /// stopped via [`RingBuffer::stop()`].
    ///
    /// # Arguments
    ///
    /// * `peri_addr` - Peripheral register address to read from
    /// * `buf` - Destination buffer (should be power-of-2 size for best efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to read received data.
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - The peripheral address must be valid for reads.
    /// - The peripheral's DMA request must be configured to trigger this channel.
    pub unsafe fn setup_circular_read<'a, W: Word>(&self, peri_addr: *const W, buf: &'a mut [W]) -> RingBuffer<'a, W> {
        assert!(!buf.is_empty());
        // CITER/BITER (ELINKNO format) hold a 15-bit count.
        assert!(buf.len() <= 0x7fff);
        // For circular mode, buffer size should ideally be power of 2
        // but we don't enforce it

        let size = W::size();
        let byte_size = size.bytes();

        let t = self.tcd();

        // Reset channel state (disable requests first so the engine cannot
        // run while the TCD is being rewritten)
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .ebw()
                .disable()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.bits(0));
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Source: peripheral register, fixed
        t.tcd_saddr().write(|w| w.saddr().bits(peri_addr as u32));
        t.tcd_soff().write(|w| w.soff().bits(0)); // No increment

        // Destination: memory buffer, incrementing
        t.tcd_daddr().write(|w| w.daddr().bits(buf.as_mut_ptr() as u32));
        t.tcd_doff().write(|w| w.doff().bits(byte_size as u16));

        // Transfer attributes
        let hw_size = size.to_hw_size();
        t.tcd_attr().write(|w| {
            w.ssize()
                .bits(hw_size)
                .dsize()
                .bits(hw_size)
                .smod()
                .disable()
                .dmod()
                .bits(0)
        });

        // Minor loop: transfer one word per request
        t.tcd_nbytes_mloffno().write(|w| {
            w.nbytes()
                .bits(byte_size as u32)
                .dmloe()
                .offset_not_applied()
                .smloe()
                .offset_not_applied()
        });

        // Major loop count = buffer size
        let count = buf.len() as u16;
        t.tcd_citer_elinkno().write(|w| w.citer().bits(count).elink().disable());
        t.tcd_biter_elinkno().write(|w| w.biter().bits(count).elink().disable());

        // After major loop: reset destination to buffer start (circular) by
        // applying a negative adjustment equal to the whole buffer in bytes.
        let buf_bytes = (buf.len() * byte_size) as i32;
        t.tcd_slast_sda().write(|w| w.slast_sda().bits(0)); // Source doesn't change
        t.tcd_dlast_sga().write(|w| w.dlast_sga().bits((-buf_bytes) as u32));

        // Control/status: enable both half and complete interrupts, NO DREQ (continuous)
        t.tcd_csr().write(|w| {
            w.intmajor()
                .enable()
                .inthalf()
                .enable()
                .dreq()
                .channel_not_affected() // Don't clear ERQ on complete (circular)
                .esg()
                .normal_format()
                .majorelink()
                .disable()
                .eeop()
                .disable()
                .esda()
                .disable()
                .bwc()
                .no_stall()
        });

        // Ensure all TCD writes have landed before enabling requests.
        cortex_m::asm::dsb();

        // Enable the channel request
        t.ch_csr().modify(|_, w| w.erq().enable());

        // Enable NVIC interrupt for this channel so async wakeups work
        self.enable_interrupt();

        RingBuffer::new(self.as_any(), buf)
    }
}
2310
2311// ============================================================================
2312// Scatter-Gather Builder
2313// ============================================================================
2314
/// Maximum number of TCDs in a scatter-gather chain.
///
/// Bounds the fixed-size TCD pool embedded in [`ScatterGatherBuilder`].
pub const MAX_SCATTER_GATHER_TCDS: usize = 16;
2317
2318/// A builder for constructing scatter-gather DMA transfer chains.
2319///
2320/// This provides a type-safe way to build TCD chains for scatter-gather
2321/// transfers without manual TCD manipulation.
2322///
2323/// # Example
2324///
2325/// ```no_run
2326/// use embassy_mcxa::dma::{DmaChannel, ScatterGatherBuilder};
2327///
2328/// let mut builder = ScatterGatherBuilder::<u32>::new();
2329///
2330/// // Add transfer segments
2331/// builder.add_transfer(&src1, &mut dst1);
2332/// builder.add_transfer(&src2, &mut dst2);
2333/// builder.add_transfer(&src3, &mut dst3);
2334///
2335/// // Build and execute
2336/// let transfer = unsafe { builder.build(&dma_ch).unwrap() };
2337/// transfer.await;
2338/// ```
pub struct ScatterGatherBuilder<W: Word> {
    /// TCD pool (must be 32-byte aligned; `Tcd` itself is `repr(align(32))`,
    /// so every array element satisfies the hardware's alignment requirement)
    tcds: [Tcd; MAX_SCATTER_GATHER_TCDS],
    /// Number of TCDs configured
    count: usize,
    /// Phantom marker for word type
    _phantom: core::marker::PhantomData<W>,
}
2347
2348impl<W: Word> ScatterGatherBuilder<W> {
2349 /// Create a new scatter-gather builder.
2350 pub fn new() -> Self {
2351 Self {
2352 tcds: [Tcd::default(); MAX_SCATTER_GATHER_TCDS],
2353 count: 0,
2354 _phantom: core::marker::PhantomData,
2355 }
2356 }
2357
2358 /// Add a memory-to-memory transfer segment to the chain.
2359 ///
2360 /// # Arguments
2361 ///
2362 /// * `src` - Source buffer for this segment
2363 /// * `dst` - Destination buffer for this segment
2364 ///
2365 /// # Panics
2366 ///
2367 /// Panics if the maximum number of segments (16) is exceeded.
2368 pub fn add_transfer(&mut self, src: &[W], dst: &mut [W]) -> &mut Self {
2369 assert!(self.count < MAX_SCATTER_GATHER_TCDS, "Too many scatter-gather segments");
2370 assert!(!src.is_empty());
2371 assert!(dst.len() >= src.len());
2372
2373 let size = W::size();
2374 let byte_size = size.bytes();
2375 let hw_size = size.to_hw_size();
2376 let nbytes = (src.len() * byte_size) as u32;
2377
2378 // Build the TCD for this segment
2379 self.tcds[self.count] = Tcd {
2380 saddr: src.as_ptr() as u32,
2381 soff: byte_size as i16,
2382 attr: ((hw_size as u16) << 8) | (hw_size as u16), // SSIZE | DSIZE
2383 nbytes,
2384 slast: 0,
2385 daddr: dst.as_mut_ptr() as u32,
2386 doff: byte_size as i16,
2387 citer: 1,
2388 dlast_sga: 0, // Will be filled in by build()
2389 csr: 0x0002, // INTMAJOR only (ESG will be set for non-last TCDs)
2390 biter: 1,
2391 };
2392
2393 self.count += 1;
2394 self
2395 }
2396
2397 /// Get the number of transfer segments added.
2398 pub fn segment_count(&self) -> usize {
2399 self.count
2400 }
2401
    /// Build the scatter-gather chain and start the transfer.
    ///
    /// # Arguments
    ///
    /// * `channel` - The DMA channel to use for the transfer
    ///
    /// # Returns
    ///
    /// A `Transfer` future that completes when the entire chain has executed.
    ///
    /// # Errors
    ///
    /// Returns a configuration error if no segments have been added.
    ///
    /// # Safety
    ///
    /// All source and destination buffers passed to `add_transfer()` must
    /// remain valid for the duration of the transfer.
    ///
    /// Note: the hardware follows raw pointers into `self.tcds` while the
    /// chain executes. The elided lifetime of the returned `Transfer` borrows
    /// `self` (lifetime elision picks `&mut self`), which keeps the builder
    /// borrowed — and therefore unmovable — until the future is dropped.
    pub unsafe fn build<C: Channel>(&mut self, channel: &DmaChannel<C>) -> Result<Transfer<'_>, Error> {
        if self.count == 0 {
            return Err(Error::Configuration);
        }

        // Link TCDs together
        //
        // CSR bit definitions:
        // - START = bit 0 = 0x0001 (triggers transfer when set)
        // - INTMAJOR = bit 1 = 0x0002 (interrupt on major loop complete)
        // - ESG = bit 4 = 0x0010 (enable scatter-gather, loads next TCD on complete)
        //
        // When hardware loads a TCD via scatter-gather (ESG), it copies the TCD's
        // CSR directly into the hardware register. If START is not set in that CSR,
        // the hardware will NOT auto-execute the loaded TCD.
        //
        // Strategy:
        // - First TCD: ESG | INTMAJOR (no START - we add it manually after loading)
        // - Middle TCDs: ESG | INTMAJOR | START (auto-execute when loaded via S/G)
        // - Last TCD: INTMAJOR | START (auto-execute, no further linking)
        for i in 0..self.count {
            let is_first = i == 0;
            let is_last = i == self.count - 1;

            if is_first {
                if is_last {
                    // Only one TCD - no ESG, no START (we add START manually)
                    self.tcds[i].dlast_sga = 0;
                    self.tcds[i].csr = 0x0002; // INTMAJOR only
                } else {
                    // First of multiple - ESG to link, no START (we add START manually)
                    self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                    self.tcds[i].csr = 0x0012; // ESG | INTMAJOR
                }
            } else if is_last {
                // Last TCD (not first) - no ESG, but START so it auto-executes
                self.tcds[i].dlast_sga = 0;
                self.tcds[i].csr = 0x0003; // INTMAJOR | START
            } else {
                // Middle TCD - ESG to link, and START so it auto-executes
                self.tcds[i].dlast_sga = &self.tcds[i + 1] as *const Tcd as i32;
                self.tcds[i].csr = 0x0013; // ESG | INTMAJOR | START
            }
        }

        let t = channel.tcd();

        // Reset channel state - clear DONE, disable requests, clear errors
        // This ensures the channel is in a clean state before loading the TCD
        t.ch_csr().write(|w| {
            w.erq()
                .disable()
                .earq()
                .disable()
                .eei()
                .no_error()
                .done()
                .clear_bit_by_one()
        });
        t.ch_es().write(|w| w.err().clear_bit_by_one());
        t.ch_int().write(|w| w.int().clear_bit_by_one());

        // Memory barrier to ensure channel state is reset before loading TCD
        cortex_m::asm::dsb();

        // Load first TCD into hardware
        channel.load_tcd(&self.tcds[0]);

        // Memory barrier before setting START
        cortex_m::asm::dsb();

        // Start the transfer (manual START for the first/only TCD; see above)
        t.tcd_csr().modify(|_, w| w.start().channel_started());

        Ok(Transfer::new(channel.as_any()))
    }
2492
    /// Reset the builder for reuse.
    ///
    /// Only the segment count is reset; TCD storage is overwritten by
    /// subsequent `add_transfer()` calls and does not need clearing.
    pub fn clear(&mut self) {
        self.count = 0;
    }
2497}
2498
2499impl<W: Word> Default for ScatterGatherBuilder<W> {
2500 fn default() -> Self {
2501 Self::new()
2502 }
2503}
2504
/// A completed scatter-gather transfer result.
///
/// This type is returned after a scatter-gather transfer completes,
/// providing access to any error information.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ScatterGatherResult {
    /// Number of segments successfully transferred
    pub segments_completed: usize,
    /// Error if any occurred (`None` means the whole chain completed cleanly)
    pub error: Option<Error>,
}
2516
2517// ============================================================================
2518// Interrupt Handler
2519// ============================================================================
2520
/// Interrupt handler helper.
///
/// Call this from your interrupt handler to clear the interrupt flag and wake the waker.
/// This handles both half-transfer and complete-transfer interrupts.
///
/// # Safety
/// Must be called from the correct DMA channel interrupt context, and
/// `ch_index` must be a valid channel index for this eDMA instance.
pub unsafe fn on_interrupt(ch_index: usize) {
    let p = pac::Peripherals::steal();
    let edma = &p.edma_0_tcd0;
    let t = edma.tcd(ch_index);

    // Read TCD CSR to determine interrupt source
    let csr = t.tcd_csr().read();

    // Check if this is a half-transfer interrupt
    // INTHALF is set and we're at or past the half-way point
    //
    // NOTE(review): this assumes CITER counts down from BITER toward zero, so
    // `citer <= biter / 2` means at least half the major loop has executed —
    // confirm against the eDMA reference manual.
    if csr.inthalf().bit_is_set() {
        let biter = t.tcd_biter_elinkno().read().biter().bits();
        let citer = t.tcd_citer_elinkno().read().citer().bits();
        let half_point = biter / 2;

        if citer <= half_point && citer > 0 {
            // Half-transfer interrupt - wake half_waker
            half_waker(ch_index).wake();
        }
    }

    // Clear INT flag (write-1-to-clear) before waking anyone, so a re-armed
    // transfer can raise it again.
    t.ch_int().write(|w| w.int().clear_bit_by_one());

    // If DONE is set, this is a complete-transfer interrupt
    // Only wake the full-transfer waker when the transfer is actually complete
    if t.ch_csr().read().done().bit_is_set() {
        waker(ch_index).wake();
    }
}
2558
2559// ============================================================================
2560// Type-level Interrupt Handlers for bind_interrupts! macro
2561// ============================================================================
2562
/// Macro to generate DMA channel interrupt handlers.
///
/// This generates handler structs that implement the `Handler` trait for use
/// with the `bind_interrupts!` macro.
///
/// Arguments: generated struct name, type-level interrupt name, eDMA channel index.
macro_rules! impl_dma_interrupt_handler {
    ($name:ident, $irq:ident, $ch:expr) => {
        /// Interrupt handler for DMA channel.
        ///
        /// Use this with the `bind_interrupts!` macro:
        /// ```ignore
        /// bind_interrupts!(struct Irqs {
        #[doc = concat!(" ", stringify!($irq), " => dma::", stringify!($name), ";")]
        /// });
        /// ```
        pub struct $name;

        impl crate::interrupt::typelevel::Handler<crate::interrupt::typelevel::$irq> for $name {
            unsafe fn on_interrupt() {
                // Delegate to the shared free-function handler for this channel.
                on_interrupt($ch);
            }
        }
    };
}
2586
// One handler type per eDMA channel interrupt line (DMA_CH0..=DMA_CH7).
impl_dma_interrupt_handler!(DmaCh0InterruptHandler, DMA_CH0, 0);
impl_dma_interrupt_handler!(DmaCh1InterruptHandler, DMA_CH1, 1);
impl_dma_interrupt_handler!(DmaCh2InterruptHandler, DMA_CH2, 2);
impl_dma_interrupt_handler!(DmaCh3InterruptHandler, DMA_CH3, 3);
impl_dma_interrupt_handler!(DmaCh4InterruptHandler, DMA_CH4, 4);
impl_dma_interrupt_handler!(DmaCh5InterruptHandler, DMA_CH5, 5);
impl_dma_interrupt_handler!(DmaCh6InterruptHandler, DMA_CH6, 6);
impl_dma_interrupt_handler!(DmaCh7InterruptHandler, DMA_CH7, 7);
diff --git a/embassy-mcxa/src/gpio.rs b/embassy-mcxa/src/gpio.rs
new file mode 100644
index 000000000..65f8df985
--- /dev/null
+++ b/embassy-mcxa/src/gpio.rs
@@ -0,0 +1,1062 @@
1//! GPIO driver built around a type-erased `Flex` pin, similar to other Embassy HALs.
2//! The exported `Output`/`Input` drivers own a `Flex` so they no longer depend on the
3//! concrete pin type.
4
5use core::convert::Infallible;
6use core::future::Future;
7use core::marker::PhantomData;
8use core::pin::pin;
9
10use embassy_hal_internal::{Peri, PeripheralType};
11use maitake_sync::WaitMap;
12use paste::paste;
13
14use crate::pac::interrupt;
15use crate::pac::port0::pcr0::{Dse, Inv, Mux, Pe, Ps, Sre};
16
/// Iterator over the indices of the set bits in a `u32`, in ascending order.
struct BitIter(u32);

impl Iterator for BitIter {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        if self.0 == 0 {
            return None;
        }
        // Index of the lowest set bit...
        let idx = self.0.trailing_zeros() as usize;
        // ...then strip that bit so the next call finds the following one.
        self.0 &= self.0 - 1;
        Some(idx)
    }
}
32
// Number of GPIO ports handled by this driver (GPIO0..=GPIO4).
const PORT_COUNT: usize = 5;

// One wait map per port, keyed by pin index. The async `wait_for_*` methods
// park a waker here until that port's interrupt fires for the matching pin.
static PORT_WAIT_MAPS: [WaitMap<usize, ()>; PORT_COUNT] = [
    WaitMap::new(),
    WaitMap::new(),
    WaitMap::new(),
    WaitMap::new(),
    WaitMap::new(),
];
42
/// Shared bottom half for all GPIO port interrupts.
///
/// Reads the port's interrupt status flags, then for each flagged pin clears
/// its flag, disables that pin's interrupt, and wakes any waiting task.
fn irq_handler(port_index: usize, gpio_base: *const crate::pac::gpio0::RegisterBlock) {
    let gpio = unsafe { &*gpio_base };
    let isfr = gpio.isfr0().read().bits();

    for pin in BitIter(isfr) {
        // Clear this pin's pending interrupt flag. ISFR flags are cleared by
        // writing 1, so only the written bit is affected — TODO confirm
        // write-1-to-clear semantics against the reference manual.
        gpio.isfr0().write(|w| unsafe { w.bits(1 << pin) });
        gpio.icr(pin).modify(|_, w| w.irqc().irqc0()); // Disable interrupt

        // Wake the corresponding port waker
        if let Some(w) = PORT_WAIT_MAPS.get(port_index) {
            w.wake(&pin, ());
        }
    }
}
58
// Raw interrupt vectors for the five GPIO ports; each delegates to
// `irq_handler` with its port index and register block pointer.
#[interrupt]
fn GPIO0() {
    irq_handler(0, crate::pac::Gpio0::ptr());
}

#[interrupt]
fn GPIO1() {
    irq_handler(1, crate::pac::Gpio1::ptr());
}

#[interrupt]
fn GPIO2() {
    irq_handler(2, crate::pac::Gpio2::ptr());
}

#[interrupt]
fn GPIO3() {
    irq_handler(3, crate::pac::Gpio3::ptr());
}

#[interrupt]
fn GPIO4() {
    irq_handler(4, crate::pac::Gpio4::ptr());
}
83
/// Enable the GPIO port interrupts in the NVIC and globally unmask interrupts.
///
/// # Safety
/// Must be called during HAL initialization before any async GPIO waits are
/// used. The global `cortex_m::interrupt::enable()` must be sound at the call
/// site (i.e. the caller is not inside a critical section).
pub(crate) unsafe fn init() {
    use embassy_hal_internal::interrupt::InterruptExt;

    crate::pac::interrupt::GPIO0.enable();
    crate::pac::interrupt::GPIO1.enable();
    crate::pac::interrupt::GPIO2.enable();
    crate::pac::interrupt::GPIO3.enable();
    crate::pac::interrupt::GPIO4.enable();

    cortex_m::interrupt::enable();
}
95
/// Logical level for GPIO pins.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Level {
    Low,
    High,
}

impl From<bool> for Level {
    /// `true` maps to [`Level::High`], `false` to [`Level::Low`].
    fn from(val: bool) -> Self {
        if val {
            Self::High
        } else {
            Self::Low
        }
    }
}
112
113#[derive(Copy, Clone, Eq, PartialEq, Debug)]
114pub enum Pull {
115 Disabled,
116 Up,
117 Down,
118}
119
120impl From<Pull> for (Pe, Ps) {
121 fn from(pull: Pull) -> Self {
122 match pull {
123 Pull::Disabled => (Pe::Pe0, Ps::Ps0),
124 Pull::Up => (Pe::Pe1, Ps::Ps1),
125 Pull::Down => (Pe::Pe1, Ps::Ps0),
126 }
127 }
128}
129
130#[derive(Copy, Clone, Eq, PartialEq, Debug)]
131pub enum SlewRate {
132 Fast,
133 Slow,
134}
135
136impl From<SlewRate> for Sre {
137 fn from(slew_rate: SlewRate) -> Self {
138 match slew_rate {
139 SlewRate::Fast => Sre::Sre0,
140 SlewRate::Slow => Sre::Sre1,
141 }
142 }
143}
144
145#[derive(Copy, Clone, Eq, PartialEq, Debug)]
146pub enum DriveStrength {
147 Normal,
148 Double,
149}
150
151impl From<DriveStrength> for Dse {
152 fn from(strength: DriveStrength) -> Self {
153 match strength {
154 DriveStrength::Normal => Dse::Dse0,
155 DriveStrength::Double => Dse::Dse1,
156 }
157 }
158}
159
160#[derive(Copy, Clone, Eq, PartialEq, Debug)]
161pub enum Inverter {
162 Disabled,
163 Enabled,
164}
165
166impl From<Inverter> for Inv {
167 fn from(strength: Inverter) -> Self {
168 match strength {
169 Inverter::Disabled => Inv::Inv0,
170 Inverter::Enabled => Inv::Inv1,
171 }
172 }
173}
174
/// Alias for the GPIO0 peripheral type.
pub type Gpio = crate::peripherals::GPIO0;
176
/// Type-erased representation of a GPIO pin.
pub struct AnyPin {
    /// Port number.
    port: usize,
    /// Pin number within the port.
    pin: usize,
    /// GPIO register block for the pin's port.
    gpio: &'static crate::pac::gpio0::RegisterBlock,
    /// PORT (pin-mux/pad-control) register block for the pin's port.
    port_reg: &'static crate::pac::port0::RegisterBlock,
    /// The pin's individual pin control register (PCR).
    pcr_reg: &'static crate::pac::port0::Pcr0,
}

impl AnyPin {
    /// Create an `AnyPin` from raw components.
    ///
    /// Private: only called from `degrade()` paths in this module, which pass
    /// values that are consistent with each other by construction.
    fn new(
        port: usize,
        pin: usize,
        gpio: &'static crate::pac::gpio0::RegisterBlock,
        port_reg: &'static crate::pac::port0::RegisterBlock,
        pcr_reg: &'static crate::pac::port0::Pcr0,
    ) -> Self {
        Self {
            port,
            pin,
            gpio,
            port_reg,
            pcr_reg,
        }
    }

    /// Single-bit mask for this pin within its 32-bit port registers.
    #[inline(always)]
    fn mask(&self) -> u32 {
        1 << self.pin
    }

    /// GPIO register block for this pin's port.
    #[inline(always)]
    fn gpio(&self) -> &'static crate::pac::gpio0::RegisterBlock {
        self.gpio
    }

    /// Index of the port this pin belongs to.
    #[inline(always)]
    pub fn port_index(&self) -> usize {
        self.port
    }

    /// Index of the pin within its port.
    #[inline(always)]
    pub fn pin_index(&self) -> usize {
        self.pin
    }

    /// PORT register block for this pin's port.
    #[inline(always)]
    fn port_reg(&self) -> &'static crate::pac::port0::RegisterBlock {
        self.port_reg
    }

    /// This pin's PCR register.
    #[inline(always)]
    fn pcr_reg(&self) -> &'static crate::pac::port0::Pcr0 {
        self.pcr_reg
    }
}

// Makes `AnyPin` usable as an embassy peripheral (e.g. inside `Peri`).
embassy_hal_internal::impl_peripheral!(AnyPin);
236
/// Crate-private pin primitives shared by concrete pins and `AnyPin`.
/// `pub(crate)` keeps the trait sealed: it cannot be implemented downstream.
pub(crate) trait SealedPin {
    /// Combined index: `port * 32 + pin`.
    fn pin_port(&self) -> usize;

    /// Port number, derived from `pin_port`.
    fn port(&self) -> usize {
        self.pin_port() / 32
    }

    /// Pin number within the port, derived from `pin_port`.
    fn pin(&self) -> usize {
        self.pin_port() % 32
    }

    /// GPIO register block for this pin's port.
    fn gpio(&self) -> &'static crate::pac::gpio0::RegisterBlock;

    /// PORT (pin-mux) register block for this pin's port.
    fn port_reg(&self) -> &'static crate::pac::port0::RegisterBlock;

    /// This pin's pin control register (PCR).
    fn pcr_reg(&self) -> &'static crate::pac::port0::Pcr0;

    /// Select the pad's mux (alternate function).
    fn set_function(&self, function: Mux);

    /// Configure the pad's pull resistors.
    fn set_pull(&self, pull: Pull);

    /// Configure the pad's drive strength.
    fn set_drive_strength(&self, strength: Dse);

    /// Configure the pad's slew rate.
    fn set_slew_rate(&self, slew_rate: Sre);

    /// Enable the pad's input buffer.
    fn set_enable_input_buffer(&self);
}
264
/// GPIO pin trait.
#[allow(private_bounds)]
pub trait GpioPin: SealedPin + Sized + PeripheralType + Into<AnyPin> + 'static {
    /// Type-erase the pin, consuming it.
    fn degrade(self) -> AnyPin {
        // This trait is only implemented within this module on valid pin
        // peripherals, so the port/pin/register values handed to `AnyPin`
        // are consistent by construction.
        AnyPin::new(self.port(), self.pin(), self.gpio(), self.port_reg(), self.pcr_reg())
    }
}
276
impl SealedPin for AnyPin {
    fn pin_port(&self) -> usize {
        self.port * 32 + self.pin
    }

    // The accessors below delegate to the *inherent* `AnyPin` methods of the
    // same name. Inherent methods take precedence over trait methods during
    // method resolution, so these calls do not recurse.
    fn gpio(&self) -> &'static crate::pac::gpio0::RegisterBlock {
        self.gpio()
    }

    fn port_reg(&self) -> &'static crate::pac::port0::RegisterBlock {
        self.port_reg()
    }

    fn pcr_reg(&self) -> &'static crate::pac::port0::Pcr0 {
        self.pcr_reg()
    }

    fn set_function(&self, function: Mux) {
        self.pcr_reg().modify(|_, w| w.mux().variant(function));
    }

    fn set_pull(&self, pull: Pull) {
        // PE enables the pull, PS selects up vs. down (see `From<Pull>`).
        let (pull_enable, pull_select) = pull.into();
        self.pcr_reg().modify(|_, w| {
            w.pe().variant(pull_enable);
            w.ps().variant(pull_select)
        });
    }

    fn set_drive_strength(&self, strength: Dse) {
        self.pcr_reg().modify(|_, w| w.dse().variant(strength));
    }

    fn set_slew_rate(&self, slew_rate: Sre) {
        self.pcr_reg().modify(|_, w| w.sre().variant(slew_rate));
    }

    fn set_enable_input_buffer(&self) {
        self.pcr_reg().modify(|_, w| w.ibe().ibe1());
    }
}

impl GpioPin for AnyPin {}
320
/// Implements `SealedPin`/`GpioPin` for one concrete pin peripheral.
///
/// Arguments: peripheral name, port number, pin number, GPIO block type.
macro_rules! impl_pin {
    ($peri:ident, $port:expr, $pin:expr, $block:ident) => {
        paste! {
            impl SealedPin for crate::peripherals::$peri {
                fn pin_port(&self) -> usize {
                    $port * 32 + $pin
                }

                fn gpio(&self) -> &'static crate::pac::gpio0::RegisterBlock {
                    // NOTE(review): returns every GPIOn block as the GPIO0
                    // register type — presumably the PAC exposes a shared
                    // layout for all ports; confirm.
                    unsafe { &*crate::pac::$block::ptr() }
                }

                fn port_reg(&self) -> &'static crate::pac::port0::RegisterBlock {
                    unsafe { &*crate::pac::[<Port $port>]::ptr() }
                }

                fn pcr_reg(&self) -> &'static crate::pac::port0::Pcr0 {
                    self.port_reg().[<pcr $pin>]()
                }

                fn set_function(&self, function: Mux) {
                    unsafe {
                        let port_reg = &*crate::pac::[<Port $port>]::ptr();
                        port_reg.[<pcr $pin>]().modify(|_, w| {
                            w.mux().variant(function)
                        });
                    }
                }

                fn set_pull(&self, pull: Pull) {
                    let port_reg = unsafe {&*crate::pac::[<Port $port>]::ptr()};
                    let (pull_enable, pull_select) = pull.into();
                    port_reg.[<pcr $pin>]().modify(|_, w| {
                        w.pe().variant(pull_enable);
                        w.ps().variant(pull_select)
                    });
                }

                fn set_drive_strength(&self, strength: Dse) {
                    let port_reg = unsafe {&*crate::pac::[<Port $port>]::ptr()};
                    port_reg.[<pcr $pin>]().modify(|_, w| w.dse().variant(strength));
                }

                fn set_slew_rate(&self, slew_rate: Sre) {
                    let port_reg = unsafe {&*crate::pac::[<Port $port>]::ptr()};
                    port_reg.[<pcr $pin>]().modify(|_, w| w.sre().variant(slew_rate));
                }

                fn set_enable_input_buffer(&self) {
                    let port_reg = unsafe {&*crate::pac::[<Port $port>]::ptr()};
                    port_reg.[<pcr $pin>]().modify(|_, w| w.ibe().ibe1());
                }
            }

            impl GpioPin for crate::peripherals::$peri {}

            impl From<crate::peripherals::$peri> for AnyPin {
                fn from(value: crate::peripherals::$peri) -> Self {
                    // Resolves to the inherent `degrade(&self)` below (inherent
                    // methods take precedence); same result as the trait method.
                    value.degrade()
                }
            }

            impl crate::peripherals::$peri {
                /// Convenience helper to obtain a type-erased handle to this pin.
                ///
                /// NOTE(review): takes `&self`, so it can mint an `AnyPin` that
                /// aliases the still-owned peripheral — unlike the consuming
                /// `GpioPin::degrade(self)`. Confirm this aliasing is intended.
                pub fn degrade(&self) -> AnyPin {
                    AnyPin::new(self.port(), self.pin(), self.gpio(), self.port_reg(), self.pcr_reg())
                }
            }
        }
    };
}
392
// Instantiate the pin trait impls for all 32 pins of each of the 5 ports.
impl_pin!(P0_0, 0, 0, Gpio0);
impl_pin!(P0_1, 0, 1, Gpio0);
impl_pin!(P0_2, 0, 2, Gpio0);
impl_pin!(P0_3, 0, 3, Gpio0);
impl_pin!(P0_4, 0, 4, Gpio0);
impl_pin!(P0_5, 0, 5, Gpio0);
impl_pin!(P0_6, 0, 6, Gpio0);
impl_pin!(P0_7, 0, 7, Gpio0);
impl_pin!(P0_8, 0, 8, Gpio0);
impl_pin!(P0_9, 0, 9, Gpio0);
impl_pin!(P0_10, 0, 10, Gpio0);
impl_pin!(P0_11, 0, 11, Gpio0);
impl_pin!(P0_12, 0, 12, Gpio0);
impl_pin!(P0_13, 0, 13, Gpio0);
impl_pin!(P0_14, 0, 14, Gpio0);
impl_pin!(P0_15, 0, 15, Gpio0);
impl_pin!(P0_16, 0, 16, Gpio0);
impl_pin!(P0_17, 0, 17, Gpio0);
impl_pin!(P0_18, 0, 18, Gpio0);
impl_pin!(P0_19, 0, 19, Gpio0);
impl_pin!(P0_20, 0, 20, Gpio0);
impl_pin!(P0_21, 0, 21, Gpio0);
impl_pin!(P0_22, 0, 22, Gpio0);
impl_pin!(P0_23, 0, 23, Gpio0);
impl_pin!(P0_24, 0, 24, Gpio0);
impl_pin!(P0_25, 0, 25, Gpio0);
impl_pin!(P0_26, 0, 26, Gpio0);
impl_pin!(P0_27, 0, 27, Gpio0);
impl_pin!(P0_28, 0, 28, Gpio0);
impl_pin!(P0_29, 0, 29, Gpio0);
impl_pin!(P0_30, 0, 30, Gpio0);
impl_pin!(P0_31, 0, 31, Gpio0);

impl_pin!(P1_0, 1, 0, Gpio1);
impl_pin!(P1_1, 1, 1, Gpio1);
impl_pin!(P1_2, 1, 2, Gpio1);
impl_pin!(P1_3, 1, 3, Gpio1);
impl_pin!(P1_4, 1, 4, Gpio1);
impl_pin!(P1_5, 1, 5, Gpio1);
impl_pin!(P1_6, 1, 6, Gpio1);
impl_pin!(P1_7, 1, 7, Gpio1);
impl_pin!(P1_8, 1, 8, Gpio1);
impl_pin!(P1_9, 1, 9, Gpio1);
impl_pin!(P1_10, 1, 10, Gpio1);
impl_pin!(P1_11, 1, 11, Gpio1);
impl_pin!(P1_12, 1, 12, Gpio1);
impl_pin!(P1_13, 1, 13, Gpio1);
impl_pin!(P1_14, 1, 14, Gpio1);
impl_pin!(P1_15, 1, 15, Gpio1);
impl_pin!(P1_16, 1, 16, Gpio1);
impl_pin!(P1_17, 1, 17, Gpio1);
impl_pin!(P1_18, 1, 18, Gpio1);
impl_pin!(P1_19, 1, 19, Gpio1);
impl_pin!(P1_20, 1, 20, Gpio1);
impl_pin!(P1_21, 1, 21, Gpio1);
impl_pin!(P1_22, 1, 22, Gpio1);
impl_pin!(P1_23, 1, 23, Gpio1);
impl_pin!(P1_24, 1, 24, Gpio1);
impl_pin!(P1_25, 1, 25, Gpio1);
impl_pin!(P1_26, 1, 26, Gpio1);
impl_pin!(P1_27, 1, 27, Gpio1);
impl_pin!(P1_28, 1, 28, Gpio1);
impl_pin!(P1_29, 1, 29, Gpio1);
impl_pin!(P1_30, 1, 30, Gpio1);
impl_pin!(P1_31, 1, 31, Gpio1);

impl_pin!(P2_0, 2, 0, Gpio2);
impl_pin!(P2_1, 2, 1, Gpio2);
impl_pin!(P2_2, 2, 2, Gpio2);
impl_pin!(P2_3, 2, 3, Gpio2);
impl_pin!(P2_4, 2, 4, Gpio2);
impl_pin!(P2_5, 2, 5, Gpio2);
impl_pin!(P2_6, 2, 6, Gpio2);
impl_pin!(P2_7, 2, 7, Gpio2);
impl_pin!(P2_8, 2, 8, Gpio2);
impl_pin!(P2_9, 2, 9, Gpio2);
impl_pin!(P2_10, 2, 10, Gpio2);
impl_pin!(P2_11, 2, 11, Gpio2);
impl_pin!(P2_12, 2, 12, Gpio2);
impl_pin!(P2_13, 2, 13, Gpio2);
impl_pin!(P2_14, 2, 14, Gpio2);
impl_pin!(P2_15, 2, 15, Gpio2);
impl_pin!(P2_16, 2, 16, Gpio2);
impl_pin!(P2_17, 2, 17, Gpio2);
impl_pin!(P2_18, 2, 18, Gpio2);
impl_pin!(P2_19, 2, 19, Gpio2);
impl_pin!(P2_20, 2, 20, Gpio2);
impl_pin!(P2_21, 2, 21, Gpio2);
impl_pin!(P2_22, 2, 22, Gpio2);
impl_pin!(P2_23, 2, 23, Gpio2);
impl_pin!(P2_24, 2, 24, Gpio2);
impl_pin!(P2_25, 2, 25, Gpio2);
impl_pin!(P2_26, 2, 26, Gpio2);
impl_pin!(P2_27, 2, 27, Gpio2);
impl_pin!(P2_28, 2, 28, Gpio2);
impl_pin!(P2_29, 2, 29, Gpio2);
impl_pin!(P2_30, 2, 30, Gpio2);
impl_pin!(P2_31, 2, 31, Gpio2);

impl_pin!(P3_0, 3, 0, Gpio3);
impl_pin!(P3_1, 3, 1, Gpio3);
impl_pin!(P3_2, 3, 2, Gpio3);
impl_pin!(P3_3, 3, 3, Gpio3);
impl_pin!(P3_4, 3, 4, Gpio3);
impl_pin!(P3_5, 3, 5, Gpio3);
impl_pin!(P3_6, 3, 6, Gpio3);
impl_pin!(P3_7, 3, 7, Gpio3);
impl_pin!(P3_8, 3, 8, Gpio3);
impl_pin!(P3_9, 3, 9, Gpio3);
impl_pin!(P3_10, 3, 10, Gpio3);
impl_pin!(P3_11, 3, 11, Gpio3);
impl_pin!(P3_12, 3, 12, Gpio3);
impl_pin!(P3_13, 3, 13, Gpio3);
impl_pin!(P3_14, 3, 14, Gpio3);
impl_pin!(P3_15, 3, 15, Gpio3);
impl_pin!(P3_16, 3, 16, Gpio3);
impl_pin!(P3_17, 3, 17, Gpio3);
impl_pin!(P3_18, 3, 18, Gpio3);
impl_pin!(P3_19, 3, 19, Gpio3);
impl_pin!(P3_20, 3, 20, Gpio3);
impl_pin!(P3_21, 3, 21, Gpio3);
impl_pin!(P3_22, 3, 22, Gpio3);
impl_pin!(P3_23, 3, 23, Gpio3);
impl_pin!(P3_24, 3, 24, Gpio3);
impl_pin!(P3_25, 3, 25, Gpio3);
impl_pin!(P3_26, 3, 26, Gpio3);
impl_pin!(P3_27, 3, 27, Gpio3);
impl_pin!(P3_28, 3, 28, Gpio3);
impl_pin!(P3_29, 3, 29, Gpio3);
impl_pin!(P3_30, 3, 30, Gpio3);
impl_pin!(P3_31, 3, 31, Gpio3);

impl_pin!(P4_0, 4, 0, Gpio4);
impl_pin!(P4_1, 4, 1, Gpio4);
impl_pin!(P4_2, 4, 2, Gpio4);
impl_pin!(P4_3, 4, 3, Gpio4);
impl_pin!(P4_4, 4, 4, Gpio4);
impl_pin!(P4_5, 4, 5, Gpio4);
impl_pin!(P4_6, 4, 6, Gpio4);
impl_pin!(P4_7, 4, 7, Gpio4);
impl_pin!(P4_8, 4, 8, Gpio4);
impl_pin!(P4_9, 4, 9, Gpio4);
impl_pin!(P4_10, 4, 10, Gpio4);
impl_pin!(P4_11, 4, 11, Gpio4);
impl_pin!(P4_12, 4, 12, Gpio4);
impl_pin!(P4_13, 4, 13, Gpio4);
impl_pin!(P4_14, 4, 14, Gpio4);
impl_pin!(P4_15, 4, 15, Gpio4);
impl_pin!(P4_16, 4, 16, Gpio4);
impl_pin!(P4_17, 4, 17, Gpio4);
impl_pin!(P4_18, 4, 18, Gpio4);
impl_pin!(P4_19, 4, 19, Gpio4);
impl_pin!(P4_20, 4, 20, Gpio4);
impl_pin!(P4_21, 4, 21, Gpio4);
impl_pin!(P4_22, 4, 22, Gpio4);
impl_pin!(P4_23, 4, 23, Gpio4);
impl_pin!(P4_24, 4, 24, Gpio4);
impl_pin!(P4_25, 4, 25, Gpio4);
impl_pin!(P4_26, 4, 26, Gpio4);
impl_pin!(P4_27, 4, 27, Gpio4);
impl_pin!(P4_28, 4, 28, Gpio4);
impl_pin!(P4_29, 4, 29, Gpio4);
impl_pin!(P4_30, 4, 30, Gpio4);
impl_pin!(P4_31, 4, 31, Gpio4);
557
/// A flexible pin that can be configured as input or output.
pub struct Flex<'d> {
    // Type-erased pin; all register access goes through this handle.
    pin: Peri<'d, AnyPin>,
    _marker: PhantomData<&'d mut ()>,
}

impl<'d> Flex<'d> {
    /// Wrap the pin in a `Flex`.
    ///
    /// The pin's mux is switched to function 0; its direction and output level
    /// are left unchanged. The initial output level is unspecified, but can be
    /// changed before the pin is put into output mode.
    pub fn new(pin: Peri<'d, impl GpioPin>) -> Self {
        // Mux0 — presumably the GPIO function for these pads; TODO confirm
        // against the device's pin-mux tables.
        pin.set_function(Mux::Mux0);
        Self {
            pin: pin.into(),
            _marker: PhantomData,
        }
    }

    // GPIO register block for this pin's port.
    #[inline]
    fn gpio(&self) -> &'static crate::pac::gpio0::RegisterBlock {
        self.pin.gpio()
    }

    // Single-bit mask for this pin within its port registers.
    #[inline]
    fn mask(&self) -> u32 {
        self.pin.mask()
    }

    /// Put the pin into input mode.
    pub fn set_as_input(&mut self) {
        let mask = self.mask();
        let gpio = self.gpio();

        // Enable the input buffer so reads reflect the pad state.
        self.set_enable_input_buffer();

        // Clearing the PDDR bit selects input direction.
        gpio.pddr().modify(|r, w| unsafe { w.bits(r.bits() & !mask) });
    }

    /// Put the pin into output mode.
    pub fn set_as_output(&mut self) {
        let mask = self.mask();
        let gpio = self.gpio();

        // Pulls serve no purpose on a driven pad; disable them first.
        self.set_pull(Pull::Disabled);

        // Setting the PDDR bit selects output direction.
        gpio.pddr().modify(|r, w| unsafe { w.bits(r.bits() | mask) });
    }

    /// Set output level to High.
    #[inline]
    pub fn set_high(&mut self) {
        // PSOR: writing 1 sets the corresponding output bit.
        self.gpio().psor().write(|w| unsafe { w.bits(self.mask()) });
    }

    /// Set output level to Low.
    #[inline]
    pub fn set_low(&mut self) {
        // PCOR: writing 1 clears the corresponding output bit.
        self.gpio().pcor().write(|w| unsafe { w.bits(self.mask()) });
    }

    /// Set output level to the given `Level`.
    #[inline]
    pub fn set_level(&mut self, level: Level) {
        match level {
            Level::High => self.set_high(),
            Level::Low => self.set_low(),
        }
    }

    /// Toggle output level.
    #[inline]
    pub fn toggle(&mut self) {
        // PTOR: writing 1 toggles the corresponding output bit.
        self.gpio().ptor().write(|w| unsafe { w.bits(self.mask()) });
    }

    /// Get whether the pin input level is high.
    #[inline]
    pub fn is_high(&self) -> bool {
        (self.gpio().pdir().read().bits() & self.mask()) != 0
    }

    /// Get whether the pin input level is low.
    #[inline]
    pub fn is_low(&self) -> bool {
        !self.is_high()
    }

    /// Is the output pin set as high?
    ///
    /// NOTE(review): this samples PDIR (the pad's input state), not the output
    /// data latch; for a push-pull output the two agree — revisit if
    /// open-drain configurations are ever supported.
    #[inline]
    pub fn is_set_high(&self) -> bool {
        self.is_high()
    }

    /// Is the output pin set as low?
    #[inline]
    pub fn is_set_low(&self) -> bool {
        !self.is_set_high()
    }

    /// Configure the pin pull up/down level.
    pub fn set_pull(&mut self, pull_select: Pull) {
        self.pin.set_pull(pull_select);
    }

    /// Configure the pin drive strength.
    pub fn set_drive_strength(&mut self, strength: DriveStrength) {
        self.pin.set_drive_strength(strength.into());
    }

    /// Configure the pin slew rate.
    pub fn set_slew_rate(&mut self, slew_rate: SlewRate) {
        self.pin.set_slew_rate(slew_rate.into());
    }

    /// Enable input buffer for the pin.
    pub fn set_enable_input_buffer(&mut self) {
        self.pin.set_enable_input_buffer();
    }

    /// Get pin level.
    pub fn get_level(&self) -> Level {
        self.is_high().into()
    }
}
683
/// Async methods
impl<'d> Flex<'d> {
    /// Helper function that waits for a given interrupt trigger
    async fn wait_for_inner(&mut self, level: crate::pac::gpio0::icr::Irqc) {
        // First, ensure that we have a waker that is ready for this port+pin
        let w = PORT_WAIT_MAPS[self.pin.port].wait(self.pin.pin);
        let mut w = pin!(w);
        // Wait for the subscription to occur, which requires polling at least once
        //
        // This function returns a result, but can only be an Err if:
        //
        // * We call `.close()` on a WaitMap, which we never do
        // * We have a duplicate key, which can't happen because `wait_for_*` methods
        //   take an &mut ref of their unique port+pin combo
        //
        // So we wait for it to complete, but ignore the result.
        _ = w.as_mut().subscribe().await;

        // Now that our waker is in the map, we can enable the appropriate interrupt
        //
        // Clear any existing pending interrupt on this pin
        self.pin
            .gpio()
            .isfr0()
            .write(|w| unsafe { w.bits(1 << self.pin.pin()) });
        // Full write of ICR: clears ISF (write-1-to-clear) and resets the
        // other fields, leaving IRQC disabled until programmed just below.
        self.pin.gpio().icr(self.pin.pin()).write(|w| w.isf().isf1());

        // Pin interrupt configuration; `level` selects the level/edge
        // detection mode (see the callers below for the mapping).
        self.pin
            .gpio()
            .icr(self.pin.pin())
            .modify(|_, w| w.irqc().variant(level));

        // Finally, we can await the matching call to `.wake()` from the interrupt.
        //
        // Again, technically, this could return a result, but for the same reasons
        // as above, this can't be an error in our case, so just wait for it to complete
        _ = w.await;
    }

    /// Wait until the pin is high. If it is already high, return immediately.
    #[inline]
    pub fn wait_for_high(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        // Irqc12: high-level trigger — per usage; confirm encoding in the RM.
        self.wait_for_inner(crate::pac::gpio0::icr::Irqc::Irqc12)
    }

    /// Wait until the pin is low. If it is already low, return immediately.
    #[inline]
    pub fn wait_for_low(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        // Irqc8: low-level trigger — per usage; confirm encoding in the RM.
        self.wait_for_inner(crate::pac::gpio0::icr::Irqc::Irqc8)
    }

    /// Wait for the pin to undergo a transition from low to high.
    #[inline]
    pub fn wait_for_rising_edge(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        self.wait_for_inner(crate::pac::gpio0::icr::Irqc::Irqc9)
    }

    /// Wait for the pin to undergo a transition from high to low.
    #[inline]
    pub fn wait_for_falling_edge(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        self.wait_for_inner(crate::pac::gpio0::icr::Irqc::Irqc10)
    }

    /// Wait for the pin to undergo any transition, i.e low to high OR high to low.
    #[inline]
    pub fn wait_for_any_edge(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        self.wait_for_inner(crate::pac::gpio0::icr::Irqc::Irqc11)
    }
}
754
755/// GPIO output driver that owns a `Flex` pin.
756pub struct Output<'d> {
757 flex: Flex<'d>,
758}
759
760impl<'d> Output<'d> {
761 /// Create a GPIO output driver for a [GpioPin] with the provided [Level].
762 pub fn new(pin: Peri<'d, impl GpioPin>, initial: Level, strength: DriveStrength, slew_rate: SlewRate) -> Self {
763 let mut flex = Flex::new(pin);
764 flex.set_level(initial);
765 flex.set_as_output();
766 flex.set_drive_strength(strength);
767 flex.set_slew_rate(slew_rate);
768 Self { flex }
769 }
770
771 /// Set the output as high.
772 #[inline]
773 pub fn set_high(&mut self) {
774 self.flex.set_high();
775 }
776
777 /// Set the output as low.
778 #[inline]
779 pub fn set_low(&mut self) {
780 self.flex.set_low();
781 }
782
783 /// Set the output level.
784 #[inline]
785 pub fn set_level(&mut self, level: Level) {
786 self.flex.set_level(level);
787 }
788
789 /// Toggle the output level.
790 #[inline]
791 pub fn toggle(&mut self) {
792 self.flex.toggle();
793 }
794
795 /// Is the output pin set as high?
796 #[inline]
797 pub fn is_set_high(&self) -> bool {
798 self.flex.is_high()
799 }
800
801 /// Is the output pin set as low?
802 #[inline]
803 pub fn is_set_low(&self) -> bool {
804 !self.is_set_high()
805 }
806
807 /// Expose the inner `Flex` if callers need to reconfigure the pin.
808 #[inline]
809 pub fn into_flex(self) -> Flex<'d> {
810 self.flex
811 }
812}
813
814/// GPIO input driver that owns a `Flex` pin.
815pub struct Input<'d> {
816 flex: Flex<'d>,
817}
818
819impl<'d> Input<'d> {
820 /// Create a GPIO input driver for a [GpioPin].
821 ///
822 pub fn new(pin: Peri<'d, impl GpioPin>, pull_select: Pull) -> Self {
823 let mut flex = Flex::new(pin);
824 flex.set_as_input();
825 flex.set_pull(pull_select);
826 Self { flex }
827 }
828
829 /// Get whether the pin input level is high.
830 #[inline]
831 pub fn is_high(&self) -> bool {
832 self.flex.is_high()
833 }
834
835 /// Get whether the pin input level is low.
836 #[inline]
837 pub fn is_low(&self) -> bool {
838 self.flex.is_low()
839 }
840
841 /// Expose the inner `Flex` if callers need to reconfigure the pin.
842 ///
843 /// Since Drive Strength and Slew Rate are not set when creating the Input
844 /// pin, they need to be set when converting
845 #[inline]
846 pub fn into_flex(mut self, strength: DriveStrength, slew_rate: SlewRate) -> Flex<'d> {
847 self.flex.set_drive_strength(strength);
848 self.flex.set_slew_rate(slew_rate);
849 self.flex
850 }
851
852 /// Get the pin level.
853 pub fn get_level(&self) -> Level {
854 self.flex.get_level()
855 }
856}
857
/// Async methods
///
/// Thin wrappers over the corresponding [`Flex`] futures.
impl<'d> Input<'d> {
    /// Wait until the pin is high. If it is already high, return immediately.
    #[inline]
    pub fn wait_for_high(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        self.flex.wait_for_high()
    }

    /// Wait until the pin is low. If it is already low, return immediately.
    #[inline]
    pub fn wait_for_low(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        self.flex.wait_for_low()
    }

    /// Wait for the pin to undergo a transition from low to high.
    #[inline]
    pub fn wait_for_rising_edge(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        self.flex.wait_for_rising_edge()
    }

    /// Wait for the pin to undergo a transition from high to low.
    #[inline]
    pub fn wait_for_falling_edge(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        self.flex.wait_for_falling_edge()
    }

    /// Wait for the pin to undergo any transition, i.e low to high OR high to low.
    #[inline]
    pub fn wait_for_any_edge(&mut self) -> impl Future<Output = ()> + use<'_, 'd> {
        self.flex.wait_for_any_edge()
    }
}
890
// `embedded_hal_async::digital::Wait` adapters. The inner calls resolve to
// the inherent async methods (inherent methods take precedence over trait
// methods); GPIO waits cannot fail, so the result is always `Ok(())`.
impl embedded_hal_async::digital::Wait for Input<'_> {
    async fn wait_for_high(&mut self) -> Result<(), Self::Error> {
        self.wait_for_high().await;
        Ok(())
    }

    async fn wait_for_low(&mut self) -> Result<(), Self::Error> {
        self.wait_for_low().await;
        Ok(())
    }

    async fn wait_for_rising_edge(&mut self) -> Result<(), Self::Error> {
        self.wait_for_rising_edge().await;
        Ok(())
    }

    async fn wait_for_falling_edge(&mut self) -> Result<(), Self::Error> {
        self.wait_for_falling_edge().await;
        Ok(())
    }

    async fn wait_for_any_edge(&mut self) -> Result<(), Self::Error> {
        self.wait_for_any_edge().await;
        Ok(())
    }
}

impl embedded_hal_async::digital::Wait for Flex<'_> {
    async fn wait_for_high(&mut self) -> Result<(), Self::Error> {
        self.wait_for_high().await;
        Ok(())
    }

    async fn wait_for_low(&mut self) -> Result<(), Self::Error> {
        self.wait_for_low().await;
        Ok(())
    }

    async fn wait_for_rising_edge(&mut self) -> Result<(), Self::Error> {
        self.wait_for_rising_edge().await;
        Ok(())
    }

    async fn wait_for_falling_edge(&mut self) -> Result<(), Self::Error> {
        self.wait_for_falling_edge().await;
        Ok(())
    }

    async fn wait_for_any_edge(&mut self) -> Result<(), Self::Error> {
        self.wait_for_any_edge().await;
        Ok(())
    }
}
944
// Both embedded_hal 0.2 and 1.0 must be supported by embassy HALs.
// The inner calls resolve to the inherent methods (inherent methods take
// precedence over trait methods), so the `Ok(...)` wrappers do not recurse.
impl embedded_hal_02::digital::v2::InputPin for Flex<'_> {
    // GPIO operations on this block cannot fail, therefore we set the error
    // type to Infallible to guarantee that we can only produce Ok variants.
    type Error = Infallible;

    #[inline]
    fn is_high(&self) -> Result<bool, Self::Error> {
        Ok(self.is_high())
    }

    #[inline]
    fn is_low(&self) -> Result<bool, Self::Error> {
        Ok(self.is_low())
    }
}

impl embedded_hal_02::digital::v2::InputPin for Input<'_> {
    type Error = Infallible;

    #[inline]
    fn is_high(&self) -> Result<bool, Self::Error> {
        Ok(self.is_high())
    }

    #[inline]
    fn is_low(&self) -> Result<bool, Self::Error> {
        Ok(self.is_low())
    }
}

impl embedded_hal_02::digital::v2::OutputPin for Flex<'_> {
    type Error = Infallible;

    #[inline]
    fn set_high(&mut self) -> Result<(), Self::Error> {
        self.set_high();
        Ok(())
    }

    #[inline]
    fn set_low(&mut self) -> Result<(), Self::Error> {
        self.set_low();
        Ok(())
    }
}

impl embedded_hal_02::digital::v2::StatefulOutputPin for Flex<'_> {
    #[inline]
    fn is_set_high(&self) -> Result<bool, Self::Error> {
        Ok(self.is_set_high())
    }

    #[inline]
    fn is_set_low(&self) -> Result<bool, Self::Error> {
        Ok(self.is_set_low())
    }
}

impl embedded_hal_02::digital::v2::ToggleableOutputPin for Flex<'_> {
    type Error = Infallible;

    #[inline]
    fn toggle(&mut self) -> Result<(), Self::Error> {
        self.toggle();
        Ok(())
    }
}
1013
// `embedded-hal` 1.0 implementations. Its fallible accessors take `&mut self`,
// so `(*self).method()` reborrows to `&self` to target the inherent method
// unambiguously instead of recursing into the trait method.
impl embedded_hal_1::digital::ErrorType for Flex<'_> {
    type Error = Infallible;
}

impl embedded_hal_1::digital::ErrorType for Input<'_> {
    type Error = Infallible;
}

impl embedded_hal_1::digital::ErrorType for Output<'_> {
    type Error = Infallible;
}

impl embedded_hal_1::digital::InputPin for Input<'_> {
    #[inline]
    fn is_high(&mut self) -> Result<bool, Self::Error> {
        Ok((*self).is_high())
    }

    #[inline]
    fn is_low(&mut self) -> Result<bool, Self::Error> {
        Ok((*self).is_low())
    }
}

impl embedded_hal_1::digital::OutputPin for Flex<'_> {
    #[inline]
    fn set_high(&mut self) -> Result<(), Self::Error> {
        self.set_high();
        Ok(())
    }

    #[inline]
    fn set_low(&mut self) -> Result<(), Self::Error> {
        self.set_low();
        Ok(())
    }
}

impl embedded_hal_1::digital::StatefulOutputPin for Flex<'_> {
    #[inline]
    fn is_set_high(&mut self) -> Result<bool, Self::Error> {
        Ok((*self).is_set_high())
    }

    #[inline]
    fn is_set_low(&mut self) -> Result<bool, Self::Error> {
        Ok((*self).is_set_low())
    }
}
diff --git a/embassy-mcxa/src/i2c/controller.rs b/embassy-mcxa/src/i2c/controller.rs
new file mode 100644
index 000000000..41bbc821d
--- /dev/null
+++ b/embassy-mcxa/src/i2c/controller.rs
@@ -0,0 +1,455 @@
1//! LPI2C controller driver
2
3use core::marker::PhantomData;
4
5use embassy_hal_internal::Peri;
6use mcxa_pac::lpi2c0::mtdr::Cmd;
7
8use super::{Blocking, Error, Instance, Mode, Result, SclPin, SdaPin};
9use crate::clocks::periph_helpers::{Div4, Lpi2cClockSel, Lpi2cConfig};
10use crate::clocks::{enable_and_reset, PoweredClock};
11use crate::AnyPin;
12
/// Bus speed (nominal SCL, no clock stretching)
// Debug and Eq added for consistency with `SendStop` below; PartialEq was
// already derived, so Eq is free, and Debug aids error reporting.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Speed {
    /// 100 kbit/sec
    #[default]
    Standard,
    /// 400 kbit/sec
    Fast,
    /// 1 Mbit/sec
    FastPlus,
    /// 3.4 Mbit/sec
    UltraFast,
}
27
// Maps a bus speed to the MCCR0 timing fields in the order
// (CLKLO, CLKHI, SETHOLD, DATAVD), exactly as destructured and programmed
// by `I2c::set_config`. Values assume the functional clock selected by
// `I2c::clock_config` for that speed — TODO confirm against the LPI2C
// timing tables in the reference manual.
impl From<Speed> for (u8, u8, u8, u8) {
    fn from(value: Speed) -> (u8, u8, u8, u8) {
        match value {
            Speed::Standard => (0x3d, 0x37, 0x3b, 0x1d),
            Speed::Fast => (0x0e, 0x0c, 0x0d, 0x06),
            Speed::FastPlus => (0x04, 0x03, 0x03, 0x02),

            // UltraFast is "special". Leaving it unimplemented until
            // the driver and the clock API is further stabilized.
            Speed::UltraFast => todo!(),
        }
    }
}
41
/// Whether a transfer segment terminates with a STOP condition (`Yes`)
/// or is left open so the next segment begins with a repeated START
/// (`No`, as used by `blocking_write_read`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
enum SendStop {
    /// Leave the transaction open (no STOP after this segment).
    No,
    /// Terminate the transaction with a STOP.
    Yes,
}
48
/// I2C controller configuration
///
/// `Default` yields [`Speed::Standard`] (100 kbit/sec).
#[derive(Clone, Copy, Default)]
#[non_exhaustive]
pub struct Config {
    /// Bus speed
    pub speed: Speed,
}
56
/// I2C Controller Driver.
pub struct I2c<'d, T: Instance, M: Mode> {
    // Held only to keep the peripheral and its pins exclusively owned
    // for the driver's lifetime.
    _peri: Peri<'d, T>,
    _scl: Peri<'d, AnyPin>,
    _sda: Peri<'d, AnyPin>,
    // Marker for the Blocking/Async driver mode.
    _phantom: PhantomData<M>,
    // True when configured for `Speed::UltraFast`; selects the
    // high-speed START command (`Cmd::StartHs`) in `start`.
    is_hs: bool,
}
65
impl<'d, T: Instance> I2c<'d, T, Blocking> {
    /// Create a new blocking instance of the I2C Controller bus driver.
    ///
    /// Enables and configures the peripheral clock, muxes `scl`/`sda`,
    /// and programs the bus timing for `config.speed`.
    ///
    /// # Errors
    /// Returns [`Error::ClockSetup`] if clock configuration fails.
    pub fn new_blocking(
        peri: Peri<'d, T>,
        scl: Peri<'d, impl SclPin<T>>,
        sda: Peri<'d, impl SdaPin<T>>,
        config: Config,
    ) -> Result<Self> {
        Self::new_inner(peri, scl, sda, config)
    }
}
77
78impl<'d, T: Instance, M: Mode> I2c<'d, T, M> {
79 fn new_inner(
80 _peri: Peri<'d, T>,
81 scl: Peri<'d, impl SclPin<T>>,
82 sda: Peri<'d, impl SdaPin<T>>,
83 config: Config,
84 ) -> Result<Self> {
85 let (power, source, div) = Self::clock_config(config.speed);
86
87 // Enable clocks
88 let conf = Lpi2cConfig {
89 power,
90 source,
91 div,
92 instance: T::CLOCK_INSTANCE,
93 };
94
95 _ = unsafe { enable_and_reset::<T>(&conf).map_err(Error::ClockSetup)? };
96
97 scl.mux();
98 sda.mux();
99
100 let _scl = scl.into();
101 let _sda = sda.into();
102
103 Self::set_config(&config)?;
104
105 Ok(Self {
106 _peri,
107 _scl,
108 _sda,
109 _phantom: PhantomData,
110 is_hs: config.speed == Speed::UltraFast,
111 })
112 }
113
114 fn set_config(config: &Config) -> Result<()> {
115 // Disable the controller.
116 critical_section::with(|_| T::regs().mcr().modify(|_, w| w.men().disabled()));
117
118 // Soft-reset the controller, read and write FIFOs.
119 critical_section::with(|_| {
120 T::regs()
121 .mcr()
122 .modify(|_, w| w.rst().reset().rtf().reset().rrf().reset());
123 // According to Reference Manual section 40.7.1.4, "There
124 // is no minimum delay required before clearing the
125 // software reset", therefore we clear it immediately.
126 T::regs().mcr().modify(|_, w| w.rst().not_reset());
127
128 T::regs().mcr().modify(|_, w| w.dozen().clear_bit().dbgen().clear_bit());
129 });
130
131 let (clklo, clkhi, sethold, datavd) = config.speed.into();
132
133 critical_section::with(|_| {
134 T::regs().mccr0().modify(|_, w| unsafe {
135 w.clklo()
136 .bits(clklo)
137 .clkhi()
138 .bits(clkhi)
139 .sethold()
140 .bits(sethold)
141 .datavd()
142 .bits(datavd)
143 })
144 });
145
146 // Enable the controller.
147 critical_section::with(|_| T::regs().mcr().modify(|_, w| w.men().enabled()));
148
149 // Clear all flags
150 T::regs().msr().write(|w| {
151 w.epf()
152 .clear_bit_by_one()
153 .sdf()
154 .clear_bit_by_one()
155 .ndf()
156 .clear_bit_by_one()
157 .alf()
158 .clear_bit_by_one()
159 .fef()
160 .clear_bit_by_one()
161 .pltf()
162 .clear_bit_by_one()
163 .dmf()
164 .clear_bit_by_one()
165 .stf()
166 .clear_bit_by_one()
167 });
168
169 Ok(())
170 }
171
172 // REVISIT: turn this into a function of the speed parameter
173 fn clock_config(speed: Speed) -> (PoweredClock, Lpi2cClockSel, Div4) {
174 match speed {
175 Speed::Standard | Speed::Fast | Speed::FastPlus => (
176 PoweredClock::NormalEnabledDeepSleepDisabled,
177 Lpi2cClockSel::FroLfDiv,
178 const { Div4::no_div() },
179 ),
180 Speed::UltraFast => (
181 PoweredClock::NormalEnabledDeepSleepDisabled,
182 Lpi2cClockSel::FroHfDiv,
183 const { Div4::no_div() },
184 ),
185 }
186 }
187
188 fn is_tx_fifo_full(&mut self) -> bool {
189 let txfifo_size = 1 << T::regs().param().read().mtxfifo().bits();
190 T::regs().mfsr().read().txcount().bits() == txfifo_size
191 }
192
193 fn is_tx_fifo_empty(&mut self) -> bool {
194 T::regs().mfsr().read().txcount() == 0
195 }
196
197 fn is_rx_fifo_empty(&mut self) -> bool {
198 T::regs().mfsr().read().rxcount() == 0
199 }
200
201 fn status(&mut self) -> Result<()> {
202 // Wait for TxFIFO to be drained
203 while !self.is_tx_fifo_empty() {}
204
205 let msr = T::regs().msr().read();
206 T::regs().msr().write(|w| {
207 w.epf()
208 .clear_bit_by_one()
209 .sdf()
210 .clear_bit_by_one()
211 .ndf()
212 .clear_bit_by_one()
213 .alf()
214 .clear_bit_by_one()
215 .fef()
216 .clear_bit_by_one()
217 .fef()
218 .clear_bit_by_one()
219 .pltf()
220 .clear_bit_by_one()
221 .dmf()
222 .clear_bit_by_one()
223 .stf()
224 .clear_bit_by_one()
225 });
226
227 if msr.ndf().bit_is_set() {
228 Err(Error::AddressNack)
229 } else if msr.alf().bit_is_set() {
230 Err(Error::ArbitrationLoss)
231 } else {
232 Ok(())
233 }
234 }
235
236 fn send_cmd(&mut self, cmd: Cmd, data: u8) {
237 #[cfg(feature = "defmt")]
238 defmt::trace!(
239 "Sending cmd '{}' ({}) with data '{:08x}' MSR: {:08x}",
240 cmd,
241 cmd as u8,
242 data,
243 T::regs().msr().read().bits()
244 );
245
246 T::regs()
247 .mtdr()
248 .write(|w| unsafe { w.data().bits(data) }.cmd().variant(cmd));
249 }
250
251 fn start(&mut self, address: u8, read: bool) -> Result<()> {
252 if address >= 0x80 {
253 return Err(Error::AddressOutOfRange(address));
254 }
255
256 // Wait until we have space in the TxFIFO
257 while self.is_tx_fifo_full() {}
258
259 let addr_rw = address << 1 | if read { 1 } else { 0 };
260 self.send_cmd(if self.is_hs { Cmd::StartHs } else { Cmd::Start }, addr_rw);
261
262 // Check controller status
263 self.status()
264 }
265
266 fn stop(&mut self) -> Result<()> {
267 // Wait until we have space in the TxFIFO
268 while self.is_tx_fifo_full() {}
269
270 self.send_cmd(Cmd::Stop, 0);
271 self.status()
272 }
273
274 fn blocking_read_internal(&mut self, address: u8, read: &mut [u8], send_stop: SendStop) -> Result<()> {
275 self.start(address, true)?;
276
277 if read.is_empty() {
278 return Err(Error::InvalidReadBufferLength);
279 }
280
281 for chunk in read.chunks_mut(256) {
282 // Wait until we have space in the TxFIFO
283 while self.is_tx_fifo_full() {}
284
285 self.send_cmd(Cmd::Receive, (chunk.len() - 1) as u8);
286
287 for byte in chunk.iter_mut() {
288 // Wait until there's data in the RxFIFO
289 while self.is_rx_fifo_empty() {}
290
291 *byte = T::regs().mrdr().read().data().bits();
292 }
293
294 if send_stop == SendStop::Yes {
295 self.stop()?;
296 }
297 }
298
299 Ok(())
300 }
301
302 fn blocking_write_internal(&mut self, address: u8, write: &[u8], send_stop: SendStop) -> Result<()> {
303 self.start(address, false)?;
304
305 // Usually, embassy HALs error out with an empty write,
306 // however empty writes are useful for writing I2C scanning
307 // logic through write probing. That is, we send a start with
308 // R/w bit cleared, but instead of writing any data, just send
309 // the stop onto the bus. This has the effect of checking if
310 // the resulting address got an ACK but causing no
311 // side-effects to the device on the other end.
312 //
313 // Because of this, we are not going to error out in case of
314 // empty writes.
315 #[cfg(feature = "defmt")]
316 if write.is_empty() {
317 defmt::trace!("Empty write, write probing?");
318 }
319
320 for byte in write {
321 // Wait until we have space in the TxFIFO
322 while self.is_tx_fifo_full() {}
323
324 self.send_cmd(Cmd::Transmit, *byte);
325 }
326
327 if send_stop == SendStop::Yes {
328 self.stop()?;
329 }
330
331 Ok(())
332 }
333
334 // Public API: Blocking
335
336 /// Read from address into buffer blocking caller until done.
337 pub fn blocking_read(&mut self, address: u8, read: &mut [u8]) -> Result<()> {
338 self.blocking_read_internal(address, read, SendStop::Yes)
339 // Automatic Stop
340 }
341
342 /// Write to address from buffer blocking caller until done.
343 pub fn blocking_write(&mut self, address: u8, write: &[u8]) -> Result<()> {
344 self.blocking_write_internal(address, write, SendStop::Yes)
345 }
346
347 /// Write to address from bytes and read from address into buffer blocking caller until done.
348 pub fn blocking_write_read(&mut self, address: u8, write: &[u8], read: &mut [u8]) -> Result<()> {
349 self.blocking_write_internal(address, write, SendStop::No)?;
350 self.blocking_read_internal(address, read, SendStop::Yes)
351 }
352}
353
// embedded-hal 0.2 blocking traits: thin forwarders onto the inherent
// blocking API (each forwards a full transaction ending with STOP).
impl<'d, T: Instance, M: Mode> embedded_hal_02::blocking::i2c::Read for I2c<'d, T, M> {
    type Error = Error;

    fn read(&mut self, address: u8, buffer: &mut [u8]) -> Result<()> {
        self.blocking_read(address, buffer)
    }
}

impl<'d, T: Instance, M: Mode> embedded_hal_02::blocking::i2c::Write for I2c<'d, T, M> {
    type Error = Error;

    fn write(&mut self, address: u8, bytes: &[u8]) -> Result<()> {
        self.blocking_write(address, bytes)
    }
}

impl<'d, T: Instance, M: Mode> embedded_hal_02::blocking::i2c::WriteRead for I2c<'d, T, M> {
    type Error = Error;

    fn write_read(&mut self, address: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<()> {
        self.blocking_write_read(address, bytes, buffer)
    }
}
377
378impl<'d, T: Instance, M: Mode> embedded_hal_02::blocking::i2c::Transactional for I2c<'d, T, M> {
379 type Error = Error;
380
381 fn exec(&mut self, address: u8, operations: &mut [embedded_hal_02::blocking::i2c::Operation<'_>]) -> Result<()> {
382 if let Some((last, rest)) = operations.split_last_mut() {
383 for op in rest {
384 match op {
385 embedded_hal_02::blocking::i2c::Operation::Read(buf) => {
386 self.blocking_read_internal(address, buf, SendStop::No)?
387 }
388 embedded_hal_02::blocking::i2c::Operation::Write(buf) => {
389 self.blocking_write_internal(address, buf, SendStop::No)?
390 }
391 }
392 }
393
394 match last {
395 embedded_hal_02::blocking::i2c::Operation::Read(buf) => {
396 self.blocking_read_internal(address, buf, SendStop::Yes)
397 }
398 embedded_hal_02::blocking::i2c::Operation::Write(buf) => {
399 self.blocking_write_internal(address, buf, SendStop::Yes)
400 }
401 }
402 } else {
403 Ok(())
404 }
405 }
406}
407
408impl embedded_hal_1::i2c::Error for Error {
409 fn kind(&self) -> embedded_hal_1::i2c::ErrorKind {
410 match *self {
411 Self::ArbitrationLoss => embedded_hal_1::i2c::ErrorKind::ArbitrationLoss,
412 Self::AddressNack => {
413 embedded_hal_1::i2c::ErrorKind::NoAcknowledge(embedded_hal_1::i2c::NoAcknowledgeSource::Address)
414 }
415 _ => embedded_hal_1::i2c::ErrorKind::Other,
416 }
417 }
418}
419
// All embedded-hal 1.0 I2C operations report the driver's own `Error`.
impl<'d, T: Instance, M: Mode> embedded_hal_1::i2c::ErrorType for I2c<'d, T, M> {
    type Error = Error;
}
423
424impl<'d, T: Instance, M: Mode> embedded_hal_1::i2c::I2c for I2c<'d, T, M> {
425 fn transaction(&mut self, address: u8, operations: &mut [embedded_hal_1::i2c::Operation<'_>]) -> Result<()> {
426 if let Some((last, rest)) = operations.split_last_mut() {
427 for op in rest {
428 match op {
429 embedded_hal_1::i2c::Operation::Read(buf) => {
430 self.blocking_read_internal(address, buf, SendStop::No)?
431 }
432 embedded_hal_1::i2c::Operation::Write(buf) => {
433 self.blocking_write_internal(address, buf, SendStop::No)?
434 }
435 }
436 }
437
438 match last {
439 embedded_hal_1::i2c::Operation::Read(buf) => self.blocking_read_internal(address, buf, SendStop::Yes),
440 embedded_hal_1::i2c::Operation::Write(buf) => self.blocking_write_internal(address, buf, SendStop::Yes),
441 }
442 } else {
443 Ok(())
444 }
445 }
446}
447
impl<'d, T: Instance, M: Mode> embassy_embedded_hal::SetConfig for I2c<'d, T, M> {
    type Config = Config;
    type ConfigError = Error;

    /// Re-apply bus configuration at runtime (resets the controller and
    /// reprograms the bus timing).
    ///
    /// NOTE(review): this does not update `self.is_hs`, so switching
    /// to/from `Speed::UltraFast` via `SetConfig` will not change which
    /// START command `start()` issues — confirm whether that is intended.
    fn set_config(&mut self, config: &Self::Config) -> Result<()> {
        Self::set_config(config)
    }
}
diff --git a/embassy-mcxa/src/i2c/mod.rs b/embassy-mcxa/src/i2c/mod.rs
new file mode 100644
index 000000000..a1f842029
--- /dev/null
+++ b/embassy-mcxa/src/i2c/mod.rs
@@ -0,0 +1,171 @@
1//! I2C Support
2
3use core::marker::PhantomData;
4
5use embassy_hal_internal::PeripheralType;
6use embassy_sync::waitqueue::AtomicWaker;
7use paste::paste;
8
9use crate::clocks::periph_helpers::Lpi2cConfig;
10use crate::clocks::{ClockError, Gate};
11use crate::gpio::{GpioPin, SealedPin};
12use crate::{interrupt, pac};
13
14/// Shorthand for `Result<T>`.
15pub type Result<T> = core::result::Result<T, Error>;
16
17pub mod controller;
18
/// Error information type
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// Clock configuration error.
    ClockSetup(ClockError),
    /// Reading for I2C failed.
    // NOTE(review): not produced by the blocking controller driver —
    // presumably reserved for the async path; confirm before removing.
    ReadFail,
    /// Writing to I2C failed.
    // NOTE(review): same situation as `ReadFail`.
    WriteFail,
    /// I2C address NAK condition.
    AddressNack,
    /// Bus level arbitration loss.
    ArbitrationLoss,
    /// Address out of range.
    AddressOutOfRange(u8),
    /// Invalid write buffer length.
    InvalidWriteBufferLength,
    /// Invalid read buffer length.
    InvalidReadBufferLength,
    /// Other internal errors or unexpected state.
    Other,
}
42
/// I2C interrupt handler.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}

impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let waker = T::waker();

        waker.wake();

        // NOTE(review): the async path is not implemented yet — after
        // waking the waker this handler hits `todo!()` and panics. Do
        // not bind this handler until the async driver is completed.
        todo!()
    }
}
57
mod sealed {
    /// Seal a trait
    pub trait Sealed {}
}

// Blanket-seal every GPIO pin so the pin traits below can only be
// implemented within this crate.
impl<T: GpioPin> sealed::Sealed for T {}
64
trait SealedInstance {
    /// Access this instance's LPI2C register block.
    fn regs() -> &'static pac::lpi2c0::RegisterBlock;
    /// Per-instance waker used by the interrupt handler.
    fn waker() -> &'static AtomicWaker;
}

/// I2C Instance
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send + Gate<MrccPeriphConfig = Lpi2cConfig> {
    /// Interrupt for this I2C instance.
    type Interrupt: interrupt::typelevel::Interrupt;
    /// Clock instance
    const CLOCK_INSTANCE: crate::clocks::periph_helpers::Lpi2cInstance;
}
78
// Implements `SealedInstance`/`Instance` for each `LPI2C<n>` peripheral:
// wires up its register block, a dedicated waker, the type-level
// interrupt, and the clock-tree instance identifier.
macro_rules! impl_instance {
    ($($n:expr),*) => {
        $(
            paste!{
                impl SealedInstance for crate::peripherals::[<LPI2C $n>] {
                    fn regs() -> &'static pac::lpi2c0::RegisterBlock {
                        unsafe { &*pac::[<Lpi2c $n>]::ptr() }
                    }

                    fn waker() -> &'static AtomicWaker {
                        static WAKER: AtomicWaker = AtomicWaker::new();
                        &WAKER
                    }
                }

                impl Instance for crate::peripherals::[<LPI2C $n>] {
                    type Interrupt = crate::interrupt::typelevel::[<LPI2C $n>];
                    const CLOCK_INSTANCE: crate::clocks::periph_helpers::Lpi2cInstance
                        = crate::clocks::periph_helpers::Lpi2cInstance::[<Lpi2c $n>];
                }
            }
        )*
    };
}

impl_instance!(0, 1, 2, 3);
105
/// SCL pin trait.
pub trait SclPin<Instance>: GpioPin + sealed::Sealed + PeripheralType {
    /// Configure the pin for I2C use: disable pulls, set slew rate and
    /// drive strength, select the alternate function, and enable the
    /// input buffer.
    fn mux(&self);
}

/// SDA pin trait.
pub trait SdaPin<Instance>: GpioPin + sealed::Sealed + PeripheralType {
    /// Configure the pin for I2C use: disable pulls, set slew rate and
    /// drive strength, select the alternate function, and enable the
    /// input buffer.
    fn mux(&self);
}
115
/// Driver mode.
#[allow(private_bounds)]
pub trait Mode: sealed::Sealed {}

/// Blocking mode.
pub struct Blocking;
impl sealed::Sealed for Blocking {}
impl Mode for Blocking {}

/// Async mode.
// NOTE(review): the async driver path is not implemented yet (see the
// `todo!()` in `InterruptHandler::on_interrupt`).
pub struct Async;
impl sealed::Sealed for Async {}
impl Mode for Async {}
129
// Implements an I2C pin trait (`SclPin`/`SdaPin`) for a concrete port
// pin: `mux()` disables pulls, selects fast slew and double drive
// strength, routes the pin to the given alternate function, and enables
// the input buffer (presumably so the controller can observe the
// open-drain bus level — confirm against the pin-mux documentation).
macro_rules! impl_pin {
    ($pin:ident, $peri:ident, $fn:ident, $trait:ident) => {
        impl $trait<crate::peripherals::$peri> for crate::peripherals::$pin {
            fn mux(&self) {
                self.set_pull(crate::gpio::Pull::Disabled);
                self.set_slew_rate(crate::gpio::SlewRate::Fast.into());
                self.set_drive_strength(crate::gpio::DriveStrength::Double.into());
                self.set_function(crate::pac::port0::pcr0::Mux::$fn);
                self.set_enable_input_buffer();
            }
        }
    };
}

impl_pin!(P0_16, LPI2C0, Mux2, SdaPin);
impl_pin!(P0_17, LPI2C0, Mux2, SclPin);
impl_pin!(P0_18, LPI2C0, Mux2, SclPin);
impl_pin!(P0_19, LPI2C0, Mux2, SdaPin);
impl_pin!(P1_0, LPI2C1, Mux3, SdaPin);
impl_pin!(P1_1, LPI2C1, Mux3, SclPin);
impl_pin!(P1_2, LPI2C1, Mux3, SdaPin);
impl_pin!(P1_3, LPI2C1, Mux3, SclPin);
impl_pin!(P1_8, LPI2C2, Mux3, SdaPin);
impl_pin!(P1_9, LPI2C2, Mux3, SclPin);
impl_pin!(P1_10, LPI2C2, Mux3, SdaPin);
impl_pin!(P1_11, LPI2C2, Mux3, SclPin);
impl_pin!(P1_12, LPI2C1, Mux2, SdaPin);
impl_pin!(P1_13, LPI2C1, Mux2, SclPin);
impl_pin!(P1_14, LPI2C1, Mux2, SclPin);
impl_pin!(P1_15, LPI2C1, Mux2, SdaPin);
impl_pin!(P1_30, LPI2C0, Mux3, SdaPin);
impl_pin!(P1_31, LPI2C0, Mux3, SclPin);
impl_pin!(P3_27, LPI2C3, Mux2, SclPin);
impl_pin!(P3_28, LPI2C3, Mux2, SdaPin);
// impl_pin!(P3_29, LPI2C3, Mux2, HreqPin); What is this HREQ pin?
impl_pin!(P3_30, LPI2C3, Mux2, SclPin);
impl_pin!(P3_31, LPI2C3, Mux2, SdaPin);
impl_pin!(P4_2, LPI2C2, Mux2, SdaPin);
impl_pin!(P4_3, LPI2C0, Mux2, SclPin);
impl_pin!(P4_4, LPI2C2, Mux2, SdaPin);
impl_pin!(P4_5, LPI2C0, Mux2, SclPin);
// impl_pin!(P4_6, LPI2C0, Mux2, HreqPin); What is this HREQ pin?
diff --git a/embassy-mcxa/src/interrupt.rs b/embassy-mcxa/src/interrupt.rs
new file mode 100644
index 000000000..000b2f9cd
--- /dev/null
+++ b/embassy-mcxa/src/interrupt.rs
@@ -0,0 +1,546 @@
1//! Minimal interrupt helpers mirroring embassy-imxrt style for OS_EVENT and LPUART2.
2//! Type-level interrupt traits and bindings are provided by the
3//! `embassy_hal_internal::interrupt_mod!` macro via the generated module below.
4
5// TODO(AJM): As of 2025-11-13, we need to do a pass to ensure safety docs
6// are complete prior to release.
7#![allow(clippy::missing_safety_doc)]
8
9mod generated {
10 embassy_hal_internal::interrupt_mod!(
11 OS_EVENT, RTC, ADC1, GPIO0, GPIO1, GPIO2, GPIO3, GPIO4, LPI2C0, LPI2C1, LPI2C2, LPI2C3, LPUART0, LPUART1,
12 LPUART2, LPUART3, LPUART4, LPUART5, DMA_CH0, DMA_CH1, DMA_CH2, DMA_CH3, DMA_CH4, DMA_CH5, DMA_CH6, DMA_CH7,
13 );
14}
15
16use core::sync::atomic::{AtomicU16, AtomicU32, Ordering};
17
18pub use generated::interrupt::{typelevel, Priority};
19
20use crate::pac::Interrupt;
21
/// Trait for configuring and controlling interrupts.
///
/// This trait provides a consistent interface for interrupt management across
/// different interrupt sources, similar to embassy-imxrt's InterruptExt.
/// Implemented by the zero-sized per-interrupt helper types below
/// (`OsEvent`, `Lpuart2`, `Rtc`, `Adc`, `Gpio0`..`Gpio4`).
pub trait InterruptExt {
    /// Clear any pending interrupt in NVIC.
    fn unpend(&self);

    /// Set NVIC priority for this interrupt.
    fn set_priority(&self, priority: Priority);

    /// Enable this interrupt in NVIC.
    ///
    /// # Safety
    /// This function is unsafe because it can enable interrupts that may not be
    /// properly configured, potentially leading to undefined behavior.
    unsafe fn enable(&self);

    /// Disable this interrupt in NVIC.
    ///
    /// # Safety
    /// This function is unsafe because disabling interrupts may leave the system
    /// in an inconsistent state if the interrupt was expected to fire.
    unsafe fn disable(&self);

    /// Check if the interrupt is pending in NVIC.
    fn is_pending(&self) -> bool;
}
50
/// Diagnostic snapshot captured by `DefaultHandler` when an unexpected
/// interrupt or fault vectors to the default handler.
#[derive(Clone, Copy, Debug, Default)]
pub struct DefaultHandlerSnapshot {
    /// Active exception number (ICSR.VECTACTIVE) at capture time.
    pub vector: u16,
    /// Number of default-handler entries since the last clear.
    pub count: u32,
    /// Configurable Fault Status Register (CFSR, 0xE000_ED28) value.
    pub cfsr: u32,
    /// HardFault Status Register (HFSR, 0xE000_ED2C) value.
    pub hfsr: u32,
    /// PC stacked by exception entry.
    pub stacked_pc: u32,
    /// LR stacked by exception entry.
    pub stacked_lr: u32,
    /// Main stack pointer read at capture time.
    pub stacked_sp: u32,
}

// Backing storage for the snapshot; written by `DefaultHandler`, read
// by `default_handler_snapshot`, zeroed by `clear_default_handler_snapshot`.
static LAST_DEFAULT_VECTOR: AtomicU16 = AtomicU16::new(0);
static LAST_DEFAULT_COUNT: AtomicU32 = AtomicU32::new(0);
static LAST_DEFAULT_CFSR: AtomicU32 = AtomicU32::new(0);
static LAST_DEFAULT_HFSR: AtomicU32 = AtomicU32::new(0);
static LAST_DEFAULT_PC: AtomicU32 = AtomicU32::new(0);
static LAST_DEFAULT_LR: AtomicU32 = AtomicU32::new(0);
static LAST_DEFAULT_SP: AtomicU32 = AtomicU32::new(0);
69
70#[inline]
71pub fn default_handler_snapshot() -> DefaultHandlerSnapshot {
72 DefaultHandlerSnapshot {
73 vector: LAST_DEFAULT_VECTOR.load(Ordering::Relaxed),
74 count: LAST_DEFAULT_COUNT.load(Ordering::Relaxed),
75 cfsr: LAST_DEFAULT_CFSR.load(Ordering::Relaxed),
76 hfsr: LAST_DEFAULT_HFSR.load(Ordering::Relaxed),
77 stacked_pc: LAST_DEFAULT_PC.load(Ordering::Relaxed),
78 stacked_lr: LAST_DEFAULT_LR.load(Ordering::Relaxed),
79 stacked_sp: LAST_DEFAULT_SP.load(Ordering::Relaxed),
80 }
81}
82
83#[inline]
84pub fn clear_default_handler_snapshot() {
85 LAST_DEFAULT_VECTOR.store(0, Ordering::Relaxed);
86 LAST_DEFAULT_COUNT.store(0, Ordering::Relaxed);
87 LAST_DEFAULT_CFSR.store(0, Ordering::Relaxed);
88 LAST_DEFAULT_HFSR.store(0, Ordering::Relaxed);
89 LAST_DEFAULT_PC.store(0, Ordering::Relaxed);
90 LAST_DEFAULT_LR.store(0, Ordering::Relaxed);
91 LAST_DEFAULT_SP.store(0, Ordering::Relaxed);
92}
93
94/// OS_EVENT interrupt helper with methods similar to embassy-imxrt's InterruptExt.
95pub struct OsEvent;
96pub const OS_EVENT: OsEvent = OsEvent;
97
98impl InterruptExt for OsEvent {
99 /// Clear any pending OS_EVENT in NVIC.
100 #[inline]
101 fn unpend(&self) {
102 cortex_m::peripheral::NVIC::unpend(Interrupt::OS_EVENT);
103 }
104
105 /// Set NVIC priority for OS_EVENT.
106 #[inline]
107 fn set_priority(&self, priority: Priority) {
108 unsafe {
109 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
110 nvic.set_priority(Interrupt::OS_EVENT, u8::from(priority));
111 }
112 }
113
114 /// Enable OS_EVENT in NVIC.
115 #[inline]
116 unsafe fn enable(&self) {
117 cortex_m::peripheral::NVIC::unmask(Interrupt::OS_EVENT);
118 }
119
120 /// Disable OS_EVENT in NVIC.
121 #[inline]
122 unsafe fn disable(&self) {
123 cortex_m::peripheral::NVIC::mask(Interrupt::OS_EVENT);
124 }
125
126 /// Check if OS_EVENT is pending in NVIC.
127 #[inline]
128 fn is_pending(&self) -> bool {
129 cortex_m::peripheral::NVIC::is_pending(Interrupt::OS_EVENT)
130 }
131}
132
133impl OsEvent {
134 /// Configure OS_EVENT interrupt for timer operation.
135 /// This sets up the NVIC priority, enables the interrupt, and ensures global interrupts are enabled.
136 /// Also performs a software event to wake any pending WFE.
137 pub fn configure_for_timer(&self, priority: Priority) {
138 // Configure NVIC
139 self.unpend();
140 self.set_priority(priority);
141 unsafe {
142 self.enable();
143 }
144
145 // Ensure global interrupts are enabled in no-reset scenarios (e.g., cargo run)
146 // Debuggers typically perform a reset which leaves PRIMASK=0; cargo run may not.
147 unsafe {
148 cortex_m::interrupt::enable();
149 }
150
151 // Wake any executor WFE that might be sleeping when we armed the first deadline
152 cortex_m::asm::sev();
153 }
154}
155
156/// LPUART2 interrupt helper with methods similar to embassy-imxrt's InterruptExt.
157pub struct Lpuart2;
158pub const LPUART2: Lpuart2 = Lpuart2;
159
160impl InterruptExt for Lpuart2 {
161 /// Clear any pending LPUART2 in NVIC.
162 #[inline]
163 fn unpend(&self) {
164 cortex_m::peripheral::NVIC::unpend(Interrupt::LPUART2);
165 }
166
167 /// Set NVIC priority for LPUART2.
168 #[inline]
169 fn set_priority(&self, priority: Priority) {
170 unsafe {
171 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
172 nvic.set_priority(Interrupt::LPUART2, u8::from(priority));
173 }
174 }
175
176 /// Enable LPUART2 in NVIC.
177 #[inline]
178 unsafe fn enable(&self) {
179 cortex_m::peripheral::NVIC::unmask(Interrupt::LPUART2);
180 }
181
182 /// Disable LPUART2 in NVIC.
183 #[inline]
184 unsafe fn disable(&self) {
185 cortex_m::peripheral::NVIC::mask(Interrupt::LPUART2);
186 }
187
188 /// Check if LPUART2 is pending in NVIC.
189 #[inline]
190 fn is_pending(&self) -> bool {
191 cortex_m::peripheral::NVIC::is_pending(Interrupt::LPUART2)
192 }
193}
194
195impl Lpuart2 {
196 /// Configure LPUART2 interrupt for UART operation.
197 /// This sets up the NVIC priority, enables the interrupt, and ensures global interrupts are enabled.
198 pub fn configure_for_uart(&self, priority: Priority) {
199 // Configure NVIC
200 self.unpend();
201 self.set_priority(priority);
202 unsafe {
203 self.enable();
204 }
205
206 // Ensure global interrupts are enabled in no-reset scenarios (e.g., cargo run)
207 // Debuggers typically perform a reset which leaves PRIMASK=0; cargo run may not.
208 unsafe {
209 cortex_m::interrupt::enable();
210 }
211 }
212
213 /// Install LPUART2 handler into the RAM vector table.
214 /// Safety: See `install_irq_handler`.
215 pub unsafe fn install_handler(&self, handler: unsafe extern "C" fn()) {
216 install_irq_handler(Interrupt::LPUART2, handler);
217 }
218}
219
220pub struct Rtc;
221pub const RTC: Rtc = Rtc;
222
223impl InterruptExt for Rtc {
224 /// Clear any pending RTC in NVIC.
225 #[inline]
226 fn unpend(&self) {
227 cortex_m::peripheral::NVIC::unpend(Interrupt::RTC);
228 }
229
230 /// Set NVIC priority for RTC.
231 #[inline]
232 fn set_priority(&self, priority: Priority) {
233 unsafe {
234 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
235 nvic.set_priority(Interrupt::RTC, u8::from(priority));
236 }
237 }
238
239 /// Enable RTC in NVIC.
240 #[inline]
241 unsafe fn enable(&self) {
242 cortex_m::peripheral::NVIC::unmask(Interrupt::RTC);
243 }
244
245 /// Disable RTC in NVIC.
246 #[inline]
247 unsafe fn disable(&self) {
248 cortex_m::peripheral::NVIC::mask(Interrupt::RTC);
249 }
250
251 /// Check if RTC is pending in NVIC.
252 #[inline]
253 fn is_pending(&self) -> bool {
254 cortex_m::peripheral::NVIC::is_pending(Interrupt::RTC)
255 }
256}
257
258pub struct Adc;
259pub const ADC1: Adc = Adc;
260
261impl InterruptExt for Adc {
262 /// Clear any pending ADC1 in NVIC.
263 #[inline]
264 fn unpend(&self) {
265 cortex_m::peripheral::NVIC::unpend(Interrupt::ADC1);
266 }
267
268 /// Set NVIC priority for ADC1.
269 #[inline]
270 fn set_priority(&self, priority: Priority) {
271 unsafe {
272 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
273 nvic.set_priority(Interrupt::ADC1, u8::from(priority));
274 }
275 }
276
277 /// Enable ADC1 in NVIC.
278 #[inline]
279 unsafe fn enable(&self) {
280 cortex_m::peripheral::NVIC::unmask(Interrupt::ADC1);
281 }
282
283 /// Disable ADC1 in NVIC.
284 #[inline]
285 unsafe fn disable(&self) {
286 cortex_m::peripheral::NVIC::mask(Interrupt::ADC1);
287 }
288
289 /// Check if ADC1 is pending in NVIC.
290 #[inline]
291 fn is_pending(&self) -> bool {
292 cortex_m::peripheral::NVIC::is_pending(Interrupt::ADC1)
293 }
294}
295
296pub struct Gpio0;
297pub const GPIO0: Gpio0 = Gpio0;
298
299impl InterruptExt for Gpio0 {
300 /// Clear any pending GPIO0 in NVIC.
301 #[inline]
302 fn unpend(&self) {
303 cortex_m::peripheral::NVIC::unpend(Interrupt::GPIO0);
304 }
305
306 /// Set NVIC priority for GPIO0.
307 #[inline]
308 fn set_priority(&self, priority: Priority) {
309 unsafe {
310 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
311 nvic.set_priority(Interrupt::GPIO0, u8::from(priority));
312 }
313 }
314
315 /// Enable GPIO0 in NVIC.
316 #[inline]
317 unsafe fn enable(&self) {
318 cortex_m::peripheral::NVIC::unmask(Interrupt::GPIO0);
319 }
320
321 /// Disable GPIO0 in NVIC.
322 #[inline]
323 unsafe fn disable(&self) {
324 cortex_m::peripheral::NVIC::mask(Interrupt::GPIO0);
325 }
326
327 /// Check if GPIO0 is pending in NVIC.
328 #[inline]
329 fn is_pending(&self) -> bool {
330 cortex_m::peripheral::NVIC::is_pending(Interrupt::GPIO0)
331 }
332}
333
334pub struct Gpio1;
335pub const GPIO1: Gpio1 = Gpio1;
336
337impl InterruptExt for Gpio1 {
338 /// Clear any pending GPIO1 in NVIC.
339 #[inline]
340 fn unpend(&self) {
341 cortex_m::peripheral::NVIC::unpend(Interrupt::GPIO1);
342 }
343
344 /// Set NVIC priority for GPIO1.
345 #[inline]
346 fn set_priority(&self, priority: Priority) {
347 unsafe {
348 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
349 nvic.set_priority(Interrupt::GPIO1, u8::from(priority));
350 }
351 }
352
353 /// Enable GPIO1 in NVIC.
354 #[inline]
355 unsafe fn enable(&self) {
356 cortex_m::peripheral::NVIC::unmask(Interrupt::GPIO1);
357 }
358
359 /// Disable GPIO1 in NVIC.
360 #[inline]
361 unsafe fn disable(&self) {
362 cortex_m::peripheral::NVIC::mask(Interrupt::GPIO1);
363 }
364
365 /// Check if GPIO1 is pending in NVIC.
366 #[inline]
367 fn is_pending(&self) -> bool {
368 cortex_m::peripheral::NVIC::is_pending(Interrupt::GPIO1)
369 }
370}
371
372pub struct Gpio2;
373pub const GPIO2: Gpio2 = Gpio2;
374
375impl InterruptExt for Gpio2 {
376 /// Clear any pending GPIO2 in NVIC.
377 #[inline]
378 fn unpend(&self) {
379 cortex_m::peripheral::NVIC::unpend(Interrupt::GPIO2);
380 }
381
382 /// Set NVIC priority for GPIO2.
383 #[inline]
384 fn set_priority(&self, priority: Priority) {
385 unsafe {
386 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
387 nvic.set_priority(Interrupt::GPIO2, u8::from(priority));
388 }
389 }
390
391 /// Enable GPIO2 in NVIC.
392 #[inline]
393 unsafe fn enable(&self) {
394 cortex_m::peripheral::NVIC::unmask(Interrupt::GPIO2);
395 }
396
397 /// Disable GPIO2 in NVIC.
398 #[inline]
399 unsafe fn disable(&self) {
400 cortex_m::peripheral::NVIC::mask(Interrupt::GPIO2);
401 }
402
403 /// Check if GPIO2 is pending in NVIC.
404 #[inline]
405 fn is_pending(&self) -> bool {
406 cortex_m::peripheral::NVIC::is_pending(Interrupt::GPIO2)
407 }
408}
409
410pub struct Gpio3;
411pub const GPIO3: Gpio3 = Gpio3;
412
413impl InterruptExt for Gpio3 {
414 /// Clear any pending GPIO3 in NVIC.
415 #[inline]
416 fn unpend(&self) {
417 cortex_m::peripheral::NVIC::unpend(Interrupt::GPIO3);
418 }
419
420 /// Set NVIC priority for GPIO3.
421 #[inline]
422 fn set_priority(&self, priority: Priority) {
423 unsafe {
424 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
425 nvic.set_priority(Interrupt::GPIO3, u8::from(priority));
426 }
427 }
428
429 /// Enable GPIO3 in NVIC.
430 #[inline]
431 unsafe fn enable(&self) {
432 cortex_m::peripheral::NVIC::unmask(Interrupt::GPIO3);
433 }
434
435 /// Disable GPIO3 in NVIC.
436 #[inline]
437 unsafe fn disable(&self) {
438 cortex_m::peripheral::NVIC::mask(Interrupt::GPIO3);
439 }
440
441 /// Check if GPIO3 is pending in NVIC.
442 #[inline]
443 fn is_pending(&self) -> bool {
444 cortex_m::peripheral::NVIC::is_pending(Interrupt::GPIO3)
445 }
446}
447
448pub struct Gpio4;
449pub const GPIO4: Gpio4 = Gpio4;
450
451impl InterruptExt for Gpio4 {
452 /// Clear any pending GPIO4 in NVIC.
453 #[inline]
454 fn unpend(&self) {
455 cortex_m::peripheral::NVIC::unpend(Interrupt::GPIO4);
456 }
457
458 /// Set NVIC priority for GPIO4.
459 #[inline]
460 fn set_priority(&self, priority: Priority) {
461 unsafe {
462 let mut nvic = cortex_m::peripheral::Peripherals::steal().NVIC;
463 nvic.set_priority(Interrupt::GPIO4, u8::from(priority));
464 }
465 }
466
467 /// Enable GPIO4 in NVIC.
468 #[inline]
469 unsafe fn enable(&self) {
470 cortex_m::peripheral::NVIC::unmask(Interrupt::GPIO4);
471 }
472
473 /// Disable GPIO4 in NVIC.
474 #[inline]
475 unsafe fn disable(&self) {
476 cortex_m::peripheral::NVIC::mask(Interrupt::GPIO4);
477 }
478
479 /// Check if GPIO4 is pending in NVIC.
480 #[inline]
481 fn is_pending(&self) -> bool {
482 cortex_m::peripheral::NVIC::is_pending(Interrupt::GPIO4)
483 }
484}
485
/// Set VTOR (Vector Table Offset) to a RAM-based vector table.
/// Pass a pointer to the first word in the RAM table (stack pointer slot 0).
///
/// # Safety
/// Caller must ensure the RAM table is valid and aligned as required by the core.
pub unsafe fn vtor_set_ram_vector_base(base: *const u32) {
    // 0xE000_ED08 is the VTOR register in the System Control Block.
    core::ptr::write_volatile(0xE000_ED08 as *mut u32, base as u32);
}

/// Install an interrupt handler into the current VTOR-based vector table.
/// This writes the function pointer at index 16 + irq number (the first
/// 16 vector slots are the initial SP and the system exceptions).
///
/// # Safety
/// Caller must ensure VTOR points at a writable RAM table and that `handler`
/// has the correct ABI and lifetime.
pub unsafe fn install_irq_handler(irq: Interrupt, handler: unsafe extern "C" fn()) {
    let vtor_base = core::ptr::read_volatile(0xE000_ED08 as *const u32) as *mut u32;
    let idx = 16 + (irq as isize as usize);
    core::ptr::write_volatile(vtor_base.add(idx), handler as usize as u32);
}
502
503impl OsEvent {
504 /// Convenience to install the OS_EVENT handler into the RAM vector table.
505 /// Safety: See `install_irq_handler`.
506 pub unsafe fn install_handler(&self, handler: extern "C" fn()) {
507 install_irq_handler(Interrupt::OS_EVENT, handler);
508 }
509}
510
/// Install OS_EVENT handler by raw address. Useful to avoid fn pointer type mismatches.
///
/// # Safety
/// Caller must ensure the address is a valid `extern "C" fn()` handler and
/// that VTOR points at a writable RAM vector table.
pub unsafe fn os_event_install_handler_raw(handler_addr: usize) {
    // Read VTOR, then patch the OS_EVENT slot (16 system vectors precede
    // the external interrupts).
    let vtor_base = core::ptr::read_volatile(0xE000_ED08 as *const u32) as *mut u32;
    let idx = 16 + (Interrupt::OS_EVENT as isize as usize);
    core::ptr::write_volatile(vtor_base.add(idx), handler_addr as u32);
}
518
/// Conservative default IRQ handler used for spurious or unbound vectors.
///
/// Records a diagnostic snapshot (active vector number, CFSR/HFSR, the
/// stacked PC/LR and stack pointer) into the `LAST_DEFAULT_*` statics
/// and then simply returns, so unexpected IRQs don't wedge the core in
/// an infinite loop during bring-up. It deliberately does NOT touch the
/// NVIC: on some MCUs/TrustZone setups, writing NVIC from a spurious
/// handler can fault if targeting the Secure bank.
#[no_mangle]
pub unsafe extern "C" fn DefaultHandler() {
    // ICSR.VECTACTIVE: exception number currently being serviced.
    let active = core::ptr::read_volatile(0xE000_ED04 as *const u32) & 0x1FF;
    // Configurable Fault / HardFault status registers.
    let cfsr = core::ptr::read_volatile(0xE000_ED28 as *const u32);
    let hfsr = core::ptr::read_volatile(0xE000_ED2C as *const u32);

    let sp = cortex_m::register::msp::read();
    let stacked = sp as *const u32;
    // Stacked registers follow ARMv8-M procedure call standard order:
    // r0-r3, r12, LR (offset 5), PC (offset 6), xPSR.
    // NOTE(review): this assumes the exception frame sits at the current
    // MSP with no extra prologue stack usage — confirm.
    let stacked_pc = unsafe { stacked.add(6).read() };
    let stacked_lr = unsafe { stacked.add(5).read() };

    LAST_DEFAULT_VECTOR.store(active as u16, Ordering::Relaxed);
    LAST_DEFAULT_CFSR.store(cfsr, Ordering::Relaxed);
    LAST_DEFAULT_HFSR.store(hfsr, Ordering::Relaxed);
    LAST_DEFAULT_COUNT.fetch_add(1, Ordering::Relaxed);
    LAST_DEFAULT_PC.store(stacked_pc, Ordering::Relaxed);
    LAST_DEFAULT_LR.store(stacked_lr, Ordering::Relaxed);
    LAST_DEFAULT_SP.store(sp, Ordering::Relaxed);

    // Do nothing here: on some MCUs/TrustZone setups, writing NVIC from a spurious
    // handler can fault if targeting the Secure bank. Just return.
    cortex_m::asm::dsb();
    cortex_m::asm::isb();
}
diff --git a/embassy-mcxa/src/lib.rs b/embassy-mcxa/src/lib.rs
new file mode 100644
index 000000000..d721f53e6
--- /dev/null
+++ b/embassy-mcxa/src/lib.rs
@@ -0,0 +1,484 @@
1#![no_std]
2#![allow(async_fn_in_trait)]
3#![doc = include_str!("../README.md")]
4
5// //! ## Feature flags
6// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
7
8pub mod clocks; // still provide clock helpers
9pub mod dma;
10pub mod gpio;
11pub mod pins; // pin mux helpers
12
13pub mod adc;
14pub mod clkout;
15pub mod config;
16pub mod i2c;
17pub mod interrupt;
18pub mod lpuart;
19pub mod ostimer;
20pub mod rtc;
21
22pub use crate::pac::NVIC_PRIO_BITS;
23
// Declare the peripheral singleton types for this chip. `Peripherals::take()`
// hands out exactly one instance of each. Pin singletons are named `Px_y`.
#[rustfmt::skip]
embassy_hal_internal::peripherals!(
    ADC0,
    ADC1,

    AOI0,
    AOI1,

    CAN0,
    CAN1,

    CDOG0,
    CDOG1,

    // CLKOUT is not specifically a peripheral (it's part of SYSCON),
    // but we still want it to be a singleton.
    CLKOUT,

    CMC,
    CMP0,
    CMP1,
    CRC0,

    CTIMER0,
    CTIMER1,
    CTIMER2,
    CTIMER3,
    CTIMER4,

    DBGMAILBOX,
    DMA0,
    DMA_CH0,
    DMA_CH1,
    DMA_CH2,
    DMA_CH3,
    DMA_CH4,
    DMA_CH5,
    DMA_CH6,
    DMA_CH7,
    EDMA0_TCD0,
    EIM0,
    EQDC0,
    EQDC1,
    ERM0,
    FLEXIO0,
    FLEXPWM0,
    FLEXPWM1,
    FMC0,
    FMU0,
    FREQME0,
    GLIKEY0,

    GPIO0,
    GPIO1,
    GPIO2,
    GPIO3,
    GPIO4,

    I3C0,
    INPUTMUX0,

    LPI2C0,
    LPI2C1,
    LPI2C2,
    LPI2C3,

    LPSPI0,
    LPSPI1,

    LPTMR0,

    LPUART0,
    LPUART1,
    LPUART2,
    LPUART3,
    LPUART4,
    LPUART5,

    MAU0,
    MBC0,
    MRCC0,
    OPAMP0,

    // When the `time` feature is enabled, OSTIMER0 is claimed by the
    // embassy-time driver, so no user-facing singleton is generated.
    #[cfg(not(feature = "time"))]
    OSTIMER0,

    P0_0,
    P0_1,
    P0_2,
    P0_3,
    P0_4,
    P0_5,
    P0_6,
    P0_7,
    P0_8,
    P0_9,
    P0_10,
    P0_11,
    P0_12,
    P0_13,
    P0_14,
    P0_15,
    P0_16,
    P0_17,
    P0_18,
    P0_19,
    P0_20,
    P0_21,
    P0_22,
    P0_23,
    P0_24,
    P0_25,
    P0_26,
    P0_27,
    P0_28,
    P0_29,
    P0_30,
    P0_31,

    P1_0,
    P1_1,
    P1_2,
    P1_3,
    P1_4,
    P1_5,
    P1_6,
    P1_7,
    P1_8,
    P1_9,
    P1_10,
    P1_11,
    P1_12,
    P1_13,
    P1_14,
    P1_15,
    P1_16,
    P1_17,
    P1_18,
    P1_19,
    P1_20,
    P1_21,
    P1_22,
    P1_23,
    P1_24,
    P1_25,
    P1_26,
    P1_27,
    P1_28,
    P1_29,
    P1_30,
    P1_31,

    P2_0,
    P2_1,
    P2_2,
    P2_3,
    P2_4,
    P2_5,
    P2_6,
    P2_7,
    P2_8,
    P2_9,
    P2_10,
    P2_11,
    P2_12,
    P2_13,
    P2_14,
    P2_15,
    P2_16,
    P2_17,
    P2_18,
    P2_19,
    P2_20,
    P2_21,
    P2_22,
    P2_23,
    P2_24,
    P2_25,
    P2_26,
    P2_27,
    P2_28,
    P2_29,
    P2_30,
    P2_31,

    P3_0,
    P3_1,
    P3_2,
    P3_3,
    P3_4,
    P3_5,
    P3_6,
    P3_7,
    P3_8,
    P3_9,
    P3_10,
    P3_11,
    P3_12,
    P3_13,
    P3_14,
    P3_15,
    P3_16,
    P3_17,
    P3_18,
    P3_19,
    P3_20,
    P3_21,
    P3_22,
    P3_23,
    P3_24,
    P3_25,
    P3_26,
    P3_27,
    P3_28,
    P3_29,
    P3_30,
    P3_31,

    P4_0,
    P4_1,
    P4_2,
    P4_3,
    P4_4,
    P4_5,
    P4_6,
    P4_7,
    P4_8,
    P4_9,
    P4_10,
    P4_11,
    P4_12,
    P4_13,
    P4_14,
    P4_15,
    P4_16,
    P4_17,
    P4_18,
    P4_19,
    P4_20,
    P4_21,
    P4_22,
    P4_23,
    P4_24,
    P4_25,
    P4_26,
    P4_27,
    P4_28,
    P4_29,
    P4_30,
    P4_31,

    P5_0,
    P5_1,
    P5_2,
    P5_3,
    P5_4,
    P5_5,
    P5_6,
    P5_7,
    P5_8,
    P5_9,
    P5_10,
    P5_11,
    P5_12,
    P5_13,
    P5_14,
    P5_15,
    P5_16,
    P5_17,
    P5_18,
    P5_19,
    P5_20,
    P5_21,
    P5_22,
    P5_23,
    P5_24,
    P5_25,
    P5_26,
    P5_27,
    P5_28,
    P5_29,
    P5_30,
    P5_31,

    PKC0,

    PORT0,
    PORT1,
    PORT2,
    PORT3,
    PORT4,

    RTC0,
    SAU,
    SCG0,
    SCN_SCB,
    SGI0,
    SMARTDMA0,
    SPC0,
    SYSCON,
    TDET0,
    TRNG0,
    UDF0,
    USB0,
    UTICK0,
    VBAT0,
    WAKETIMER0,
    WUU0,
    WWDT0,
);
334
335// Use cortex-m-rt's #[interrupt] attribute directly; PAC does not re-export it.
336
337// Re-export interrupt traits and types
338pub use adc::Adc1 as Adc1Token;
339pub use gpio::{AnyPin, Flex, Gpio as GpioToken, Input, Level, Output};
340pub use interrupt::InterruptExt;
341#[cfg(feature = "unstable-pac")]
342pub use mcxa_pac as pac;
343#[cfg(not(feature = "unstable-pac"))]
344pub(crate) use mcxa_pac as pac;
345pub use rtc::Rtc0 as Rtc0Token;
346
347/// Initialize HAL with configuration (mirrors embassy-imxrt style). Minimal: just take peripherals.
348/// Also applies configurable NVIC priority for the OSTIMER OS_EVENT interrupt (no enabling).
349pub fn init(cfg: crate::config::Config) -> Peripherals {
350 let peripherals = Peripherals::take();
351 // Apply user-configured priority early; enabling is left to examples/apps
352 #[cfg(feature = "time")]
353 crate::interrupt::OS_EVENT.set_priority(cfg.time_interrupt_priority);
354 // Apply user-configured priority early; enabling is left to examples/apps
355 crate::interrupt::RTC.set_priority(cfg.rtc_interrupt_priority);
356 // Apply user-configured priority early; enabling is left to examples/apps
357 crate::interrupt::ADC1.set_priority(cfg.adc_interrupt_priority);
358 // Apply user-configured priority early; enabling is left to examples/apps
359 crate::interrupt::GPIO0.set_priority(cfg.gpio_interrupt_priority);
360 // Apply user-configured priority early; enabling is left to examples/apps
361 crate::interrupt::GPIO1.set_priority(cfg.gpio_interrupt_priority);
362 // Apply user-configured priority early; enabling is left to examples/apps
363 crate::interrupt::GPIO2.set_priority(cfg.gpio_interrupt_priority);
364 // Apply user-configured priority early; enabling is left to examples/apps
365 crate::interrupt::GPIO3.set_priority(cfg.gpio_interrupt_priority);
366 // Apply user-configured priority early; enabling is left to examples/apps
367 crate::interrupt::GPIO4.set_priority(cfg.gpio_interrupt_priority);
368
369 // Configure clocks
370 crate::clocks::init(cfg.clock_cfg).unwrap();
371
372 unsafe {
373 crate::gpio::init();
374 }
375
376 // Initialize DMA controller (clock, reset, configuration)
377 crate::dma::init();
378
379 // Initialize embassy-time global driver backed by OSTIMER0
380 #[cfg(feature = "time")]
381 crate::ostimer::time_driver::init(crate::config::Config::default().time_interrupt_priority, 1_000_000);
382
383 // Enable GPIO clocks
384 unsafe {
385 _ = crate::clocks::enable_and_reset::<crate::peripherals::PORT0>(&crate::clocks::periph_helpers::NoConfig);
386 _ = crate::clocks::enable_and_reset::<crate::peripherals::GPIO0>(&crate::clocks::periph_helpers::NoConfig);
387
388 _ = crate::clocks::enable_and_reset::<crate::peripherals::PORT1>(&crate::clocks::periph_helpers::NoConfig);
389 _ = crate::clocks::enable_and_reset::<crate::peripherals::GPIO1>(&crate::clocks::periph_helpers::NoConfig);
390
391 _ = crate::clocks::enable_and_reset::<crate::peripherals::PORT2>(&crate::clocks::periph_helpers::NoConfig);
392 _ = crate::clocks::enable_and_reset::<crate::peripherals::GPIO2>(&crate::clocks::periph_helpers::NoConfig);
393
394 _ = crate::clocks::enable_and_reset::<crate::peripherals::PORT3>(&crate::clocks::periph_helpers::NoConfig);
395 _ = crate::clocks::enable_and_reset::<crate::peripherals::GPIO3>(&crate::clocks::periph_helpers::NoConfig);
396
397 _ = crate::clocks::enable_and_reset::<crate::peripherals::PORT4>(&crate::clocks::periph_helpers::NoConfig);
398 _ = crate::clocks::enable_and_reset::<crate::peripherals::GPIO4>(&crate::clocks::periph_helpers::NoConfig);
399 }
400
401 peripherals
402}
403
404// /// Optional hook called by cortex-m-rt before RAM init.
405// /// We proactively mask and clear all NVIC IRQs to avoid wedges from stale state
406// /// left by soft resets/debug sessions.
407// ///
408// /// NOTE: Manual VTOR setup is required for RAM execution. The cortex-m-rt 'set-vtor'
409// /// feature is incompatible with our setup because it expects __vector_table to be
410// /// defined differently than how our RAM-based linker script arranges it.
411// #[no_mangle]
412// pub unsafe extern "C" fn __pre_init() {
413// // Set the VTOR to point to the interrupt vector table in RAM
414// // This is required since code runs from RAM on this MCU
415// crate::interrupt::vtor_set_ram_vector_base(0x2000_0000 as *const u32);
416
417// // Mask and clear pending for all NVIC lines (0..127) to avoid stale state across runs.
418// let nvic = &*cortex_m::peripheral::NVIC::PTR;
419// for i in 0..4 {
420// // 4 words x 32 = 128 IRQs
421// nvic.icer[i].write(0xFFFF_FFFF);
422// nvic.icpr[i].write(0xFFFF_FFFF);
423// }
424// // Do NOT touch peripheral registers here: clocks may be off and accesses can fault.
425// crate::interrupt::clear_default_handler_snapshot();
426// }
427
/// Internal helper to dispatch a type-level interrupt handler.
///
/// # Safety
/// Must only be called from the interrupt context of `T`; it forwards to
/// `H::on_interrupt`, which carries the same requirement.
#[inline(always)]
#[doc(hidden)]
pub unsafe fn __handle_interrupt<T, H>()
where
    T: crate::interrupt::typelevel::Interrupt,
    H: crate::interrupt::typelevel::Handler<T>,
{
    H::on_interrupt();
}
438
/// Macro to bind interrupts to handlers, similar to embassy-imxrt.
///
/// For each listed IRQ the macro generates:
/// 1. a zero-sized marker struct `$name` (`Copy + Clone`),
/// 2. a `#[no_mangle]` `extern "C"` vector entry that dispatches to each
///    handler's `Handler::on_interrupt`,
/// 3. an `unsafe impl Binding<Irq, Handler> for $name`, which driver
///    constructors accept as compile-time proof the IRQ is bound.
///
/// Example:
/// - Bind OS_EVENT to the OSTIMER time-driver handler
/// bind_interrupts!(struct Irqs { OS_EVENT => crate::ostimer::time_driver::OsEventHandler; });
#[macro_export]
macro_rules! bind_interrupts {
    ($(#[$attr:meta])* $vis:vis struct $name:ident {
        $(
            $(#[cfg($cond_irq:meta)])?
            $irq:ident => $(
                $(#[cfg($cond_handler:meta)])?
                $handler:ty
            ),*;
        )*
    }) => {
        #[derive(Copy, Clone)]
        $(#[$attr])*
        $vis struct $name;

        $(
            #[allow(non_snake_case)]
            #[no_mangle]
            $(#[cfg($cond_irq)])?
            unsafe extern "C" fn $irq() {
                unsafe {
                    $(
                        $(#[cfg($cond_handler)])?
                        <$handler as $crate::interrupt::typelevel::Handler<$crate::interrupt::typelevel::$irq>>::on_interrupt();
                    )*
                }
            }

            $(#[cfg($cond_irq)])?
            $crate::bind_interrupts!(@inner
                $(
                    $(#[cfg($cond_handler)])?
                    unsafe impl $crate::interrupt::typelevel::Binding<$crate::interrupt::typelevel::$irq, $handler> for $name {}
                )*
            );
        )*
    };
    // Helper arm: re-emits its input verbatim; exists so the optional
    // `#[cfg]` attributes above can gate the generated items.
    (@inner $($t:tt)*) => {
        $($t)*
    }
}
diff --git a/embassy-mcxa/src/lpuart/buffered.rs b/embassy-mcxa/src/lpuart/buffered.rs
new file mode 100644
index 000000000..8eb443ca7
--- /dev/null
+++ b/embassy-mcxa/src/lpuart/buffered.rs
@@ -0,0 +1,780 @@
1use core::future::poll_fn;
2use core::marker::PhantomData;
3use core::sync::atomic::{AtomicBool, Ordering};
4use core::task::Poll;
5
6use embassy_hal_internal::atomic_ring_buffer::RingBuffer;
7use embassy_hal_internal::Peri;
8use embassy_sync::waitqueue::AtomicWaker;
9
10use super::*;
11use crate::interrupt;
12
13// ============================================================================
14// STATIC STATE MANAGEMENT
15// ============================================================================
16
/// State for buffered LPUART operations
///
/// One instance exists per LPUART peripheral (see `Instance::buffered_state`);
/// it is shared between the user-facing drivers and the interrupt handler.
pub struct State {
    /// Woken when TX ring-buffer space frees up or transmission completes.
    tx_waker: AtomicWaker,
    /// Software TX ring buffer, drained by the interrupt handler.
    tx_buf: RingBuffer,
    /// Set by the ISR when the transmit-complete (TC) flag is observed.
    tx_done: AtomicBool,
    /// Woken when the ISR pushes new data into the RX ring buffer.
    rx_waker: AtomicWaker,
    /// Software RX ring buffer, filled by the interrupt handler.
    rx_buf: RingBuffer,
    /// Guards against constructing two drivers over the same instance.
    initialized: AtomicBool,
}
26
27impl Default for State {
28 fn default() -> Self {
29 Self::new()
30 }
31}
32
33impl State {
34 /// Create a new state instance
35 pub const fn new() -> Self {
36 Self {
37 tx_waker: AtomicWaker::new(),
38 tx_buf: RingBuffer::new(),
39 tx_done: AtomicBool::new(true),
40 rx_waker: AtomicWaker::new(),
41 rx_buf: RingBuffer::new(),
42 initialized: AtomicBool::new(false),
43 }
44 }
45}
46// ============================================================================
47// BUFFERED DRIVER STRUCTURES
48// ============================================================================
49
/// Buffered LPUART driver
///
/// Full-duplex, interrupt-driven UART backed by user-supplied ring buffers.
/// Can be split into independent TX/RX halves with [`BufferedLpuart::split`].
pub struct BufferedLpuart<'a> {
    tx: BufferedLpuartTx<'a>,
    rx: BufferedLpuartRx<'a>,
}
55
/// Buffered LPUART TX driver
pub struct BufferedLpuartTx<'a> {
    info: Info,
    state: &'static State,
    // Pins are held only to keep them claimed for the driver's lifetime.
    _tx_pin: Peri<'a, AnyPin>,
    _cts_pin: Option<Peri<'a, AnyPin>>,
}
63
/// Buffered LPUART RX driver
pub struct BufferedLpuartRx<'a> {
    info: Info,
    state: &'static State,
    // Pins are held only to keep them claimed for the driver's lifetime.
    _rx_pin: Peri<'a, AnyPin>,
    _rts_pin: Option<Peri<'a, AnyPin>>,
}
71
72// ============================================================================
73// BUFFERED LPUART IMPLEMENTATION
74// ============================================================================
75
76impl<'a> BufferedLpuart<'a> {
77 /// Common initialization logic
78 fn init_common<T: Instance>(
79 _inner: &Peri<'a, T>,
80 tx_buffer: Option<&'a mut [u8]>,
81 rx_buffer: Option<&'a mut [u8]>,
82 config: &Config,
83 enable_tx: bool,
84 enable_rx: bool,
85 enable_rts: bool,
86 enable_cts: bool,
87 ) -> Result<&'static State> {
88 let state = T::buffered_state();
89
90 if state.initialized.load(Ordering::Relaxed) {
91 return Err(Error::InvalidArgument);
92 }
93
94 // Initialize buffers
95 if let Some(tx_buffer) = tx_buffer {
96 if tx_buffer.is_empty() {
97 return Err(Error::InvalidArgument);
98 }
99 unsafe { state.tx_buf.init(tx_buffer.as_mut_ptr(), tx_buffer.len()) };
100 }
101
102 if let Some(rx_buffer) = rx_buffer {
103 if rx_buffer.is_empty() {
104 return Err(Error::InvalidArgument);
105 }
106 unsafe { state.rx_buf.init(rx_buffer.as_mut_ptr(), rx_buffer.len()) };
107 }
108
109 state.initialized.store(true, Ordering::Relaxed);
110
111 // Enable clocks and initialize hardware
112 let conf = LpuartConfig {
113 power: config.power,
114 source: config.source,
115 div: config.div,
116 instance: T::CLOCK_INSTANCE,
117 };
118 let clock_freq = unsafe { enable_and_reset::<T>(&conf).map_err(Error::ClockSetup)? };
119
120 Self::init_hardware(
121 T::info().regs,
122 *config,
123 clock_freq,
124 enable_tx,
125 enable_rx,
126 enable_rts,
127 enable_cts,
128 )?;
129
130 Ok(state)
131 }
132
133 /// Helper for full-duplex initialization
134 fn new_inner<T: Instance>(
135 inner: Peri<'a, T>,
136 tx_pin: Peri<'a, AnyPin>,
137 rx_pin: Peri<'a, AnyPin>,
138 rts_pin: Option<Peri<'a, AnyPin>>,
139 cts_pin: Option<Peri<'a, AnyPin>>,
140 tx_buffer: &'a mut [u8],
141 rx_buffer: &'a mut [u8],
142 config: Config,
143 ) -> Result<(BufferedLpuartTx<'a>, BufferedLpuartRx<'a>)> {
144 let state = Self::init_common::<T>(
145 &inner,
146 Some(tx_buffer),
147 Some(rx_buffer),
148 &config,
149 true,
150 true,
151 rts_pin.is_some(),
152 cts_pin.is_some(),
153 )?;
154
155 let tx = BufferedLpuartTx {
156 info: T::info(),
157 state,
158 _tx_pin: tx_pin,
159 _cts_pin: cts_pin,
160 };
161
162 let rx = BufferedLpuartRx {
163 info: T::info(),
164 state,
165 _rx_pin: rx_pin,
166 _rts_pin: rts_pin,
167 };
168
169 Ok((tx, rx))
170 }
171
172 /// Common hardware initialization logic
173 fn init_hardware(
174 regs: &'static mcxa_pac::lpuart0::RegisterBlock,
175 config: Config,
176 clock_freq: u32,
177 enable_tx: bool,
178 enable_rx: bool,
179 enable_rts: bool,
180 enable_cts: bool,
181 ) -> Result<()> {
182 // Perform standard initialization
183 perform_software_reset(regs);
184 disable_transceiver(regs);
185 configure_baudrate(regs, config.baudrate_bps, clock_freq)?;
186 configure_frame_format(regs, &config);
187 configure_control_settings(regs, &config);
188 configure_fifo(regs, &config);
189 clear_all_status_flags(regs);
190 configure_flow_control(regs, enable_rts, enable_cts, &config);
191 configure_bit_order(regs, config.msb_first);
192
193 // Enable interrupts for buffered operation
194 cortex_m::interrupt::free(|_| {
195 regs.ctrl().modify(|_, w| {
196 w.rie()
197 .enabled() // RX interrupt
198 .orie()
199 .enabled() // Overrun interrupt
200 .peie()
201 .enabled() // Parity error interrupt
202 .feie()
203 .enabled() // Framing error interrupt
204 .neie()
205 .enabled() // Noise error interrupt
206 });
207 });
208
209 // Enable the transceiver
210 enable_transceiver(regs, enable_rx, enable_tx);
211
212 Ok(())
213 }
214
215 /// Create a new full duplex buffered LPUART
216 pub fn new<T: Instance>(
217 inner: Peri<'a, T>,
218 tx_pin: Peri<'a, impl TxPin<T>>,
219 rx_pin: Peri<'a, impl RxPin<T>>,
220 _irq: impl interrupt::typelevel::Binding<T::Interrupt, BufferedInterruptHandler<T>> + 'a,
221 tx_buffer: &'a mut [u8],
222 rx_buffer: &'a mut [u8],
223 config: Config,
224 ) -> Result<Self> {
225 tx_pin.as_tx();
226 rx_pin.as_rx();
227
228 let (tx, rx) = Self::new_inner::<T>(
229 inner,
230 tx_pin.into(),
231 rx_pin.into(),
232 None,
233 None,
234 tx_buffer,
235 rx_buffer,
236 config,
237 )?;
238
239 Ok(Self { tx, rx })
240 }
241
242 /// Create a new buffered LPUART instance with RTS/CTS flow control
243 pub fn new_with_rtscts<T: Instance>(
244 inner: Peri<'a, T>,
245 tx_pin: Peri<'a, impl TxPin<T>>,
246 rx_pin: Peri<'a, impl RxPin<T>>,
247 rts_pin: Peri<'a, impl RtsPin<T>>,
248 cts_pin: Peri<'a, impl CtsPin<T>>,
249 _irq: impl interrupt::typelevel::Binding<T::Interrupt, BufferedInterruptHandler<T>> + 'a,
250 tx_buffer: &'a mut [u8],
251 rx_buffer: &'a mut [u8],
252 config: Config,
253 ) -> Result<Self> {
254 tx_pin.as_tx();
255 rx_pin.as_rx();
256 rts_pin.as_rts();
257 cts_pin.as_cts();
258
259 let (tx, rx) = Self::new_inner::<T>(
260 inner,
261 tx_pin.into(),
262 rx_pin.into(),
263 Some(rts_pin.into()),
264 Some(cts_pin.into()),
265 tx_buffer,
266 rx_buffer,
267 config,
268 )?;
269
270 Ok(Self { tx, rx })
271 }
272
273 /// Create a new buffered LPUART with only RTS flow control (RX flow control)
274 pub fn new_with_rts<T: Instance>(
275 inner: Peri<'a, T>,
276 tx_pin: Peri<'a, impl TxPin<T>>,
277 rx_pin: Peri<'a, impl RxPin<T>>,
278 rts_pin: Peri<'a, impl RtsPin<T>>,
279 _irq: impl interrupt::typelevel::Binding<T::Interrupt, BufferedInterruptHandler<T>> + 'a,
280 tx_buffer: &'a mut [u8],
281 rx_buffer: &'a mut [u8],
282 config: Config,
283 ) -> Result<Self> {
284 tx_pin.as_tx();
285 rx_pin.as_rx();
286 rts_pin.as_rts();
287
288 let (tx, rx) = Self::new_inner::<T>(
289 inner,
290 tx_pin.into(),
291 rx_pin.into(),
292 Some(rts_pin.into()),
293 None,
294 tx_buffer,
295 rx_buffer,
296 config,
297 )?;
298
299 Ok(Self { tx, rx })
300 }
301
302 /// Create a new buffered LPUART with only CTS flow control (TX flow control)
303 pub fn new_with_cts<T: Instance>(
304 inner: Peri<'a, T>,
305 tx_pin: Peri<'a, impl TxPin<T>>,
306 rx_pin: Peri<'a, impl RxPin<T>>,
307 cts_pin: Peri<'a, impl CtsPin<T>>,
308 _irq: impl interrupt::typelevel::Binding<T::Interrupt, BufferedInterruptHandler<T>> + 'a,
309 tx_buffer: &'a mut [u8],
310 rx_buffer: &'a mut [u8],
311 config: Config,
312 ) -> Result<Self> {
313 tx_pin.as_tx();
314 rx_pin.as_rx();
315 cts_pin.as_cts();
316
317 let (tx, rx) = Self::new_inner::<T>(
318 inner,
319 tx_pin.into(),
320 rx_pin.into(),
321 None,
322 Some(cts_pin.into()),
323 tx_buffer,
324 rx_buffer,
325 config,
326 )?;
327
328 Ok(Self { tx, rx })
329 }
330
331 /// Split the buffered LPUART into separate TX and RX parts
332 pub fn split(self) -> (BufferedLpuartTx<'a>, BufferedLpuartRx<'a>) {
333 (self.tx, self.rx)
334 }
335
336 /// Get mutable references to TX and RX parts
337 pub fn split_ref(&mut self) -> (&mut BufferedLpuartTx<'a>, &mut BufferedLpuartRx<'a>) {
338 (&mut self.tx, &mut self.rx)
339 }
340}
341
342// ============================================================================
343// BUFFERED TX IMPLEMENTATION
344// ============================================================================
345
impl<'a> BufferedLpuartTx<'a> {
    /// Helper for TX-only initialization (RX path left disabled).
    fn new_inner<T: Instance>(
        inner: Peri<'a, T>,
        tx_pin: Peri<'a, AnyPin>,
        cts_pin: Option<Peri<'a, AnyPin>>,
        tx_buffer: &'a mut [u8],
        config: Config,
    ) -> Result<BufferedLpuartTx<'a>> {
        let state = BufferedLpuart::init_common::<T>(
            &inner,
            Some(tx_buffer),
            None,
            &config,
            true,
            false,
            false,
            cts_pin.is_some(),
        )?;

        Ok(BufferedLpuartTx {
            info: T::info(),
            state,
            _tx_pin: tx_pin,
            _cts_pin: cts_pin,
        })
    }

    /// Create a new TX-only buffered LPUART.
    ///
    /// `_irq` is compile-time proof that `bind_interrupts!` wired the
    /// instance's IRQ to [`BufferedInterruptHandler`].
    pub fn new<T: Instance>(
        inner: Peri<'a, T>,
        tx_pin: Peri<'a, impl TxPin<T>>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, BufferedInterruptHandler<T>> + 'a,
        tx_buffer: &'a mut [u8],
        config: Config,
    ) -> Result<Self> {
        tx_pin.as_tx();

        Self::new_inner::<T>(inner, tx_pin.into(), None, tx_buffer, config)
    }

    /// Create a new TX-only buffered LPUART with CTS flow control
    pub fn new_with_cts<T: Instance>(
        inner: Peri<'a, T>,
        tx_pin: Peri<'a, impl TxPin<T>>,
        cts_pin: Peri<'a, impl CtsPin<T>>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, BufferedInterruptHandler<T>> + 'a,
        tx_buffer: &'a mut [u8],
        config: Config,
    ) -> Result<Self> {
        tx_pin.as_tx();
        cts_pin.as_cts();

        Self::new_inner::<T>(inner, tx_pin.into(), Some(cts_pin.into()), tx_buffer, config)
    }
}
401
impl<'a> BufferedLpuartTx<'a> {
    /// Write data asynchronously
    ///
    /// Pushes bytes into the TX ring buffer one at a time, waiting when the
    /// buffer is full, and enables the TX interrupt so the ISR drains the
    /// buffer into the hardware. The whole slice is always written, so the
    /// returned count equals `buf.len()`.
    pub async fn write(&mut self, buf: &[u8]) -> Result<usize> {
        let mut written = 0;

        for &byte in buf {
            // Wait for space in the buffer
            poll_fn(|cx| {
                // Register the waker before checking for space so a wake from
                // the ISR between the check and returning Pending isn't lost.
                self.state.tx_waker.register(cx.waker());

                let mut writer = unsafe { self.state.tx_buf.writer() };
                if writer.push_one(byte) {
                    // Enable TX interrupt to start transmission
                    cortex_m::interrupt::free(|_| {
                        self.info.regs.ctrl().modify(|_, w| w.tie().enabled());
                    });
                    Poll::Ready(Ok(()))
                } else {
                    Poll::Pending
                }
            })
            .await?;

            written += 1;
        }

        Ok(written)
    }

    /// Flush the TX buffer and wait for transmission to complete
    ///
    /// Completes only when the software ring buffer, the hardware TX FIFO,
    /// and the shifter (STAT.TC) are all drained.
    pub async fn flush(&mut self) -> Result<()> {
        // Wait for TX buffer to empty and transmission to complete
        poll_fn(|cx| {
            self.state.tx_waker.register(cx.waker());

            let tx_empty = self.state.tx_buf.is_empty();
            let fifo_empty = self.info.regs.water().read().txcount().bits() == 0;
            let tc_complete = self.info.regs.stat().read().tc().is_complete();

            if tx_empty && fifo_empty && tc_complete {
                Poll::Ready(Ok(()))
            } else {
                // Enable appropriate interrupt: TIE drains the ring buffer,
                // TCIE fires once the last frame leaves the shifter.
                cortex_m::interrupt::free(|_| {
                    if !tx_empty {
                        self.info.regs.ctrl().modify(|_, w| w.tie().enabled());
                    } else {
                        self.info.regs.ctrl().modify(|_, w| w.tcie().enabled());
                    }
                });
                Poll::Pending
            }
        })
        .await
    }

    /// Try to write without blocking
    ///
    /// Copies as many bytes as fit into the TX ring buffer and returns the
    /// count (possibly 0). Never waits.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize> {
        let mut writer = unsafe { self.state.tx_buf.writer() };
        let mut written = 0;

        for &byte in buf {
            if writer.push_one(byte) {
                written += 1;
            } else {
                break;
            }
        }

        if written > 0 {
            // Enable TX interrupt to start transmission
            cortex_m::interrupt::free(|_| {
                self.info.regs.ctrl().modify(|_, w| w.tie().enabled());
            });
        }

        Ok(written)
    }
}
481
482// ============================================================================
483// BUFFERED RX IMPLEMENTATION
484// ============================================================================
485
impl<'a> BufferedLpuartRx<'a> {
    /// Helper for RX-only initialization (TX path left disabled).
    fn new_inner<T: Instance>(
        inner: Peri<'a, T>,
        rx_pin: Peri<'a, AnyPin>,
        rts_pin: Option<Peri<'a, AnyPin>>,
        rx_buffer: &'a mut [u8],
        config: Config,
    ) -> Result<BufferedLpuartRx<'a>> {
        let state = BufferedLpuart::init_common::<T>(
            &inner,
            None,
            Some(rx_buffer),
            &config,
            false,
            true,
            rts_pin.is_some(),
            false,
        )?;

        Ok(BufferedLpuartRx {
            info: T::info(),
            state,
            _rx_pin: rx_pin,
            _rts_pin: rts_pin,
        })
    }

    /// Create a new RX-only buffered LPUART
    ///
    /// `_irq` is compile-time proof that `bind_interrupts!` wired the
    /// instance's IRQ to [`BufferedInterruptHandler`].
    pub fn new<T: Instance>(
        inner: Peri<'a, T>,
        rx_pin: Peri<'a, impl RxPin<T>>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, BufferedInterruptHandler<T>> + 'a,
        rx_buffer: &'a mut [u8],
        config: Config,
    ) -> Result<Self> {
        rx_pin.as_rx();

        Self::new_inner::<T>(inner, rx_pin.into(), None, rx_buffer, config)
    }

    /// Create a new RX-only buffered LPUART with RTS flow control
    pub fn new_with_rts<T: Instance>(
        inner: Peri<'a, T>,
        rx_pin: Peri<'a, impl RxPin<T>>,
        rts_pin: Peri<'a, impl RtsPin<T>>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, BufferedInterruptHandler<T>> + 'a,
        rx_buffer: &'a mut [u8],
        config: Config,
    ) -> Result<Self> {
        rx_pin.as_rx();
        rts_pin.as_rts();

        Self::new_inner::<T>(inner, rx_pin.into(), Some(rts_pin.into()), rx_buffer, config)
    }
}
542
impl<'a> BufferedLpuartRx<'a> {
    /// Read data asynchronously
    ///
    /// Waits until at least one byte is available in the RX ring buffer, then
    /// copies up to `buf.len()` bytes out and returns the count. Returns
    /// `Ok(0)` immediately for an empty `buf`.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        if buf.is_empty() {
            return Ok(0);
        }

        let mut read = 0;

        // Try to read available data
        poll_fn(|cx| {
            // Register the waker first so a wake from the ISR after the pop
            // below (but before Pending) re-polls us.
            self.state.rx_waker.register(cx.waker());

            // Disable RX interrupt while reading from buffer
            cortex_m::interrupt::free(|_| {
                self.info.regs.ctrl().modify(|_, w| w.rie().disabled());
            });

            let mut reader = unsafe { self.state.rx_buf.reader() };
            let available = reader.pop(|data| {
                let to_copy = core::cmp::min(data.len(), buf.len() - read);
                if to_copy > 0 {
                    buf[read..read + to_copy].copy_from_slice(&data[..to_copy]);
                    read += to_copy;
                }
                to_copy
            });

            // Re-enable RX interrupt
            cortex_m::interrupt::free(|_| {
                self.info.regs.ctrl().modify(|_, w| w.rie().enabled());
            });

            if read > 0 {
                Poll::Ready(Ok(read))
            } else if available == 0 {
                Poll::Pending
            } else {
                Poll::Ready(Ok(0))
            }
        })
        .await
    }

    /// Try to read without blocking
    ///
    /// Copies whatever is currently in the RX ring buffer (up to `buf.len()`)
    /// and returns the count (possibly 0). Never waits.
    pub fn try_read(&mut self, buf: &mut [u8]) -> Result<usize> {
        if buf.is_empty() {
            return Ok(0);
        }

        // Disable RX interrupt while reading from buffer
        cortex_m::interrupt::free(|_| {
            self.info.regs.ctrl().modify(|_, w| w.rie().disabled());
        });

        let mut reader = unsafe { self.state.rx_buf.reader() };
        let read = reader.pop(|data| {
            let to_copy = core::cmp::min(data.len(), buf.len());
            if to_copy > 0 {
                buf[..to_copy].copy_from_slice(&data[..to_copy]);
            }
            to_copy
        });

        // Re-enable RX interrupt
        cortex_m::interrupt::free(|_| {
            self.info.regs.ctrl().modify(|_, w| w.rie().enabled());
        });

        Ok(read)
    }
}
615
616// ============================================================================
617// INTERRUPT HANDLER
618// ============================================================================
619
/// Buffered UART interrupt handler
///
/// Zero-sized type bound to an instance's IRQ via `bind_interrupts!`; the
/// actual work happens in its `Handler::on_interrupt` implementation.
pub struct BufferedInterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}
624
impl<T: Instance> crate::interrupt::typelevel::Handler<T::Interrupt> for BufferedInterruptHandler<T> {
    // ISR for one LPUART instance: drains RX hardware into the RX ring
    // buffer, feeds the TX ring buffer into the hardware, clears error flags
    // (all W1C: write-1-to-clear), and wakes any pending readers/writers.
    unsafe fn on_interrupt() {
        let regs = T::info().regs;
        let state = T::buffered_state();

        // Check if this instance is initialized
        if !state.initialized.load(Ordering::Relaxed) {
            return;
        }

        let ctrl = regs.ctrl().read();
        let stat = regs.stat().read();
        // PARAM.RXFIFO > 0 means this instance has a hardware RX FIFO.
        let has_fifo = regs.param().read().rxfifo().bits() > 0;

        // Handle overrun error
        // NOTE(review): the early return skips TX servicing for this IRQ; TX
        // resumes on the next interrupt — confirm acceptable under sustained
        // overrun.
        if stat.or().is_overrun() {
            regs.stat().write(|w| w.or().clear_bit_by_one());
            state.rx_waker.wake();
            return;
        }

        // Clear other error flags
        if stat.pf().is_parity() {
            regs.stat().write(|w| w.pf().clear_bit_by_one());
        }
        if stat.fe().is_error() {
            regs.stat().write(|w| w.fe().clear_bit_by_one());
        }
        if stat.nf().is_noise() {
            regs.stat().write(|w| w.nf().clear_bit_by_one());
        }

        // Handle RX data
        if ctrl.rie().is_enabled() && (has_data(regs) || stat.idle().is_idle()) {
            let mut pushed_any = false;
            let mut writer = state.rx_buf.writer();

            if has_fifo {
                // Read from FIFO
                while regs.water().read().rxcount().bits() > 0 {
                    let byte = (regs.data().read().bits() & 0xFF) as u8;
                    if writer.push_one(byte) {
                        pushed_any = true;
                    } else {
                        // Buffer full, stop reading
                        break;
                    }
                }
            } else {
                // Read single byte
                if regs.stat().read().rdrf().is_rxdata() {
                    let byte = (regs.data().read().bits() & 0xFF) as u8;
                    if writer.push_one(byte) {
                        pushed_any = true;
                    }
                }
            }

            if pushed_any {
                state.rx_waker.wake();
            }

            // Clear idle flag if set
            if stat.idle().is_idle() {
                regs.stat().write(|w| w.idle().clear_bit_by_one());
            }
        }

        // Handle TX data
        if ctrl.tie().is_enabled() {
            let mut sent_any = false;
            let mut reader = state.tx_buf.reader();

            // Send data while TX buffer is ready and we have data
            while regs.stat().read().tdre().is_no_txdata() {
                if let Some(byte) = reader.pop_one() {
                    regs.data().write(|w| w.bits(u32::from(byte)));
                    sent_any = true;
                } else {
                    // No more data to send
                    break;
                }
            }

            if sent_any {
                // Ring-buffer space was freed; let a pending write() retry.
                state.tx_waker.wake();
            }

            // If buffer is empty, switch to TC interrupt or disable
            if state.tx_buf.is_empty() {
                cortex_m::interrupt::free(|_| {
                    regs.ctrl().modify(|_, w| w.tie().disabled().tcie().enabled());
                });
            }
        }

        // Handle transmission complete
        if ctrl.tcie().is_enabled() && regs.stat().read().tc().is_complete() {
            state.tx_done.store(true, Ordering::Release);
            state.tx_waker.wake();

            // Disable TC interrupt
            cortex_m::interrupt::free(|_| {
                regs.ctrl().modify(|_, w| w.tcie().disabled());
            });
        }
    }
}
733
734// ============================================================================
735// EMBEDDED-IO ASYNC TRAIT IMPLEMENTATIONS
736// ============================================================================
737
// All buffered flavors report the driver's `Error` type through embedded-io.
impl embedded_io_async::ErrorType for BufferedLpuartTx<'_> {
    type Error = Error;
}

impl embedded_io_async::ErrorType for BufferedLpuartRx<'_> {
    type Error = Error;
}

impl embedded_io_async::ErrorType for BufferedLpuart<'_> {
    type Error = Error;
}
749
// embedded-io-async adapters: each method delegates to the inherent
// `write`/`flush`/`read` implementations above.
impl embedded_io_async::Write for BufferedLpuartTx<'_> {
    async fn write(&mut self, buf: &[u8]) -> core::result::Result<usize, Self::Error> {
        self.write(buf).await
    }

    async fn flush(&mut self) -> core::result::Result<(), Self::Error> {
        self.flush().await
    }
}

impl embedded_io_async::Read for BufferedLpuartRx<'_> {
    async fn read(&mut self, buf: &mut [u8]) -> core::result::Result<usize, Self::Error> {
        self.read(buf).await
    }
}

// The combined driver forwards to its TX/RX halves.
impl embedded_io_async::Write for BufferedLpuart<'_> {
    async fn write(&mut self, buf: &[u8]) -> core::result::Result<usize, Self::Error> {
        self.tx.write(buf).await
    }

    async fn flush(&mut self) -> core::result::Result<(), Self::Error> {
        self.tx.flush().await
    }
}

impl embedded_io_async::Read for BufferedLpuart<'_> {
    async fn read(&mut self, buf: &mut [u8]) -> core::result::Result<usize, Self::Error> {
        self.rx.read(buf).await
    }
}
diff --git a/embassy-mcxa/src/lpuart/mod.rs b/embassy-mcxa/src/lpuart/mod.rs
new file mode 100644
index 000000000..5722b759c
--- /dev/null
+++ b/embassy-mcxa/src/lpuart/mod.rs
@@ -0,0 +1,1733 @@
1use core::marker::PhantomData;
2
3use embassy_hal_internal::{Peri, PeripheralType};
4use paste::paste;
5
6use crate::clocks::periph_helpers::{Div4, LpuartClockSel, LpuartConfig};
7use crate::clocks::{enable_and_reset, ClockError, Gate, PoweredClock};
8use crate::gpio::SealedPin;
9use crate::pac::lpuart0::baud::Sbns as StopBits;
10use crate::pac::lpuart0::ctrl::{Idlecfg as IdleConfig, Ilt as IdleType, Pt as Parity, M as DataBits};
11use crate::pac::lpuart0::modir::{Txctsc as TxCtsConfig, Txctssrc as TxCtsSource};
12use crate::pac::lpuart0::stat::Msbf as MsbFirst;
13use crate::{interrupt, pac, AnyPin};
14
15pub mod buffered;
16
17// ============================================================================
18// DMA INTEGRATION
19// ============================================================================
20
21use crate::dma::{
22 Channel as DmaChannelTrait, DmaChannel, DmaRequest, EnableInterrupt, RingBuffer, DMA_MAX_TRANSFER_SIZE,
23};
24
25// ============================================================================
26// MISC
27// ============================================================================
28
mod sealed {
    /// Simply seal a trait to prevent external implementations
    pub trait Sealed {}
}
33
34// ============================================================================
35// INSTANCE TRAIT
36// ============================================================================
37
/// Shared register-block type for all LPUART instances (every instance
/// uses the LPUART0 register layout).
pub type Regs = &'static crate::pac::lpuart0::RegisterBlock;

/// Implementation details each concrete LPUART peripheral must supply.
pub trait SealedInstance {
    /// Register-block handle for this instance.
    fn info() -> Info;
    /// Numeric index of the instance (0..=5).
    fn index() -> usize;
    /// Static shared state used by the buffered (interrupt-driven) driver.
    fn buffered_state() -> &'static buffered::State;
}

/// Runtime handle to one LPUART instance's registers.
pub struct Info {
    pub regs: Regs,
}

/// Trait for LPUART peripheral instances
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send + Gate<MrccPeriphConfig = LpuartConfig> {
    /// Clock-tree identifier used when enabling this instance's clock.
    const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance;
    /// Interrupt line associated with this instance.
    type Interrupt: interrupt::typelevel::Interrupt;
    /// Type-safe DMA request source for TX
    type TxDmaRequest: DmaRequest;
    /// Type-safe DMA request source for RX
    type RxDmaRequest: DmaRequest;
}
60
/// Generates `SealedInstance` and `Instance` impls for each LPUART index.
macro_rules! impl_instance {
    ($($n:expr);* $(;)?) => {
        $(
            paste!{
                impl SealedInstance for crate::peripherals::[<LPUART $n>] {
                    fn info() -> Info {
                        Info {
                            regs: unsafe { &*pac::[<Lpuart $n>]::ptr() },
                        }
                    }

                    #[inline]
                    fn index() -> usize {
                        $n
                    }

                    fn buffered_state() -> &'static buffered::State {
                        // One static state object per macro expansion, i.e.
                        // one per LPUART instance.
                        static BUFFERED_STATE: buffered::State = buffered::State::new();
                        &BUFFERED_STATE
                    }
                }

                impl Instance for crate::peripherals::[<LPUART $n>] {
                    const CLOCK_INSTANCE: crate::clocks::periph_helpers::LpuartInstance
                        = crate::clocks::periph_helpers::LpuartInstance::[<Lpuart $n>];
                    type Interrupt = crate::interrupt::typelevel::[<LPUART $n>];
                    type TxDmaRequest = crate::dma::[<Lpuart $n TxRequest>];
                    type RxDmaRequest = crate::dma::[<Lpuart $n RxRequest>];
                }
            }
        )*
    };
}

// DMA request sources are now type-safe via associated types.
// The request source numbers are defined in src/dma.rs:
// LPUART0: RX=21, TX=22 -> Lpuart0RxRequest, Lpuart0TxRequest
// LPUART1: RX=23, TX=24 -> Lpuart1RxRequest, Lpuart1TxRequest
// LPUART2: RX=25, TX=26 -> Lpuart2RxRequest, Lpuart2TxRequest
// LPUART3: RX=27, TX=28 -> Lpuart3RxRequest, Lpuart3TxRequest
// LPUART4: RX=29, TX=30 -> Lpuart4RxRequest, Lpuart4TxRequest
// LPUART5: RX=31, TX=32 -> Lpuart5RxRequest, Lpuart5TxRequest
impl_instance!(0; 1; 2; 3; 4; 5);
104
105// ============================================================================
106// INSTANCE HELPER FUNCTIONS
107// ============================================================================
108
/// Perform software reset on the LPUART peripheral
///
/// Pulses the RST bit in GLOBAL: set to assert the reset, then clear to
/// release it. Order matters — the peripheral stays in reset until the
/// second write.
pub fn perform_software_reset(regs: Regs) {
    // Software reset - set and clear RST bit (Global register)
    regs.global().write(|w| w.rst().reset());
    regs.global().write(|w| w.rst().no_effect());
}
115
/// Disable both transmitter and receiver
///
/// Clears TE and RE in CTRL in a single read-modify-write. Most other
/// configuration fields may only be changed while both are disabled.
pub fn disable_transceiver(regs: Regs) {
    regs.ctrl().modify(|_, w| w.te().disabled().re().disabled());
}
120
/// Calculate and configure baudrate settings
///
/// Finds the best OSR/SBR pair for `baudrate_bps` at `clock_freq` and
/// programs the BAUD register.
///
/// # Errors
///
/// Propagates [`Error::UnsupportedBaudrate`] from [`calculate_baudrate`]
/// when no pair lands within tolerance.
pub fn configure_baudrate(regs: Regs, baudrate_bps: u32, clock_freq: u32) -> Result<()> {
    let (osr, sbr) = calculate_baudrate(baudrate_bps, clock_freq)?;

    // Configure BAUD register
    regs.baud().modify(|_, w| unsafe {
        // Clear and set OSR — the register field encodes (ratio - 1),
        // while `calculate_baudrate` returns the actual ratio (4..=32).
        w.osr().bits(osr - 1);
        // Clear and set SBR (13-bit modulo divisor)
        w.sbr().bits(sbr);
        // Set BOTHEDGE if OSR is between 4 and 7 (required for such low
        // oversampling ratios so the receiver samples on both edges)
        if osr > 3 && osr < 8 {
            w.bothedge().enabled()
        } else {
            w.bothedge().disabled()
        }
    });

    Ok(())
}
141
/// Configure frame format (stop bits, data bits)
pub fn configure_frame_format(regs: Regs, config: &Config) {
    // Configure stop bits
    regs.baud().modify(|_, w| w.sbns().variant(config.stop_bits_count));

    // Keep 10-bit characters disabled (M10 off); data width is selected
    // via CTRL.M in `configure_control_settings`.
    regs.baud().modify(|_, w| w.m10().disabled());
}
150
/// Configure control settings (parity, data bits, idle config, pin swap)
///
/// All fields live in CTRL and are written in one read-modify-write.
pub fn configure_control_settings(regs: Regs, config: &Config) {
    regs.ctrl().modify(|_, w| {
        // Parity configuration: PE gates parity entirely; PT selects
        // even/odd only when enabled.
        let mut w = if let Some(parity) = config.parity_mode {
            w.pe().enabled().pt().variant(parity)
        } else {
            w.pe().disabled()
        };

        // Data bits configuration. With parity enabled, the parity bit
        // occupies one frame bit, so 8 data bits need the 9-bit setting.
        w = match config.data_bits_count {
            DataBits::Data8 => {
                if config.parity_mode.is_some() {
                    w.m().data9() // 8 data + 1 parity = 9 bits
                } else {
                    w.m().data8() // 8 data bits only
                }
            }
            DataBits::Data9 => w.m().data9(),
        };

        // Idle configuration
        w = w.idlecfg().variant(config.rx_idle_config);
        w = w.ilt().variant(config.rx_idle_type);

        // Swap TXD/RXD if configured
        if config.swap_txd_rxd {
            w.swap().swap()
        } else {
            w.swap().standard()
        }
    });
}
185
/// Configure FIFO settings and watermarks
///
/// Sets the TX/RX fill thresholds, enables both FIFOs, then flushes them
/// so the driver starts from an empty state.
pub fn configure_fifo(regs: Regs, config: &Config) {
    // Configure WATER register for FIFO watermarks
    regs.water().write(|w| unsafe {
        w.rxwater()
            .bits(config.rx_fifo_watermark)
            .txwater()
            .bits(config.tx_fifo_watermark)
    });

    // Enable TX/RX FIFOs
    regs.fifo().modify(|_, w| w.txfe().enabled().rxfe().enabled());

    // Flush FIFOs (discards any stale contents)
    regs.fifo()
        .modify(|_, w| w.txflush().txfifo_rst().rxflush().rxfifo_rst());
}
203
/// Clear all status flags
///
/// Writes STAT back to its reset value, which clears the
/// write-1-to-clear error/event flags in one shot.
pub fn clear_all_status_flags(regs: Regs) {
    regs.stat().reset();
}
208
/// Configure hardware flow control if enabled
///
/// MODIR is only touched when at least one direction of flow control is
/// requested; otherwise the register is left at its current value.
pub fn configure_flow_control(regs: Regs, enable_tx_cts: bool, enable_rx_rts: bool, config: &Config) {
    if enable_rx_rts || enable_tx_cts {
        regs.modir().modify(|_, w| {
            let mut w = w;

            // Configure TX CTS source and sampling point (written even if
            // only RTS is enabled; harmless while TXCTSE is off)
            w = w.txctsc().variant(config.tx_cts_config);
            w = w.txctssrc().variant(config.tx_cts_source);

            if enable_rx_rts {
                w = w.rxrtse().enabled();
            } else {
                w = w.rxrtse().disabled();
            }

            if enable_tx_cts {
                w = w.txctse().enabled();
            } else {
                w = w.txctse().disabled();
            }

            w
        });
    }
}
235
/// Configure bit order (MSB first or LSB first)
pub fn configure_bit_order(regs: Regs, msb_first: MsbFirst) {
    regs.stat().modify(|_, w| w.msbf().variant(msb_first));
}
240
/// Enable transmitter and/or receiver based on configuration
///
/// Parameter order is `(regs, enable_tx, enable_rx)` — TE before RE.
/// This only ever sets bits; use [`disable_transceiver`] to clear them.
pub fn enable_transceiver(regs: Regs, enable_tx: bool, enable_rx: bool) {
    regs.ctrl().modify(|_, w| {
        let mut w = w;
        if enable_tx {
            w = w.te().enabled();
        }
        if enable_rx {
            w = w.re().enabled();
        }
        w
    });
}
254
255pub fn calculate_baudrate(baudrate: u32, src_clock_hz: u32) -> Result<(u8, u16)> {
256 let mut baud_diff = baudrate;
257 let mut osr = 0u8;
258 let mut sbr = 0u16;
259
260 // Try OSR values from 4 to 32
261 for osr_temp in 4u8..=32u8 {
262 // Calculate SBR: (srcClock_Hz * 2 / (baudRate * osr) + 1) / 2
263 let sbr_calc = ((src_clock_hz * 2) / (baudrate * osr_temp as u32)).div_ceil(2);
264
265 let sbr_temp = if sbr_calc == 0 {
266 1
267 } else if sbr_calc > 0x1FFF {
268 0x1FFF
269 } else {
270 sbr_calc as u16
271 };
272
273 // Calculate actual baud rate
274 let calculated_baud = src_clock_hz / (osr_temp as u32 * sbr_temp as u32);
275
276 let temp_diff = calculated_baud.abs_diff(baudrate);
277
278 if temp_diff <= baud_diff {
279 baud_diff = temp_diff;
280 osr = osr_temp;
281 sbr = sbr_temp;
282 }
283 }
284
285 // Check if baud rate difference is within 3%
286 if baud_diff > (baudrate / 100) * 3 {
287 return Err(Error::UnsupportedBaudrate);
288 }
289
290 Ok((osr, sbr))
291}
292
/// Wait for all transmit operations to complete
///
/// Busy-waits in two stages: first until the TX FIFO count drains to
/// zero, then until the TC flag reports the last frame has fully shifted
/// out of the transmitter.
pub fn wait_for_tx_complete(regs: Regs) {
    // Wait for TX FIFO to empty
    while regs.water().read().txcount().bits() != 0 {
        // Wait for TX FIFO to drain
    }

    // Wait for last character to shift out (TC = Transmission Complete)
    while regs.stat().read().tc().is_active() {
        // Wait for transmission to complete
    }
}
305
/// Check STAT for receive errors, clearing (write-1-to-clear) any that
/// are set.
///
/// Overrun short-circuits: when OR is set the hardware suppresses the
/// other error flags, so it is reported alone. Otherwise parity, framing
/// and noise are each checked and cleared; if several are set, the LAST
/// one checked (noise) is the error returned.
pub fn check_and_clear_rx_errors(regs: Regs) -> Result<()> {
    let stat = regs.stat().read();
    let mut status = Ok(());

    // Check for overrun first - other error flags are prevented when OR is set
    if stat.or().is_overrun() {
        regs.stat().write(|w| w.or().clear_bit_by_one());

        return Err(Error::Overrun);
    }

    if stat.pf().is_parity() {
        regs.stat().write(|w| w.pf().clear_bit_by_one());
        status = Err(Error::Parity);
    }

    if stat.fe().is_error() {
        regs.stat().write(|w| w.fe().clear_bit_by_one());
        status = Err(Error::Framing);
    }

    if stat.nf().is_noise() {
        regs.stat().write(|w| w.nf().clear_bit_by_one());
        status = Err(Error::Noise);
    }

    status
}
334
/// Returns `true` if at least one received byte is available to read.
///
/// PARAM.RXFIFO describes the hardware FIFO depth; when a FIFO exists the
/// fill level (WATER.RXCOUNT) is authoritative, otherwise the RDRF flag
/// in STAT is used.
pub fn has_data(regs: Regs) -> bool {
    if regs.param().read().rxfifo().bits() > 0 {
        // FIFO is available - check RXCOUNT in WATER register
        regs.water().read().rxcount().bits() > 0
    } else {
        // No FIFO - check RDRF flag in STAT register
        regs.stat().read().rdrf().is_rxdata()
    }
}
344
// ============================================================================
// PIN TRAITS FOR LPUART FUNCTIONALITY
// ============================================================================

// Blanket-seal every HAL pin so only crate pins can implement the traits below.
impl<T: SealedPin> sealed::Sealed for T {}

/// io configuration trait for Lpuart Tx configuration
pub trait TxPin<T: Instance>: Into<AnyPin> + sealed::Sealed + PeripheralType {
    /// convert the pin to appropriate function for Lpuart Tx usage
    fn as_tx(&self);
}

/// io configuration trait for Lpuart Rx configuration
pub trait RxPin<T: Instance>: Into<AnyPin> + sealed::Sealed + PeripheralType {
    /// convert the pin to appropriate function for Lpuart Rx usage
    fn as_rx(&self);
}

/// io configuration trait for Lpuart Cts
pub trait CtsPin<T: Instance>: Into<AnyPin> + sealed::Sealed + PeripheralType {
    /// convert the pin to appropriate function for Lpuart Cts usage
    fn as_cts(&self);
}

/// io configuration trait for Lpuart Rts
pub trait RtsPin<T: Instance>: Into<AnyPin> + sealed::Sealed + PeripheralType {
    /// convert the pin to appropriate function for Lpuart Rts usage
    fn as_rts(&self);
}
374
// Each macro below maps one GPIO pin to one LPUART signal: it programs
// the pull, slew rate, drive strength, mux alternate function, and input
// buffer enable for that pin.
macro_rules! impl_tx_pin {
    ($inst:ident, $pin:ident, $alt:ident) => {
        impl TxPin<crate::peripherals::$inst> for crate::peripherals::$pin {
            fn as_tx(&self) {
                // TODO: Check these are right
                self.set_pull(crate::gpio::Pull::Up);
                self.set_slew_rate(crate::gpio::SlewRate::Fast.into());
                self.set_drive_strength(crate::gpio::DriveStrength::Double.into());
                self.set_function(crate::pac::port0::pcr0::Mux::$alt);
                self.set_enable_input_buffer();
            }
        }
    };
}

macro_rules! impl_rx_pin {
    ($inst:ident, $pin:ident, $alt:ident) => {
        impl RxPin<crate::peripherals::$inst> for crate::peripherals::$pin {
            fn as_rx(&self) {
                // TODO: Check these are right
                self.set_pull(crate::gpio::Pull::Up);
                self.set_slew_rate(crate::gpio::SlewRate::Fast.into());
                self.set_drive_strength(crate::gpio::DriveStrength::Double.into());
                self.set_function(crate::pac::port0::pcr0::Mux::$alt);
                self.set_enable_input_buffer();
            }
        }
    };
}

// TODO: Macro and impls for CTS/RTS pins
// NOTE(review): as_cts/as_rts are still `todo!()` — constructing a driver
// with flow-control pins will panic until these are implemented.
macro_rules! impl_cts_pin {
    ($inst:ident, $pin:ident, $alt:ident) => {
        impl CtsPin<crate::peripherals::$inst> for crate::peripherals::$pin {
            fn as_cts(&self) {
                todo!()
            }
        }
    };
}

macro_rules! impl_rts_pin {
    ($inst:ident, $pin:ident, $alt:ident) => {
        impl RtsPin<crate::peripherals::$inst> for crate::peripherals::$pin {
            fn as_rts(&self) {
                todo!()
            }
        }
    };
}
425
// Pin alternate-function mappings per instance. Each line assigns one
// package pin to one LPUART signal with its mux alternate number.

// LPUART 0
impl_tx_pin!(LPUART0, P0_3, Mux2);
impl_tx_pin!(LPUART0, P0_21, Mux3);
impl_tx_pin!(LPUART0, P2_1, Mux2);

impl_rx_pin!(LPUART0, P0_2, Mux2);
impl_rx_pin!(LPUART0, P0_20, Mux3);
impl_rx_pin!(LPUART0, P2_0, Mux2);

impl_cts_pin!(LPUART0, P0_1, Mux2);
impl_cts_pin!(LPUART0, P0_23, Mux3);
impl_cts_pin!(LPUART0, P2_3, Mux2);

impl_rts_pin!(LPUART0, P0_0, Mux2);
impl_rts_pin!(LPUART0, P0_22, Mux3);
impl_rts_pin!(LPUART0, P2_2, Mux2);

// LPUART 1
impl_tx_pin!(LPUART1, P1_9, Mux2);
impl_tx_pin!(LPUART1, P2_13, Mux3);
impl_tx_pin!(LPUART1, P3_9, Mux3);
impl_tx_pin!(LPUART1, P3_21, Mux3);

impl_rx_pin!(LPUART1, P1_8, Mux2);
impl_rx_pin!(LPUART1, P2_12, Mux3);
impl_rx_pin!(LPUART1, P3_8, Mux3);
impl_rx_pin!(LPUART1, P3_20, Mux3);

impl_cts_pin!(LPUART1, P1_11, Mux2);
impl_cts_pin!(LPUART1, P2_17, Mux3);
impl_cts_pin!(LPUART1, P3_11, Mux3);
impl_cts_pin!(LPUART1, P3_23, Mux3);

impl_rts_pin!(LPUART1, P1_10, Mux2);
impl_rts_pin!(LPUART1, P2_15, Mux3);
impl_rts_pin!(LPUART1, P2_16, Mux3);
impl_rts_pin!(LPUART1, P3_10, Mux3);

// LPUART 2
impl_tx_pin!(LPUART2, P1_5, Mux3);
impl_tx_pin!(LPUART2, P1_13, Mux3);
impl_tx_pin!(LPUART2, P2_2, Mux3);
impl_tx_pin!(LPUART2, P2_10, Mux3);
impl_tx_pin!(LPUART2, P3_15, Mux2);

impl_rx_pin!(LPUART2, P1_4, Mux3);
impl_rx_pin!(LPUART2, P1_12, Mux3);
impl_rx_pin!(LPUART2, P2_3, Mux3);
impl_rx_pin!(LPUART2, P2_11, Mux3);
impl_rx_pin!(LPUART2, P3_14, Mux2);

impl_cts_pin!(LPUART2, P1_7, Mux3);
impl_cts_pin!(LPUART2, P1_15, Mux3);
impl_cts_pin!(LPUART2, P2_4, Mux3);
impl_cts_pin!(LPUART2, P3_13, Mux2);

impl_rts_pin!(LPUART2, P1_6, Mux3);
impl_rts_pin!(LPUART2, P1_14, Mux3);
impl_rts_pin!(LPUART2, P2_5, Mux3);
impl_rts_pin!(LPUART2, P3_12, Mux2);

// LPUART 3
impl_tx_pin!(LPUART3, P3_1, Mux3);
impl_tx_pin!(LPUART3, P3_12, Mux3);
impl_tx_pin!(LPUART3, P4_5, Mux3);

impl_rx_pin!(LPUART3, P3_0, Mux3);
impl_rx_pin!(LPUART3, P3_13, Mux3);
impl_rx_pin!(LPUART3, P4_2, Mux3);

impl_cts_pin!(LPUART3, P3_7, Mux3);
impl_cts_pin!(LPUART3, P3_14, Mux3);
impl_cts_pin!(LPUART3, P4_6, Mux3);

impl_rts_pin!(LPUART3, P3_6, Mux3);
impl_rts_pin!(LPUART3, P3_15, Mux3);
impl_rts_pin!(LPUART3, P4_7, Mux3);

// LPUART 4
impl_tx_pin!(LPUART4, P2_7, Mux3);
impl_tx_pin!(LPUART4, P3_19, Mux2);
impl_tx_pin!(LPUART4, P3_27, Mux3);
impl_tx_pin!(LPUART4, P4_3, Mux3);

impl_rx_pin!(LPUART4, P2_6, Mux3);
impl_rx_pin!(LPUART4, P3_18, Mux2);
impl_rx_pin!(LPUART4, P3_28, Mux3);
impl_rx_pin!(LPUART4, P4_4, Mux3);

impl_cts_pin!(LPUART4, P2_0, Mux3);
impl_cts_pin!(LPUART4, P3_17, Mux2);
impl_cts_pin!(LPUART4, P3_31, Mux3);

impl_rts_pin!(LPUART4, P2_1, Mux3);
impl_rts_pin!(LPUART4, P3_16, Mux2);
impl_rts_pin!(LPUART4, P3_30, Mux3);

// LPUART 5
impl_tx_pin!(LPUART5, P1_10, Mux8);
impl_tx_pin!(LPUART5, P1_17, Mux8);

impl_rx_pin!(LPUART5, P1_11, Mux8);
impl_rx_pin!(LPUART5, P1_16, Mux8);

impl_cts_pin!(LPUART5, P1_12, Mux8);
impl_cts_pin!(LPUART5, P1_19, Mux8);

impl_rts_pin!(LPUART5, P1_13, Mux8);
impl_rts_pin!(LPUART5, P1_18, Mux8);
535
536// ============================================================================
537// ERROR TYPES AND RESULTS
538// ============================================================================
539
/// LPUART error types
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// Read error
    Read,
    /// Buffer overflow (receiver overrun — incoming data lost)
    Overrun,
    /// Noise error
    Noise,
    /// Framing error
    Framing,
    /// Parity error
    Parity,
    /// Failure
    Fail,
    /// Invalid argument
    InvalidArgument,
    /// Lpuart baud rate cannot be supported with the given clock
    UnsupportedBaudrate,
    /// RX FIFO Empty (non-blocking read with no data available)
    RxFifoEmpty,
    /// TX FIFO Full (non-blocking write with no space available)
    TxFifoFull,
    /// TX Busy
    TxBusy,
    /// Clock Error
    ClockSetup(ClockError),
}

/// A specialized Result type for LPUART operations
pub type Result<T> = core::result::Result<T, Error>;
572
573// ============================================================================
574// CONFIGURATION STRUCTURES
575// ============================================================================
576
/// Lpuart config
///
/// Passed to every constructor; see [`Config::default`] for the baseline
/// 115200-8-N-1 configuration.
#[derive(Debug, Clone, Copy)]
pub struct Config {
    /// Power state required for this peripheral
    pub power: PoweredClock,
    /// Clock source
    pub source: LpuartClockSel,
    /// Clock divisor
    pub div: Div4,
    /// Baud rate in bits per second
    pub baudrate_bps: u32,
    /// Parity configuration (`None` disables parity entirely)
    pub parity_mode: Option<Parity>,
    /// Number of data bits
    pub data_bits_count: DataBits,
    /// MSB First or LSB First configuration
    pub msb_first: MsbFirst,
    /// Number of stop bits
    pub stop_bits_count: StopBits,
    /// TX FIFO watermark
    pub tx_fifo_watermark: u8,
    /// RX FIFO watermark
    pub rx_fifo_watermark: u8,
    /// TX CTS source
    pub tx_cts_source: TxCtsSource,
    /// TX CTS configure
    pub tx_cts_config: TxCtsConfig,
    /// RX IDLE type
    pub rx_idle_type: IdleType,
    /// RX IDLE configuration
    pub rx_idle_config: IdleConfig,
    /// Swap TXD and RXD pins
    pub swap_txd_rxd: bool,
}
611
612impl Default for Config {
613 fn default() -> Self {
614 Self {
615 baudrate_bps: 115_200u32,
616 parity_mode: None,
617 data_bits_count: DataBits::Data8,
618 msb_first: MsbFirst::LsbFirst,
619 stop_bits_count: StopBits::One,
620 tx_fifo_watermark: 0,
621 rx_fifo_watermark: 1,
622 tx_cts_source: TxCtsSource::Cts,
623 tx_cts_config: TxCtsConfig::Start,
624 rx_idle_type: IdleType::FromStart,
625 rx_idle_config: IdleConfig::Idle1,
626 swap_txd_rxd: false,
627 power: PoweredClock::NormalEnabledDeepSleepDisabled,
628 source: LpuartClockSel::FroLfDiv,
629 div: Div4::no_div(),
630 }
631 }
632}
633
/// LPUART status flags
///
/// A decoded snapshot of the STAT register's event and error flags.
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Status {
    /// Transmit data register empty
    pub tx_empty: bool,
    /// Transmission complete
    pub tx_complete: bool,
    /// Receive data register full
    pub rx_full: bool,
    /// Idle line detected
    pub idle: bool,
    /// Receiver overrun
    pub overrun: bool,
    /// Noise error
    pub noise: bool,
    /// Framing error
    pub framing: bool,
    /// Parity error
    pub parity: bool,
}
655
656// ============================================================================
657// MODE TRAITS (BLOCKING/ASYNC)
658// ============================================================================
659
/// Driver mode marker trait (sealed; implemented only by [`Blocking`]
/// and [`Async`]).
#[allow(private_bounds)]
pub trait Mode: sealed::Sealed {}

/// Blocking mode.
pub struct Blocking;
impl sealed::Sealed for Blocking {}
impl Mode for Blocking {}

/// Async mode.
pub struct Async;
impl sealed::Sealed for Async {}
impl Mode for Async {}
673
674// ============================================================================
675// CORE DRIVER STRUCTURES
676// ============================================================================
677
/// Lpuart driver.
///
/// Owns both halves; use [`Lpuart::split`] to use TX and RX independently.
pub struct Lpuart<'a, M: Mode> {
    info: Info,
    tx: LpuartTx<'a, M>,
    rx: LpuartRx<'a, M>,
}

/// Lpuart TX driver.
pub struct LpuartTx<'a, M: Mode> {
    info: Info,
    // Pins are held only for ownership — they keep the pin configured for
    // the driver's lifetime.
    _tx_pin: Peri<'a, AnyPin>,
    _cts_pin: Option<Peri<'a, AnyPin>>,
    mode: PhantomData<(&'a (), M)>,
}

/// Lpuart Rx driver.
pub struct LpuartRx<'a, M: Mode> {
    info: Info,
    _rx_pin: Peri<'a, AnyPin>,
    _rts_pin: Option<Peri<'a, AnyPin>>,
    mode: PhantomData<(&'a (), M)>,
}

/// Lpuart TX driver with DMA support.
pub struct LpuartTxDma<'a, T: Instance, C: DmaChannelTrait> {
    info: Info,
    _tx_pin: Peri<'a, AnyPin>,
    tx_dma: DmaChannel<C>,
    _instance: core::marker::PhantomData<T>,
}

/// Lpuart RX driver with DMA support.
pub struct LpuartRxDma<'a, T: Instance, C: DmaChannelTrait> {
    info: Info,
    _rx_pin: Peri<'a, AnyPin>,
    rx_dma: DmaChannel<C>,
    _instance: core::marker::PhantomData<T>,
}

/// Lpuart driver with DMA support for both TX and RX.
pub struct LpuartDma<'a, T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> {
    tx: LpuartTxDma<'a, T, TxC>,
    rx: LpuartRxDma<'a, T, RxC>,
}
722
723// ============================================================================
724// LPUART CORE IMPLEMENTATION
725// ============================================================================
726
727impl<'a, M: Mode> Lpuart<'a, M> {
728 fn init<T: Instance>(
729 enable_tx: bool,
730 enable_rx: bool,
731 enable_tx_cts: bool,
732 enable_rx_rts: bool,
733 config: Config,
734 ) -> Result<()> {
735 let regs = T::info().regs;
736
737 // Enable clocks
738 let conf = LpuartConfig {
739 power: config.power,
740 source: config.source,
741 div: config.div,
742 instance: T::CLOCK_INSTANCE,
743 };
744 let clock_freq = unsafe { enable_and_reset::<T>(&conf).map_err(Error::ClockSetup)? };
745
746 // Perform initialization sequence
747 perform_software_reset(regs);
748 disable_transceiver(regs);
749 configure_baudrate(regs, config.baudrate_bps, clock_freq)?;
750 configure_frame_format(regs, &config);
751 configure_control_settings(regs, &config);
752 configure_fifo(regs, &config);
753 clear_all_status_flags(regs);
754 configure_flow_control(regs, enable_tx_cts, enable_rx_rts, &config);
755 configure_bit_order(regs, config.msb_first);
756 enable_transceiver(regs, enable_rx, enable_tx);
757
758 Ok(())
759 }
760
761 /// Deinitialize the LPUART peripheral
762 pub fn deinit(&self) -> Result<()> {
763 let regs = self.info.regs;
764
765 // Wait for TX operations to complete
766 wait_for_tx_complete(regs);
767
768 // Clear all status flags
769 clear_all_status_flags(regs);
770
771 // Disable the module - clear all CTRL register bits
772 regs.ctrl().reset();
773
774 Ok(())
775 }
776
777 /// Split the Lpuart into a transmitter and receiver
778 pub fn split(self) -> (LpuartTx<'a, M>, LpuartRx<'a, M>) {
779 (self.tx, self.rx)
780 }
781
782 /// Split the Lpuart into a transmitter and receiver by mutable reference
783 pub fn split_ref(&mut self) -> (&mut LpuartTx<'a, M>, &mut LpuartRx<'a, M>) {
784 (&mut self.tx, &mut self.rx)
785 }
786}
787
788// ============================================================================
789// BLOCKING MODE IMPLEMENTATIONS
790// ============================================================================
791
impl<'a> Lpuart<'a, Blocking> {
    /// Create a new blocking LPUART instance with RX/TX pins.
    ///
    /// Configures both pins for their LPUART alternate functions, then
    /// initializes the peripheral with TX and RX enabled and no flow
    /// control.
    pub fn new_blocking<T: Instance>(
        _inner: Peri<'a, T>,
        tx_pin: Peri<'a, impl TxPin<T>>,
        rx_pin: Peri<'a, impl RxPin<T>>,
        config: Config,
    ) -> Result<Self> {
        // Configure the pins for LPUART usage
        tx_pin.as_tx();
        rx_pin.as_rx();

        // Initialize the peripheral
        Self::init::<T>(true, true, false, false, config)?;

        Ok(Self {
            info: T::info(),
            tx: LpuartTx::new_inner(T::info(), tx_pin.into(), None),
            rx: LpuartRx::new_inner(T::info(), rx_pin.into(), None),
        })
    }

    /// Create a new blocking LPUART instance with RX, TX and RTS/CTS flow control pins
    ///
    /// NOTE(review): the CTS/RTS pin impls are currently `todo!()`, so
    /// this constructor panics until they are implemented.
    pub fn new_blocking_with_rtscts<T: Instance>(
        _inner: Peri<'a, T>,
        tx_pin: Peri<'a, impl TxPin<T>>,
        rx_pin: Peri<'a, impl RxPin<T>>,
        cts_pin: Peri<'a, impl CtsPin<T>>,
        rts_pin: Peri<'a, impl RtsPin<T>>,
        config: Config,
    ) -> Result<Self> {
        // Configure the pins for LPUART usage
        rx_pin.as_rx();
        tx_pin.as_tx();
        rts_pin.as_rts();
        cts_pin.as_cts();

        // Initialize the peripheral with flow control
        Self::init::<T>(true, true, true, true, config)?;

        Ok(Self {
            info: T::info(),
            rx: LpuartRx::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into())),
            tx: LpuartTx::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into())),
        })
    }
}
839
840// ----------------------------------------------------------------------------
841// Blocking TX Implementation
842// ----------------------------------------------------------------------------
843
impl<'a, M: Mode> LpuartTx<'a, M> {
    /// Build a TX half, taking ownership of its pins so they stay
    /// configured for the driver's lifetime.
    fn new_inner(info: Info, tx_pin: Peri<'a, AnyPin>, cts_pin: Option<Peri<'a, AnyPin>>) -> Self {
        Self {
            info,
            _tx_pin: tx_pin,
            _cts_pin: cts_pin,
            mode: PhantomData,
        }
    }
}
854
855impl<'a> LpuartTx<'a, Blocking> {
856 /// Create a new blocking LPUART transmitter instance
857 pub fn new_blocking<T: Instance>(
858 _inner: Peri<'a, T>,
859 tx_pin: Peri<'a, impl TxPin<T>>,
860 config: Config,
861 ) -> Result<Self> {
862 // Configure the pins for LPUART usage
863 tx_pin.as_tx();
864
865 // Initialize the peripheral
866 Lpuart::<Blocking>::init::<T>(true, false, false, false, config)?;
867
868 Ok(Self::new_inner(T::info(), tx_pin.into(), None))
869 }
870
871 /// Create a new blocking LPUART transmitter instance with CTS flow control
872 pub fn new_blocking_with_cts<T: Instance>(
873 _inner: Peri<'a, T>,
874 tx_pin: Peri<'a, impl TxPin<T>>,
875 cts_pin: Peri<'a, impl CtsPin<T>>,
876 config: Config,
877 ) -> Result<Self> {
878 tx_pin.as_tx();
879 cts_pin.as_cts();
880
881 Lpuart::<Blocking>::init::<T>(true, false, true, false, config)?;
882
883 Ok(Self::new_inner(T::info(), tx_pin.into(), Some(cts_pin.into())))
884 }
885
886 fn write_byte_internal(&mut self, byte: u8) -> Result<()> {
887 self.info.regs.data().modify(|_, w| unsafe { w.bits(u32::from(byte)) });
888
889 Ok(())
890 }
891
892 fn blocking_write_byte(&mut self, byte: u8) -> Result<()> {
893 while self.info.regs.stat().read().tdre().is_txdata() {}
894 self.write_byte_internal(byte)
895 }
896
897 fn write_byte(&mut self, byte: u8) -> Result<()> {
898 if self.info.regs.stat().read().tdre().is_txdata() {
899 Err(Error::TxFifoFull)
900 } else {
901 self.write_byte_internal(byte)
902 }
903 }
904
905 /// Write data to LPUART TX blocking execution until all data is sent.
906 pub fn blocking_write(&mut self, buf: &[u8]) -> Result<()> {
907 for x in buf {
908 self.blocking_write_byte(*x)?;
909 }
910
911 Ok(())
912 }
913
914 pub fn write_str_blocking(&mut self, buf: &str) {
915 let _ = self.blocking_write(buf.as_bytes());
916 }
917
918 /// Write data to LPUART TX without blocking.
919 pub fn write(&mut self, buf: &[u8]) -> Result<()> {
920 for x in buf {
921 self.write_byte(*x)?;
922 }
923
924 Ok(())
925 }
926
927 /// Flush LPUART TX blocking execution until all data has been transmitted.
928 pub fn blocking_flush(&mut self) -> Result<()> {
929 while self.info.regs.water().read().txcount().bits() != 0 {
930 // Wait for TX FIFO to drain
931 }
932
933 // Wait for last character to shift out
934 while self.info.regs.stat().read().tc().is_active() {
935 // Wait for transmission to complete
936 }
937
938 Ok(())
939 }
940
941 /// Flush LPUART TX.
942 pub fn flush(&mut self) -> Result<()> {
943 // Check if TX FIFO is empty
944 if self.info.regs.water().read().txcount().bits() != 0 {
945 return Err(Error::TxBusy);
946 }
947
948 // Check if transmission is complete
949 if self.info.regs.stat().read().tc().is_active() {
950 return Err(Error::TxBusy);
951 }
952
953 Ok(())
954 }
955}
956
957// ----------------------------------------------------------------------------
958// Blocking RX Implementation
959// ----------------------------------------------------------------------------
960
impl<'a, M: Mode> LpuartRx<'a, M> {
    /// Build an RX half, taking ownership of its pins so they stay
    /// configured for the driver's lifetime.
    fn new_inner(info: Info, rx_pin: Peri<'a, AnyPin>, rts_pin: Option<Peri<'a, AnyPin>>) -> Self {
        Self {
            info,
            _rx_pin: rx_pin,
            _rts_pin: rts_pin,
            mode: PhantomData,
        }
    }
}
971
impl<'a> LpuartRx<'a, Blocking> {
    /// Create a new blocking LPUART Receiver instance
    pub fn new_blocking<T: Instance>(
        _inner: Peri<'a, T>,
        rx_pin: Peri<'a, impl RxPin<T>>,
        config: Config,
    ) -> Result<Self> {
        rx_pin.as_rx();

        Lpuart::<Blocking>::init::<T>(false, true, false, false, config)?;

        Ok(Self::new_inner(T::info(), rx_pin.into(), None))
    }

    /// Create a new blocking LPUART Receiver instance with RTS flow control
    pub fn new_blocking_with_rts<T: Instance>(
        _inner: Peri<'a, T>,
        rx_pin: Peri<'a, impl RxPin<T>>,
        rts_pin: Peri<'a, impl RtsPin<T>>,
        config: Config,
    ) -> Result<Self> {
        rx_pin.as_rx();
        rts_pin.as_rts();

        Lpuart::<Blocking>::init::<T>(false, true, false, true, config)?;

        Ok(Self::new_inner(T::info(), rx_pin.into(), Some(rts_pin.into())))
    }

    /// Pop one byte from the DATA register (low 8 bits) without checking
    /// availability. Reading DATA pops the RX FIFO.
    fn read_byte_internal(&mut self) -> Result<u8> {
        let data = self.info.regs.data().read();

        Ok((data.bits() & 0xFF) as u8)
    }

    /// Read one byte, failing with `RxFifoEmpty` when no data is waiting.
    /// RX error flags are checked (and cleared) first.
    fn read_byte(&mut self) -> Result<u8> {
        check_and_clear_rx_errors(self.info.regs)?;

        if !has_data(self.info.regs) {
            return Err(Error::RxFifoEmpty);
        }

        self.read_byte_internal()
    }

    /// Read one byte, spinning until data arrives or an RX error is seen.
    fn blocking_read_byte(&mut self) -> Result<u8> {
        loop {
            if has_data(self.info.regs) {
                return self.read_byte_internal();
            }

            check_and_clear_rx_errors(self.info.regs)?;
        }
    }

    /// Read data from LPUART RX without blocking.
    ///
    /// Fails with [`Error::RxFifoEmpty`] as soon as the hardware runs out
    /// of data; bytes already copied into `buf` are kept.
    pub fn read(&mut self, buf: &mut [u8]) -> Result<()> {
        for byte in buf.iter_mut() {
            *byte = self.read_byte()?;
        }
        Ok(())
    }

    /// Read data from LPUART RX blocking execution until the buffer is filled.
    pub fn blocking_read(&mut self, buf: &mut [u8]) -> Result<()> {
        for byte in buf.iter_mut() {
            *byte = self.blocking_read_byte()?;
        }
        Ok(())
    }
}
1043
// Convenience delegation: forward the TX/RX half APIs on the combined driver.
impl<'a> Lpuart<'a, Blocking> {
    /// Read data from LPUART RX blocking execution until the buffer is filled
    pub fn blocking_read(&mut self, buf: &mut [u8]) -> Result<()> {
        self.rx.blocking_read(buf)
    }

    /// Read data from LPUART RX without blocking
    pub fn read(&mut self, buf: &mut [u8]) -> Result<()> {
        self.rx.read(buf)
    }

    /// Write data to LPUART TX blocking execution until all data is sent
    pub fn blocking_write(&mut self, buf: &[u8]) -> Result<()> {
        self.tx.blocking_write(buf)
    }

    /// Write a single byte without blocking; fails with `TxFifoFull`.
    pub fn write_byte(&mut self, byte: u8) -> Result<()> {
        self.tx.write_byte(byte)
    }

    /// Read a single byte, spinning until one arrives.
    ///
    /// NOTE: RX errors are swallowed here — the loop simply retries on
    /// any `Err` from the non-blocking read.
    pub fn read_byte_blocking(&mut self) -> u8 {
        loop {
            if let Ok(b) = self.rx.read_byte() {
                return b;
            }
        }
    }

    /// Write a string, blocking until it is queued; errors are ignored.
    pub fn write_str_blocking(&mut self, buf: &str) {
        self.tx.write_str_blocking(buf);
    }

    /// Write data to LPUART TX without blocking
    pub fn write(&mut self, buf: &[u8]) -> Result<()> {
        self.tx.write(buf)
    }

    /// Flush LPUART TX blocking execution until all data has been transmitted
    pub fn blocking_flush(&mut self) -> Result<()> {
        self.tx.blocking_flush()
    }

    /// Flush LPUART TX without blocking
    pub fn flush(&mut self) -> Result<()> {
        self.tx.flush()
    }
}
1091
1092// ============================================================================
1093// ASYNC MODE IMPLEMENTATIONS (DMA-based)
1094// ============================================================================
1095
/// Guard struct that ensures DMA is stopped if the async future is cancelled.
///
/// This implements the RAII pattern: if the future is dropped before completion
/// (e.g., due to a timeout), the DMA transfer is automatically aborted to prevent
/// use-after-free when the buffer goes out of scope.
struct TxDmaGuard<'a, C: DmaChannelTrait> {
    /// Channel carrying the in-flight TX transfer.
    dma: &'a DmaChannel<C>,
    /// LPUART register block, used to gate the TX DMA request (TDMAE).
    regs: Regs,
}
1105
impl<'a, C: DmaChannelTrait> TxDmaGuard<'a, C> {
    /// Arm a guard over an in-flight TX DMA transfer.
    fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self {
        Self { dma, regs }
    }

    /// Complete the transfer normally (don't abort on drop).
    fn complete(self) {
        // Cleanup: stop the UART from raising further TX DMA requests.
        self.regs.baud().modify(|_, w| w.tdmae().disabled());
        // SAFETY: the transfer has finished, so quiescing the channel here
        // cannot race with an active DMA access.
        unsafe {
            self.dma.disable_request();
            self.dma.clear_done();
        }
        // Don't run drop since we've cleaned up
        core::mem::forget(self);
    }
}
1123
impl<C: DmaChannelTrait> Drop for TxDmaGuard<'_, C> {
    /// Cancellation path: runs when the owning future is dropped before
    /// completion (e.g. a timeout), aborting the DMA so the source buffer
    /// can safely go out of scope.
    fn drop(&mut self) {
        // Abort the DMA transfer if still running
        unsafe {
            self.dma.disable_request();
            self.dma.clear_done();
            self.dma.clear_interrupt();
        }
        // Disable UART TX DMA request
        self.regs.baud().modify(|_, w| w.tdmae().disabled());
    }
}
1136
/// Guard struct for RX DMA transfers.
struct RxDmaGuard<'a, C: DmaChannelTrait> {
    /// Channel carrying the in-flight RX transfer.
    dma: &'a DmaChannel<C>,
    /// LPUART register block, used to gate the RX DMA request (RDMAE).
    regs: Regs,
}
1142
impl<'a, C: DmaChannelTrait> RxDmaGuard<'a, C> {
    /// Arm a guard over an in-flight RX DMA transfer.
    fn new(dma: &'a DmaChannel<C>, regs: Regs) -> Self {
        Self { dma, regs }
    }

    /// Complete the transfer normally (don't abort on drop).
    fn complete(self) {
        // Ensure DMA writes are visible to CPU
        cortex_m::asm::dsb();
        // Cleanup: stop the UART from raising further RX DMA requests.
        self.regs.baud().modify(|_, w| w.rdmae().disabled());
        // SAFETY: the transfer has finished, so quiescing the channel here
        // cannot race with an active DMA access.
        unsafe {
            self.dma.disable_request();
            self.dma.clear_done();
        }
        // Don't run drop since we've cleaned up
        core::mem::forget(self);
    }
}
1162
impl<C: DmaChannelTrait> Drop for RxDmaGuard<'_, C> {
    /// Cancellation path: aborts the in-flight RX DMA so the destination
    /// buffer can safely go out of scope.
    fn drop(&mut self) {
        // Abort the DMA transfer if still running
        unsafe {
            self.dma.disable_request();
            self.dma.clear_done();
            self.dma.clear_interrupt();
        }
        // Disable UART RX DMA request
        self.regs.baud().modify(|_, w| w.rdmae().disabled());
    }
}
1175
1176impl<'a, T: Instance, C: DmaChannelTrait> LpuartTxDma<'a, T, C> {
1177 /// Create a new LPUART TX driver with DMA support.
1178 pub fn new(
1179 _inner: Peri<'a, T>,
1180 tx_pin: Peri<'a, impl TxPin<T>>,
1181 tx_dma_ch: Peri<'a, C>,
1182 config: Config,
1183 ) -> Result<Self> {
1184 tx_pin.as_tx();
1185 let tx_pin: Peri<'a, AnyPin> = tx_pin.into();
1186
1187 // Initialize LPUART with TX enabled, RX disabled, no flow control
1188 Lpuart::<Async>::init::<T>(true, false, false, false, config)?;
1189
1190 Ok(Self {
1191 info: T::info(),
1192 _tx_pin: tx_pin,
1193 tx_dma: DmaChannel::new(tx_dma_ch),
1194 _instance: core::marker::PhantomData,
1195 })
1196 }
1197
1198 /// Write data using DMA.
1199 ///
1200 /// This configures the DMA channel for a memory-to-peripheral transfer
1201 /// and waits for completion asynchronously. Large buffers are automatically
1202 /// split into chunks that fit within the DMA transfer limit.
1203 ///
1204 /// The DMA request source is automatically derived from the LPUART instance type.
1205 ///
1206 /// # Safety
1207 ///
1208 /// If the returned future is dropped before completion (e.g., due to a timeout),
1209 /// the DMA transfer is automatically aborted to prevent use-after-free.
1210 ///
1211 /// # Arguments
1212 /// * `buf` - Data buffer to transmit
1213 pub async fn write_dma(&mut self, buf: &[u8]) -> Result<usize> {
1214 if buf.is_empty() {
1215 return Ok(0);
1216 }
1217
1218 let mut total = 0;
1219 for chunk in buf.chunks(DMA_MAX_TRANSFER_SIZE) {
1220 total += self.write_dma_inner(chunk).await?;
1221 }
1222
1223 Ok(total)
1224 }
1225
1226 /// Internal helper to write a single chunk (max 0x7FFF bytes) using DMA.
1227 async fn write_dma_inner(&mut self, buf: &[u8]) -> Result<usize> {
1228 let len = buf.len();
1229 let peri_addr = self.info.regs.data().as_ptr() as *mut u8;
1230
1231 unsafe {
1232 // Clean up channel state
1233 self.tx_dma.disable_request();
1234 self.tx_dma.clear_done();
1235 self.tx_dma.clear_interrupt();
1236
1237 // Set DMA request source from instance type (type-safe)
1238 self.tx_dma.set_request_source::<T::TxDmaRequest>();
1239
1240 // Configure TCD for memory-to-peripheral transfer
1241 self.tx_dma
1242 .setup_write_to_peripheral(buf, peri_addr, EnableInterrupt::Yes);
1243
1244 // Enable UART TX DMA request
1245 self.info.regs.baud().modify(|_, w| w.tdmae().enabled());
1246
1247 // Enable DMA channel request
1248 self.tx_dma.enable_request();
1249 }
1250
1251 // Create guard that will abort DMA if this future is dropped
1252 let guard = TxDmaGuard::new(&self.tx_dma, self.info.regs);
1253
1254 // Wait for completion asynchronously
1255 core::future::poll_fn(|cx| {
1256 self.tx_dma.waker().register(cx.waker());
1257 if self.tx_dma.is_done() {
1258 core::task::Poll::Ready(())
1259 } else {
1260 core::task::Poll::Pending
1261 }
1262 })
1263 .await;
1264
1265 // Transfer completed successfully - clean up without aborting
1266 guard.complete();
1267
1268 Ok(len)
1269 }
1270
1271 /// Blocking write (fallback when DMA is not needed)
1272 pub fn blocking_write(&mut self, buf: &[u8]) -> Result<()> {
1273 for &byte in buf {
1274 while self.info.regs.stat().read().tdre().is_txdata() {}
1275 self.info.regs.data().modify(|_, w| unsafe { w.bits(u32::from(byte)) });
1276 }
1277 Ok(())
1278 }
1279
1280 /// Flush TX blocking
1281 pub fn blocking_flush(&mut self) -> Result<()> {
1282 while self.info.regs.water().read().txcount().bits() != 0 {}
1283 while self.info.regs.stat().read().tc().is_active() {}
1284 Ok(())
1285 }
1286}
1287
impl<'a, T: Instance, C: DmaChannelTrait> LpuartRxDma<'a, T, C> {
    /// Create a new LPUART RX driver with DMA support.
    pub fn new(
        _inner: Peri<'a, T>,
        rx_pin: Peri<'a, impl RxPin<T>>,
        rx_dma_ch: Peri<'a, C>,
        config: Config,
    ) -> Result<Self> {
        rx_pin.as_rx();
        let rx_pin: Peri<'a, AnyPin> = rx_pin.into();

        // Initialize LPUART with TX disabled, RX enabled, no flow control
        Lpuart::<Async>::init::<T>(false, true, false, false, config)?;

        Ok(Self {
            info: T::info(),
            _rx_pin: rx_pin,
            rx_dma: DmaChannel::new(rx_dma_ch),
            _instance: core::marker::PhantomData,
        })
    }

    /// Read data using DMA.
    ///
    /// This configures the DMA channel for a peripheral-to-memory transfer
    /// and waits for completion asynchronously. Large buffers are automatically
    /// split into chunks that fit within the DMA transfer limit.
    ///
    /// The DMA request source is automatically derived from the LPUART instance type.
    ///
    /// # Safety
    ///
    /// If the returned future is dropped before completion (e.g., due to a timeout),
    /// the DMA transfer is automatically aborted to prevent use-after-free.
    ///
    /// # Arguments
    /// * `buf` - Buffer to receive data into
    pub async fn read_dma(&mut self, buf: &mut [u8]) -> Result<usize> {
        if buf.is_empty() {
            return Ok(0);
        }

        let mut total = 0;
        for chunk in buf.chunks_mut(DMA_MAX_TRANSFER_SIZE) {
            total += self.read_dma_inner(chunk).await?;
        }

        Ok(total)
    }

    /// Internal helper to read a single chunk (max 0x7FFF bytes) using DMA.
    async fn read_dma_inner(&mut self, buf: &mut [u8]) -> Result<usize> {
        let len = buf.len();
        let peri_addr = self.info.regs.data().as_ptr() as *const u8;

        // SAFETY: the channel is quiesced before reprogramming, and the
        // guard created below aborts the transfer if this future is dropped.
        unsafe {
            // Clean up channel state
            self.rx_dma.disable_request();
            self.rx_dma.clear_done();
            self.rx_dma.clear_interrupt();

            // Set DMA request source from instance type (type-safe)
            self.rx_dma.set_request_source::<T::RxDmaRequest>();

            // Configure TCD for peripheral-to-memory transfer
            self.rx_dma
                .setup_read_from_peripheral(peri_addr, buf, EnableInterrupt::Yes);

            // Enable UART RX DMA request
            self.info.regs.baud().modify(|_, w| w.rdmae().enabled());

            // Enable DMA channel request
            self.rx_dma.enable_request();
        }

        // Create guard that will abort DMA if this future is dropped
        let guard = RxDmaGuard::new(&self.rx_dma, self.info.regs);

        // Wait for completion asynchronously
        core::future::poll_fn(|cx| {
            self.rx_dma.waker().register(cx.waker());
            if self.rx_dma.is_done() {
                core::task::Poll::Ready(())
            } else {
                core::task::Poll::Pending
            }
        })
        .await;

        // Transfer completed successfully - clean up without aborting
        guard.complete();

        Ok(len)
    }

    /// Blocking read (fallback when DMA is not needed)
    pub fn blocking_read(&mut self, buf: &mut [u8]) -> Result<()> {
        for byte in buf.iter_mut() {
            loop {
                if has_data(self.info.regs) {
                    // Reading DATA pops the RX FIFO; keep only the low byte.
                    *byte = (self.info.regs.data().read().bits() & 0xFF) as u8;
                    break;
                }
                check_and_clear_rx_errors(self.info.regs)?;
            }
        }
        Ok(())
    }

    /// Set up a ring buffer for continuous DMA reception.
    ///
    /// This configures the DMA channel for circular operation, enabling continuous
    /// reception of data without gaps. The DMA will continuously write received
    /// bytes into the buffer, wrapping around when it reaches the end.
    ///
    /// This method encapsulates all the low-level setup:
    /// - Configures the DMA request source for this LPUART instance
    /// - Enables the RX DMA request in the LPUART peripheral
    /// - Sets up the circular DMA transfer
    /// - Enables the NVIC interrupt for async wakeups
    ///
    /// # Arguments
    ///
    /// * `buf` - Destination buffer for received data (power-of-2 size is ideal for efficiency)
    ///
    /// # Returns
    ///
    /// A [`RingBuffer`] that can be used to asynchronously read received data.
    ///
    /// # Example
    ///
    /// ```no_run
    /// static mut RX_BUF: [u8; 64] = [0; 64];
    ///
    /// let rx = LpuartRxDma::new(p.LPUART2, p.P2_3, p.DMA_CH0, config).unwrap();
    /// let ring_buf = unsafe { rx.setup_ring_buffer(&mut RX_BUF) };
    ///
    /// // Read data as it arrives
    /// let mut buf = [0u8; 16];
    /// let n = ring_buf.read(&mut buf).await.unwrap();
    /// ```
    ///
    /// # Safety
    ///
    /// - The buffer must remain valid for the lifetime of the returned RingBuffer.
    /// - Only one RingBuffer should exist per LPUART RX channel at a time.
    /// - The caller must ensure the static buffer is not accessed elsewhere while
    ///   the ring buffer is active.
    pub unsafe fn setup_ring_buffer<'b>(&self, buf: &'b mut [u8]) -> RingBuffer<'b, u8> {
        // Get the peripheral data register address
        let peri_addr = self.info.regs.data().as_ptr() as *const u8;

        // Configure DMA request source for this LPUART instance (type-safe)
        self.rx_dma.set_request_source::<T::RxDmaRequest>();

        // Enable RX DMA request in the LPUART peripheral
        self.info.regs.baud().modify(|_, w| w.rdmae().enabled());

        // Set up circular DMA transfer (this also enables NVIC interrupt)
        self.rx_dma.setup_circular_read(peri_addr, buf)
    }

    /// Enable the DMA channel request.
    ///
    /// Call this after `setup_ring_buffer()` to start continuous reception.
    /// This is separated from setup to allow for any additional configuration
    /// before starting the transfer.
    pub unsafe fn enable_dma_request(&self) {
        self.rx_dma.enable_request();
    }
}
1459
1460impl<'a, T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> LpuartDma<'a, T, TxC, RxC> {
1461 /// Create a new LPUART driver with DMA support for both TX and RX.
1462 pub fn new(
1463 _inner: Peri<'a, T>,
1464 tx_pin: Peri<'a, impl TxPin<T>>,
1465 rx_pin: Peri<'a, impl RxPin<T>>,
1466 tx_dma_ch: Peri<'a, TxC>,
1467 rx_dma_ch: Peri<'a, RxC>,
1468 config: Config,
1469 ) -> Result<Self> {
1470 tx_pin.as_tx();
1471 rx_pin.as_rx();
1472
1473 let tx_pin: Peri<'a, AnyPin> = tx_pin.into();
1474 let rx_pin: Peri<'a, AnyPin> = rx_pin.into();
1475
1476 // Initialize LPUART with both TX and RX enabled, no flow control
1477 Lpuart::<Async>::init::<T>(true, true, false, false, config)?;
1478
1479 Ok(Self {
1480 tx: LpuartTxDma {
1481 info: T::info(),
1482 _tx_pin: tx_pin,
1483 tx_dma: DmaChannel::new(tx_dma_ch),
1484 _instance: core::marker::PhantomData,
1485 },
1486 rx: LpuartRxDma {
1487 info: T::info(),
1488 _rx_pin: rx_pin,
1489 rx_dma: DmaChannel::new(rx_dma_ch),
1490 _instance: core::marker::PhantomData,
1491 },
1492 })
1493 }
1494
1495 /// Split into separate TX and RX drivers
1496 pub fn split(self) -> (LpuartTxDma<'a, T, TxC>, LpuartRxDma<'a, T, RxC>) {
1497 (self.tx, self.rx)
1498 }
1499
1500 /// Write data using DMA
1501 pub async fn write_dma(&mut self, buf: &[u8]) -> Result<usize> {
1502 self.tx.write_dma(buf).await
1503 }
1504
1505 /// Read data using DMA
1506 pub async fn read_dma(&mut self, buf: &mut [u8]) -> Result<usize> {
1507 self.rx.read_dma(buf).await
1508 }
1509}
1510
1511// ============================================================================
1512// EMBEDDED-IO-ASYNC TRAIT IMPLEMENTATIONS
1513// ============================================================================
1514
// embedded-io error plumbing for the DMA TX half.
impl<T: Instance, C: DmaChannelTrait> embedded_io::ErrorType for LpuartTxDma<'_, T, C> {
    type Error = Error;
}
1518
// embedded-io error plumbing for the DMA RX half.
impl<T: Instance, C: DmaChannelTrait> embedded_io::ErrorType for LpuartRxDma<'_, T, C> {
    type Error = Error;
}
1522
// embedded-io error plumbing for the combined DMA driver.
impl<T: Instance, TxC: DmaChannelTrait, RxC: DmaChannelTrait> embedded_io::ErrorType for LpuartDma<'_, T, TxC, RxC> {
    type Error = Error;
}
1526
1527// ============================================================================
1528// EMBEDDED-HAL 0.2 TRAIT IMPLEMENTATIONS
1529// ============================================================================
1530
1531impl embedded_hal_02::serial::Read<u8> for LpuartRx<'_, Blocking> {
1532 type Error = Error;
1533
1534 fn read(&mut self) -> core::result::Result<u8, nb::Error<Self::Error>> {
1535 let mut buf = [0; 1];
1536 match self.read(&mut buf) {
1537 Ok(_) => Ok(buf[0]),
1538 Err(Error::RxFifoEmpty) => Err(nb::Error::WouldBlock),
1539 Err(e) => Err(nb::Error::Other(e)),
1540 }
1541 }
1542}
1543
1544impl embedded_hal_02::serial::Write<u8> for LpuartTx<'_, Blocking> {
1545 type Error = Error;
1546
1547 fn write(&mut self, word: u8) -> core::result::Result<(), nb::Error<Self::Error>> {
1548 match self.write(&[word]) {
1549 Ok(_) => Ok(()),
1550 Err(Error::TxFifoFull) => Err(nb::Error::WouldBlock),
1551 Err(e) => Err(nb::Error::Other(e)),
1552 }
1553 }
1554
1555 fn flush(&mut self) -> core::result::Result<(), nb::Error<Self::Error>> {
1556 match self.flush() {
1557 Ok(_) => Ok(()),
1558 Err(Error::TxBusy) => Err(nb::Error::WouldBlock),
1559 Err(e) => Err(nb::Error::Other(e)),
1560 }
1561 }
1562}
1563
impl embedded_hal_02::blocking::serial::Write<u8> for LpuartTx<'_, Blocking> {
    type Error = Error;

    // Blocking variants forward directly to the inherent blocking methods.
    fn bwrite_all(&mut self, buffer: &[u8]) -> core::result::Result<(), Self::Error> {
        self.blocking_write(buffer)
    }

    fn bflush(&mut self) -> core::result::Result<(), Self::Error> {
        self.blocking_flush()
    }
}
1575
impl embedded_hal_02::serial::Read<u8> for Lpuart<'_, Blocking> {
    type Error = Error;

    // Delegate to the RX half's embedded-hal 0.2 implementation.
    fn read(&mut self) -> core::result::Result<u8, nb::Error<Self::Error>> {
        embedded_hal_02::serial::Read::read(&mut self.rx)
    }
}
1583
impl embedded_hal_02::serial::Write<u8> for Lpuart<'_, Blocking> {
    type Error = Error;

    // Delegate to the TX half's embedded-hal 0.2 implementation.
    fn write(&mut self, word: u8) -> core::result::Result<(), nb::Error<Self::Error>> {
        embedded_hal_02::serial::Write::write(&mut self.tx, word)
    }

    fn flush(&mut self) -> core::result::Result<(), nb::Error<Self::Error>> {
        embedded_hal_02::serial::Write::flush(&mut self.tx)
    }
}
1595
impl embedded_hal_02::blocking::serial::Write<u8> for Lpuart<'_, Blocking> {
    type Error = Error;

    // Blocking variants forward to the combined driver's inherent methods.
    fn bwrite_all(&mut self, buffer: &[u8]) -> core::result::Result<(), Self::Error> {
        self.blocking_write(buffer)
    }

    fn bflush(&mut self) -> core::result::Result<(), Self::Error> {
        self.blocking_flush()
    }
}
1607
1608// ============================================================================
1609// EMBEDDED-HAL-NB TRAIT IMPLEMENTATIONS
1610// ============================================================================
1611
impl embedded_hal_nb::serial::Error for Error {
    /// Map driver errors onto the generic embedded-hal-nb kinds; variants
    /// without a direct counterpart are reported as `Other`.
    fn kind(&self) -> embedded_hal_nb::serial::ErrorKind {
        match *self {
            Self::Framing => embedded_hal_nb::serial::ErrorKind::FrameFormat,
            Self::Overrun => embedded_hal_nb::serial::ErrorKind::Overrun,
            Self::Parity => embedded_hal_nb::serial::ErrorKind::Parity,
            Self::Noise => embedded_hal_nb::serial::ErrorKind::Noise,
            _ => embedded_hal_nb::serial::ErrorKind::Other,
        }
    }
}
1623
// embedded-hal-nb error plumbing for the blocking RX half.
impl embedded_hal_nb::serial::ErrorType for LpuartRx<'_, Blocking> {
    type Error = Error;
}
1627
// embedded-hal-nb error plumbing for the blocking TX half.
impl embedded_hal_nb::serial::ErrorType for LpuartTx<'_, Blocking> {
    type Error = Error;
}
1631
// embedded-hal-nb error plumbing for the combined blocking driver.
impl embedded_hal_nb::serial::ErrorType for Lpuart<'_, Blocking> {
    type Error = Error;
}
1635
1636impl embedded_hal_nb::serial::Read for LpuartRx<'_, Blocking> {
1637 fn read(&mut self) -> nb::Result<u8, Self::Error> {
1638 let mut buf = [0; 1];
1639 match self.read(&mut buf) {
1640 Ok(_) => Ok(buf[0]),
1641 Err(Error::RxFifoEmpty) => Err(nb::Error::WouldBlock),
1642 Err(e) => Err(nb::Error::Other(e)),
1643 }
1644 }
1645}
1646
1647impl embedded_hal_nb::serial::Write for LpuartTx<'_, Blocking> {
1648 fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
1649 match self.write(&[word]) {
1650 Ok(_) => Ok(()),
1651 Err(Error::TxFifoFull) => Err(nb::Error::WouldBlock),
1652 Err(e) => Err(nb::Error::Other(e)),
1653 }
1654 }
1655
1656 fn flush(&mut self) -> nb::Result<(), Self::Error> {
1657 match self.flush() {
1658 Ok(_) => Ok(()),
1659 Err(Error::TxBusy) => Err(nb::Error::WouldBlock),
1660 Err(e) => Err(nb::Error::Other(e)),
1661 }
1662 }
1663}
1664
impl embedded_hal_nb::serial::Read for Lpuart<'_, Blocking> {
    // Delegate to the RX half's embedded-hal-nb implementation.
    fn read(&mut self) -> nb::Result<u8, Self::Error> {
        embedded_hal_nb::serial::Read::read(&mut self.rx)
    }
}
1670
impl embedded_hal_nb::serial::Write for Lpuart<'_, Blocking> {
    // Delegate to the TX half's embedded-hal-nb implementation.
    fn write(&mut self, char: u8) -> nb::Result<(), Self::Error> {
        embedded_hal_nb::serial::Write::write(&mut self.tx, char)
    }

    fn flush(&mut self) -> nb::Result<(), Self::Error> {
        embedded_hal_nb::serial::Write::flush(&mut self.tx)
    }
}
1680
1681// ============================================================================
1682// EMBEDDED-IO TRAIT IMPLEMENTATIONS
1683// ============================================================================
1684
impl embedded_io::Error for Error {
    /// embedded-io defines no UART-specific kinds, so every driver error
    /// is reported as `Other`.
    fn kind(&self) -> embedded_io::ErrorKind {
        embedded_io::ErrorKind::Other
    }
}
1690
// embedded-io error plumbing for the blocking RX half.
impl embedded_io::ErrorType for LpuartRx<'_, Blocking> {
    type Error = Error;
}
1694
// embedded-io error plumbing for the blocking TX half.
impl embedded_io::ErrorType for LpuartTx<'_, Blocking> {
    type Error = Error;
}
1698
// embedded-io error plumbing for the combined blocking driver.
impl embedded_io::ErrorType for Lpuart<'_, Blocking> {
    type Error = Error;
}
1702
impl embedded_io::Read for LpuartRx<'_, Blocking> {
    // NOTE(review): embedded-io allows returning fewer bytes than requested,
    // but this implementation blocks until `buf` is completely filled —
    // confirm that is the intended contract for callers of this impl.
    fn read(&mut self, buf: &mut [u8]) -> core::result::Result<usize, Self::Error> {
        self.blocking_read(buf).map(|_| buf.len())
    }
}
1708
impl embedded_io::Write for LpuartTx<'_, Blocking> {
    // Writes the whole buffer before returning its length.
    fn write(&mut self, buf: &[u8]) -> core::result::Result<usize, Self::Error> {
        self.blocking_write(buf).map(|_| buf.len())
    }

    fn flush(&mut self) -> core::result::Result<(), Self::Error> {
        self.blocking_flush()
    }
}
1718
impl embedded_io::Read for Lpuart<'_, Blocking> {
    // Delegate to the RX half's embedded-io implementation.
    fn read(&mut self, buf: &mut [u8]) -> core::result::Result<usize, Self::Error> {
        embedded_io::Read::read(&mut self.rx, buf)
    }
}
1724
impl embedded_io::Write for Lpuart<'_, Blocking> {
    // Delegate to the TX half's embedded-io implementation.
    fn write(&mut self, buf: &[u8]) -> core::result::Result<usize, Self::Error> {
        embedded_io::Write::write(&mut self.tx, buf)
    }

    fn flush(&mut self) -> core::result::Result<(), Self::Error> {
        embedded_io::Write::flush(&mut self.tx)
    }
}
diff --git a/embassy-mcxa/src/ostimer.rs b/embassy-mcxa/src/ostimer.rs
new file mode 100644
index 000000000..c51812e3d
--- /dev/null
+++ b/embassy-mcxa/src/ostimer.rs
@@ -0,0 +1,745 @@
1//! # OSTIMER Driver with Robustness Features
2//!
3//! This module provides an async timer driver for the NXP MCXA276 OSTIMER peripheral
4//! with protection against race conditions and timer rollover issues.
5//!
6//! ## Features
7//!
8//! - Async timing with embassy-time integration
9//! - Gray code counter handling (42-bit counter)
10//! - Interrupt-driven wakeups
11//! - Configurable interrupt priority
12//! - **Race condition protection**: Critical sections and atomic operations
13//! - **Timer rollover handling**: Bounds checking and rollover prevention
14//!
15//! ## Clock Frequency Configuration
16//!
17//! The OSTIMER frequency depends on your system's clock configuration. You must provide
18//! the actual frequency when calling `time_driver::init()`.
19//!
20//! ## Race Condition Protection
21//! - Critical sections in interrupt handlers prevent concurrent access
22//! - Atomic register operations with memory barriers
23//! - Proper interrupt flag clearing and validation
24//!
25//! ## Timer Rollover Handling
26//! - Bounds checking prevents scheduling beyond timer capacity
27//! - Immediate wake for timestamps that would cause rollover issues
28#![allow(dead_code)]
29
30use core::sync::atomic::{AtomicBool, Ordering};
31
32use embassy_hal_internal::{Peri, PeripheralType};
33
34use crate::clocks::periph_helpers::{OsTimerConfig, OstimerClockSel};
35use crate::clocks::{assert_reset, enable_and_reset, is_reset_released, release_reset, Gate, PoweredClock};
36use crate::interrupt::InterruptExt;
37use crate::pac;
38
// PAC defines the shared RegisterBlock under `ostimer0`.
type Regs = pac::ostimer0::RegisterBlock;

// OSTIMER EVTIMER register layout constants
/// Total width of the EVTIMER counter in bits (42 bits total)
const EVTIMER_TOTAL_BITS: u32 = 42;
/// Width of the low part of EVTIMER (bits 31:0)
const EVTIMER_LO_BITS: u32 = 32;
/// Width of the high part of EVTIMER (bits 41:32)
const EVTIMER_HI_BITS: u32 = 10;
/// Bit position where high part starts in the combined 64-bit value
const EVTIMER_HI_SHIFT: u32 = 32;

/// Bit mask for the high part of EVTIMER
const EVTIMER_HI_MASK: u16 = (1 << EVTIMER_HI_BITS) - 1;

/// Maximum value for MATCH_L register (32-bit)
const MATCH_L_MAX: u32 = u32::MAX;
/// Maximum value for MATCH_H register (10-bit)
const MATCH_H_MAX: u16 = EVTIMER_HI_MASK;

/// Bit mask for extracting the low 32 bits from a 64-bit value
const LOW_32_BIT_MASK: u64 = u32::MAX as u64;

/// Gray code conversion bit shifts (most significant to least)
const GRAY_CONVERSION_SHIFTS: [u32; 6] = [32, 16, 8, 4, 2, 1];

/// Maximum timer value before rollover (2^42 - 1 ticks)
/// Actual rollover time depends on the configured clock frequency
const TIMER_MAX_VALUE: u64 = (1u64 << EVTIMER_TOTAL_BITS) - 1;

/// Threshold for detecting timer rollover in comparisons (1 second at 1MHz)
const TIMER_ROLLOVER_THRESHOLD: u64 = 1_000_000;

/// Common default interrupt priority for OSTIMER
const DEFAULT_INTERRUPT_PRIORITY: u8 = 3;

// Global alarm state for interrupt handling.
// NOTE(review): the `static mut` items below are only sound if every access
// happens inside a critical section or with the OS_EVENT interrupt masked.
// The visible call sites (`reset`, `schedule_alarm_at`) do wrap accesses in
// `critical_section::with`; confirm the interrupt handler does the same.
static ALARM_ACTIVE: AtomicBool = AtomicBool::new(false);
static mut ALARM_CALLBACK: Option<fn()> = None;
static mut ALARM_FLAG: Option<*const AtomicBool> = None;
static mut ALARM_TARGET_TIME: u64 = 0;

/// Number of tight spin iterations between elapsed time checks while waiting for MATCH writes to return to the idle (0) state.
const MATCH_WRITE_READY_SPINS: usize = 512;
/// Maximum time (in OSTIMER ticks) to wait for MATCH registers to become writable (~5 ms at 1 MHz).
const MATCH_WRITE_READY_TIMEOUT_TICKS: u64 = 5_000;
/// Short stabilization delay executed after toggling the MRCC reset line to let the OSTIMER bus interface settle.
const RESET_STABILIZE_SPINS: usize = 512;
88
/// Poll until the OSTIMER will accept a new MATCH register write.
///
/// `MATCH_WR_RDY` reads set while a previous write is still propagating and
/// clear once the registers are writable again. Returns `true` when writable,
/// `false` if the flag stays busy longer than
/// `MATCH_WRITE_READY_TIMEOUT_TICKS`. The timeout is measured with the
/// free-running counter, but only re-checked every
/// `MATCH_WRITE_READY_SPINS` iterations to keep the hot loop tight.
pub(super) fn wait_for_match_write_ready(r: &Regs) -> bool {
    let start = now_ticks_read();
    let mut spin_budget = 0usize;

    loop {
        // Flag clear => no write in flight; MATCH registers are writable.
        if r.osevent_ctrl().read().match_wr_rdy().bit_is_clear() {
            return true;
        }

        cortex_m::asm::nop();
        spin_budget += 1;

        if spin_budget >= MATCH_WRITE_READY_SPINS {
            spin_budget = 0;

            // wrapping_sub keeps the elapsed computation valid across
            // counter wrap-around.
            let elapsed = now_ticks_read().wrapping_sub(start);
            if elapsed >= MATCH_WRITE_READY_TIMEOUT_TICKS {
                return false;
            }
        }
    }
}
111
112pub(super) fn wait_for_match_write_complete(r: &Regs) -> bool {
113 let start = now_ticks_read();
114 let mut spin_budget = 0usize;
115
116 loop {
117 if r.osevent_ctrl().read().match_wr_rdy().bit_is_clear() {
118 return true;
119 }
120
121 cortex_m::asm::nop();
122 spin_budget += 1;
123
124 if spin_budget >= MATCH_WRITE_READY_SPINS {
125 spin_budget = 0;
126
127 let elapsed = now_ticks_read().wrapping_sub(start);
128 if elapsed >= MATCH_WRITE_READY_TIMEOUT_TICKS {
129 return false;
130 }
131 }
132 }
133}
134
/// Park the MATCH registers at their maximum value with the interrupt
/// masked, so the comparator cannot fire spuriously.
fn prime_match_registers(r: &Regs) {
    // Disable the interrupt, clear any pending flag, then wait until the MATCH registers are writable.
    r.osevent_ctrl()
        .write(|w| w.ostimer_intrflag().clear_bit_by_one().ostimer_intena().clear_bit());

    if wait_for_match_write_ready(r) {
        // SAFETY: any value within the register width is a valid MATCH setting.
        r.match_l().write(|w| unsafe { w.match_value().bits(MATCH_L_MAX) });
        r.match_h().write(|w| unsafe { w.match_value().bits(MATCH_H_MAX) });
        let _ = wait_for_match_write_complete(r);
    }
}
146
/// Single-shot alarm functionality for OSTIMER
pub struct Alarm<'d> {
    /// Whether the alarm is currently active
    active: AtomicBool,
    /// Callback to execute when alarm expires (optional)
    callback: Option<fn()>,
    /// Flag that gets set when alarm expires (optional)
    flag: Option<&'d AtomicBool>,
    // Ties the borrow of `flag` to the alarm's lifetime.
    _phantom: core::marker::PhantomData<&'d mut ()>,
}
157
impl<'d> Default for Alarm<'d> {
    /// Equivalent to [`Alarm::new`]: inactive, no callback, no flag.
    fn default() -> Self {
        Self::new()
    }
}
163
164impl<'d> Alarm<'d> {
165 /// Create a new alarm instance
166 pub fn new() -> Self {
167 Self {
168 active: AtomicBool::new(false),
169 callback: None,
170 flag: None,
171 _phantom: core::marker::PhantomData,
172 }
173 }
174
175 /// Set a callback that will be executed when the alarm expires
176 /// Note: Due to interrupt handler constraints, callbacks must be static function pointers
177 pub fn with_callback(mut self, callback: fn()) -> Self {
178 self.callback = Some(callback);
179 self
180 }
181
182 /// Set a flag that will be set to true when the alarm expires
183 pub fn with_flag(mut self, flag: &'d AtomicBool) -> Self {
184 self.flag = Some(flag);
185 self
186 }
187
188 /// Check if the alarm is currently active
189 pub fn is_active(&self) -> bool {
190 self.active.load(Ordering::Acquire)
191 }
192
193 /// Cancel the alarm if it's active
194 pub fn cancel(&self) {
195 self.active.store(false, Ordering::Release);
196 }
197}
198
/// Configuration for Ostimer::new()
#[derive(Copy, Clone)]
pub struct Config {
    /// Initialize MATCH registers to their max values and mask/clear the interrupt flag.
    pub init_match_max: bool,
    /// Clock gating behavior for run and deep-sleep power modes.
    pub power: PoweredClock,
    /// Input clock selection for the OSTIMER counter.
    pub source: OstimerClockSel,
}
207
impl Default for Config {
    /// Defaults: MATCH parked at max, clock enabled in normal run mode
    /// (disabled in deep sleep), 1 MHz source clock.
    fn default() -> Self {
        Self {
            init_match_max: true,
            power: PoweredClock::NormalEnabledDeepSleepDisabled,
            source: OstimerClockSel::Clk1M,
        }
    }
}
217
/// OSTIMER peripheral instance
pub struct Ostimer<'d, I: Instance> {
    // Zero-sized marker binding this handle to one peripheral instance type.
    _inst: core::marker::PhantomData<I>,
    // Tick rate cached from the clock configuration at construction time.
    clock_frequency_hz: u64,
    // Ties the handle's lifetime to the `Peri` token it was built from.
    _phantom: core::marker::PhantomData<&'d mut ()>,
}
224
225impl<'d, I: Instance> Ostimer<'d, I> {
    /// Construct OSTIMER handle.
    /// Requires clocks for the instance to be enabled by the board before calling.
    /// Does not enable NVIC or INTENA; use time_driver::init() for async operation.
    pub fn new(_inst: Peri<'d, I>, cfg: Config) -> Self {
        // SAFETY: ownership of the `Peri` token guarantees exclusive access
        // to this instance's clock/reset controls.
        let clock_freq = unsafe {
            enable_and_reset::<I>(&OsTimerConfig {
                power: cfg.power,
                source: cfg.source,
            })
            .expect("Enabling OsTimer clock should not fail")
        };

        assert!(clock_freq > 0, "OSTIMER frequency must be greater than 0");

        if cfg.init_match_max {
            // SAFETY: `I::ptr()` is this instance's register block, owned
            // exclusively via `_inst`.
            let r: &Regs = unsafe { &*I::ptr() };
            // Mask INTENA, clear pending flag, and set MATCH to max so no spurious IRQ fires.
            prime_match_registers(r);
        }

        Self {
            _inst: core::marker::PhantomData,
            clock_frequency_hz: clock_freq as u64,
            _phantom: core::marker::PhantomData,
        }
    }
252
    /// Get the configured clock frequency in Hz
    /// (resolved from the selected clock source at construction time).
    pub fn clock_frequency_hz(&self) -> u64 {
        self.clock_frequency_hz
    }
257
    /// Read the current timer counter value in timer ticks
    ///
    /// # Returns
    /// Current timer counter value as a 64-bit unsigned integer
    pub fn now(&self) -> u64 {
        // Delegates to the module-level counter-read helper so this handle
        // and the time driver share a single read path.
        now_ticks_read()
    }
265
    /// Reset the timer counter to zero
    ///
    /// This performs a hardware reset of the OSTIMER peripheral, which will reset
    /// the counter to zero and clear any pending interrupts. Note that this will
    /// affect all timer operations including embassy-time.
    ///
    /// # Safety
    /// This operation will reset the entire OSTIMER peripheral. Any active alarms
    /// or time_driver operations will be disrupted. Use with caution.
    pub fn reset(&self, _peripherals: &crate::pac::Peripherals) {
        critical_section::with(|_| {
            // SAFETY: the register block belongs to this owned instance.
            let r: &Regs = unsafe { &*I::ptr() };

            // Mask the peripheral interrupt flag before we toggle the reset line so that
            // no new NVIC activity races with the reset sequence.
            r.osevent_ctrl()
                .write(|w| w.ostimer_intrflag().clear_bit_by_one().ostimer_intena().clear_bit());

            // SAFETY: reset toggling is serialized by the surrounding
            // critical section; nothing else touches this instance here.
            unsafe {
                assert_reset::<I>();

                for _ in 0..RESET_STABILIZE_SPINS {
                    cortex_m::asm::nop();
                }

                release_reset::<I>();

                while !is_reset_released::<I>() {
                    cortex_m::asm::nop();
                }
            }

            for _ in 0..RESET_STABILIZE_SPINS {
                cortex_m::asm::nop();
            }

            // Clear alarm bookkeeping before re-arming MATCH registers.
            ALARM_ACTIVE.store(false, Ordering::Release);
            // SAFETY: interrupts are masked by the critical section, so the
            // IRQ handler cannot observe these statics mid-update.
            unsafe {
                ALARM_TARGET_TIME = 0;
                ALARM_CALLBACK = None;
                ALARM_FLAG = None;
            }

            prime_match_registers(r);
        });

        // Ensure no stale OS_EVENT request remains pending after the reset sequence.
        crate::interrupt::OS_EVENT.unpend();
    }
316
317 /// Schedule a single-shot alarm to expire after the specified delay in microseconds
318 ///
319 /// # Parameters
320 /// * `alarm` - The alarm instance to schedule
321 /// * `delay_us` - Delay in microseconds from now
322 ///
323 /// # Returns
324 /// `true` if the alarm was scheduled successfully, `false` if it would exceed timer capacity
325 pub fn schedule_alarm_delay(&self, alarm: &Alarm, delay_us: u64) -> bool {
326 let delay_ticks = (delay_us * self.clock_frequency_hz) / 1_000_000;
327 let target_time = now_ticks_read() + delay_ticks;
328 self.schedule_alarm_at(alarm, target_time)
329 }
330
    /// Schedule a single-shot alarm to expire at the specified absolute time in timer ticks
    ///
    /// If `target_ticks` is already in the past, the alarm's callback/flag are run
    /// immediately (when the alarm was active) and `true` is returned without
    /// touching the hardware.
    ///
    /// # Parameters
    /// * `alarm` - The alarm instance to schedule
    /// * `target_ticks` - Absolute time in timer ticks when the alarm should expire
    ///
    /// # Returns
    /// `true` if the alarm was scheduled successfully, `false` if it would exceed timer capacity
    pub fn schedule_alarm_at(&self, alarm: &Alarm, target_ticks: u64) -> bool {
        let now = now_ticks_read();

        // Check if target time is in the past
        if target_ticks <= now {
            // Execute callback immediately if alarm was supposed to be active
            if alarm.active.load(Ordering::Acquire) {
                alarm.active.store(false, Ordering::Release);
                if let Some(callback) = alarm.callback {
                    callback();
                }
                if let Some(flag) = &alarm.flag {
                    flag.store(true, Ordering::Release);
                }
            }
            return true;
        }

        // Check for timer rollover
        let max_future = now + TIMER_MAX_VALUE;
        if target_ticks > max_future {
            return false; // Would exceed timer capacity
        }

        // Program the timer
        let r: &Regs = unsafe { &*I::ptr() };

        // NOTE: every `return` below exits only this closure; its value becomes the
        // return value of `critical_section::with` and thus of this method.
        critical_section::with(|_| {
            // Disable interrupt and clear flag
            r.osevent_ctrl()
                .write(|w| w.ostimer_intrflag().clear_bit_by_one().ostimer_intena().clear_bit());

            // MATCH can only be written when the previous write has landed; if the
            // peripheral is wedged, try re-priming once before giving up.
            if !wait_for_match_write_ready(r) {
                prime_match_registers(r);

                if !wait_for_match_write_ready(r) {
                    // Roll back all alarm bookkeeping so no stale state survives.
                    alarm.active.store(false, Ordering::Release);
                    ALARM_ACTIVE.store(false, Ordering::Release);
                    unsafe {
                        ALARM_TARGET_TIME = 0;
                        ALARM_CALLBACK = None;
                        ALARM_FLAG = None;
                    }
                    return false;
                }
            }

            // Mark alarm as active now that we know the MATCH registers are writable
            alarm.active.store(true, Ordering::Release);

            // Set global alarm state for interrupt handler
            ALARM_ACTIVE.store(true, Ordering::Release);
            // SAFETY: critical section + masked peripheral interrupt — the statics
            // cannot be read concurrently while being updated here.
            unsafe {
                ALARM_TARGET_TIME = target_ticks;
                ALARM_CALLBACK = alarm.callback;
                ALARM_FLAG = alarm.flag.map(|f| f as *const AtomicBool);
            }

            // Program MATCH registers (Gray-coded)
            let gray = bin_to_gray(target_ticks);
            let l = (gray & LOW_32_BIT_MASK) as u32;
            let h = (((gray >> EVTIMER_HI_SHIFT) as u16) & EVTIMER_HI_MASK) as u16;

            r.match_l().write(|w| unsafe { w.match_value().bits(l) });
            r.match_h().write(|w| unsafe { w.match_value().bits(h) });

            if !wait_for_match_write_complete(r) {
                // MATCH write never landed; undo all bookkeeping and report failure.
                alarm.active.store(false, Ordering::Release);
                ALARM_ACTIVE.store(false, Ordering::Release);
                unsafe {
                    ALARM_TARGET_TIME = 0;
                    ALARM_CALLBACK = None;
                    ALARM_FLAG = None;
                }
                return false;
            }

            // Guard against the deadline racing past us while we were programming:
            // if the target has already elapsed and the hardware did NOT latch the
            // interrupt flag, enabling INTENA now would never fire — bail out instead.
            let now_after_program = now_ticks_read();
            let intrflag_set = r.osevent_ctrl().read().ostimer_intrflag().bit_is_set();
            if now_after_program >= target_ticks && !intrflag_set {
                alarm.active.store(false, Ordering::Release);
                ALARM_ACTIVE.store(false, Ordering::Release);
                unsafe {
                    ALARM_TARGET_TIME = 0;
                    ALARM_CALLBACK = None;
                    ALARM_FLAG = None;
                }
                return false;
            }

            // Enable interrupt
            r.osevent_ctrl().write(|w| w.ostimer_intena().set_bit());

            true
        })
    }
435
436 /// Cancel any active alarm
437 pub fn cancel_alarm(&self, alarm: &Alarm) {
438 critical_section::with(|_| {
439 alarm.cancel();
440
441 // Clear global alarm state
442 ALARM_ACTIVE.store(false, Ordering::Release);
443 unsafe { ALARM_TARGET_TIME = 0 };
444
445 // Reset MATCH registers to maximum values to prevent spurious interrupts
446 let r: &Regs = unsafe { &*I::ptr() };
447 prime_match_registers(r);
448 });
449 }
450
451 /// Check if an alarm has expired (call this from your interrupt handler)
452 /// Returns true if the alarm was active and has now expired
453 pub fn check_alarm_expired(&self, alarm: &Alarm) -> bool {
454 if alarm.active.load(Ordering::Acquire) {
455 alarm.active.store(false, Ordering::Release);
456
457 // Execute callback
458 if let Some(callback) = alarm.callback {
459 callback();
460 }
461
462 // Set flag
463 if let Some(flag) = &alarm.flag {
464 flag.store(true, Ordering::Release);
465 }
466
467 true
468 } else {
469 false
470 }
471 }
472}
473
/// Read current EVTIMER (Gray-coded) and convert to binary ticks.
///
/// Always reads OSTIMER0 directly (the only instance this HAL exposes).
///
/// NOTE(review): this reads EVTIMERH then EVTIMERL with no re-read loop, so a
/// carry between the two bus reads could produce an incoherent snapshot unless
/// the hardware latches one half when the other is read — confirm the required
/// access order against the OSTIMER chapter of the reference manual.
#[inline(always)]
fn now_ticks_read() -> u64 {
    let r: &Regs = unsafe { &*pac::Ostimer0::ptr() };

    // Read high then low to minimize incoherent snapshots
    let hi = (r.evtimerh().read().evtimer_count_value().bits() as u64) & (EVTIMER_HI_MASK as u64);
    let lo = r.evtimerl().read().evtimer_count_value().bits() as u64;
    // Combine and convert from Gray code to binary
    let gray = lo | (hi << EVTIMER_HI_SHIFT);
    gray_to_bin(gray)
}
486
// Instance trait like other drivers, providing a PAC pointer for this OSTIMER instance
/// An OSTIMER instance: its MRCC clock/reset gate plus a raw pointer to its
/// register block.
pub trait Instance: Gate<MrccPeriphConfig = OsTimerConfig> + PeripheralType {
    /// Raw pointer to this instance's `RegisterBlock`.
    fn ptr() -> *const Regs;
}
491
// When the `time` feature is enabled, OSTIMER0 is reserved for the embassy-time
// driver (see the `time_driver` module), so this user-facing impl only exists
// without it.
#[cfg(not(feature = "time"))]
impl Instance for crate::peripherals::OSTIMER0 {
    #[inline(always)]
    fn ptr() -> *const Regs {
        pac::Ostimer0::ptr()
    }
}
499
/// Convert a binary value to its reflected-binary Gray-code representation.
///
/// Each Gray bit is the XOR of a pair of adjacent binary bits, which for the
/// whole word is exactly `x ^ (x >> 1)`.
#[inline(always)]
fn bin_to_gray(x: u64) -> u64 {
    let shifted = x >> 1;
    x ^ shifted
}
504
505#[inline(always)]
506fn gray_to_bin(gray: u64) -> u64 {
507 // More efficient iterative conversion using predefined shifts
508 let mut bin = gray;
509 for &shift in &GRAY_CONVERSION_SHIFTS {
510 bin ^= bin >> shift;
511 }
512 bin
513}
514
#[cfg(feature = "time")]
pub mod time_driver {
    //! embassy-time driver backed by OSTIMER0.
    //!
    //! [`init`] enables the OSTIMER clock (1 MHz source), primes the MATCH
    //! registers and configures the OS_EVENT NVIC entry; the
    //! `time_driver_impl!` invocation at the bottom registers [`Driver`] as
    //! the global embassy-time driver.
    use core::sync::atomic::Ordering;
    use core::task::Waker;

    use embassy_sync::waitqueue::AtomicWaker;
    use embassy_time_driver as etd;

    use super::{
        bin_to_gray, now_ticks_read, Regs, ALARM_ACTIVE, ALARM_CALLBACK, ALARM_FLAG, ALARM_TARGET_TIME,
        EVTIMER_HI_MASK, EVTIMER_HI_SHIFT, LOW_32_BIT_MASK,
    };
    use crate::clocks::periph_helpers::{OsTimerConfig, OstimerClockSel};
    use crate::clocks::{enable_and_reset, PoweredClock};
    use crate::pac;

    /// Private zero-sized token that owns OSTIMER0's clock/reset gate on
    /// behalf of the time driver (the public `peripherals::OSTIMER0` token is
    /// not usable as an `Instance` when the `time` feature is active).
    #[allow(non_camel_case_types)]
    pub(crate) struct _OSTIMER0_TIME_DRIVER {
        _x: (),
    }

    // The `Gate` impl below is the hand-expanded form of this macro; kept for reference.
    // #[cfg(feature = "time")]
    // impl_cc_gate!(_OSTIMER0_TIME_DRIVER, mrcc_glb_cc1, mrcc_glb_rst1, ostimer0, OsTimerConfig);

    impl crate::clocks::Gate for _OSTIMER0_TIME_DRIVER {
        type MrccPeriphConfig = crate::clocks::periph_helpers::OsTimerConfig;

        #[inline]
        unsafe fn enable_clock() {
            // SAFETY: single-field modify of the MRCC clock-control register;
            // the trait's contract makes the caller responsible for sequencing.
            let mrcc = unsafe { pac::Mrcc0::steal() };
            mrcc.mrcc_glb_cc1().modify(|_, w| w.ostimer0().enabled());
        }

        #[inline]
        unsafe fn disable_clock() {
            let mrcc = unsafe { pac::Mrcc0::steal() };
            mrcc.mrcc_glb_cc1().modify(|_r, w| w.ostimer0().disabled());
        }

        #[inline]
        fn is_clock_enabled() -> bool {
            let mrcc = unsafe { pac::Mrcc0::steal() };
            mrcc.mrcc_glb_cc1().read().ostimer0().is_enabled()
        }

        #[inline]
        unsafe fn release_reset() {
            let mrcc = unsafe { pac::Mrcc0::steal() };
            mrcc.mrcc_glb_rst1().modify(|_, w| w.ostimer0().enabled());
        }

        #[inline]
        unsafe fn assert_reset() {
            let mrcc = unsafe { pac::Mrcc0::steal() };
            mrcc.mrcc_glb_rst1().modify(|_, w| w.ostimer0().disabled());
        }

        #[inline]
        fn is_reset_released() -> bool {
            let mrcc = unsafe { pac::Mrcc0::steal() };
            mrcc.mrcc_glb_rst1().read().ostimer0().is_enabled()
        }
    }

    /// Zero-sized embassy-time driver; all state lives in the OSTIMER0
    /// registers, `TIMER_WAKER`, and the alarm statics shared with the
    /// blocking alarm API in the parent module.
    pub struct Driver;
    /// Waker for the single task currently parked in `schedule_wake`.
    static TIMER_WAKER: AtomicWaker = AtomicWaker::new();

    impl etd::Driver for Driver {
        fn now(&self) -> u64 {
            // Use the hardware counter (frequency configured in init)
            super::now_ticks_read()
        }

        fn schedule_wake(&self, timestamp: u64, waker: &Waker) {
            let now = self.now();

            // If timestamp is in the past or very close to now, wake immediately
            if timestamp <= now {
                waker.wake_by_ref();
                return;
            }

            // Prevent scheduling too far in the future (beyond timer rollover)
            // This prevents wraparound issues
            let max_future = now + super::TIMER_MAX_VALUE;
            if timestamp > max_future {
                // For very long timeouts, wake immediately to avoid rollover issues
                waker.wake_by_ref();
                return;
            }

            // Register the waker first so any immediate wake below is observed by the executor.
            TIMER_WAKER.register(waker);

            let r: &Regs = unsafe { &*pac::Ostimer0::ptr() };

            // Every `return` below only exits this closure; `with` returns `()`.
            critical_section::with(|_| {
                // Mask INTENA and clear flag
                r.osevent_ctrl()
                    .write(|w| w.ostimer_intrflag().clear_bit_by_one().ostimer_intena().clear_bit());

                // Read back to ensure W1C took effect on hardware
                let _ = r.osevent_ctrl().read().ostimer_intrflag().bit();

                // MATCH must be write-ready; if wedged, re-prime once before giving up.
                if !super::wait_for_match_write_ready(r) {
                    super::prime_match_registers(r);

                    if !super::wait_for_match_write_ready(r) {
                        // If we can't safely program MATCH, wake immediately and leave INTENA masked.
                        waker.wake_by_ref();
                        return;
                    }
                }

                // Program MATCH (Gray-coded). Write low then high, then fence.
                let gray = bin_to_gray(timestamp);
                let l = (gray & LOW_32_BIT_MASK) as u32;

                let h = (((gray >> EVTIMER_HI_SHIFT) as u16) & EVTIMER_HI_MASK) as u16;

                r.match_l().write(|w| unsafe { w.match_value().bits(l) });
                r.match_h().write(|w| unsafe { w.match_value().bits(h) });

                if !super::wait_for_match_write_complete(r) {
                    // MATCH write never landed; fall back to an immediate wake.
                    waker.wake_by_ref();
                    return;
                }

                // If the deadline raced past us while programming and the hardware
                // did not latch the flag, enabling INTENA would never fire — wake now.
                let now_after_program = super::now_ticks_read();
                let intrflag_set = r.osevent_ctrl().read().ostimer_intrflag().bit_is_set();
                if now_after_program >= timestamp && !intrflag_set {
                    waker.wake_by_ref();
                    return;
                }

                // Enable peripheral interrupt
                r.osevent_ctrl().write(|w| w.ostimer_intena().set_bit());
            });
        }
    }

    /// Install the global embassy-time driver and configure NVIC priority for OS_EVENT.
    ///
    /// # Parameters
    /// * `priority` - Interrupt priority for the OSTIMER interrupt
    /// * `frequency_hz` - Actual OSTIMER clock frequency in Hz (stored for future use)
    ///
    /// Note: The frequency parameter is currently accepted for API compatibility.
    /// The embassy_time_driver macro handles driver registration automatically.
    pub fn init(priority: crate::interrupt::Priority, frequency_hz: u64) {
        // SAFETY: the private `_OSTIMER0_TIME_DRIVER` token guarantees this gate
        // is only manipulated by the time driver.
        let _clock_freq = unsafe {
            enable_and_reset::<_OSTIMER0_TIME_DRIVER>(&OsTimerConfig {
                power: PoweredClock::AlwaysEnabled,
                source: OstimerClockSel::Clk1M,
            })
            .expect("Enabling OsTimer clock should not fail")
        };

        // Mask/clear at peripheral and set default MATCH
        let r: &Regs = unsafe { &*pac::Ostimer0::ptr() };
        super::prime_match_registers(r);

        // Configure NVIC for timer operation
        crate::interrupt::OS_EVENT.configure_for_timer(priority);

        // Note: The embassy_time_driver macro automatically registers the driver
        // The frequency parameter is accepted for future compatibility
        let _ = frequency_hz; // Suppress unused parameter warning
    }

    // Export the global time driver expected by embassy-time
    embassy_time_driver::time_driver_impl!(static DRIVER: Driver = Driver);

    /// To be called from the OS_EVENT IRQ.
    ///
    /// Clears/masks the peripheral interrupt, wakes any task parked in
    /// `schedule_wake`, and runs the blocking-alarm callback/flag when the
    /// event matches the globally-recorded alarm deadline.
    pub fn on_interrupt() {
        let r: &Regs = unsafe { &*pac::Ostimer0::ptr() };

        // Critical section to prevent races with schedule_wake
        critical_section::with(|_| {
            // Check if interrupt is actually pending and handle it atomically
            if r.osevent_ctrl().read().ostimer_intrflag().bit_is_set() {
                // Clear flag and disable interrupt atomically
                r.osevent_ctrl().write(|w| {
                    w.ostimer_intrflag()
                        .clear_bit_by_one() // Write-1-to-clear using safe helper
                        .ostimer_intena()
                        .clear_bit()
                });

                // Wake the waiting task
                TIMER_WAKER.wake();

                // Handle alarm callback if active and this interrupt is for the alarm
                if ALARM_ACTIVE.load(Ordering::SeqCst) {
                    let current_time = now_ticks_read();
                    let target_time = unsafe { ALARM_TARGET_TIME };

                    // Check if current time is close to alarm target time (within 1000 ticks for timing variations)
                    // NOTE(review): if IRQ latency ever exceeds this 1000-tick window,
                    // the alarm stays armed and is silently missed — confirm the window
                    // is generous enough for the configured tick rate.
                    if current_time >= target_time && current_time <= target_time + 1000 {
                        ALARM_ACTIVE.store(false, Ordering::SeqCst);
                        unsafe { ALARM_TARGET_TIME = 0 };

                        // Execute callback if set
                        unsafe {
                            if let Some(callback) = ALARM_CALLBACK {
                                callback();
                            }
                        }

                        // Set flag if provided
                        unsafe {
                            if let Some(flag) = ALARM_FLAG {
                                (*flag).store(true, Ordering::SeqCst);
                            }
                        }
                    }
                }
            }
        });
    }
}
736
737#[cfg(feature = "time")]
738use crate::pac::interrupt;
739
// OS_EVENT vector entry: forwards to the time driver's handler, which clears
// the peripheral flag, wakes the scheduled task, and services the alarm.
#[cfg(feature = "time")]
#[allow(non_snake_case)]
#[interrupt]
fn OS_EVENT() {
    time_driver::on_interrupt()
}
diff --git a/embassy-mcxa/src/pins.rs b/embassy-mcxa/src/pins.rs
new file mode 100644
index 000000000..9adbe64c8
--- /dev/null
+++ b/embassy-mcxa/src/pins.rs
@@ -0,0 +1,33 @@
1//! Pin configuration helpers (separate from peripheral drivers).
2use crate::pac;
3
/// Configure pins for ADC usage.
///
/// Currently configures P1_10 (ADC1_A8) by writing every PCR10 field to its
/// `…0` variant — in particular MUX alt 0, which is presumably the
/// disabled/analog pin function (TODO: confirm against the pinout table).
///
/// # Safety
///
/// Must be called after PORT clocks are enabled.
pub unsafe fn configure_adc_pins() {
    // P1_10 = ADC1_A8
    let port1 = &*pac::Port1::ptr();
    port1.pcr10().write(|w| {
        w.ps()
            .ps0()
            .pe()
            .pe0()
            .sre()
            .sre0()
            .ode()
            .ode0()
            .dse()
            .dse0()
            .mux()
            .mux0()
            .ibe()
            .ibe0()
            .inv()
            .inv0()
            .lk()
            .lk0()
    });
    // Data + instruction barriers so the PCR write has fully landed before any
    // subsequent ADC access that depends on the new pin configuration.
    core::arch::asm!("dsb sy; isb sy");
}
diff --git a/embassy-mcxa/src/rtc.rs b/embassy-mcxa/src/rtc.rs
new file mode 100644
index 000000000..b750a97ea
--- /dev/null
+++ b/embassy-mcxa/src/rtc.rs
@@ -0,0 +1,299 @@
1//! RTC DateTime driver.
2use core::sync::atomic::{AtomicBool, Ordering};
3
4use embassy_hal_internal::{Peri, PeripheralType};
5
6use crate::clocks::with_clocks;
7use crate::pac;
8use crate::pac::rtc0::cr::Um;
9
10type Regs = pac::rtc0::RegisterBlock;
11
12static ALARM_TRIGGERED: AtomicBool = AtomicBool::new(false);
13
// Token-based instance pattern like embassy-imxrt
/// Maps a zero-sized peripheral token to its RTC register block.
pub trait Instance: PeripheralType {
    /// Raw pointer to this instance's `RegisterBlock`.
    fn ptr() -> *const Regs;
}
18
/// Token for RTC0
pub type Rtc0 = crate::peripherals::RTC0;
impl Instance for crate::peripherals::RTC0 {
    #[inline(always)]
    fn ptr() -> *const Regs {
        // Only RTC0 exists on this part; point at its PAC register block.
        pac::Rtc0::ptr()
    }
}
27
/// Days in a non-leap year.
const DAYS_IN_A_YEAR: u32 = 365;
/// Seconds per day (24 * 60 * 60).
const SECONDS_IN_A_DAY: u32 = 86400;
/// Seconds per hour.
const SECONDS_IN_A_HOUR: u32 = 3600;
/// Seconds per minute.
const SECONDS_IN_A_MINUTE: u32 = 60;
/// Epoch year for the seconds <-> datetime conversions below.
const YEAR_RANGE_START: u16 = 1970;
33
/// Calendar date/time as used by the RTC conversion helpers.
///
/// The converters use a simple every-4-years leap rule, so results are only
/// valid for years 1970..=2099 (2100 is not a leap year).
#[derive(Debug, Clone, Copy)]
pub struct RtcDateTime {
    /// Full year, e.g. 2024.
    pub year: u16,
    /// Month, 1..=12.
    pub month: u8,
    /// Day of month, 1-based.
    pub day: u8,
    /// Hour, 0..=23.
    pub hour: u8,
    /// Minute, 0..=59.
    pub minute: u8,
    /// Second, 0..=59.
    pub second: u8,
}
/// RTC configuration; obtain defaults via `get_default_config`.
#[derive(Copy, Clone)]
pub struct RtcConfig {
    // Not currently applied by `Rtc::new`.
    #[allow(dead_code)]
    wakeup_select: bool,
    // Written to CR.UM by `Rtc::new`.
    update_mode: Um,
    // Not currently applied by `Rtc::new`.
    #[allow(dead_code)]
    supervisor_access: bool,
    // Compensation interval, written to TCR.CIR by `Rtc::new`.
    compensation_interval: u8,
    // Compensation amount, written to TCR.TCR by `Rtc::new`.
    compensation_time: u8,
}
53
/// Namespace for the bitmask constants accepted by `Rtc::set_interrupt` and
/// `Rtc::disable_interrupt` (several may be OR-ed together).
#[derive(Copy, Clone)]
pub struct RtcInterruptEnable;
impl RtcInterruptEnable {
    /// Time-invalid interrupt (IER.TIIE).
    pub const RTC_TIME_INVALID_INTERRUPT_ENABLE: u32 = 1 << 0;
    /// Counter-overflow interrupt (IER.TOIE).
    pub const RTC_TIME_OVERFLOW_INTERRUPT_ENABLE: u32 = 1 << 1;
    /// Time-alarm interrupt (IER.TAIE).
    pub const RTC_ALARM_INTERRUPT_ENABLE: u32 = 1 << 2;
    /// Seconds interrupt (IER.TSIE).
    pub const RTC_SECONDS_INTERRUPT_ENABLE: u32 = 1 << 4;
}
62
63pub fn convert_datetime_to_seconds(datetime: &RtcDateTime) -> u32 {
64 let month_days: [u16; 13] = [0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334];
65
66 let mut seconds = (datetime.year as u32 - 1970) * DAYS_IN_A_YEAR;
67 seconds += (datetime.year as u32 / 4) - (1970 / 4);
68 seconds += month_days[datetime.month as usize] as u32;
69 seconds += datetime.day as u32 - 1;
70
71 if (datetime.year & 3 == 0) && (datetime.month <= 2) {
72 seconds -= 1;
73 }
74
75 seconds = seconds * SECONDS_IN_A_DAY
76 + (datetime.hour as u32 * SECONDS_IN_A_HOUR)
77 + (datetime.minute as u32 * SECONDS_IN_A_MINUTE)
78 + datetime.second as u32;
79
80 seconds
81}
82
83pub fn convert_seconds_to_datetime(seconds: u32) -> RtcDateTime {
84 let mut seconds_remaining = seconds;
85 let mut days = seconds_remaining / SECONDS_IN_A_DAY + 1;
86 seconds_remaining %= SECONDS_IN_A_DAY;
87
88 let hour = (seconds_remaining / SECONDS_IN_A_HOUR) as u8;
89 seconds_remaining %= SECONDS_IN_A_HOUR;
90 let minute = (seconds_remaining / SECONDS_IN_A_MINUTE) as u8;
91 let second = (seconds_remaining % SECONDS_IN_A_MINUTE) as u8;
92
93 let mut year = YEAR_RANGE_START;
94 let mut days_in_year = DAYS_IN_A_YEAR;
95
96 while days > days_in_year {
97 days -= days_in_year;
98 year += 1;
99
100 days_in_year = if year.is_multiple_of(4) {
101 DAYS_IN_A_YEAR + 1
102 } else {
103 DAYS_IN_A_YEAR
104 };
105 }
106
107 let days_per_month = [
108 31,
109 if year.is_multiple_of(4) { 29 } else { 28 },
110 31,
111 30,
112 31,
113 30,
114 31,
115 31,
116 30,
117 31,
118 30,
119 31,
120 ];
121
122 let mut month = 1;
123 for (m, month_days) in days_per_month.iter().enumerate() {
124 let m = m + 1;
125 if days <= *month_days as u32 {
126 month = m;
127 break;
128 } else {
129 days -= *month_days as u32;
130 }
131 }
132
133 let day = days as u8;
134
135 RtcDateTime {
136 year,
137 month: month as u8,
138 day,
139 hour,
140 minute,
141 second,
142 }
143}
144
145pub fn get_default_config() -> RtcConfig {
146 RtcConfig {
147 wakeup_select: false,
148 update_mode: Um::Um0,
149 supervisor_access: false,
150 compensation_interval: 0,
151 compensation_time: 0,
152 }
153}
/// Minimal RTC handle for a specific instance I (store the zero-sized token like embassy)
pub struct Rtc<'a, I: Instance> {
    // Ties the handle's lifetime to exclusive ownership of the instance token.
    _inst: core::marker::PhantomData<&'a mut I>,
}
158
impl<'a, I: Instance> Rtc<'a, I> {
    /// Initialize the RTC: verify the 16 kHz VSYS clock is running, soft-reset
    /// the peripheral, then apply `config` (update mode and compensation).
    ///
    /// # Panics
    /// Panics if the clock tree has not been initialized, or was initialized
    /// without `clk_16k_vsys` active.
    pub fn new(_inst: Peri<'a, I>, config: RtcConfig) -> Self {
        let rtc = unsafe { &*I::ptr() };

        // The RTC is NOT gated by the MRCC, but we DO need to make sure the 16k clock
        // on the vsys domain is active
        let clocks = with_clocks(|c| c.clk_16k_vsys.clone());
        match clocks {
            None => panic!("Clocks have not been initialized"),
            Some(None) => panic!("Clocks initialized, but clk_16k_vsys not active"),
            Some(Some(_)) => {}
        }

        /* RTC reset */
        // Pulse the software-reset bit, then seed TSR with 1. NOTE(review):
        // writing TSR here presumably also clears the time-invalid state left
        // by the reset — confirm against the RTC chapter of the reference manual.
        rtc.cr().modify(|_, w| w.swr().set_bit());
        rtc.cr().modify(|_, w| w.swr().clear_bit());
        rtc.tsr().write(|w| unsafe { w.bits(1) });

        // Select how time updates are applied (config.update_mode -> CR.UM).
        rtc.cr().modify(|_, w| w.um().variant(config.update_mode));

        // Program time compensation: interval (TCR.CIR) and amount (TCR.TCR).
        rtc.tcr().modify(|_, w| unsafe {
            w.cir()
                .bits(config.compensation_interval)
                .tcr()
                .bits(config.compensation_time)
        });

        Self {
            _inst: core::marker::PhantomData,
        }
    }

    /// Set the current date/time by writing its epoch-second value to TSR.
    pub fn set_datetime(&self, datetime: RtcDateTime) {
        let rtc = unsafe { &*I::ptr() };
        let seconds = convert_datetime_to_seconds(&datetime);
        rtc.tsr().write(|w| unsafe { w.bits(seconds) });
    }

    /// Read the current date/time (TSR seconds converted back to a calendar value).
    pub fn get_datetime(&self) -> RtcDateTime {
        let rtc = unsafe { &*I::ptr() };
        let seconds = rtc.tsr().read().bits();
        convert_seconds_to_datetime(seconds)
    }

    /// Program the time alarm register (TAR) with the given date/time.
    ///
    /// TAR is first written to 0, then to the target value; each write is
    /// followed by a bounded busy-wait for the value to read back. If a
    /// read-back never matches, the timeout expires silently — no error is
    /// reported to the caller.
    pub fn set_alarm(&self, alarm: RtcDateTime) {
        let rtc = unsafe { &*I::ptr() };
        let seconds = convert_datetime_to_seconds(&alarm);

        rtc.tar().write(|w| unsafe { w.bits(0) });
        let mut timeout = 10000;
        while rtc.tar().read().bits() != 0 && timeout > 0 {
            timeout -= 1;
        }

        rtc.tar().write(|w| unsafe { w.bits(seconds) });

        let mut timeout = 10000;
        while rtc.tar().read().bits() != seconds && timeout > 0 {
            timeout -= 1;
        }
    }

    /// Read back the currently programmed alarm time from TAR.
    pub fn get_alarm(&self) -> RtcDateTime {
        let rtc = unsafe { &*I::ptr() };
        let alarm_seconds = rtc.tar().read().bits();
        convert_seconds_to_datetime(alarm_seconds)
    }

    /// Start the time counter (SR.TCE = 1).
    pub fn start(&self) {
        let rtc = unsafe { &*I::ptr() };
        rtc.sr().modify(|_, w| w.tce().set_bit());
    }

    /// Stop the time counter (SR.TCE = 0).
    pub fn stop(&self) {
        let rtc = unsafe { &*I::ptr() };
        rtc.sr().modify(|_, w| w.tce().clear_bit());
    }

    /// Enable the interrupts selected by `mask` (OR of the
    /// `RtcInterruptEnable` constants); also clears the software alarm flag.
    pub fn set_interrupt(&self, mask: u32) {
        let rtc = unsafe { &*I::ptr() };

        if (RtcInterruptEnable::RTC_TIME_INVALID_INTERRUPT_ENABLE & mask) != 0 {
            rtc.ier().modify(|_, w| w.tiie().tiie_1());
        }
        if (RtcInterruptEnable::RTC_TIME_OVERFLOW_INTERRUPT_ENABLE & mask) != 0 {
            rtc.ier().modify(|_, w| w.toie().toie_1());
        }
        if (RtcInterruptEnable::RTC_ALARM_INTERRUPT_ENABLE & mask) != 0 {
            rtc.ier().modify(|_, w| w.taie().taie_1());
        }
        if (RtcInterruptEnable::RTC_SECONDS_INTERRUPT_ENABLE & mask) != 0 {
            rtc.ier().modify(|_, w| w.tsie().tsie_1());
        }

        ALARM_TRIGGERED.store(false, Ordering::SeqCst);
    }

    /// Disable the interrupts selected by `mask` (OR of the
    /// `RtcInterruptEnable` constants).
    pub fn disable_interrupt(&self, mask: u32) {
        let rtc = unsafe { &*I::ptr() };

        if (RtcInterruptEnable::RTC_TIME_INVALID_INTERRUPT_ENABLE & mask) != 0 {
            rtc.ier().modify(|_, w| w.tiie().tiie_0());
        }
        if (RtcInterruptEnable::RTC_TIME_OVERFLOW_INTERRUPT_ENABLE & mask) != 0 {
            rtc.ier().modify(|_, w| w.toie().toie_0());
        }
        if (RtcInterruptEnable::RTC_ALARM_INTERRUPT_ENABLE & mask) != 0 {
            rtc.ier().modify(|_, w| w.taie().taie_0());
        }
        if (RtcInterruptEnable::RTC_SECONDS_INTERRUPT_ENABLE & mask) != 0 {
            rtc.ier().modify(|_, w| w.tsie().tsie_0());
        }
    }

    /// NOTE(review): despite the name, this masks the alarm interrupt
    /// (IER.TAIE) rather than clearing the alarm status flag (SR.TAF);
    /// `set_alarm` is what rewrites TAR. Confirm intent against the
    /// reference manual before relying on the name.
    pub fn clear_alarm_flag(&self) {
        let rtc = unsafe { &*I::ptr() };
        rtc.ier().modify(|_, w| w.taie().clear_bit());
    }

    /// Returns `true` once `on_interrupt` has observed a time-alarm event.
    /// The software flag is reset by the next call to `set_interrupt`.
    pub fn is_alarm_triggered(&self) -> bool {
        ALARM_TRIGGERED.load(Ordering::Relaxed)
    }
}
283
/// RTC interrupt service routine body (invoked via `RtcHandler`).
///
/// On a time-alarm event this masks further alarm interrupts (IER.TAIE) and
/// records the event in `ALARM_TRIGGERED` for `Rtc::is_alarm_triggered` to
/// poll. The hardware status flag (SR.TAF) is left untouched here — it is
/// presumably cleared by rewriting TAR in `Rtc::set_alarm`; confirm against
/// the reference manual.
pub fn on_interrupt() {
    let rtc = unsafe { &*pac::Rtc0::ptr() };
    // Check if this is actually a time alarm interrupt
    let sr = rtc.sr().read();
    if sr.taf().bit_is_set() {
        rtc.ier().modify(|_, w| w.taie().clear_bit());
        ALARM_TRIGGERED.store(true, Ordering::SeqCst);
    }
}
293
294pub struct RtcHandler;
295impl crate::interrupt::typelevel::Handler<crate::interrupt::typelevel::RTC> for RtcHandler {
296 unsafe fn on_interrupt() {
297 on_interrupt();
298 }
299}