aboutsummaryrefslogtreecommitdiff
path: root/embassy-stm32/src/usart
diff options
context:
space:
mode:
authorAdam Greig <[email protected]>2025-09-15 03:32:23 +0100
committerAdam Greig <[email protected]>2025-10-01 21:11:05 +0100
commit2e9f3a815d440f33126d47cdcbf3bf1c9eab0ee1 (patch)
tree804288b17bd9119dd4f22b7b27fd4227e9a94d28 /embassy-stm32/src/usart
parentd382bf0b833e7d9a8e90b8064af1bb63030f0c68 (diff)
STM32: USART: Add `eager_reads` config option
Diffstat (limited to 'embassy-stm32/src/usart')
-rw-r--r--embassy-stm32/src/usart/buffered.rs18
-rw-r--r--embassy-stm32/src/usart/mod.rs20
-rw-r--r--embassy-stm32/src/usart/ringbuffered.rs104
3 files changed, 95 insertions, 47 deletions
diff --git a/embassy-stm32/src/usart/buffered.rs b/embassy-stm32/src/usart/buffered.rs
index c734eed49..83aa4439b 100644
--- a/embassy-stm32/src/usart/buffered.rs
+++ b/embassy-stm32/src/usart/buffered.rs
@@ -68,8 +68,14 @@ unsafe fn on_interrupt(r: Regs, state: &'static State) {
68 // FIXME: Should we disable any further RX interrupts when the buffer becomes full. 68 // FIXME: Should we disable any further RX interrupts when the buffer becomes full.
69 } 69 }
70 70
71 if !state.rx_buf.is_empty() { 71 if state.eager_reads.load(Ordering::Relaxed) {
72 state.rx_waker.wake(); 72 if !state.rx_buf.is_empty() {
73 state.rx_waker.wake();
74 }
75 } else {
76 if state.rx_buf.is_half_full() {
77 state.rx_waker.wake();
78 }
73 } 79 }
74 } 80 }
75 81
@@ -132,6 +138,7 @@ pub(super) struct State {
132 tx_done: AtomicBool, 138 tx_done: AtomicBool,
133 tx_rx_refcount: AtomicU8, 139 tx_rx_refcount: AtomicU8,
134 half_duplex_readback: AtomicBool, 140 half_duplex_readback: AtomicBool,
141 eager_reads: AtomicBool,
135} 142}
136 143
137impl State { 144impl State {
@@ -144,6 +151,7 @@ impl State {
144 tx_done: AtomicBool::new(true), 151 tx_done: AtomicBool::new(true),
145 tx_rx_refcount: AtomicU8::new(0), 152 tx_rx_refcount: AtomicU8::new(0),
146 half_duplex_readback: AtomicBool::new(false), 153 half_duplex_readback: AtomicBool::new(false),
154 eager_reads: AtomicBool::new(false),
147 } 155 }
148 } 156 }
149} 157}
@@ -419,6 +427,7 @@ impl<'d> BufferedUart<'d> {
419 let state = T::buffered_state(); 427 let state = T::buffered_state();
420 let kernel_clock = T::frequency(); 428 let kernel_clock = T::frequency();
421 429
430 state.eager_reads.store(config.eager_reads, Ordering::Relaxed);
422 state.half_duplex_readback.store( 431 state.half_duplex_readback.store(
423 config.duplex == Duplex::Half(HalfDuplexReadback::Readback), 432 config.duplex == Duplex::Half(HalfDuplexReadback::Readback),
424 Ordering::Relaxed, 433 Ordering::Relaxed,
@@ -456,6 +465,7 @@ impl<'d> BufferedUart<'d> {
456 let info = self.rx.info; 465 let info = self.rx.info;
457 let state = self.rx.state; 466 let state = self.rx.state;
458 state.tx_rx_refcount.store(2, Ordering::Relaxed); 467 state.tx_rx_refcount.store(2, Ordering::Relaxed);
468 state.eager_reads.store(config.eager_reads, Ordering::Relaxed);
459 469
460 info.rcc.enable_and_reset(); 470 info.rcc.enable_and_reset();
461 471
@@ -527,6 +537,8 @@ impl<'d> BufferedUart<'d> {
527 pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { 537 pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> {
528 reconfigure(self.rx.info, self.rx.kernel_clock, config)?; 538 reconfigure(self.rx.info, self.rx.kernel_clock, config)?;
529 539
540 self.rx.state.eager_reads.store(config.eager_reads, Ordering::Relaxed);
541
530 self.rx.info.regs.cr1().modify(|w| { 542 self.rx.info.regs.cr1().modify(|w| {
531 w.set_rxneie(true); 543 w.set_rxneie(true);
532 w.set_idleie(true); 544 w.set_idleie(true);
@@ -633,6 +645,8 @@ impl<'d> BufferedUartRx<'d> {
633 pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { 645 pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> {
634 reconfigure(self.info, self.kernel_clock, config)?; 646 reconfigure(self.info, self.kernel_clock, config)?;
635 647
648 self.state.eager_reads.store(config.eager_reads, Ordering::Relaxed);
649
636 self.info.regs.cr1().modify(|w| { 650 self.info.regs.cr1().modify(|w| {
637 w.set_rxneie(true); 651 w.set_rxneie(true);
638 w.set_idleie(true); 652 w.set_idleie(true);
diff --git a/embassy-stm32/src/usart/mod.rs b/embassy-stm32/src/usart/mod.rs
index ff211e0c9..e439f2cee 100644
--- a/embassy-stm32/src/usart/mod.rs
+++ b/embassy-stm32/src/usart/mod.rs
@@ -4,7 +4,7 @@
4 4
5use core::future::poll_fn; 5use core::future::poll_fn;
6use core::marker::PhantomData; 6use core::marker::PhantomData;
7use core::sync::atomic::{compiler_fence, AtomicU8, Ordering}; 7use core::sync::atomic::{compiler_fence, AtomicBool, AtomicU8, Ordering};
8use core::task::Poll; 8use core::task::Poll;
9 9
10use embassy_embedded_hal::SetConfig; 10use embassy_embedded_hal::SetConfig;
@@ -206,6 +206,18 @@ pub struct Config {
206 /// If false: the error is ignored and cleared 206 /// If false: the error is ignored and cleared
207 pub detect_previous_overrun: bool, 207 pub detect_previous_overrun: bool,
208 208
209 /// If true then read-like calls on `BufferedUartRx` and `RingBufferedUartRx`
210 /// are woken/return as soon as any data is available in the buffer.
211 ///
212 /// If false (the default) then reads, once started, typically only wake/return after
213 /// line idle or after the buffer is at least half full (`BufferedUartRx`) or
214 /// the DMA buffer is written at the half or full positions (`RingBufferedUartRx`),
215 /// though it may also wake/return earlier in some circumstances.
216 ///
217 /// Has no effect on plain `Uart` or `UartRx` reads, which are specified to either
218 /// return a single word, a full buffer, or after line idle.
219 pub eager_reads: bool,
220
209 /// Set this to true if the line is considered noise free. 221 /// Set this to true if the line is considered noise free.
210 /// This will increase the receiver’s tolerance to clock deviations, 222 /// This will increase the receiver’s tolerance to clock deviations,
211 /// but will effectively disable noise detection. 223 /// but will effectively disable noise detection.
@@ -270,6 +282,7 @@ impl Default for Config {
270 parity: Parity::ParityNone, 282 parity: Parity::ParityNone,
271 // historical behavior 283 // historical behavior
272 detect_previous_overrun: false, 284 detect_previous_overrun: false,
285 eager_reads: false,
273 #[cfg(not(usart_v1))] 286 #[cfg(not(usart_v1))]
274 assume_noise_free: false, 287 assume_noise_free: false,
275 #[cfg(any(usart_v3, usart_v4))] 288 #[cfg(any(usart_v3, usart_v4))]
@@ -966,6 +979,7 @@ impl<'d, M: Mode> UartRx<'d, M> {
966 let info = self.info; 979 let info = self.info;
967 let state = self.state; 980 let state = self.state;
968 state.tx_rx_refcount.store(1, Ordering::Relaxed); 981 state.tx_rx_refcount.store(1, Ordering::Relaxed);
982 state.eager_reads.store(config.eager_reads, Ordering::Relaxed);
969 983
970 info.rcc.enable_and_reset(); 984 info.rcc.enable_and_reset();
971 985
@@ -982,6 +996,7 @@ impl<'d, M: Mode> UartRx<'d, M> {
982 996
983 /// Reconfigure the driver 997 /// Reconfigure the driver
984 pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { 998 pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> {
999 self.state.eager_reads.store(config.eager_reads, Ordering::Relaxed);
985 reconfigure(self.info, self.kernel_clock, config) 1000 reconfigure(self.info, self.kernel_clock, config)
986 } 1001 }
987 1002
@@ -1462,6 +1477,7 @@ impl<'d, M: Mode> Uart<'d, M> {
1462 let info = self.rx.info; 1477 let info = self.rx.info;
1463 let state = self.rx.state; 1478 let state = self.rx.state;
1464 state.tx_rx_refcount.store(2, Ordering::Relaxed); 1479 state.tx_rx_refcount.store(2, Ordering::Relaxed);
1480 state.eager_reads.store(config.eager_reads, Ordering::Relaxed);
1465 1481
1466 info.rcc.enable_and_reset(); 1482 info.rcc.enable_and_reset();
1467 1483
@@ -2022,6 +2038,7 @@ struct State {
2022 rx_waker: AtomicWaker, 2038 rx_waker: AtomicWaker,
2023 tx_waker: AtomicWaker, 2039 tx_waker: AtomicWaker,
2024 tx_rx_refcount: AtomicU8, 2040 tx_rx_refcount: AtomicU8,
2041 eager_reads: AtomicBool,
2025} 2042}
2026 2043
2027impl State { 2044impl State {
@@ -2030,6 +2047,7 @@ impl State {
2030 rx_waker: AtomicWaker::new(), 2047 rx_waker: AtomicWaker::new(),
2031 tx_waker: AtomicWaker::new(), 2048 tx_waker: AtomicWaker::new(),
2032 tx_rx_refcount: AtomicU8::new(0), 2049 tx_rx_refcount: AtomicU8::new(0),
2050 eager_reads: AtomicBool::new(false),
2033 } 2051 }
2034 } 2052 }
2035} 2053}
diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs
index 5f4e87834..d818e0bcc 100644
--- a/embassy-stm32/src/usart/ringbuffered.rs
+++ b/embassy-stm32/src/usart/ringbuffered.rs
@@ -26,9 +26,9 @@ use crate::Peri;
26/// contain enough bytes to fill the buffer passed by the caller of 26/// contain enough bytes to fill the buffer passed by the caller of
27/// the function, or is empty. 27/// the function, or is empty.
28/// 28///
29/// Waiting for bytes operates in one of two modes, depending on 29/// Waiting for bytes operates in one of three modes, depending on
30/// the behavior of the sender and the size of the buffer passed 30/// the behavior of the sender, the size of the buffer passed
31/// to the function: 31/// to the function, and the configuration:
32/// 32///
33/// - If the sender sends intermittently, the 'idle line' 33/// - If the sender sends intermittently, the 'idle line'
34/// condition will be detected when the sender stops, and any 34/// condition will be detected when the sender stops, and any
@@ -47,7 +47,11 @@ use crate::Peri;
47/// interrupt when those specific buffer addresses have been 47/// interrupt when those specific buffer addresses have been
48/// written. 48/// written.
49/// 49///
50/// In both cases this will result in variable latency due to the 50/// - If `eager_reads` is enabled in `config`, the UART interrupt
51/// is enabled on all data reception and the call will only wait
52/// for at least one byte to be available before returning.
53///
54/// In the first two cases this will result in variable latency due to the
51/// buffering effect. For example, if the baudrate is 2400 bps, and 55/// buffering effect. For example, if the baudrate is 2400 bps, and
52/// the configuration is 8 data bits, no parity bit, and one stop bit, 56/// the configuration is 8 data bits, no parity bit, and one stop bit,
53/// then a byte will be received every ~4.16ms. If the ring buffer is 57/// then a byte will be received every ~4.16ms. If the ring buffer is
@@ -68,15 +72,10 @@ use crate::Peri;
68/// sending, but would be falsely triggered in the worst-case 72/// sending, but would be falsely triggered in the worst-case
69/// buffer delay scenario. 73/// buffer delay scenario.
70/// 74///
71/// Note: This latency is caused by the limited capabilities of the 75/// Note: Enabling `eager_reads` with `RingBufferedUartRx` will enable
72/// STM32 DMA controller; since it cannot generate an interrupt when 76/// a UART RXNE interrupt, which will cause an interrupt to occur on
73/// it stores a byte into an empty ring buffer, or in any other 77/// every received data byte. The data is still copied using DMA, but
74/// configurable conditions, it is not possible to take notice of the 78/// there is nevertheless additional processing overhead for each byte.
75/// contents of the ring buffer more quickly without introducing
76/// polling. As a result the latency can be reduced by calling the
77/// read functions repeatedly with smaller buffers to receive the
78/// available bytes, as each call to a read function will explicitly
79/// check the ring buffer for available bytes.
80pub struct RingBufferedUartRx<'d> { 79pub struct RingBufferedUartRx<'d> {
81 info: &'static Info, 80 info: &'static Info,
82 state: &'static State, 81 state: &'static State,
@@ -133,6 +132,7 @@ impl<'d> UartRx<'d, Async> {
133impl<'d> RingBufferedUartRx<'d> { 132impl<'d> RingBufferedUartRx<'d> {
134 /// Reconfigure the driver 133 /// Reconfigure the driver
135 pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { 134 pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> {
135 self.state.eager_reads.store(config.eager_reads, Ordering::Relaxed);
136 reconfigure(self.info, self.kernel_clock, config) 136 reconfigure(self.info, self.kernel_clock, config)
137 } 137 }
138 138
@@ -148,8 +148,8 @@ impl<'d> RingBufferedUartRx<'d> {
148 let r = self.info.regs; 148 let r = self.info.regs;
149 // clear all interrupts and DMA Rx Request 149 // clear all interrupts and DMA Rx Request
150 r.cr1().modify(|w| { 150 r.cr1().modify(|w| {
151 // disable RXNE interrupt 151 // use RXNE only when returning reads early
152 w.set_rxneie(false); 152 w.set_rxneie(self.state.eager_reads.load(Ordering::Relaxed));
153 // enable parity interrupt if not ParityNone 153 // enable parity interrupt if not ParityNone
154 w.set_peie(w.pce()); 154 w.set_peie(w.pce());
155 // enable idle line interrupt 155 // enable idle line interrupt
@@ -248,39 +248,55 @@ impl<'d> RingBufferedUartRx<'d> {
248 async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> { 248 async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
249 compiler_fence(Ordering::SeqCst); 249 compiler_fence(Ordering::SeqCst);
250 250
251 // Future which completes when idle line is detected 251 loop {
252 let s = self.state; 252 // Future which completes when idle line is detected
253 let uart = poll_fn(|cx| { 253 let s = self.state;
254 s.rx_waker.register(cx.waker()); 254 let mut uart_init = false;
255 255 let uart = poll_fn(|cx| {
256 compiler_fence(Ordering::SeqCst); 256 s.rx_waker.register(cx.waker());
257 257
258 if check_idle_and_errors(self.info.regs)? { 258 compiler_fence(Ordering::SeqCst);
259 // Idle line is detected 259
260 Poll::Ready(Ok(())) 260 // We may have been woken by IDLE or, if eager_reads is set, by RXNE.
261 } else { 261 // However, DMA will clear RXNE, so we can't check directly, and because
262 Poll::Pending 262 // the other future borrows `ring_buf`, we can't check `len()` here either.
263 } 263 // Instead, return from this future and we'll check the length afterwards.
264 }); 264 let eager = s.eager_reads.load(Ordering::Relaxed);
265
266 if check_idle_and_errors(self.info.regs)? || (eager && uart_init) {
267 // Idle line is detected, or eager reads is set and some data is available.
268 Poll::Ready(Ok(()))
269 } else {
270 uart_init = true;
271 Poll::Pending
272 }
273 });
265 274
266 let mut dma_init = false; 275 let mut dma_init = false;
267 // Future which completes when the DMA controller indicates it 276 // Future which completes when the DMA controller indicates it
268 // has written to the ring buffer's middle byte, or last byte 277 // has written to the ring buffer's middle byte, or last byte
269 let dma = poll_fn(|cx| { 278 let dma = poll_fn(|cx| {
270 self.ring_buf.set_waker(cx.waker()); 279 self.ring_buf.set_waker(cx.waker());
271 280
272 let status = match dma_init { 281 let status = match dma_init {
273 false => Poll::Pending, 282 false => Poll::Pending,
274 true => Poll::Ready(()), 283 true => Poll::Ready(()),
275 }; 284 };
276 285
277 dma_init = true; 286 dma_init = true;
278 status 287 status
279 }); 288 });
280 289
281 match select(uart, dma).await { 290 match select(uart, dma).await {
282 Either::Left((result, _)) => result, 291 Either::Left((result, _)) => {
283 Either::Right(((), _)) => Ok(()), 292 if self.ring_buf.len().unwrap_or(0) > 0 || result.is_err() {
293 return result;
294 } else {
295 continue;
296 }
297 }
298 Either::Right(((), _)) => return Ok(()),
299 }
284 } 300 }
285 } 301 }
286 302