diff options
| -rw-r--r-- | embassy-hal-internal/src/atomic_ring_buffer.rs | 43 | ||||
| -rw-r--r-- | embassy-stm32/CHANGELOG.md | 4 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/buffered.rs | 67 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/mod.rs | 71 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/ringbuffered.rs | 118 |
5 files changed, 239 insertions, 64 deletions
diff --git a/embassy-hal-internal/src/atomic_ring_buffer.rs b/embassy-hal-internal/src/atomic_ring_buffer.rs index 00b7a1249..8c3889b85 100644 --- a/embassy-hal-internal/src/atomic_ring_buffer.rs +++ b/embassy-hal-internal/src/atomic_ring_buffer.rs | |||
| @@ -133,6 +133,18 @@ impl RingBuffer { | |||
| 133 | self.len.load(Ordering::Relaxed) | 133 | self.len.load(Ordering::Relaxed) |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | /// Return number of items available to read. | ||
| 137 | pub fn available(&self) -> usize { | ||
| 138 | let end = self.end.load(Ordering::Relaxed); | ||
| 139 | let len = self.len.load(Ordering::Relaxed); | ||
| 140 | let start = self.start.load(Ordering::Relaxed); | ||
| 141 | if end >= start { | ||
| 142 | end - start | ||
| 143 | } else { | ||
| 144 | 2 * len - start + end | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 136 | /// Check if buffer is full. | 148 | /// Check if buffer is full. |
| 137 | pub fn is_full(&self) -> bool { | 149 | pub fn is_full(&self) -> bool { |
| 138 | let len = self.len.load(Ordering::Relaxed); | 150 | let len = self.len.load(Ordering::Relaxed); |
| @@ -142,6 +154,11 @@ impl RingBuffer { | |||
| 142 | self.wrap(start + len) == end | 154 | self.wrap(start + len) == end |
| 143 | } | 155 | } |
| 144 | 156 | ||
| 157 | /// Check if buffer is at least half full. | ||
| 158 | pub fn is_half_full(&self) -> bool { | ||
| 159 | self.available() >= self.len.load(Ordering::Relaxed) / 2 | ||
| 160 | } | ||
| 161 | |||
| 145 | /// Check if buffer is empty. | 162 | /// Check if buffer is empty. |
| 146 | pub fn is_empty(&self) -> bool { | 163 | pub fn is_empty(&self) -> bool { |
| 147 | let start = self.start.load(Ordering::Relaxed); | 164 | let start = self.start.load(Ordering::Relaxed); |
| @@ -394,6 +411,7 @@ mod tests { | |||
| 394 | rb.init(b.as_mut_ptr(), 4); | 411 | rb.init(b.as_mut_ptr(), 4); |
| 395 | 412 | ||
| 396 | assert_eq!(rb.is_empty(), true); | 413 | assert_eq!(rb.is_empty(), true); |
| 414 | assert_eq!(rb.is_half_full(), false); | ||
| 397 | assert_eq!(rb.is_full(), false); | 415 | assert_eq!(rb.is_full(), false); |
| 398 | 416 | ||
| 399 | rb.writer().push(|buf| { | 417 | rb.writer().push(|buf| { |
| @@ -406,6 +424,7 @@ mod tests { | |||
| 406 | }); | 424 | }); |
| 407 | 425 | ||
| 408 | assert_eq!(rb.is_empty(), false); | 426 | assert_eq!(rb.is_empty(), false); |
| 427 | assert_eq!(rb.is_half_full(), true); | ||
| 409 | assert_eq!(rb.is_full(), true); | 428 | assert_eq!(rb.is_full(), true); |
| 410 | 429 | ||
| 411 | rb.writer().push(|buf| { | 430 | rb.writer().push(|buf| { |
| @@ -415,6 +434,7 @@ mod tests { | |||
| 415 | }); | 434 | }); |
| 416 | 435 | ||
| 417 | assert_eq!(rb.is_empty(), false); | 436 | assert_eq!(rb.is_empty(), false); |
| 437 | assert_eq!(rb.is_half_full(), true); | ||
| 418 | assert_eq!(rb.is_full(), true); | 438 | assert_eq!(rb.is_full(), true); |
| 419 | 439 | ||
| 420 | rb.reader().pop(|buf| { | 440 | rb.reader().pop(|buf| { |
| @@ -424,6 +444,7 @@ mod tests { | |||
| 424 | }); | 444 | }); |
| 425 | 445 | ||
| 426 | assert_eq!(rb.is_empty(), false); | 446 | assert_eq!(rb.is_empty(), false); |
| 447 | assert_eq!(rb.is_half_full(), true); | ||
| 427 | assert_eq!(rb.is_full(), false); | 448 | assert_eq!(rb.is_full(), false); |
| 428 | 449 | ||
| 429 | rb.reader().pop(|buf| { | 450 | rb.reader().pop(|buf| { |
| @@ -432,6 +453,7 @@ mod tests { | |||
| 432 | }); | 453 | }); |
| 433 | 454 | ||
| 434 | assert_eq!(rb.is_empty(), false); | 455 | assert_eq!(rb.is_empty(), false); |
| 456 | assert_eq!(rb.is_half_full(), true); | ||
| 435 | assert_eq!(rb.is_full(), false); | 457 | assert_eq!(rb.is_full(), false); |
| 436 | 458 | ||
| 437 | rb.reader().pop(|buf| { | 459 | rb.reader().pop(|buf| { |
| @@ -447,6 +469,7 @@ mod tests { | |||
| 447 | }); | 469 | }); |
| 448 | 470 | ||
| 449 | assert_eq!(rb.is_empty(), true); | 471 | assert_eq!(rb.is_empty(), true); |
| 472 | assert_eq!(rb.is_half_full(), false); | ||
| 450 | assert_eq!(rb.is_full(), false); | 473 | assert_eq!(rb.is_full(), false); |
| 451 | 474 | ||
| 452 | rb.reader().pop(|buf| { | 475 | rb.reader().pop(|buf| { |
| @@ -460,14 +483,28 @@ mod tests { | |||
| 460 | 1 | 483 | 1 |
| 461 | }); | 484 | }); |
| 462 | 485 | ||
| 486 | assert_eq!(rb.is_empty(), false); | ||
| 487 | assert_eq!(rb.is_half_full(), false); | ||
| 488 | assert_eq!(rb.is_full(), false); | ||
| 489 | |||
| 463 | rb.writer().push(|buf| { | 490 | rb.writer().push(|buf| { |
| 464 | assert_eq!(3, buf.len()); | 491 | assert_eq!(3, buf.len()); |
| 465 | buf[0] = 11; | 492 | buf[0] = 11; |
| 466 | buf[1] = 12; | 493 | 1 |
| 467 | 2 | 494 | }); |
| 495 | |||
| 496 | assert_eq!(rb.is_empty(), false); | ||
| 497 | assert_eq!(rb.is_half_full(), true); | ||
| 498 | assert_eq!(rb.is_full(), false); | ||
| 499 | |||
| 500 | rb.writer().push(|buf| { | ||
| 501 | assert_eq!(2, buf.len()); | ||
| 502 | buf[0] = 12; | ||
| 503 | 1 | ||
| 468 | }); | 504 | }); |
| 469 | 505 | ||
| 470 | assert_eq!(rb.is_empty(), false); | 506 | assert_eq!(rb.is_empty(), false); |
| 507 | assert_eq!(rb.is_half_full(), true); | ||
| 471 | assert_eq!(rb.is_full(), false); | 508 | assert_eq!(rb.is_full(), false); |
| 472 | 509 | ||
| 473 | rb.writer().push(|buf| { | 510 | rb.writer().push(|buf| { |
| @@ -477,6 +514,7 @@ mod tests { | |||
| 477 | }); | 514 | }); |
| 478 | 515 | ||
| 479 | assert_eq!(rb.is_empty(), false); | 516 | assert_eq!(rb.is_empty(), false); |
| 517 | assert_eq!(rb.is_half_full(), true); | ||
| 480 | assert_eq!(rb.is_full(), true); | 518 | assert_eq!(rb.is_full(), true); |
| 481 | } | 519 | } |
| 482 | } | 520 | } |
| @@ -490,6 +528,7 @@ mod tests { | |||
| 490 | rb.init(b.as_mut_ptr(), b.len()); | 528 | rb.init(b.as_mut_ptr(), b.len()); |
| 491 | 529 | ||
| 492 | assert_eq!(rb.is_empty(), true); | 530 | assert_eq!(rb.is_empty(), true); |
| 531 | assert_eq!(rb.is_half_full(), true); | ||
| 493 | assert_eq!(rb.is_full(), true); | 532 | assert_eq!(rb.is_full(), true); |
| 494 | 533 | ||
| 495 | rb.writer().push(|buf| { | 534 | rb.writer().push(|buf| { |
diff --git a/embassy-stm32/CHANGELOG.md b/embassy-stm32/CHANGELOG.md index 1443472f5..a6ee5c4b8 100644 --- a/embassy-stm32/CHANGELOG.md +++ b/embassy-stm32/CHANGELOG.md | |||
| @@ -27,6 +27,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 | |||
| 27 | - fix: Cut down the capabilities of the STM32L412 and L422 RTC as those are missing binary timer mode and underflow interrupt. | 27 | - fix: Cut down the capabilities of the STM32L412 and L422 RTC as those are missing binary timer mode and underflow interrupt. |
| 28 | - fix: Allow configuration of the internal pull up/down resistors on the pins for the Qei peripheral, as well as the Qei decoder mode. | 28 | - fix: Allow configuration of the internal pull up/down resistors on the pins for the Qei peripheral, as well as the Qei decoder mode. |
| 29 | - feat: stm32/rcc/mco: Added support for IO driver strength when using Master Clock Out IO. This changes signature on Mco::new taking a McoConfig struct ([#4679](https://github.com/embassy-rs/embassy/pull/4679)) | 29 | - feat: stm32/rcc/mco: Added support for IO driver strength when using Master Clock Out IO. This changes signature on Mco::new taking a McoConfig struct ([#4679](https://github.com/embassy-rs/embassy/pull/4679)) |
| 30 | - feat: derive Clone, Copy and defmt::Format for all SPI-related configs | ||
| 31 | - feat: stm32/usart: add `eager_reads` option to control if buffered readers return as soon as possible or after more data is available ([#4668](https://github.com/embassy-rs/embassy/pull/4668)) | ||
| 32 | - feat: stm32/usart: add `de_assertion_time` and `de_deassertion_time` config options | ||
| 33 | - change: stm32/uart: BufferedUartRx now returns all available bytes from the internal buffer | ||
| 30 | 34 | ||
| 31 | ## 0.4.0 - 2025-08-26 | 35 | ## 0.4.0 - 2025-08-26 |
| 32 | 36 | ||
diff --git a/embassy-stm32/src/usart/buffered.rs b/embassy-stm32/src/usart/buffered.rs index c734eed49..10dc02334 100644 --- a/embassy-stm32/src/usart/buffered.rs +++ b/embassy-stm32/src/usart/buffered.rs | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | use core::future::poll_fn; | 1 | use core::future::poll_fn; |
| 2 | use core::marker::PhantomData; | 2 | use core::marker::PhantomData; |
| 3 | use core::slice; | 3 | use core::slice; |
| 4 | use core::sync::atomic::{AtomicBool, AtomicU8, Ordering}; | 4 | use core::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering}; |
| 5 | use core::task::Poll; | 5 | use core::task::Poll; |
| 6 | 6 | ||
| 7 | use embassy_embedded_hal::SetConfig; | 7 | use embassy_embedded_hal::SetConfig; |
| @@ -68,8 +68,15 @@ unsafe fn on_interrupt(r: Regs, state: &'static State) { | |||
| 68 | // FIXME: Should we disable any further RX interrupts when the buffer becomes full. | 68 | // FIXME: Should we disable any further RX interrupts when the buffer becomes full. |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | if !state.rx_buf.is_empty() { | 71 | let eager = state.eager_reads.load(Ordering::Relaxed); |
| 72 | state.rx_waker.wake(); | 72 | if eager > 0 { |
| 73 | if state.rx_buf.available() >= eager { | ||
| 74 | state.rx_waker.wake(); | ||
| 75 | } | ||
| 76 | } else { | ||
| 77 | if state.rx_buf.is_half_full() { | ||
| 78 | state.rx_waker.wake(); | ||
| 79 | } | ||
| 73 | } | 80 | } |
| 74 | } | 81 | } |
| 75 | 82 | ||
| @@ -132,6 +139,7 @@ pub(super) struct State { | |||
| 132 | tx_done: AtomicBool, | 139 | tx_done: AtomicBool, |
| 133 | tx_rx_refcount: AtomicU8, | 140 | tx_rx_refcount: AtomicU8, |
| 134 | half_duplex_readback: AtomicBool, | 141 | half_duplex_readback: AtomicBool, |
| 142 | eager_reads: AtomicUsize, | ||
| 135 | } | 143 | } |
| 136 | 144 | ||
| 137 | impl State { | 145 | impl State { |
| @@ -144,6 +152,7 @@ impl State { | |||
| 144 | tx_done: AtomicBool::new(true), | 152 | tx_done: AtomicBool::new(true), |
| 145 | tx_rx_refcount: AtomicU8::new(0), | 153 | tx_rx_refcount: AtomicU8::new(0), |
| 146 | half_duplex_readback: AtomicBool::new(false), | 154 | half_duplex_readback: AtomicBool::new(false), |
| 155 | eager_reads: AtomicUsize::new(0), | ||
| 147 | } | 156 | } |
| 148 | } | 157 | } |
| 149 | } | 158 | } |
| @@ -419,6 +428,9 @@ impl<'d> BufferedUart<'d> { | |||
| 419 | let state = T::buffered_state(); | 428 | let state = T::buffered_state(); |
| 420 | let kernel_clock = T::frequency(); | 429 | let kernel_clock = T::frequency(); |
| 421 | 430 | ||
| 431 | state | ||
| 432 | .eager_reads | ||
| 433 | .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed); | ||
| 422 | state.half_duplex_readback.store( | 434 | state.half_duplex_readback.store( |
| 423 | config.duplex == Duplex::Half(HalfDuplexReadback::Readback), | 435 | config.duplex == Duplex::Half(HalfDuplexReadback::Readback), |
| 424 | Ordering::Relaxed, | 436 | Ordering::Relaxed, |
| @@ -456,6 +468,9 @@ impl<'d> BufferedUart<'d> { | |||
| 456 | let info = self.rx.info; | 468 | let info = self.rx.info; |
| 457 | let state = self.rx.state; | 469 | let state = self.rx.state; |
| 458 | state.tx_rx_refcount.store(2, Ordering::Relaxed); | 470 | state.tx_rx_refcount.store(2, Ordering::Relaxed); |
| 471 | state | ||
| 472 | .eager_reads | ||
| 473 | .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed); | ||
| 459 | 474 | ||
| 460 | info.rcc.enable_and_reset(); | 475 | info.rcc.enable_and_reset(); |
| 461 | 476 | ||
| @@ -527,6 +542,11 @@ impl<'d> BufferedUart<'d> { | |||
| 527 | pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { | 542 | pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { |
| 528 | reconfigure(self.rx.info, self.rx.kernel_clock, config)?; | 543 | reconfigure(self.rx.info, self.rx.kernel_clock, config)?; |
| 529 | 544 | ||
| 545 | self.rx | ||
| 546 | .state | ||
| 547 | .eager_reads | ||
| 548 | .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed); | ||
| 549 | |||
| 530 | self.rx.info.regs.cr1().modify(|w| { | 550 | self.rx.info.regs.cr1().modify(|w| { |
| 531 | w.set_rxneie(true); | 551 | w.set_rxneie(true); |
| 532 | w.set_idleie(true); | 552 | w.set_idleie(true); |
| @@ -553,24 +573,30 @@ impl<'d> BufferedUartRx<'d> { | |||
| 553 | poll_fn(move |cx| { | 573 | poll_fn(move |cx| { |
| 554 | let state = self.state; | 574 | let state = self.state; |
| 555 | let mut rx_reader = unsafe { state.rx_buf.reader() }; | 575 | let mut rx_reader = unsafe { state.rx_buf.reader() }; |
| 556 | let data = rx_reader.pop_slice(); | 576 | let mut buf_len = 0; |
| 577 | let mut data = rx_reader.pop_slice(); | ||
| 557 | 578 | ||
| 558 | if !data.is_empty() { | 579 | while !data.is_empty() && buf_len < buf.len() { |
| 559 | let len = data.len().min(buf.len()); | 580 | let data_len = data.len().min(buf.len() - buf_len); |
| 560 | buf[..len].copy_from_slice(&data[..len]); | 581 | buf[buf_len..buf_len + data_len].copy_from_slice(&data[..data_len]); |
| 582 | buf_len += data_len; | ||
| 561 | 583 | ||
| 562 | let do_pend = state.rx_buf.is_full(); | 584 | let do_pend = state.rx_buf.is_full(); |
| 563 | rx_reader.pop_done(len); | 585 | rx_reader.pop_done(data_len); |
| 564 | 586 | ||
| 565 | if do_pend { | 587 | if do_pend { |
| 566 | self.info.interrupt.pend(); | 588 | self.info.interrupt.pend(); |
| 567 | } | 589 | } |
| 568 | 590 | ||
| 569 | return Poll::Ready(Ok(len)); | 591 | data = rx_reader.pop_slice(); |
| 570 | } | 592 | } |
| 571 | 593 | ||
| 572 | state.rx_waker.register(cx.waker()); | 594 | if buf_len != 0 { |
| 573 | Poll::Pending | 595 | Poll::Ready(Ok(buf_len)) |
| 596 | } else { | ||
| 597 | state.rx_waker.register(cx.waker()); | ||
| 598 | Poll::Pending | ||
| 599 | } | ||
| 574 | }) | 600 | }) |
| 575 | .await | 601 | .await |
| 576 | } | 602 | } |
| @@ -579,21 +605,24 @@ impl<'d> BufferedUartRx<'d> { | |||
| 579 | loop { | 605 | loop { |
| 580 | let state = self.state; | 606 | let state = self.state; |
| 581 | let mut rx_reader = unsafe { state.rx_buf.reader() }; | 607 | let mut rx_reader = unsafe { state.rx_buf.reader() }; |
| 582 | let data = rx_reader.pop_slice(); | 608 | let mut buf_len = 0; |
| 609 | let mut data = rx_reader.pop_slice(); | ||
| 583 | 610 | ||
| 584 | if !data.is_empty() { | 611 | while !data.is_empty() && buf_len < buf.len() { |
| 585 | let len = data.len().min(buf.len()); | 612 | let data_len = data.len().min(buf.len() - buf_len); |
| 586 | buf[..len].copy_from_slice(&data[..len]); | 613 | buf[buf_len..buf_len + data_len].copy_from_slice(&data[..data_len]); |
| 614 | buf_len += data_len; | ||
| 587 | 615 | ||
| 588 | let do_pend = state.rx_buf.is_full(); | 616 | let do_pend = state.rx_buf.is_full(); |
| 589 | rx_reader.pop_done(len); | 617 | rx_reader.pop_done(data_len); |
| 590 | 618 | ||
| 591 | if do_pend { | 619 | if do_pend { |
| 592 | self.info.interrupt.pend(); | 620 | self.info.interrupt.pend(); |
| 593 | } | 621 | } |
| 594 | 622 | ||
| 595 | return Ok(len); | 623 | data = rx_reader.pop_slice(); |
| 596 | } | 624 | } |
| 625 | return Ok(buf_len); | ||
| 597 | } | 626 | } |
| 598 | } | 627 | } |
| 599 | 628 | ||
| @@ -633,6 +662,10 @@ impl<'d> BufferedUartRx<'d> { | |||
| 633 | pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { | 662 | pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { |
| 634 | reconfigure(self.info, self.kernel_clock, config)?; | 663 | reconfigure(self.info, self.kernel_clock, config)?; |
| 635 | 664 | ||
| 665 | self.state | ||
| 666 | .eager_reads | ||
| 667 | .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed); | ||
| 668 | |||
| 636 | self.info.regs.cr1().modify(|w| { | 669 | self.info.regs.cr1().modify(|w| { |
| 637 | w.set_rxneie(true); | 670 | w.set_rxneie(true); |
| 638 | w.set_idleie(true); | 671 | w.set_idleie(true); |
diff --git a/embassy-stm32/src/usart/mod.rs b/embassy-stm32/src/usart/mod.rs index ff211e0c9..0d2d86aca 100644 --- a/embassy-stm32/src/usart/mod.rs +++ b/embassy-stm32/src/usart/mod.rs | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | use core::future::poll_fn; | 5 | use core::future::poll_fn; |
| 6 | use core::marker::PhantomData; | 6 | use core::marker::PhantomData; |
| 7 | use core::sync::atomic::{compiler_fence, AtomicU8, Ordering}; | 7 | use core::sync::atomic::{compiler_fence, AtomicU8, AtomicUsize, Ordering}; |
| 8 | use core::task::Poll; | 8 | use core::task::Poll; |
| 9 | 9 | ||
| 10 | use embassy_embedded_hal::SetConfig; | 10 | use embassy_embedded_hal::SetConfig; |
| @@ -185,6 +185,12 @@ pub enum ConfigError { | |||
| 185 | RxOrTxNotEnabled, | 185 | RxOrTxNotEnabled, |
| 186 | /// Data bits and parity combination not supported | 186 | /// Data bits and parity combination not supported |
| 187 | DataParityNotSupported, | 187 | DataParityNotSupported, |
| 188 | /// DE assertion time too high | ||
| 189 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 190 | DeAssertionTimeTooHigh, | ||
| 191 | /// DE deassertion time too high | ||
| 192 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 193 | DeDeassertionTimeTooHigh, | ||
| 188 | } | 194 | } |
| 189 | 195 | ||
| 190 | #[non_exhaustive] | 196 | #[non_exhaustive] |
| @@ -206,6 +212,21 @@ pub struct Config { | |||
| 206 | /// If false: the error is ignored and cleared | 212 | /// If false: the error is ignored and cleared |
| 207 | pub detect_previous_overrun: bool, | 213 | pub detect_previous_overrun: bool, |
| 208 | 214 | ||
| 215 | /// If `None` (the default) then read-like calls on `BufferedUartRx` and `RingBufferedUartRx` | ||
| 216 | /// typically only wake/return after line idle or after the buffer is at least half full | ||
| 217 | /// (for `BufferedUartRx`) or the DMA buffer is written at the half or full positions | ||
| 218 | /// (for `RingBufferedUartRx`), though it may also wake/return earlier in some circumstances. | ||
| 219 | /// | ||
| 220 | /// If `Some(n)` then such reads are also woken/return as soon as at least `n` words are | ||
| 221 | /// available in the buffer, in addition to waking/returning when the conditions described | ||
| 222 | /// above are met. `Some(0)` is treated as `None`. Setting this for `RingBufferedUartRx` | ||
| 223 | /// will trigger an interrupt for every received word to check the buffer level, which may | ||
| 224 | /// impact performance at high data rates. | ||
| 225 | /// | ||
| 226 | /// Has no effect on plain `Uart` or `UartRx` reads, which are specified to either | ||
| 227 | /// return a single word, a full buffer, or after line idle. | ||
| 228 | pub eager_reads: Option<usize>, | ||
| 229 | |||
| 209 | /// Set this to true if the line is considered noise free. | 230 | /// Set this to true if the line is considered noise free. |
| 210 | /// This will increase the receiver’s tolerance to clock deviations, | 231 | /// This will increase the receiver’s tolerance to clock deviations, |
| 211 | /// but will effectively disable noise detection. | 232 | /// but will effectively disable noise detection. |
| @@ -239,6 +260,14 @@ pub struct Config { | |||
| 239 | /// Set the pin configuration for the DE pin. | 260 | /// Set the pin configuration for the DE pin. |
| 240 | pub de_config: OutputConfig, | 261 | pub de_config: OutputConfig, |
| 241 | 262 | ||
| 263 | /// Set DE assertion time before the first start bit, 0-31 16ths of a bit period. | ||
| 264 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 265 | pub de_assertion_time: u8, | ||
| 266 | |||
| 267 | /// Set DE deassertion time after the last stop bit, 0-31 16ths of a bit period. | ||
| 268 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 269 | pub de_deassertion_time: u8, | ||
| 270 | |||
| 242 | // private: set by new_half_duplex, not by the user. | 271 | // private: set by new_half_duplex, not by the user. |
| 243 | duplex: Duplex, | 272 | duplex: Duplex, |
| 244 | } | 273 | } |
| @@ -270,6 +299,7 @@ impl Default for Config { | |||
| 270 | parity: Parity::ParityNone, | 299 | parity: Parity::ParityNone, |
| 271 | // historical behavior | 300 | // historical behavior |
| 272 | detect_previous_overrun: false, | 301 | detect_previous_overrun: false, |
| 302 | eager_reads: None, | ||
| 273 | #[cfg(not(usart_v1))] | 303 | #[cfg(not(usart_v1))] |
| 274 | assume_noise_free: false, | 304 | assume_noise_free: false, |
| 275 | #[cfg(any(usart_v3, usart_v4))] | 305 | #[cfg(any(usart_v3, usart_v4))] |
| @@ -283,6 +313,10 @@ impl Default for Config { | |||
| 283 | tx_config: OutputConfig::PushPull, | 313 | tx_config: OutputConfig::PushPull, |
| 284 | rts_config: OutputConfig::PushPull, | 314 | rts_config: OutputConfig::PushPull, |
| 285 | de_config: OutputConfig::PushPull, | 315 | de_config: OutputConfig::PushPull, |
| 316 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 317 | de_assertion_time: 0, | ||
| 318 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 319 | de_deassertion_time: 0, | ||
| 286 | duplex: Duplex::Full, | 320 | duplex: Duplex::Full, |
| 287 | } | 321 | } |
| 288 | } | 322 | } |
| @@ -966,6 +1000,9 @@ impl<'d, M: Mode> UartRx<'d, M> { | |||
| 966 | let info = self.info; | 1000 | let info = self.info; |
| 967 | let state = self.state; | 1001 | let state = self.state; |
| 968 | state.tx_rx_refcount.store(1, Ordering::Relaxed); | 1002 | state.tx_rx_refcount.store(1, Ordering::Relaxed); |
| 1003 | state | ||
| 1004 | .eager_reads | ||
| 1005 | .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed); | ||
| 969 | 1006 | ||
| 970 | info.rcc.enable_and_reset(); | 1007 | info.rcc.enable_and_reset(); |
| 971 | 1008 | ||
| @@ -982,6 +1019,9 @@ impl<'d, M: Mode> UartRx<'d, M> { | |||
| 982 | 1019 | ||
| 983 | /// Reconfigure the driver | 1020 | /// Reconfigure the driver |
| 984 | pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { | 1021 | pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { |
| 1022 | self.state | ||
| 1023 | .eager_reads | ||
| 1024 | .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed); | ||
| 985 | reconfigure(self.info, self.kernel_clock, config) | 1025 | reconfigure(self.info, self.kernel_clock, config) |
| 986 | } | 1026 | } |
| 987 | 1027 | ||
| @@ -1462,6 +1502,9 @@ impl<'d, M: Mode> Uart<'d, M> { | |||
| 1462 | let info = self.rx.info; | 1502 | let info = self.rx.info; |
| 1463 | let state = self.rx.state; | 1503 | let state = self.rx.state; |
| 1464 | state.tx_rx_refcount.store(2, Ordering::Relaxed); | 1504 | state.tx_rx_refcount.store(2, Ordering::Relaxed); |
| 1505 | state | ||
| 1506 | .eager_reads | ||
| 1507 | .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed); | ||
| 1465 | 1508 | ||
| 1466 | info.rcc.enable_and_reset(); | 1509 | info.rcc.enable_and_reset(); |
| 1467 | 1510 | ||
| @@ -1690,6 +1733,16 @@ fn configure( | |||
| 1690 | return Err(ConfigError::RxOrTxNotEnabled); | 1733 | return Err(ConfigError::RxOrTxNotEnabled); |
| 1691 | } | 1734 | } |
| 1692 | 1735 | ||
| 1736 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 1737 | let dem = r.cr3().read().dem(); | ||
| 1738 | |||
| 1739 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 1740 | if config.de_assertion_time > 31 { | ||
| 1741 | return Err(ConfigError::DeAssertionTimeTooHigh); | ||
| 1742 | } else if config.de_deassertion_time > 31 { | ||
| 1743 | return Err(ConfigError::DeDeassertionTimeTooHigh); | ||
| 1744 | } | ||
| 1745 | |||
| 1693 | // UART must be disabled during configuration. | 1746 | // UART must be disabled during configuration. |
| 1694 | r.cr1().modify(|w| { | 1747 | r.cr1().modify(|w| { |
| 1695 | w.set_ue(false); | 1748 | w.set_ue(false); |
| @@ -1738,6 +1791,20 @@ fn configure( | |||
| 1738 | w.set_re(enable_rx); | 1791 | w.set_re(enable_rx); |
| 1739 | } | 1792 | } |
| 1740 | 1793 | ||
| 1794 | #[cfg(not(any(usart_v1, usart_v2)))] | ||
| 1795 | if dem { | ||
| 1796 | w.set_deat(if over8 { | ||
| 1797 | config.de_assertion_time / 2 | ||
| 1798 | } else { | ||
| 1799 | config.de_assertion_time | ||
| 1800 | }); | ||
| 1801 | w.set_dedt(if over8 { | ||
| 1802 | config.de_deassertion_time / 2 | ||
| 1803 | } else { | ||
| 1804 | config.de_deassertion_time | ||
| 1805 | }); | ||
| 1806 | } | ||
| 1807 | |||
| 1741 | // configure word size and parity, since the parity bit is inserted into the MSB position, | 1808 | // configure word size and parity, since the parity bit is inserted into the MSB position, |
| 1742 | // it increases the effective word size | 1809 | // it increases the effective word size |
| 1743 | match (config.parity, config.data_bits) { | 1810 | match (config.parity, config.data_bits) { |
| @@ -2022,6 +2089,7 @@ struct State { | |||
| 2022 | rx_waker: AtomicWaker, | 2089 | rx_waker: AtomicWaker, |
| 2023 | tx_waker: AtomicWaker, | 2090 | tx_waker: AtomicWaker, |
| 2024 | tx_rx_refcount: AtomicU8, | 2091 | tx_rx_refcount: AtomicU8, |
| 2092 | eager_reads: AtomicUsize, | ||
| 2025 | } | 2093 | } |
| 2026 | 2094 | ||
| 2027 | impl State { | 2095 | impl State { |
| @@ -2030,6 +2098,7 @@ impl State { | |||
| 2030 | rx_waker: AtomicWaker::new(), | 2098 | rx_waker: AtomicWaker::new(), |
| 2031 | tx_waker: AtomicWaker::new(), | 2099 | tx_waker: AtomicWaker::new(), |
| 2032 | tx_rx_refcount: AtomicU8::new(0), | 2100 | tx_rx_refcount: AtomicU8::new(0), |
| 2101 | eager_reads: AtomicUsize::new(0), | ||
| 2033 | } | 2102 | } |
| 2034 | } | 2103 | } |
| 2035 | } | 2104 | } |
diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index 5f4e87834..27071fb31 100644 --- a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs | |||
| @@ -26,9 +26,9 @@ use crate::Peri; | |||
| 26 | /// contain enough bytes to fill the buffer passed by the caller of | 26 | /// contain enough bytes to fill the buffer passed by the caller of |
| 27 | /// the function, or is empty. | 27 | /// the function, or is empty. |
| 28 | /// | 28 | /// |
| 29 | /// Waiting for bytes operates in one of two modes, depending on | 29 | /// Waiting for bytes operates in one of three modes, depending on |
| 30 | /// the behavior of the sender and the size of the buffer passed | 30 | /// the behavior of the sender, the size of the buffer passed |
| 31 | /// to the function: | 31 | /// to the function, and the configuration: |
| 32 | /// | 32 | /// |
| 33 | /// - If the sender sends intermittently, the 'idle line' | 33 | /// - If the sender sends intermittently, the 'idle line' |
| 34 | /// condition will be detected when the sender stops, and any | 34 | /// condition will be detected when the sender stops, and any |
| @@ -47,7 +47,11 @@ use crate::Peri; | |||
| 47 | /// interrupt when those specific buffer addresses have been | 47 | /// interrupt when those specific buffer addresses have been |
| 48 | /// written. | 48 | /// written. |
| 49 | /// | 49 | /// |
| 50 | /// In both cases this will result in variable latency due to the | 50 | /// - If `eager_reads` is enabled in `config`, the UART interrupt |
| 51 | /// is enabled on all data reception and the call will only wait | ||
| 52 | /// for at least the configured number of words before returning. | ||
| 53 | /// | ||
| 54 | /// In the first two cases this will result in variable latency due to the | ||
| 51 | /// buffering effect. For example, if the baudrate is 2400 bps, and | 55 | /// buffering effect. For example, if the baudrate is 2400 bps, and |
| 52 | /// the configuration is 8 data bits, no parity bit, and one stop bit, | 56 | /// the configuration is 8 data bits, no parity bit, and one stop bit, |
| 53 | /// then a byte will be received every ~4.16ms. If the ring buffer is | 57 | /// then a byte will be received every ~4.16ms. If the ring buffer is |
| @@ -68,15 +72,10 @@ use crate::Peri; | |||
| 68 | /// sending, but would be falsely triggered in the worst-case | 72 | /// sending, but would be falsely triggered in the worst-case |
| 69 | /// buffer delay scenario. | 73 | /// buffer delay scenario. |
| 70 | /// | 74 | /// |
| 71 | /// Note: This latency is caused by the limited capabilities of the | 75 | /// Note: Enabling `eager_reads` with `RingBufferedUartRx` will enable |
| 72 | /// STM32 DMA controller; since it cannot generate an interrupt when | 76 | /// an UART RXNE interrupt, which will cause an interrupt to occur on |
| 73 | /// it stores a byte into an empty ring buffer, or in any other | 77 | /// every received data byte. The data is still copied using DMA, but |
| 74 | /// configurable conditions, it is not possible to take notice of the | 78 | /// there is nevertheless additional processing overhead for each byte. |
| 75 | /// contents of the ring buffer more quickly without introducing | ||
| 76 | /// polling. As a result the latency can be reduced by calling the | ||
| 77 | /// read functions repeatedly with smaller buffers to receive the | ||
| 78 | /// available bytes, as each call to a read function will explicitly | ||
| 79 | /// check the ring buffer for available bytes. | ||
| 80 | pub struct RingBufferedUartRx<'d> { | 79 | pub struct RingBufferedUartRx<'d> { |
| 81 | info: &'static Info, | 80 | info: &'static Info, |
| 82 | state: &'static State, | 81 | state: &'static State, |
| @@ -133,6 +132,9 @@ impl<'d> UartRx<'d, Async> { | |||
| 133 | impl<'d> RingBufferedUartRx<'d> { | 132 | impl<'d> RingBufferedUartRx<'d> { |
| 134 | /// Reconfigure the driver | 133 | /// Reconfigure the driver |
| 135 | pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { | 134 | pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> { |
| 135 | self.state | ||
| 136 | .eager_reads | ||
| 137 | .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed); | ||
| 136 | reconfigure(self.info, self.kernel_clock, config) | 138 | reconfigure(self.info, self.kernel_clock, config) |
| 137 | } | 139 | } |
| 138 | 140 | ||
| @@ -148,8 +150,8 @@ impl<'d> RingBufferedUartRx<'d> { | |||
| 148 | let r = self.info.regs; | 150 | let r = self.info.regs; |
| 149 | // clear all interrupts and DMA Rx Request | 151 | // clear all interrupts and DMA Rx Request |
| 150 | r.cr1().modify(|w| { | 152 | r.cr1().modify(|w| { |
| 151 | // disable RXNE interrupt | 153 | // use RXNE only when returning reads early |
| 152 | w.set_rxneie(false); | 154 | w.set_rxneie(self.state.eager_reads.load(Ordering::Relaxed) > 0); |
| 153 | // enable parity interrupt if not ParityNone | 155 | // enable parity interrupt if not ParityNone |
| 154 | w.set_peie(w.pce()); | 156 | w.set_peie(w.pce()); |
| 155 | // enable idle line interrupt | 157 | // enable idle line interrupt |
| @@ -248,39 +250,67 @@ impl<'d> RingBufferedUartRx<'d> { | |||
| 248 | async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> { | 250 | async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> { |
| 249 | compiler_fence(Ordering::SeqCst); | 251 | compiler_fence(Ordering::SeqCst); |
| 250 | 252 | ||
| 251 | // Future which completes when idle line is detected | 253 | loop { |
| 252 | let s = self.state; | 254 | // Future which completes when idle line is detected |
| 253 | let uart = poll_fn(|cx| { | 255 | let s = self.state; |
| 254 | s.rx_waker.register(cx.waker()); | 256 | let mut uart_init = false; |
| 255 | 257 | let uart = poll_fn(|cx| { | |
| 256 | compiler_fence(Ordering::SeqCst); | 258 | s.rx_waker.register(cx.waker()); |
| 257 | 259 | ||
| 258 | if check_idle_and_errors(self.info.regs)? { | 260 | compiler_fence(Ordering::SeqCst); |
| 259 | // Idle line is detected | 261 | |
| 260 | Poll::Ready(Ok(())) | 262 | // We may have been woken by IDLE or, if eager_reads is set, by RXNE. |
| 261 | } else { | 263 | // However, DMA will clear RXNE, so we can't check directly, and because |
| 262 | Poll::Pending | 264 | // the other future borrows `ring_buf`, we can't check `len()` here either. |
| 263 | } | 265 | // Instead, return from this future and we'll check the length afterwards. |
| 264 | }); | 266 | let eager = s.eager_reads.load(Ordering::Relaxed) > 0; |
| 267 | |||
| 268 | let idle = check_idle_and_errors(self.info.regs)?; | ||
| 269 | if idle || (eager && uart_init) { | ||
| 270 | // Idle line is detected, or eager reads is set and some data is available. | ||
| 271 | Poll::Ready(Ok(idle)) | ||
| 272 | } else { | ||
| 273 | uart_init = true; | ||
| 274 | Poll::Pending | ||
| 275 | } | ||
| 276 | }); | ||
| 265 | 277 | ||
| 266 | let mut dma_init = false; | 278 | let mut dma_init = false; |
| 267 | // Future which completes when the DMA controller indicates it | 279 | // Future which completes when the DMA controller indicates it |
| 268 | // has written to the ring buffer's middle byte, or last byte | 280 | // has written to the ring buffer's middle byte, or last byte |
| 269 | let dma = poll_fn(|cx| { | 281 | let dma = poll_fn(|cx| { |
| 270 | self.ring_buf.set_waker(cx.waker()); | 282 | self.ring_buf.set_waker(cx.waker()); |
| 271 | 283 | ||
| 272 | let status = match dma_init { | 284 | let status = match dma_init { |
| 273 | false => Poll::Pending, | 285 | false => Poll::Pending, |
| 274 | true => Poll::Ready(()), | 286 | true => Poll::Ready(()), |
| 275 | }; | 287 | }; |
| 276 | 288 | ||
| 277 | dma_init = true; | 289 | dma_init = true; |
| 278 | status | 290 | status |
| 279 | }); | 291 | }); |
| 280 | 292 | ||
| 281 | match select(uart, dma).await { | 293 | match select(uart, dma).await { |
| 282 | Either::Left((result, _)) => result, | 294 | // UART woke with line idle |
| 283 | Either::Right(((), _)) => Ok(()), | 295 | Either::Left((Ok(true), _)) => { |
| 296 | return Ok(()); | ||
| 297 | } | ||
| 298 | // UART woke without idle or error: word received | ||
| 299 | Either::Left((Ok(false), _)) => { | ||
| 300 | let eager = self.state.eager_reads.load(Ordering::Relaxed); | ||
| 301 | if eager > 0 && self.ring_buf.len().unwrap_or(0) >= eager { | ||
| 302 | return Ok(()); | ||
| 303 | } else { | ||
| 304 | continue; | ||
| 305 | } | ||
| 306 | } | ||
| 307 | // UART woke with error | ||
| 308 | Either::Left((Err(e), _)) => { | ||
| 309 | return Err(e); | ||
| 310 | } | ||
| 311 | // DMA woke | ||
| 312 | Either::Right(((), _)) => return Ok(()), | ||
| 313 | } | ||
| 284 | } | 314 | } |
| 285 | } | 315 | } |
| 286 | 316 | ||
