diff options
| author | Rasmus Melchior Jacobsen <[email protected]> | 2023-04-27 10:48:38 +0200 |
|---|---|---|
| committer | Dario Nieuwenhuis <[email protected]> | 2023-05-01 22:42:36 +0200 |
| commit | fc268df6f56661d5f43450c7a03850044ae8e136 (patch) | |
| tree | b48b4543442552b46aceba5bd7f930997d67cd57 | |
| parent | 4ea6662e55f32ff90b564c1c611f5ac4e0d8ab1c (diff) | |
Support overflow detection for more than one ring-period
| -rw-r--r-- | embassy-stm32/src/dma/dma.rs | 111 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer.rs | 127 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/rx_ringbuffered.rs | 121 | ||||
| -rw-r--r-- | tests/stm32/src/bin/usart_rx_ringbuffered.rs | 19 | ||||
| -rw-r--r-- | tests/utils/src/bin/saturate_serial.rs | 15 |
5 files changed, 214 insertions, 179 deletions
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs index 17d82fe2d..10ae20f5c 100644 --- a/embassy-stm32/src/dma/dma.rs +++ b/embassy-stm32/src/dma/dma.rs | |||
| @@ -4,6 +4,7 @@ use core::pin::Pin; | |||
| 4 | use core::sync::atomic::{fence, Ordering}; | 4 | use core::sync::atomic::{fence, Ordering}; |
| 5 | use core::task::{Context, Poll, Waker}; | 5 | use core::task::{Context, Poll, Waker}; |
| 6 | 6 | ||
| 7 | use atomic_polyfill::AtomicUsize; | ||
| 7 | use embassy_cortex_m::interrupt::Priority; | 8 | use embassy_cortex_m::interrupt::Priority; |
| 8 | use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; | 9 | use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; |
| 9 | use embassy_sync::waitqueue::AtomicWaker; | 10 | use embassy_sync::waitqueue::AtomicWaker; |
| @@ -129,13 +130,16 @@ impl From<FifoThreshold> for vals::Fth { | |||
| 129 | 130 | ||
| 130 | struct State { | 131 | struct State { |
| 131 | ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT], | 132 | ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT], |
| 133 | complete_count: [AtomicUsize; DMA_CHANNEL_COUNT], | ||
| 132 | } | 134 | } |
| 133 | 135 | ||
| 134 | impl State { | 136 | impl State { |
| 135 | const fn new() -> Self { | 137 | const fn new() -> Self { |
| 138 | const ZERO: AtomicUsize = AtomicUsize::new(0); | ||
| 136 | const AW: AtomicWaker = AtomicWaker::new(); | 139 | const AW: AtomicWaker = AtomicWaker::new(); |
| 137 | Self { | 140 | Self { |
| 138 | ch_wakers: [AW; DMA_CHANNEL_COUNT], | 141 | ch_wakers: [AW; DMA_CHANNEL_COUNT], |
| 142 | complete_count: [ZERO; DMA_CHANNEL_COUNT], | ||
| 139 | } | 143 | } |
| 140 | } | 144 | } |
| 141 | } | 145 | } |
| @@ -184,13 +188,43 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index: | |||
| 184 | panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num); | 188 | panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num); |
| 185 | } | 189 | } |
| 186 | 190 | ||
| 187 | if isr.tcif(channel_num % 4) && cr.read().tcie() { | 191 | let mut wake = false; |
| 188 | /* acknowledge transfer complete interrupt */ | 192 | |
| 189 | dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); | 193 | if isr.htif(channel_num % 4) && cr.read().htie() { |
| 194 | // Acknowledge half transfer complete interrupt | ||
| 195 | dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true)); | ||
| 196 | wake = true; | ||
| 197 | } | ||
| 198 | |||
| 199 | wake |= process_tcif(dma, channel_num, index); | ||
| 200 | |||
| 201 | if wake { | ||
| 190 | STATE.ch_wakers[index].wake(); | 202 | STATE.ch_wakers[index].wake(); |
| 191 | } | 203 | } |
| 192 | } | 204 | } |
| 193 | 205 | ||
| 206 | unsafe fn process_tcif(dma: pac::dma::Dma, channel_num: usize, index: usize) -> bool { | ||
| 207 | let isr_reg = dma.isr(channel_num / 4); | ||
| 208 | let cr_reg = dma.st(channel_num).cr(); | ||
| 209 | |||
| 210 | // First, figure out if tcif is set without a cs. | ||
| 211 | if isr_reg.read().tcif(channel_num % 4) && cr_reg.read().tcie() { | ||
| 212 | // Make tcif test again within a cs to avoid race when incrementing complete_count. | ||
| 213 | critical_section::with(|_| { | ||
| 214 | if isr_reg.read().tcif(channel_num % 4) && cr_reg.read().tcie() { | ||
| 215 | // Acknowledge transfer complete interrupt | ||
| 216 | dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); | ||
| 217 | STATE.complete_count[index].fetch_add(1, Ordering::Release); | ||
| 218 | true | ||
| 219 | } else { | ||
| 220 | false | ||
| 221 | } | ||
| 222 | }) | ||
| 223 | } else { | ||
| 224 | false | ||
| 225 | } | ||
| 226 | } | ||
| 227 | |||
| 194 | #[cfg(any(dma_v2, dmamux))] | 228 | #[cfg(any(dma_v2, dmamux))] |
| 195 | pub type Request = u8; | 229 | pub type Request = u8; |
| 196 | #[cfg(not(any(dma_v2, dmamux)))] | 230 | #[cfg(not(any(dma_v2, dmamux)))] |
| @@ -530,6 +564,7 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> { | |||
| 530 | 564 | ||
| 531 | unsafe { | 565 | unsafe { |
| 532 | dma.ifcr(isrn).write(|w| { | 566 | dma.ifcr(isrn).write(|w| { |
| 567 | w.set_htif(isrbit, true); | ||
| 533 | w.set_tcif(isrbit, true); | 568 | w.set_tcif(isrbit, true); |
| 534 | w.set_teif(isrbit, true); | 569 | w.set_teif(isrbit, true); |
| 535 | }) | 570 | }) |
| @@ -593,31 +628,27 @@ impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> { | |||
| 593 | // ============================== | 628 | // ============================== |
| 594 | 629 | ||
| 595 | impl<C: Channel> DmaCtrl for C { | 630 | impl<C: Channel> DmaCtrl for C { |
| 596 | fn tcif(&self) -> bool { | 631 | fn ndtr(&self) -> usize { |
| 597 | let channel_number = self.num(); | 632 | let ch = self.regs().st(self.num()); |
| 598 | let dma = self.regs(); | 633 | unsafe { ch.ndtr().read() }.ndt() as usize |
| 599 | let isrn = channel_number / 4; | ||
| 600 | let isrbit = channel_number % 4; | ||
| 601 | |||
| 602 | unsafe { dma.isr(isrn).read() }.tcif(isrbit) | ||
| 603 | } | 634 | } |
| 604 | 635 | ||
| 605 | fn clear_tcif(&mut self) { | 636 | fn get_complete_count(&self) -> usize { |
| 606 | let channel_number = self.num(); | ||
| 607 | let dma = self.regs(); | 637 | let dma = self.regs(); |
| 608 | let isrn = channel_number / 4; | 638 | let channel_num = self.num(); |
| 609 | let isrbit = channel_number % 4; | 639 | let index = self.index(); |
| 610 | 640 | // Manually process tcif in case transfer was completed and we are in a higher priority task. | |
| 611 | unsafe { | 641 | unsafe { process_tcif(dma, channel_num, index) }; |
| 612 | dma.ifcr(isrn).write(|w| { | 642 | STATE.complete_count[index].load(Ordering::Acquire) |
| 613 | w.set_tcif(isrbit, true); | ||
| 614 | }) | ||
| 615 | } | ||
| 616 | } | 643 | } |
| 617 | 644 | ||
| 618 | fn ndtr(&self) -> usize { | 645 | fn reset_complete_count(&mut self) -> usize { |
| 619 | let ch = self.regs().st(self.num()); | 646 | let dma = self.regs(); |
| 620 | unsafe { ch.ndtr().read() }.ndt() as usize | 647 | let channel_num = self.num(); |
| 648 | let index = self.index(); | ||
| 649 | // Manually process tcif in case transfer was completed and we are in a higher priority task. | ||
| 650 | unsafe { process_tcif(dma, channel_num, index) }; | ||
| 651 | STATE.complete_count[index].swap(0, Ordering::AcqRel) | ||
| 621 | } | 652 | } |
| 622 | } | 653 | } |
| 623 | 654 | ||
| @@ -657,7 +688,8 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 657 | w.set_minc(vals::Inc::INCREMENTED); | 688 | w.set_minc(vals::Inc::INCREMENTED); |
| 658 | w.set_pinc(vals::Inc::FIXED); | 689 | w.set_pinc(vals::Inc::FIXED); |
| 659 | w.set_teie(true); | 690 | w.set_teie(true); |
| 660 | w.set_tcie(false); | 691 | w.set_htie(true); |
| 692 | w.set_tcie(true); | ||
| 661 | w.set_circ(vals::Circ::ENABLED); | 693 | w.set_circ(vals::Circ::ENABLED); |
| 662 | #[cfg(dma_v1)] | 694 | #[cfg(dma_v1)] |
| 663 | w.set_trbuff(true); | 695 | w.set_trbuff(true); |
| @@ -703,7 +735,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 703 | } | 735 | } |
| 704 | 736 | ||
| 705 | pub fn clear(&mut self) { | 737 | pub fn clear(&mut self) { |
| 706 | self.ringbuf.clear(); | 738 | self.ringbuf.clear(&mut *self.channel); |
| 707 | } | 739 | } |
| 708 | 740 | ||
| 709 | /// Read bytes from the ring buffer | 741 | /// Read bytes from the ring buffer |
| @@ -712,6 +744,22 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 712 | self.ringbuf.read(&mut *self.channel, buf) | 744 | self.ringbuf.read(&mut *self.channel, buf) |
| 713 | } | 745 | } |
| 714 | 746 | ||
| 747 | pub fn is_empty(&self) -> bool { | ||
| 748 | self.ringbuf.is_empty() | ||
| 749 | } | ||
| 750 | |||
| 751 | pub fn len(&self) -> usize { | ||
| 752 | self.ringbuf.len() | ||
| 753 | } | ||
| 754 | |||
| 755 | pub fn capacity(&self) -> usize { | ||
| 756 | self.ringbuf.dma_buf.len() | ||
| 757 | } | ||
| 758 | |||
| 759 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 760 | STATE.ch_wakers[self.channel.index()].register(waker); | ||
| 761 | } | ||
| 762 | |||
| 715 | fn clear_irqs(&mut self) { | 763 | fn clear_irqs(&mut self) { |
| 716 | let channel_number = self.channel.num(); | 764 | let channel_number = self.channel.num(); |
| 717 | let dma = self.channel.regs(); | 765 | let dma = self.channel.regs(); |
| @@ -720,6 +768,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 720 | 768 | ||
| 721 | unsafe { | 769 | unsafe { |
| 722 | dma.ifcr(isrn).write(|w| { | 770 | dma.ifcr(isrn).write(|w| { |
| 771 | w.set_htif(isrbit, true); | ||
| 723 | w.set_tcif(isrbit, true); | 772 | w.set_tcif(isrbit, true); |
| 724 | w.set_teif(isrbit, true); | 773 | w.set_teif(isrbit, true); |
| 725 | }) | 774 | }) |
| @@ -733,6 +782,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 733 | unsafe { | 782 | unsafe { |
| 734 | ch.cr().write(|w| { | 783 | ch.cr().write(|w| { |
| 735 | w.set_teie(true); | 784 | w.set_teie(true); |
| 785 | w.set_htie(true); | ||
| 736 | w.set_tcie(true); | 786 | w.set_tcie(true); |
| 737 | }) | 787 | }) |
| 738 | } | 788 | } |
| @@ -743,15 +793,10 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 743 | unsafe { ch.cr().read() }.en() | 793 | unsafe { ch.cr().read() }.en() |
| 744 | } | 794 | } |
| 745 | 795 | ||
| 746 | /// Gets the total remaining transfers for the channel | 796 | /// Synchronize the position of the ring buffer to the actual DMA controller position |
| 747 | /// Note: this will be zero for transfers that completed without cancellation. | 797 | pub fn reload_position(&mut self) { |
| 748 | pub fn get_remaining_transfers(&self) -> usize { | ||
| 749 | let ch = self.channel.regs().st(self.channel.num()); | 798 | let ch = self.channel.regs().st(self.channel.num()); |
| 750 | unsafe { ch.ndtr().read() }.ndt() as usize | 799 | self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize; |
| 751 | } | ||
| 752 | |||
| 753 | pub fn set_ndtr(&mut self, ndtr: usize) { | ||
| 754 | self.ringbuf.ndtr = ndtr; | ||
| 755 | } | 800 | } |
| 756 | } | 801 | } |
| 757 | 802 | ||
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs index f9ace6018..544ec9461 100644 --- a/embassy-stm32/src/dma/ringbuffer.rs +++ b/embassy-stm32/src/dma/ringbuffer.rs | |||
| @@ -10,14 +10,6 @@ use super::word::Word; | |||
| 10 | /// to the current register value. `ndtr` describes the current position of the DMA | 10 | /// to the current register value. `ndtr` describes the current position of the DMA |
| 11 | /// write. | 11 | /// write. |
| 12 | /// | 12 | /// |
| 13 | /// # Safety | ||
| 14 | /// | ||
| 15 | /// The ring buffer controls the TCIF (transfer completed interrupt flag) to | ||
| 16 | /// detect buffer overruns, hence this interrupt must be disabled. | ||
| 17 | /// The buffer can detect overruns up to one period, that is, for a X byte buffer, | ||
| 18 | /// overruns can be detected if they happen from byte X+1 up to 2X. After this | ||
| 19 | /// point, overruns may or may not be detected. | ||
| 20 | /// | ||
| 21 | /// # Buffer layout | 13 | /// # Buffer layout |
| 22 | /// | 14 | /// |
| 23 | /// ```text | 15 | /// ```text |
| @@ -39,7 +31,6 @@ pub struct DmaRingBuffer<'a, W: Word> { | |||
| 39 | pub(crate) dma_buf: &'a mut [W], | 31 | pub(crate) dma_buf: &'a mut [W], |
| 40 | first: usize, | 32 | first: usize, |
| 41 | pub ndtr: usize, | 33 | pub ndtr: usize, |
| 42 | expect_next_read_to_wrap: bool, | ||
| 43 | } | 34 | } |
| 44 | 35 | ||
| 45 | #[derive(Debug, PartialEq)] | 36 | #[derive(Debug, PartialEq)] |
| @@ -50,13 +41,13 @@ pub trait DmaCtrl { | |||
| 50 | /// buffer until the dma writer wraps. | 41 | /// buffer until the dma writer wraps. |
| 51 | fn ndtr(&self) -> usize; | 42 | fn ndtr(&self) -> usize; |
| 52 | 43 | ||
| 53 | /// Read the transfer completed interrupt flag | 44 | /// Get the transfer completed counter. |
| 54 | /// This flag is set by the dma controller when NDTR is reloaded, | 45 | /// This counter is incremented by the dma controller when NDTR is reloaded, |
| 55 | /// i.e. when the writing wraps. | 46 | /// i.e. when the writing wraps. |
| 56 | fn tcif(&self) -> bool; | 47 | fn get_complete_count(&self) -> usize; |
| 57 | 48 | ||
| 58 | /// Clear the transfer completed interrupt flag | 49 | /// Reset the transfer completed counter to 0 and return the value just prior to the reset. |
| 59 | fn clear_tcif(&mut self); | 50 | fn reset_complete_count(&mut self) -> usize; |
| 60 | } | 51 | } |
| 61 | 52 | ||
| 62 | impl<'a, W: Word> DmaRingBuffer<'a, W> { | 53 | impl<'a, W: Word> DmaRingBuffer<'a, W> { |
| @@ -66,15 +57,14 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { | |||
| 66 | dma_buf, | 57 | dma_buf, |
| 67 | first: 0, | 58 | first: 0, |
| 68 | ndtr, | 59 | ndtr, |
| 69 | expect_next_read_to_wrap: false, | ||
| 70 | } | 60 | } |
| 71 | } | 61 | } |
| 72 | 62 | ||
| 73 | /// Reset the ring buffer to its initial state | 63 | /// Reset the ring buffer to its initial state |
| 74 | pub fn clear(&mut self) { | 64 | pub fn clear(&mut self, dma: &mut impl DmaCtrl) { |
| 75 | self.first = 0; | 65 | self.first = 0; |
| 76 | self.ndtr = self.dma_buf.len(); | 66 | self.ndtr = self.dma_buf.len(); |
| 77 | self.expect_next_read_to_wrap = false; | 67 | dma.reset_complete_count(); |
| 78 | } | 68 | } |
| 79 | 69 | ||
| 80 | /// The buffer end position | 70 | /// The buffer end position |
| @@ -83,14 +73,12 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { | |||
| 83 | } | 73 | } |
| 84 | 74 | ||
| 85 | /// Returns whether the buffer is empty | 75 | /// Returns whether the buffer is empty |
| 86 | #[allow(dead_code)] | ||
| 87 | pub fn is_empty(&self) -> bool { | 76 | pub fn is_empty(&self) -> bool { |
| 88 | self.first == self.end() | 77 | self.first == self.end() |
| 89 | } | 78 | } |
| 90 | 79 | ||
| 91 | /// The current number of bytes in the buffer | 80 | /// The current number of bytes in the buffer |
| 92 | /// This may change at any time if dma is currently active | 81 | /// This may change at any time if dma is currently active |
| 93 | #[allow(dead_code)] | ||
| 94 | pub fn len(&self) -> usize { | 82 | pub fn len(&self) -> usize { |
| 95 | // Read out a stable end (the dma peripheral can change it at anytime) | 83 | // Read out a stable end (the dma peripheral can change it at anytime) |
| 96 | let end = self.end(); | 84 | let end = self.end(); |
| @@ -112,27 +100,19 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { | |||
| 112 | if self.first == end { | 100 | if self.first == end { |
| 113 | // The buffer is currently empty | 101 | // The buffer is currently empty |
| 114 | 102 | ||
| 115 | if dma.tcif() { | 103 | if dma.get_complete_count() > 0 { |
| 116 | // The dma controller has written such that the ring buffer now wraps | 104 | // The DMA has written such that the ring buffer wraps at least once |
| 117 | // This is the special case where exactly n*dma_buf.len(), n = 1,2,..., bytes was written, | ||
| 118 | // but where additional bytes are now written causing the ring buffer to wrap. | ||
| 119 | // This is only an error if the writing has passed the current unread region. | ||
| 120 | self.ndtr = dma.ndtr(); | 105 | self.ndtr = dma.ndtr(); |
| 121 | if self.end() > self.first { | 106 | if self.end() > self.first || dma.get_complete_count() > 1 { |
| 122 | dma.clear_tcif(); | ||
| 123 | return Err(OverrunError); | 107 | return Err(OverrunError); |
| 124 | } | 108 | } |
| 125 | } | 109 | } |
| 126 | 110 | ||
| 127 | self.expect_next_read_to_wrap = false; | ||
| 128 | Ok(0) | 111 | Ok(0) |
| 129 | } else if self.first < end { | 112 | } else if self.first < end { |
| 130 | // The available, unread portion in the ring buffer DOES NOT wrap | 113 | // The available, unread portion in the ring buffer DOES NOT wrap |
| 131 | 114 | ||
| 132 | if self.expect_next_read_to_wrap { | 115 | if dma.get_complete_count() > 1 { |
| 133 | // The read was expected to wrap but it did not | ||
| 134 | |||
| 135 | dma.clear_tcif(); | ||
| 136 | return Err(OverrunError); | 116 | return Err(OverrunError); |
| 137 | } | 117 | } |
| 138 | 118 | ||
| @@ -141,35 +121,39 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { | |||
| 141 | 121 | ||
| 142 | compiler_fence(Ordering::SeqCst); | 122 | compiler_fence(Ordering::SeqCst); |
| 143 | 123 | ||
| 144 | if dma.tcif() { | 124 | match dma.get_complete_count() { |
| 145 | // The dma controller has written such that the ring buffer now wraps | 125 | 0 => { |
| 146 | 126 | // The DMA writer has not wrapped before nor after the copy | |
| 147 | self.ndtr = dma.ndtr(); | 127 | } |
| 148 | if self.end() > self.first { | 128 | 1 => { |
| 149 | // The bytes that we have copied out have overflowed | 129 | // The DMA writer has written such that the ring buffer now wraps |
| 150 | // as the writer has now both wrapped and is currently writing | 130 | self.ndtr = dma.ndtr(); |
| 151 | // within the region that we have just copied out | 131 | if self.end() > self.first || dma.get_complete_count() > 1 { |
| 152 | 132 | // The bytes that we have copied out have overflowed | |
| 153 | // Clear transfer completed interrupt flag | 133 | // as the writer has now both wrapped and is currently writing |
| 154 | dma.clear_tcif(); | 134 | // within the region that we have just copied out |
| 135 | return Err(OverrunError); | ||
| 136 | } | ||
| 137 | } | ||
| 138 | _ => { | ||
| 155 | return Err(OverrunError); | 139 | return Err(OverrunError); |
| 156 | } | 140 | } |
| 157 | } | 141 | } |
| 158 | 142 | ||
| 159 | self.first = (self.first + len) % self.dma_buf.len(); | 143 | self.first = (self.first + len) % self.dma_buf.len(); |
| 160 | self.expect_next_read_to_wrap = false; | ||
| 161 | Ok(len) | 144 | Ok(len) |
| 162 | } else { | 145 | } else { |
| 163 | // The available, unread portion in the ring buffer DOES wrap | 146 | // The available, unread portion in the ring buffer DOES wrap |
| 164 | // The dma controller has wrapped since we last read and is currently | 147 | // The DMA writer has wrapped since we last read and is currently |
| 165 | // writing (or the next byte added will be) in the beginning of the ring buffer. | 148 | // writing (or the next byte added will be) in the beginning of the ring buffer. |
| 166 | 149 | ||
| 167 | // If the unread portion wraps then the writer must also have wrapped, | 150 | let complete_count = dma.get_complete_count(); |
| 168 | // or it has wrapped and we already cleared the TCIF flag | 151 | if complete_count > 1 { |
| 169 | assert!(dma.tcif() || self.expect_next_read_to_wrap); | 152 | return Err(OverrunError); |
| 153 | } | ||
| 170 | 154 | ||
| 171 | // Clear transfer completed interrupt flag | 155 | // If the unread portion wraps then the writer must also have wrapped |
| 172 | dma.clear_tcif(); | 156 | assert!(complete_count == 1); |
| 173 | 157 | ||
| 174 | if self.first + buf.len() < self.dma_buf.len() { | 158 | if self.first + buf.len() < self.dma_buf.len() { |
| 175 | // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer. | 159 | // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer. |
| @@ -182,13 +166,12 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { | |||
| 182 | // We have now copied out the data from dma_buf | 166 | // We have now copied out the data from dma_buf |
| 183 | // Make sure that the just read part was not overwritten during the copy | 167 | // Make sure that the just read part was not overwritten during the copy |
| 184 | self.ndtr = dma.ndtr(); | 168 | self.ndtr = dma.ndtr(); |
| 185 | if self.end() > self.first || dma.tcif() { | 169 | if self.end() > self.first || dma.get_complete_count() > 1 { |
| 186 | // The writer has entered the data that we have just read since we read out `end` in the beginning and until now. | 170 | // The writer has entered the data that we have just read since we read out `end` in the beginning and until now. |
| 187 | return Err(OverrunError); | 171 | return Err(OverrunError); |
| 188 | } | 172 | } |
| 189 | 173 | ||
| 190 | self.first = (self.first + len) % self.dma_buf.len(); | 174 | self.first = (self.first + len) % self.dma_buf.len(); |
| 191 | self.expect_next_read_to_wrap = true; | ||
| 192 | Ok(len) | 175 | Ok(len) |
| 193 | } else { | 176 | } else { |
| 194 | // The provided read buffer is large enough to include all bytes from the tail of the dma buffer, | 177 | // The provided read buffer is large enough to include all bytes from the tail of the dma buffer, |
| @@ -201,14 +184,14 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { | |||
| 201 | compiler_fence(Ordering::SeqCst); | 184 | compiler_fence(Ordering::SeqCst); |
| 202 | 185 | ||
| 203 | // We have now copied out the data from dma_buf | 186 | // We have now copied out the data from dma_buf |
| 204 | // Make sure that the just read part was not overwritten during the copy | 187 | // Reset complete counter and make sure that the just read part was not overwritten during the copy |
| 205 | self.ndtr = dma.ndtr(); | 188 | self.ndtr = dma.ndtr(); |
| 206 | if self.end() > self.first || dma.tcif() { | 189 | let complete_count = dma.reset_complete_count(); |
| 190 | if self.end() > self.first || complete_count > 1 { | ||
| 207 | return Err(OverrunError); | 191 | return Err(OverrunError); |
| 208 | } | 192 | } |
| 209 | 193 | ||
| 210 | self.first = head; | 194 | self.first = head; |
| 211 | self.expect_next_read_to_wrap = false; | ||
| 212 | Ok(tail + head) | 195 | Ok(tail + head) |
| 213 | } | 196 | } |
| 214 | } | 197 | } |
| @@ -243,14 +226,14 @@ mod tests { | |||
| 243 | 226 | ||
| 244 | struct TestCtrl { | 227 | struct TestCtrl { |
| 245 | next_ndtr: RefCell<Option<usize>>, | 228 | next_ndtr: RefCell<Option<usize>>, |
| 246 | tcif: bool, | 229 | complete_count: usize, |
| 247 | } | 230 | } |
| 248 | 231 | ||
| 249 | impl TestCtrl { | 232 | impl TestCtrl { |
| 250 | pub const fn new() -> Self { | 233 | pub const fn new() -> Self { |
| 251 | Self { | 234 | Self { |
| 252 | next_ndtr: RefCell::new(None), | 235 | next_ndtr: RefCell::new(None), |
| 253 | tcif: false, | 236 | complete_count: 0, |
| 254 | } | 237 | } |
| 255 | } | 238 | } |
| 256 | 239 | ||
| @@ -264,12 +247,14 @@ mod tests { | |||
| 264 | self.next_ndtr.borrow_mut().unwrap() | 247 | self.next_ndtr.borrow_mut().unwrap() |
| 265 | } | 248 | } |
| 266 | 249 | ||
| 267 | fn tcif(&self) -> bool { | 250 | fn get_complete_count(&self) -> usize { |
| 268 | self.tcif | 251 | self.complete_count |
| 269 | } | 252 | } |
| 270 | 253 | ||
| 271 | fn clear_tcif(&mut self) { | 254 | fn reset_complete_count(&mut self) -> usize { |
| 272 | self.tcif = false; | 255 | let old = self.complete_count; |
| 256 | self.complete_count = 0; | ||
| 257 | old | ||
| 273 | } | 258 | } |
| 274 | } | 259 | } |
| 275 | 260 | ||
| @@ -320,7 +305,7 @@ mod tests { | |||
| 320 | ringbuf.ndtr = 10; | 305 | ringbuf.ndtr = 10; |
| 321 | 306 | ||
| 322 | // The dma controller has written 4 + 6 bytes and has reloaded NDTR | 307 | // The dma controller has written 4 + 6 bytes and has reloaded NDTR |
| 323 | ctrl.tcif = true; | 308 | ctrl.complete_count = 1; |
| 324 | ctrl.set_next_ndtr(10); | 309 | ctrl.set_next_ndtr(10); |
| 325 | 310 | ||
| 326 | assert!(!ringbuf.is_empty()); | 311 | assert!(!ringbuf.is_empty()); |
| @@ -346,14 +331,14 @@ mod tests { | |||
| 346 | ringbuf.ndtr = 6; | 331 | ringbuf.ndtr = 6; |
| 347 | 332 | ||
| 348 | // The dma controller has written 6 + 2 bytes and has reloaded NDTR | 333 | // The dma controller has written 6 + 2 bytes and has reloaded NDTR |
| 349 | ctrl.tcif = true; | 334 | ctrl.complete_count = 1; |
| 350 | ctrl.set_next_ndtr(14); | 335 | ctrl.set_next_ndtr(14); |
| 351 | 336 | ||
| 352 | let mut buf = [0; 2]; | 337 | let mut buf = [0; 2]; |
| 353 | assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); | 338 | assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |
| 354 | assert_eq!([2, 3], buf); | 339 | assert_eq!([2, 3], buf); |
| 355 | 340 | ||
| 356 | assert_eq!(true, ctrl.tcif); // The interrupt flag IS NOT cleared | 341 | assert_eq!(1, ctrl.complete_count); // The interrupt flag IS NOT cleared |
| 357 | } | 342 | } |
| 358 | 343 | ||
| 359 | #[test] | 344 | #[test] |
| @@ -365,14 +350,14 @@ mod tests { | |||
| 365 | ringbuf.ndtr = 10; | 350 | ringbuf.ndtr = 10; |
| 366 | 351 | ||
| 367 | // The dma controller has written 6 + 2 bytes and has reloaded NDTR | 352 | // The dma controller has written 6 + 2 bytes and has reloaded NDTR |
| 368 | ctrl.tcif = true; | 353 | ctrl.complete_count = 1; |
| 369 | ctrl.set_next_ndtr(14); | 354 | ctrl.set_next_ndtr(14); |
| 370 | 355 | ||
| 371 | let mut buf = [0; 10]; | 356 | let mut buf = [0; 10]; |
| 372 | assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap()); | 357 | assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |
| 373 | assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf); | 358 | assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf); |
| 374 | 359 | ||
| 375 | assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared | 360 | assert_eq!(0, ctrl.complete_count); // The interrupt flag IS cleared |
| 376 | } | 361 | } |
| 377 | 362 | ||
| 378 | #[test] | 363 | #[test] |
| @@ -387,12 +372,12 @@ mod tests { | |||
| 387 | assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty | 372 | assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty |
| 388 | 373 | ||
| 389 | // The dma controller has written exactly 16 bytes | 374 | // The dma controller has written exactly 16 bytes |
| 390 | ctrl.tcif = true; | 375 | ctrl.complete_count = 1; |
| 391 | 376 | ||
| 392 | let mut buf = [0; 2]; | 377 | let mut buf = [0; 2]; |
| 393 | assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); | 378 | assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); |
| 394 | 379 | ||
| 395 | assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared | 380 | assert_eq!(1, ctrl.complete_count); // The complete counter is not reset |
| 396 | } | 381 | } |
| 397 | 382 | ||
| 398 | #[test] | 383 | #[test] |
| @@ -404,13 +389,13 @@ mod tests { | |||
| 404 | ringbuf.ndtr = 6; | 389 | ringbuf.ndtr = 6; |
| 405 | 390 | ||
| 406 | // The dma controller has written 6 + 3 bytes and has reloaded NDTR | 391 | // The dma controller has written 6 + 3 bytes and has reloaded NDTR |
| 407 | ctrl.tcif = true; | 392 | ctrl.complete_count = 1; |
| 408 | ctrl.set_next_ndtr(13); | 393 | ctrl.set_next_ndtr(13); |
| 409 | 394 | ||
| 410 | let mut buf = [0; 2]; | 395 | let mut buf = [0; 2]; |
| 411 | assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); | 396 | assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); |
| 412 | 397 | ||
| 413 | assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared | 398 | assert_eq!(1, ctrl.complete_count); // The complete counter is not reset |
| 414 | } | 399 | } |
| 415 | 400 | ||
| 416 | #[test] | 401 | #[test] |
| @@ -422,12 +407,12 @@ mod tests { | |||
| 422 | ringbuf.ndtr = 10; | 407 | ringbuf.ndtr = 10; |
| 423 | 408 | ||
| 424 | // The dma controller has written 6 + 13 bytes and has reloaded NDTR | 409 | // The dma controller has written 6 + 13 bytes and has reloaded NDTR |
| 425 | ctrl.tcif = true; | 410 | ctrl.complete_count = 1; |
| 426 | ctrl.set_next_ndtr(3); | 411 | ctrl.set_next_ndtr(3); |
| 427 | 412 | ||
| 428 | let mut buf = [0; 2]; | 413 | let mut buf = [0; 2]; |
| 429 | assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); | 414 | assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); |
| 430 | 415 | ||
| 431 | assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared | 416 | assert_eq!(1, ctrl.complete_count); // The complete counter is not reset |
| 432 | } | 417 | } |
| 433 | } | 418 | } |
diff --git a/embassy-stm32/src/usart/rx_ringbuffered.rs b/embassy-stm32/src/usart/rx_ringbuffered.rs index 0dc90ece7..dc21f557b 100644 --- a/embassy-stm32/src/usart/rx_ringbuffered.rs +++ b/embassy-stm32/src/usart/rx_ringbuffered.rs | |||
| @@ -4,8 +4,9 @@ use core::task::Poll; | |||
| 4 | 4 | ||
| 5 | use embassy_hal_common::drop::OnDrop; | 5 | use embassy_hal_common::drop::OnDrop; |
| 6 | use embassy_hal_common::PeripheralRef; | 6 | use embassy_hal_common::PeripheralRef; |
| 7 | use futures::future::{select, Either}; | ||
| 7 | 8 | ||
| 8 | use super::{rdr, sr, BasicInstance, Error, UartRx}; | 9 | use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx}; |
| 9 | use crate::dma::ringbuffer::OverrunError; | 10 | use crate::dma::ringbuffer::OverrunError; |
| 10 | use crate::dma::RingBuffer; | 11 | use crate::dma::RingBuffer; |
| 11 | 12 | ||
| @@ -98,7 +99,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD | |||
| 98 | } | 99 | } |
| 99 | 100 | ||
| 100 | /// Read bytes that are readily available in the ring buffer. | 101 | /// Read bytes that are readily available in the ring buffer. |
| 101 | /// If no bytes are currently available in the buffer the call waits until data are received. | 102 | /// If no bytes are currently available in the buffer the call waits until the some |
| 103 | /// bytes are available (at least one byte and at most half the buffer size) | ||
| 102 | /// | 104 | /// |
| 103 | /// Background receive is started if `start()` has not been previously called. | 105 | /// Background receive is started if `start()` has not been previously called. |
| 104 | /// | 106 | /// |
| @@ -107,10 +109,9 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD | |||
| 107 | pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> { | 109 | pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> { |
| 108 | let r = T::regs(); | 110 | let r = T::regs(); |
| 109 | 111 | ||
| 112 | // Start background receive if it was not already started | ||
| 110 | // SAFETY: read only | 113 | // SAFETY: read only |
| 111 | let is_started = unsafe { r.cr3().read().dmar() }; | 114 | let is_started = unsafe { r.cr3().read().dmar() }; |
| 112 | |||
| 113 | // Start background receive if it was not already started | ||
| 114 | if !is_started { | 115 | if !is_started { |
| 115 | self.start()?; | 116 | self.start()?; |
| 116 | } | 117 | } |
| @@ -132,8 +133,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD | |||
| 132 | } | 133 | } |
| 133 | } | 134 | } |
| 134 | 135 | ||
| 135 | let ndtr = self.ring_buf.get_remaining_transfers(); | 136 | self.ring_buf.reload_position(); |
| 136 | self.ring_buf.set_ndtr(ndtr); | ||
| 137 | match self.ring_buf.read(buf) { | 137 | match self.ring_buf.read(buf) { |
| 138 | Ok(len) if len == 0 => {} | 138 | Ok(len) if len == 0 => {} |
| 139 | Ok(len) => { | 139 | Ok(len) => { |
| @@ -148,28 +148,32 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD | |||
| 148 | } | 148 | } |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | // Wait for any data since `ndtr` | 151 | loop { |
| 152 | self.wait_for_data(ndtr).await?; | 152 | self.wait_for_data_or_idle().await?; |
| 153 | |||
| 154 | self.ring_buf.reload_position(); | ||
| 155 | if !self.ring_buf.is_empty() { | ||
| 156 | break; | ||
| 157 | } | ||
| 158 | } | ||
| 153 | 159 | ||
| 154 | // ndtr is now different than the value provided to `wait_for_data()` | ||
| 155 | // Re-sample ndtr now when it has changed. | ||
| 156 | self.ring_buf.set_ndtr(self.ring_buf.get_remaining_transfers()); | ||
| 157 | let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?; | 160 | let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?; |
| 158 | assert!(len > 0); | 161 | assert!(len > 0); |
| 162 | |||
| 159 | Ok(len) | 163 | Ok(len) |
| 160 | } | 164 | } |
| 161 | 165 | ||
| 162 | /// Wait for uart data | 166 | /// Wait for uart idle or dma half-full or full |
| 163 | async fn wait_for_data(&mut self, old_ndtr: usize) -> Result<(), Error> { | 167 | async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> { |
| 164 | let r = T::regs(); | 168 | let r = T::regs(); |
| 165 | 169 | ||
| 166 | // make sure USART state is restored to neutral state when this future is dropped | 170 | // make sure USART state is restored to neutral state |
| 167 | let _drop = OnDrop::new(move || { | 171 | let _on_drop = OnDrop::new(move || { |
| 168 | // SAFETY: only clears Rx related flags | 172 | // SAFETY: only clears Rx related flags |
| 169 | unsafe { | 173 | unsafe { |
| 170 | r.cr1().modify(|w| { | 174 | r.cr1().modify(|w| { |
| 171 | // disable RXNE interrupt | 175 | // disable idle line interrupt |
| 172 | w.set_rxneie(false); | 176 | w.set_idleie(false); |
| 173 | }); | 177 | }); |
| 174 | } | 178 | } |
| 175 | }); | 179 | }); |
| @@ -177,76 +181,65 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD | |||
| 177 | // SAFETY: only sets Rx related flags | 181 | // SAFETY: only sets Rx related flags |
| 178 | unsafe { | 182 | unsafe { |
| 179 | r.cr1().modify(|w| { | 183 | r.cr1().modify(|w| { |
| 180 | // enable RXNE interrupt | 184 | // enable idle line interrupt |
| 181 | w.set_rxneie(true); | 185 | w.set_idleie(true); |
| 182 | }); | 186 | }); |
| 183 | } | 187 | } |
| 184 | 188 | ||
| 185 | // future which completes when RX "not empty" is detected, | 189 | compiler_fence(Ordering::SeqCst); |
| 186 | // i.e. when there is data in uart rx register | 190 | |
| 187 | let rxne = poll_fn(|cx| { | 191 | // Future which completes when the dma is half full or full |
| 188 | let s = T::state(); | 192 | let dma = poll_fn(|cx| { |
| 193 | self.ring_buf.set_waker(cx.waker()); | ||
| 194 | |||
| 195 | compiler_fence(Ordering::SeqCst); | ||
| 189 | 196 | ||
| 190 | // Register waker to be awaken when RXNE interrupt is received | 197 | self.ring_buf.reload_position(); |
| 198 | if !self.ring_buf.is_empty() { | ||
| 199 | // Some data is now available | ||
| 200 | Poll::Ready(()) | ||
| 201 | } else { | ||
| 202 | Poll::Pending | ||
| 203 | } | ||
| 204 | }); | ||
| 205 | |||
| 206 | // Future which completes when idle line is detected | ||
| 207 | let uart = poll_fn(|cx| { | ||
| 208 | let s = T::state(); | ||
| 191 | s.rx_waker.register(cx.waker()); | 209 | s.rx_waker.register(cx.waker()); |
| 192 | 210 | ||
| 193 | compiler_fence(Ordering::SeqCst); | 211 | compiler_fence(Ordering::SeqCst); |
| 194 | 212 | ||
| 195 | // SAFETY: read only and we only use Rx related flags | 213 | // SAFETY: read only and we only use Rx related flags |
| 196 | let s = unsafe { sr(r).read() }; | 214 | let sr = unsafe { sr(r).read() }; |
| 197 | let has_errors = s.pe() || s.fe() || s.ne() || s.ore(); | 215 | |
| 216 | let has_errors = sr.pe() || sr.fe() || sr.ne() || sr.ore(); | ||
| 198 | if has_errors { | 217 | if has_errors { |
| 199 | if s.pe() { | 218 | if sr.pe() { |
| 200 | return Poll::Ready(Err(Error::Parity)); | 219 | return Poll::Ready(Err(Error::Parity)); |
| 201 | } else if s.fe() { | 220 | } else if sr.fe() { |
| 202 | return Poll::Ready(Err(Error::Framing)); | 221 | return Poll::Ready(Err(Error::Framing)); |
| 203 | } else if s.ne() { | 222 | } else if sr.ne() { |
| 204 | return Poll::Ready(Err(Error::Noise)); | 223 | return Poll::Ready(Err(Error::Noise)); |
| 205 | } else { | 224 | } else { |
| 206 | return Poll::Ready(Err(Error::Overrun)); | 225 | return Poll::Ready(Err(Error::Overrun)); |
| 207 | } | 226 | } |
| 208 | } | 227 | } |
| 209 | 228 | ||
| 210 | // Re-sample ndtr and determine if it has changed since we started | 229 | if sr.idle() { |
| 211 | // waiting for data. | 230 | // Idle line is detected |
| 212 | let new_ndtr = self.ring_buf.get_remaining_transfers(); | ||
| 213 | if new_ndtr != old_ndtr { | ||
| 214 | // Some data was received as NDTR has changed | ||
| 215 | Poll::Ready(Ok(())) | 231 | Poll::Ready(Ok(())) |
| 216 | } else { | 232 | } else { |
| 217 | // It may be that the DMA controller is currently busy consuming the | 233 | Poll::Pending |
| 218 | // RX data register. We therefore wait for the register to become empty. | ||
| 219 | while unsafe { sr(r).read().rxne() } {} | ||
| 220 | |||
| 221 | compiler_fence(Ordering::SeqCst); | ||
| 222 | |||
| 223 | // Re-get again: This time we know that the DMA controller has consumed | ||
| 224 | // the current read register if it was busy doing so | ||
| 225 | let new_ndtr = self.ring_buf.get_remaining_transfers(); | ||
| 226 | if new_ndtr != old_ndtr { | ||
| 227 | // Some data was received as NDTR has changed | ||
| 228 | Poll::Ready(Ok(())) | ||
| 229 | } else { | ||
| 230 | Poll::Pending | ||
| 231 | } | ||
| 232 | } | 234 | } |
| 233 | }); | 235 | }); |
| 234 | 236 | ||
| 235 | compiler_fence(Ordering::SeqCst); | 237 | match select(dma, uart).await { |
| 236 | 238 | Either::Left(((), _)) => Ok(()), | |
| 237 | let new_ndtr = self.ring_buf.get_remaining_transfers(); | 239 | Either::Right((Ok(()), _)) => Ok(()), |
| 238 | if new_ndtr != old_ndtr { | 240 | Either::Right((Err(e), _)) => { |
| 239 | // Fast path - NDTR has already changed, no reason to poll | 241 | self.teardown_uart(); |
| 240 | Ok(()) | 242 | Err(e) |
| 241 | } else { | ||
| 242 | // NDTR has not changed since we first read from the ring buffer | ||
| 243 | // Wait for RXNE interrupt... | ||
| 244 | match rxne.await { | ||
| 245 | Ok(()) => Ok(()), | ||
| 246 | Err(e) => { | ||
| 247 | self.teardown_uart(); | ||
| 248 | Err(e) | ||
| 249 | } | ||
| 250 | } | 243 | } |
| 251 | } | 244 | } |
| 252 | } | 245 | } |
diff --git a/tests/stm32/src/bin/usart_rx_ringbuffered.rs b/tests/stm32/src/bin/usart_rx_ringbuffered.rs index 3ea8bfb7b..48dc25b0e 100644 --- a/tests/stm32/src/bin/usart_rx_ringbuffered.rs +++ b/tests/stm32/src/bin/usart_rx_ringbuffered.rs | |||
| @@ -56,6 +56,7 @@ mod board { | |||
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | const ONE_BYTE_DURATION_US: u32 = 9_000_000 / 115200; | 58 | const ONE_BYTE_DURATION_US: u32 = 9_000_000 / 115200; |
| 59 | const DMA_BUF_SIZE: usize = 64; | ||
| 59 | 60 | ||
| 60 | #[embassy_executor::main] | 61 | #[embassy_executor::main] |
| 61 | async fn main(spawner: Spawner) { | 62 | async fn main(spawner: Spawner) { |
| @@ -114,7 +115,7 @@ async fn main(spawner: Spawner) { | |||
| 114 | 115 | ||
| 115 | let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config); | 116 | let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config); |
| 116 | let (tx, rx) = usart.split(); | 117 | let (tx, rx) = usart.split(); |
| 117 | static mut DMA_BUF: [u8; 64] = [0; 64]; | 118 | static mut DMA_BUF: [u8; DMA_BUF_SIZE] = [0; DMA_BUF_SIZE]; |
| 118 | let dma_buf = unsafe { DMA_BUF.as_mut() }; | 119 | let dma_buf = unsafe { DMA_BUF.as_mut() }; |
| 119 | let rx = rx.into_ring_buffered(dma_buf); | 120 | let rx = rx.into_ring_buffered(dma_buf); |
| 120 | 121 | ||
| @@ -159,7 +160,14 @@ async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::Rx | |||
| 159 | loop { | 160 | loop { |
| 160 | let mut buf = [0; 100]; | 161 | let mut buf = [0; 100]; |
| 161 | let max_len = 1 + (rng.next_u32() as usize % (buf.len() - 1)); | 162 | let max_len = 1 + (rng.next_u32() as usize % (buf.len() - 1)); |
| 162 | let received = rx.read(&mut buf[..max_len]).await.unwrap(); | 163 | let received = match rx.read(&mut buf[..max_len]).await { |
| 164 | Ok(r) => r, | ||
| 165 | Err(e) => { | ||
| 166 | error!("Test fail! read error: {:?}", e); | ||
| 167 | cortex_m::asm::bkpt(); | ||
| 168 | return; | ||
| 169 | } | ||
| 170 | }; | ||
| 163 | 171 | ||
| 164 | if expected.is_none() { | 172 | if expected.is_none() { |
| 165 | info!("Test started"); | 173 | info!("Test started"); |
| @@ -176,8 +184,11 @@ async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::Rx | |||
| 176 | } | 184 | } |
| 177 | 185 | ||
| 178 | if received < max_len { | 186 | if received < max_len { |
| 179 | let byte_count = rng.next_u32() % 64; | 187 | let byte_count = rng.next_u32() % (DMA_BUF_SIZE as u32); |
| 180 | Timer::after(Duration::from_micros((byte_count * ONE_BYTE_DURATION_US) as _)).await; | 188 | let random_delay_us = (byte_count * ONE_BYTE_DURATION_US) as u64; |
| 189 | if random_delay_us > 200 { | ||
| 190 | Timer::after(Duration::from_micros(random_delay_us - 200)).await; | ||
| 191 | } | ||
| 181 | } | 192 | } |
| 182 | 193 | ||
| 183 | i += 1; | 194 | i += 1; |
diff --git a/tests/utils/src/bin/saturate_serial.rs b/tests/utils/src/bin/saturate_serial.rs index 28480516d..18ca12fb7 100644 --- a/tests/utils/src/bin/saturate_serial.rs +++ b/tests/utils/src/bin/saturate_serial.rs | |||
| @@ -1,18 +1,19 @@ | |||
| 1 | use std::path::Path; | 1 | use std::path::Path; |
| 2 | use std::time::Duration; | 2 | use std::time::Duration; |
| 3 | use std::{env, io, thread}; | 3 | use std::{env, io, process, thread}; |
| 4 | 4 | ||
| 5 | use rand::random; | 5 | use rand::random; |
| 6 | use serial::SerialPort; | 6 | use serial::SerialPort; |
| 7 | 7 | ||
| 8 | pub fn main() { | 8 | pub fn main() { |
| 9 | if let Some(port_name) = env::args().nth(1) { | 9 | if let Some(port_name) = env::args().nth(1) { |
| 10 | let sleep = env::args().position(|x| x == "--sleep").is_some(); | 10 | let idles = env::args().position(|x| x == "--idles").is_some(); |
| 11 | 11 | ||
| 12 | println!("Saturating port {:?} with 115200 8N1", port_name); | 12 | println!("Saturating port {:?} with 115200 8N1", port_name); |
| 13 | println!("Sleep: {}", sleep); | 13 | println!("Idles: {}", idles); |
| 14 | println!("Process ID: {}", process::id()); | ||
| 14 | let mut port = serial::open(&port_name).unwrap(); | 15 | let mut port = serial::open(&port_name).unwrap(); |
| 15 | if saturate(&mut port, sleep).is_err() { | 16 | if saturate(&mut port, idles).is_err() { |
| 16 | eprintln!("Unable to saturate port"); | 17 | eprintln!("Unable to saturate port"); |
| 17 | } | 18 | } |
| 18 | } else { | 19 | } else { |
| @@ -23,7 +24,7 @@ pub fn main() { | |||
| 23 | } | 24 | } |
| 24 | } | 25 | } |
| 25 | 26 | ||
| 26 | fn saturate<T: SerialPort>(port: &mut T, sleep: bool) -> io::Result<()> { | 27 | fn saturate<T: SerialPort>(port: &mut T, idles: bool) -> io::Result<()> { |
| 27 | port.reconfigure(&|settings| { | 28 | port.reconfigure(&|settings| { |
| 28 | settings.set_baud_rate(serial::Baud115200)?; | 29 | settings.set_baud_rate(serial::Baud115200)?; |
| 29 | settings.set_char_size(serial::Bits8); | 30 | settings.set_char_size(serial::Bits8); |
| @@ -39,7 +40,7 @@ fn saturate<T: SerialPort>(port: &mut T, sleep: bool) -> io::Result<()> { | |||
| 39 | 40 | ||
| 40 | port.write_all(&buf)?; | 41 | port.write_all(&buf)?; |
| 41 | 42 | ||
| 42 | if sleep { | 43 | if idles { |
| 43 | let micros = (random::<usize>() % 1000) as u64; | 44 | let micros = (random::<usize>() % 1000) as u64; |
| 44 | println!("Sleeping {}us", micros); | 45 | println!("Sleeping {}us", micros); |
| 45 | port.flush().unwrap(); | 46 | port.flush().unwrap(); |
| @@ -49,4 +50,4 @@ fn saturate<T: SerialPort>(port: &mut T, sleep: bool) -> io::Result<()> { | |||
| 49 | written += len; | 50 | written += len; |
| 50 | println!("Written: {}", written); | 51 | println!("Written: {}", written); |
| 51 | } | 52 | } |
| 52 | } \ No newline at end of file | 53 | } |
