aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorxoviat <[email protected]>2023-05-29 14:49:43 -0500
committerxoviat <[email protected]>2023-05-29 14:49:43 -0500
commitaba0f8fd6cd51cad65480689bc9254df4f071175 (patch)
treebe7dabd8075b827cb8a2d920225c48b452857e1f
parent0a136c308eb3a949a7bca2f7f688277adb085489 (diff)
stm32/uart: refactor rx ringbuffer
- remove some race conditions - allow full use of rx buffer
-rw-r--r--.vscode/.gitignore3
-rw-r--r--embassy-stm32/src/dma/bdma.rs40
-rw-r--r--embassy-stm32/src/dma/dma.rs40
-rw-r--r--embassy-stm32/src/dma/ringbuffer.rs537
-rw-r--r--embassy-stm32/src/lib.rs2
-rw-r--r--embassy-stm32/src/usart/mod.rs50
-rw-r--r--embassy-stm32/src/usart/ringbuffered.rs (renamed from embassy-stm32/src/usart/rx_ringbuffered.rs)182
7 files changed, 426 insertions, 428 deletions
diff --git a/.vscode/.gitignore b/.vscode/.gitignore
index 9fbb9ec95..8c3dd8a31 100644
--- a/.vscode/.gitignore
+++ b/.vscode/.gitignore
@@ -1,3 +1,4 @@
1*.cortex-debug.*.json 1*.cortex-debug.*.json
2launch.json 2launch.json
3tasks.json \ No newline at end of file 3tasks.json
4*.cfg
diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs
index 0202ec379..9dafa26d0 100644
--- a/embassy-stm32/src/dma/bdma.rs
+++ b/embassy-stm32/src/dma/bdma.rs
@@ -111,24 +111,18 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index
111 panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num); 111 panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num);
112 } 112 }
113 113
114 let mut wake = false;
115
116 if isr.htif(channel_num) && cr.read().htie() { 114 if isr.htif(channel_num) && cr.read().htie() {
117 // Acknowledge half transfer complete interrupt 115 // Acknowledge half transfer complete interrupt
118 dma.ifcr().write(|w| w.set_htif(channel_num, true)); 116 dma.ifcr().write(|w| w.set_htif(channel_num, true));
119 wake = true; 117 } else if isr.tcif(channel_num) && cr.read().tcie() {
120 }
121
122 if isr.tcif(channel_num) && cr.read().tcie() {
123 // Acknowledge transfer complete interrupt 118 // Acknowledge transfer complete interrupt
124 dma.ifcr().write(|w| w.set_tcif(channel_num, true)); 119 dma.ifcr().write(|w| w.set_tcif(channel_num, true));
125 STATE.complete_count[index].fetch_add(1, Ordering::Release); 120 STATE.complete_count[index].fetch_add(1, Ordering::Release);
126 wake = true; 121 } else {
122 return;
127 } 123 }
128 124
129 if wake { 125 STATE.ch_wakers[index].wake();
130 STATE.ch_wakers[index].wake();
131 }
132} 126}
133 127
134#[cfg(any(bdma_v2, dmamux))] 128#[cfg(any(bdma_v2, dmamux))]
@@ -371,7 +365,7 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
371struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>); 365struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
372 366
373impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { 367impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
374 fn ndtr(&self) -> usize { 368 fn get_remaining_transfers(&self) -> usize {
375 let ch = self.0.regs().ch(self.0.num()); 369 let ch = self.0.regs().ch(self.0.num());
376 unsafe { ch.ndtr().read() }.ndt() as usize 370 unsafe { ch.ndtr().read() }.ndt() as usize
377 } 371 }
@@ -457,21 +451,17 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
457 } 451 }
458 452
459 /// Read bytes from the ring buffer 453 /// Read bytes from the ring buffer
454 /// Return a tuple of the length read and the length remaining in the buffer
455 /// If not all of the bytes were read, then there will be some bytes in the buffer remaining
456 /// The length remaining is the capacity, ring_buf.len(), less the bytes remaining after the read
460 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. 457 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
461 pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> { 458 pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
462 self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf) 459 self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
463 } 460 }
464 461
465 pub fn is_empty(&self) -> bool { 462 /// The capacity of the ringbuffer
466 self.ringbuf.is_empty() 463 pub fn cap(&self) -> usize {
467 } 464 self.ringbuf.cap()
468
469 pub fn len(&self) -> usize {
470 self.ringbuf.len()
471 }
472
473 pub fn capacity(&self) -> usize {
474 self.ringbuf.dma_buf.len()
475 } 465 }
476 466
477 pub fn set_waker(&mut self, waker: &Waker) { 467 pub fn set_waker(&mut self, waker: &Waker) {
@@ -506,12 +496,6 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
506 let ch = self.channel.regs().ch(self.channel.num()); 496 let ch = self.channel.regs().ch(self.channel.num());
507 unsafe { ch.cr().read() }.en() 497 unsafe { ch.cr().read() }.en()
508 } 498 }
509
510 /// Synchronize the position of the ring buffer to the actual DMA controller position
511 pub fn reload_position(&mut self) {
512 let ch = self.channel.regs().ch(self.channel.num());
513 self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
514 }
515} 499}
516 500
517impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { 501impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs
index 7b17d9e49..47b749ece 100644
--- a/embassy-stm32/src/dma/dma.rs
+++ b/embassy-stm32/src/dma/dma.rs
@@ -187,24 +187,18 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index:
187 panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num); 187 panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
188 } 188 }
189 189
190 let mut wake = false;
191
192 if isr.htif(channel_num % 4) && cr.read().htie() { 190 if isr.htif(channel_num % 4) && cr.read().htie() {
193 // Acknowledge half transfer complete interrupt 191 // Acknowledge half transfer complete interrupt
194 dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true)); 192 dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true));
195 wake = true; 193 } else if isr.tcif(channel_num % 4) && cr.read().tcie() {
196 }
197
198 if isr.tcif(channel_num % 4) && cr.read().tcie() {
199 // Acknowledge transfer complete interrupt 194 // Acknowledge transfer complete interrupt
200 dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); 195 dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
201 STATE.complete_count[index].fetch_add(1, Ordering::Release); 196 STATE.complete_count[index].fetch_add(1, Ordering::Release);
202 wake = true; 197 } else {
198 return;
203 } 199 }
204 200
205 if wake { 201 STATE.ch_wakers[index].wake();
206 STATE.ch_wakers[index].wake();
207 }
208} 202}
209 203
210#[cfg(any(dma_v2, dmamux))] 204#[cfg(any(dma_v2, dmamux))]
@@ -612,7 +606,7 @@ impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
612struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>); 606struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
613 607
614impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { 608impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
615 fn ndtr(&self) -> usize { 609 fn get_remaining_transfers(&self) -> usize {
616 let ch = self.0.regs().st(self.0.num()); 610 let ch = self.0.regs().st(self.0.num());
617 unsafe { ch.ndtr().read() }.ndt() as usize 611 unsafe { ch.ndtr().read() }.ndt() as usize
618 } 612 }
@@ -713,21 +707,17 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
713 } 707 }
714 708
715 /// Read bytes from the ring buffer 709 /// Read bytes from the ring buffer
710 /// Return a tuple of the length read and the length remaining in the buffer
711 /// If not all of the bytes were read, then there will be some bytes in the buffer remaining
712 /// The length remaining is the capacity, ring_buf.len(), less the bytes remaining after the read
716 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. 713 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
717 pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> { 714 pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
718 self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf) 715 self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
719 } 716 }
720 717
721 pub fn is_empty(&self) -> bool { 718 // The capacity of the ringbuffer
722 self.ringbuf.is_empty() 719 pub fn cap(&self) -> usize {
723 } 720 self.ringbuf.cap()
724
725 pub fn len(&self) -> usize {
726 self.ringbuf.len()
727 }
728
729 pub fn capacity(&self) -> usize {
730 self.ringbuf.dma_buf.len()
731 } 721 }
732 722
733 pub fn set_waker(&mut self, waker: &Waker) { 723 pub fn set_waker(&mut self, waker: &Waker) {
@@ -766,12 +756,6 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
766 let ch = self.channel.regs().st(self.channel.num()); 756 let ch = self.channel.regs().st(self.channel.num());
767 unsafe { ch.cr().read() }.en() 757 unsafe { ch.cr().read() }.en()
768 } 758 }
769
770 /// Synchronize the position of the ring buffer to the actual DMA controller position
771 pub fn reload_position(&mut self) {
772 let ch = self.channel.regs().st(self.channel.num());
773 self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
774 }
775} 759}
776 760
777impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { 761impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs
index 38cc87ae9..72a84a57f 100644
--- a/embassy-stm32/src/dma/ringbuffer.rs
+++ b/embassy-stm32/src/dma/ringbuffer.rs
@@ -25,14 +25,13 @@ use super::word::Word;
25/// +-----------------------------------------+ +-----------------------------------------+ 25/// +-----------------------------------------+ +-----------------------------------------+
26/// ^ ^ ^ ^ ^ ^ 26/// ^ ^ ^ ^ ^ ^
27/// | | | | | | 27/// | | | | | |
28/// +- first --+ | +- end ------+ | 28/// +- start --+ | +- end ------+ |
29/// | | | | 29/// | | | |
30/// +- end --------------------+ +- first ----------------+ 30/// +- end --------------------+ +- start ----------------+
31/// ``` 31/// ```
32pub struct DmaRingBuffer<'a, W: Word> { 32pub struct DmaRingBuffer<'a, W: Word> {
33 pub(crate) dma_buf: &'a mut [W], 33 pub(crate) dma_buf: &'a mut [W],
34 first: usize, 34 start: usize,
35 pub ndtr: usize,
36} 35}
37 36
38#[derive(Debug, PartialEq)] 37#[derive(Debug, PartialEq)]
@@ -41,7 +40,7 @@ pub struct OverrunError;
41pub trait DmaCtrl { 40pub trait DmaCtrl {
42 /// Get the NDTR register value, i.e. the space left in the underlying 41 /// Get the NDTR register value, i.e. the space left in the underlying
43 /// buffer until the dma writer wraps. 42 /// buffer until the dma writer wraps.
44 fn ndtr(&self) -> usize; 43 fn get_remaining_transfers(&self) -> usize;
45 44
46 /// Get the transfer completed counter. 45 /// Get the transfer completed counter.
47 /// This counter is incremented by the dma controller when NDTR is reloaded, 46 /// This counter is incremented by the dma controller when NDTR is reloaded,
@@ -54,151 +53,131 @@ pub trait DmaCtrl {
54 53
55impl<'a, W: Word> DmaRingBuffer<'a, W> { 54impl<'a, W: Word> DmaRingBuffer<'a, W> {
56 pub fn new(dma_buf: &'a mut [W]) -> Self { 55 pub fn new(dma_buf: &'a mut [W]) -> Self {
57 let ndtr = dma_buf.len(); 56 Self { dma_buf, start: 0 }
58 Self {
59 dma_buf,
60 first: 0,
61 ndtr,
62 }
63 } 57 }
64 58
65 /// Reset the ring buffer to its initial state 59 /// Reset the ring buffer to its initial state
66 pub fn clear(&mut self, mut dma: impl DmaCtrl) { 60 pub fn clear(&mut self, mut dma: impl DmaCtrl) {
67 self.first = 0; 61 self.start = 0;
68 self.ndtr = self.dma_buf.len();
69 dma.reset_complete_count(); 62 dma.reset_complete_count();
70 } 63 }
71 64
72 /// The buffer end position 65 /// The capacity of the ringbuffer
73 fn end(&self) -> usize { 66 pub const fn cap(&self) -> usize {
74 self.dma_buf.len() - self.ndtr 67 self.dma_buf.len()
75 }
76
77 /// Returns whether the buffer is empty
78 pub fn is_empty(&self) -> bool {
79 self.first == self.end()
80 } 68 }
81 69
82 /// The current number of bytes in the buffer 70 /// The current position of the ringbuffer
83 /// This may change at any time if dma is currently active 71 fn pos(&self, remaining_transfers: usize) -> usize {
84 pub fn len(&self) -> usize { 72 self.cap() - remaining_transfers
 85 // Read out a stable end (the dma peripheral can change it at any time)
86 let end = self.end();
87 if self.first <= end {
88 // No wrap
89 end - self.first
90 } else {
91 self.dma_buf.len() - self.first + end
92 }
93 } 73 }
94 74
95 /// Read bytes from the ring buffer 75 /// Read bytes from the ring buffer
76 /// Return a tuple of the length read and the length remaining in the buffer
77 /// If not all of the bytes were read, then there will be some bytes in the buffer remaining
78 /// The length remaining is the capacity, ring_buf.len(), less the bytes remaining after the read
96 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. 79 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
97 pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<usize, OverrunError> { 80 pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
98 let end = self.end(); 81 /*
82 This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check
 83 after we've done our work to see if we have. This is because on stm32, an interrupt is not guaranteed
84 to fire in the same clock cycle that a register is read, so checking get_complete_count early does
85 not yield relevant information.
86
87 Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full
88 buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error
89 conditions.
90
91 After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that
92 the dma has not overrun within the data we could have copied. We check the data we could have copied
93 rather than the data we actually copied because it costs nothing and confirms an error condition
94 earlier.
95 */
96 let end = self.pos(dma.get_remaining_transfers());
97 if self.start == end && dma.get_complete_count() == 0 {
98 // No bytes are available in the buffer
99 Ok((0, self.cap()))
100 } else if self.start < end {
101 // The available, unread portion in the ring buffer DOES NOT wrap
102 // Copy out the bytes from the dma buffer
103 let len = self.copy_to(buf, self.start..end);
99 104
100 compiler_fence(Ordering::SeqCst); 105 compiler_fence(Ordering::SeqCst);
101 106
102 if self.first == end { 107 /*
103 // The buffer is currently empty 108 first, check if the dma has wrapped at all if it's after end
109 or more than once if it's before start
104 110
105 if dma.get_complete_count() > 0 { 111 this is in a critical section to try to reduce mushy behavior.
106 // The DMA has written such that the ring buffer wraps at least once 112 it's not ideal but it's the best we can do
107 self.ndtr = dma.ndtr();
108 if self.end() > self.first || dma.get_complete_count() > 1 {
109 return Err(OverrunError);
110 }
111 }
112 113
113 Ok(0) 114 then, get the current position of the dma write and check
114 } else if self.first < end { 115 if it's inside data we could have copied
115 // The available, unread portion in the ring buffer DOES NOT wrap 116 */
117 let (pos, complete_count) =
118 critical_section::with(|_| (self.pos(dma.get_remaining_transfers()), dma.get_complete_count()));
119 if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 {
120 Err(OverrunError)
121 } else {
122 self.start = (self.start + len) % self.cap();
116 123
117 if dma.get_complete_count() > 1 { 124 Ok((len, self.cap() - self.start))
118 return Err(OverrunError);
119 } 125 }
126 } else if self.start + buf.len() < self.cap() {
127 // The available, unread portion in the ring buffer DOES wrap
128 // The DMA writer has wrapped since we last read and is currently
129 // writing (or the next byte added will be) in the beginning of the ring buffer.
120 130
121 // Copy out the bytes from the dma buffer 131 // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.
122 let len = self.copy_to(buf, self.first..end); 132
133 // Copy out from the dma buffer
134 let len = self.copy_to(buf, self.start..self.cap());
123 135
124 compiler_fence(Ordering::SeqCst); 136 compiler_fence(Ordering::SeqCst);
125 137
126 match dma.get_complete_count() { 138 /*
127 0 => { 139 first, check if the dma has wrapped around more than once
128 // The DMA writer has not wrapped before nor after the copy
129 }
130 1 => {
131 // The DMA writer has written such that the ring buffer now wraps
132 self.ndtr = dma.ndtr();
133 if self.end() > self.first || dma.get_complete_count() > 1 {
134 // The bytes that we have copied out have overflowed
135 // as the writer has now both wrapped and is currently writing
136 // within the region that we have just copied out
137 return Err(OverrunError);
138 }
139 }
140 _ => {
141 return Err(OverrunError);
142 }
143 }
144 140
145 self.first = (self.first + len) % self.dma_buf.len(); 141 then, get the current position of of the dma write and check
146 Ok(len) 142 if it's inside data we could have copied
143 */
144 let pos = self.pos(dma.get_remaining_transfers());
145 if pos > self.start || pos < end || dma.get_complete_count() > 1 {
146 Err(OverrunError)
147 } else {
148 self.start = (self.start + len) % self.cap();
149
150 Ok((len, self.start + end))
151 }
147 } else { 152 } else {
148 // The available, unread portion in the ring buffer DOES wrap 153 // The available, unread portion in the ring buffer DOES wrap
149 // The DMA writer has wrapped since we last read and is currently 154 // The DMA writer has wrapped since we last read and is currently
150 // writing (or the next byte added will be) in the beginning of the ring buffer. 155 // writing (or the next byte added will be) in the beginning of the ring buffer.
151 156
152 let complete_count = dma.get_complete_count(); 157 // The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
153 if complete_count > 1 { 158 // so the next read will not have any unread tail bytes in the ring buffer.
154 return Err(OverrunError);
155 }
156
157 // If the unread portion wraps then the writer must also have wrapped
158 assert!(complete_count == 1);
159
160 if self.first + buf.len() < self.dma_buf.len() {
161 // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.
162 159
163 // Copy out from the dma buffer 160 // Copy out from the dma buffer
164 let len = self.copy_to(buf, self.first..self.dma_buf.len()); 161 let tail = self.copy_to(buf, self.start..self.cap());
162 let head = self.copy_to(&mut buf[tail..], 0..end);
165 163
166 compiler_fence(Ordering::SeqCst); 164 compiler_fence(Ordering::SeqCst);
167 165
168 // We have now copied out the data from dma_buf 166 /*
169 // Make sure that the just read part was not overwritten during the copy 167 first, check if the dma has wrapped around more than once
170 self.ndtr = dma.ndtr();
171 if self.end() > self.first || dma.get_complete_count() > 1 {
172 // The writer has entered the data that we have just read since we read out `end` in the beginning and until now.
173 return Err(OverrunError);
174 }
175 168
176 self.first = (self.first + len) % self.dma_buf.len(); 169 then, get the current position of the dma write and check
177 Ok(len) 170 if it's inside data we could have copied
171 */
172 let pos = self.pos(dma.get_remaining_transfers());
173 if pos > self.start || pos < end || dma.reset_complete_count() > 1 {
174 Err(OverrunError)
178 } else { 175 } else {
179 // The provided read buffer is large enough to include all bytes from the tail of the dma buffer, 176 self.start = head;
180 // so the next read will not have any unread tail bytes in the ring buffer. 177 Ok((tail + head, self.cap() - self.start))
181
182 // Copy out from the dma buffer
183 let tail = self.copy_to(buf, self.first..self.dma_buf.len());
184 let head = self.copy_to(&mut buf[tail..], 0..end);
185
186 compiler_fence(Ordering::SeqCst);
187
188 // We have now copied out the data from dma_buf
189 // Reset complete counter and make sure that the just read part was not overwritten during the copy
190 self.ndtr = dma.ndtr();
191 let complete_count = dma.reset_complete_count();
192 if self.end() > self.first || complete_count > 1 {
193 return Err(OverrunError);
194 }
195
196 self.first = head;
197 Ok(tail + head)
198 } 178 }
199 } 179 }
200 } 180 }
201
202 /// Copy from the dma buffer at `data_range` into `buf` 181 /// Copy from the dma buffer at `data_range` into `buf`
203 fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize { 182 fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
204 // Limit the number of bytes that can be copied 183 // Limit the number of bytes that can be copied
@@ -218,203 +197,289 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
218 length 197 length
219 } 198 }
220} 199}
221
222#[cfg(test)] 200#[cfg(test)]
223mod tests { 201mod tests {
224 use core::array; 202 use core::array;
225 use core::cell::RefCell; 203 use std::{cell, vec};
226 204
227 use super::*; 205 use super::*;
228 206
229 struct TestCtrl { 207 #[allow(dead_code)]
230 next_ndtr: RefCell<Option<usize>>, 208 #[derive(PartialEq, Debug)]
231 complete_count: usize, 209 enum TestCircularTransferRequest {
210 GetCompleteCount(usize),
211 ResetCompleteCount(usize),
212 PositionRequest(usize),
232 } 213 }
233 214
234 impl TestCtrl { 215 struct TestCircularTransfer {
235 pub const fn new() -> Self { 216 len: usize,
236 Self { 217 requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
237 next_ndtr: RefCell::new(None), 218 }
238 complete_count: 0, 219
220 impl DmaCtrl for &mut TestCircularTransfer {
221 fn get_remaining_transfers(&self) -> usize {
222 match self.requests.borrow_mut().pop().unwrap() {
223 TestCircularTransferRequest::PositionRequest(pos) => {
224 let len = self.len;
225
226 assert!(len >= pos);
227
228 len - pos
229 }
230 _ => unreachable!(),
239 } 231 }
240 } 232 }
241 233
242 pub fn set_next_ndtr(&mut self, ndtr: usize) { 234 fn get_complete_count(&self) -> usize {
243 self.next_ndtr.borrow_mut().replace(ndtr); 235 match self.requests.borrow_mut().pop().unwrap() {
236 TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count,
237 _ => unreachable!(),
238 }
244 } 239 }
245 }
246 240
247 impl DmaCtrl for &mut TestCtrl { 241 fn reset_complete_count(&mut self) -> usize {
248 fn ndtr(&self) -> usize { 242 match self.requests.get_mut().pop().unwrap() {
249 self.next_ndtr.borrow_mut().unwrap() 243 TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
244 _ => unreachable!(),
245 }
250 } 246 }
247 }
251 248
252 fn get_complete_count(&self) -> usize { 249 impl TestCircularTransfer {
253 self.complete_count 250 pub fn new(len: usize) -> Self {
251 Self {
252 requests: cell::RefCell::new(vec![]),
253 len: len,
254 }
254 } 255 }
255 256
256 fn reset_complete_count(&mut self) -> usize { 257 pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
257 let old = self.complete_count; 258 requests.reverse();
258 self.complete_count = 0; 259 self.requests.replace(requests);
259 old
260 } 260 }
261 } 261 }
262 262
263 #[test] 263 #[test]
264 fn empty() { 264 fn empty_and_read_not_started() {
265 let mut dma_buf = [0u8; 16]; 265 let mut dma_buf = [0u8; 16];
266 let ringbuf = DmaRingBuffer::new(&mut dma_buf); 266 let ringbuf = DmaRingBuffer::new(&mut dma_buf);
267 267
268 assert!(ringbuf.is_empty()); 268 assert_eq!(0, ringbuf.start);
269 assert_eq!(0, ringbuf.len());
270 } 269 }
271 270
272 #[test] 271 #[test]
273 fn can_read() { 272 fn can_read() {
273 let mut dma = TestCircularTransfer::new(16);
274
274 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 275 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
275 let mut ctrl = TestCtrl::new();
276 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); 276 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
277 ringbuf.ndtr = 6;
278 277
279 assert!(!ringbuf.is_empty()); 278 assert_eq!(0, ringbuf.start);
280 assert_eq!(10, ringbuf.len()); 279 assert_eq!(16, ringbuf.len());
281 280
281 dma.setup(vec![
282 TestCircularTransferRequest::PositionRequest(8),
283 TestCircularTransferRequest::PositionRequest(10),
284 TestCircularTransferRequest::GetCompleteCount(0),
285 ]);
282 let mut buf = [0; 2]; 286 let mut buf = [0; 2];
283 assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); 287 assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
284 assert_eq!([0, 1], buf); 288 assert_eq!([0, 1], buf);
285 assert_eq!(8, ringbuf.len()); 289 assert_eq!(2, ringbuf.start);
286 290
291 dma.setup(vec![
292 TestCircularTransferRequest::PositionRequest(10),
293 TestCircularTransferRequest::PositionRequest(12),
294 TestCircularTransferRequest::GetCompleteCount(0),
295 ]);
287 let mut buf = [0; 2]; 296 let mut buf = [0; 2];
288 assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); 297 assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
289 assert_eq!([2, 3], buf); 298 assert_eq!([2, 3], buf);
290 assert_eq!(6, ringbuf.len()); 299 assert_eq!(4, ringbuf.start);
291 300
301 dma.setup(vec![
302 TestCircularTransferRequest::PositionRequest(12),
303 TestCircularTransferRequest::PositionRequest(14),
304 TestCircularTransferRequest::GetCompleteCount(0),
305 ]);
292 let mut buf = [0; 8]; 306 let mut buf = [0; 8];
293 assert_eq!(6, ringbuf.read(&mut ctrl, &mut buf).unwrap()); 307 assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0);
294 assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]); 308 assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
295 assert_eq!(0, ringbuf.len()); 309 assert_eq!(12, ringbuf.start);
296
297 let mut buf = [0; 2];
298 assert_eq!(0, ringbuf.read(&mut ctrl, &mut buf).unwrap());
299 } 310 }
300 311
301 #[test] 312 #[test]
302 fn can_read_with_wrap() { 313 fn can_read_with_wrap() {
314 let mut dma = TestCircularTransfer::new(16);
315
303 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 316 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
304 let mut ctrl = TestCtrl::new();
305 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); 317 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
306 ringbuf.first = 12;
307 ringbuf.ndtr = 10;
308
309 // The dma controller has written 4 + 6 bytes and has reloaded NDTR
310 ctrl.complete_count = 1;
311 ctrl.set_next_ndtr(10);
312
313 assert!(!ringbuf.is_empty());
314 assert_eq!(6 + 4, ringbuf.len());
315 318
316 let mut buf = [0; 2]; 319 assert_eq!(0, ringbuf.start);
317 assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); 320 assert_eq!(16, ringbuf.len());
318 assert_eq!([12, 13], buf); 321
319 assert_eq!(6 + 2, ringbuf.len()); 322 /*
320 323 Read to close to the end of the buffer
321 let mut buf = [0; 4]; 324 */
322 assert_eq!(4, ringbuf.read(&mut ctrl, &mut buf).unwrap()); 325 dma.setup(vec![
323 assert_eq!([14, 15, 0, 1], buf); 326 TestCircularTransferRequest::PositionRequest(14),
324 assert_eq!(4, ringbuf.len()); 327 TestCircularTransferRequest::PositionRequest(16),
328 TestCircularTransferRequest::GetCompleteCount(0),
329 ]);
330 let mut buf = [0; 14];
331 assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
332 assert_eq!(14, ringbuf.start);
333
334 /*
335 Now, read around the buffer
336 */
337 dma.setup(vec![
338 TestCircularTransferRequest::PositionRequest(6),
339 TestCircularTransferRequest::PositionRequest(8),
340 TestCircularTransferRequest::ResetCompleteCount(1),
341 ]);
342 let mut buf = [0; 6];
343 assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
344 assert_eq!(4, ringbuf.start);
325 } 345 }
326 346
327 #[test] 347 #[test]
328 fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() { 348 fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
349 let mut dma = TestCircularTransfer::new(16);
350
329 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 351 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
330 let mut ctrl = TestCtrl::new();
331 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); 352 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
332 ringbuf.first = 2;
333 ringbuf.ndtr = 6;
334
335 // The dma controller has written 6 + 2 bytes and has reloaded NDTR
336 ctrl.complete_count = 1;
337 ctrl.set_next_ndtr(14);
338 353
354 assert_eq!(0, ringbuf.start);
355 assert_eq!(16, ringbuf.len());
356
357 /*
358 Read to close to the end of the buffer
359 */
360 dma.setup(vec![
361 TestCircularTransferRequest::PositionRequest(14),
362 TestCircularTransferRequest::PositionRequest(16),
363 TestCircularTransferRequest::GetCompleteCount(0),
364 ]);
365 let mut buf = [0; 14];
366 assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
367 assert_eq!(14, ringbuf.start);
368
369 /*
370 Now, read to the end of the buffer
371 */
372 dma.setup(vec![
373 TestCircularTransferRequest::PositionRequest(6),
374 TestCircularTransferRequest::PositionRequest(8),
375 TestCircularTransferRequest::ResetCompleteCount(1),
376 ]);
339 let mut buf = [0; 2]; 377 let mut buf = [0; 2];
340 assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); 378 assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
341 assert_eq!([2, 3], buf); 379 assert_eq!(0, ringbuf.start);
342
343 assert_eq!(1, ctrl.complete_count); // The interrupt flag IS NOT cleared
344 } 380 }
345 381
346 #[test] 382 #[test]
347 fn can_read_when_dma_writer_is_wrapped_and_read_wraps() { 383 fn can_read_when_dma_writer_wraps_once_with_same_ndtr() {
384 let mut dma = TestCircularTransfer::new(16);
385
348 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 386 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
349 let mut ctrl = TestCtrl::new();
350 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); 387 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
351 ringbuf.first = 12;
352 ringbuf.ndtr = 10;
353
354 // The dma controller has written 6 + 2 bytes and has reloaded NDTR
355 ctrl.complete_count = 1;
356 ctrl.set_next_ndtr(14);
357
358 let mut buf = [0; 10];
359 assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap());
360 assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf);
361 388
362 assert_eq!(0, ctrl.complete_count); // The interrupt flag IS cleared 389 assert_eq!(0, ringbuf.start);
363 } 390 assert_eq!(16, ringbuf.len());
364 391
365 #[test] 392 /*
366 fn cannot_read_when_dma_writer_wraps_with_same_ndtr() { 393 Read to about the middle of the buffer
367 let mut dma_buf = [0u8; 16]; 394 */
368 let mut ctrl = TestCtrl::new(); 395 dma.setup(vec![
369 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); 396 TestCircularTransferRequest::PositionRequest(6),
370 ringbuf.first = 6; 397 TestCircularTransferRequest::PositionRequest(6),
371 ringbuf.ndtr = 10; 398 TestCircularTransferRequest::GetCompleteCount(0),
372 ctrl.set_next_ndtr(9); 399 ]);
373 400 let mut buf = [0; 6];
374 assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty 401 assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
375 402 assert_eq!(6, ringbuf.start);
376 // The dma controller has written exactly 16 bytes 403
377 ctrl.complete_count = 1; 404 /*
378 405 Now, wrap the DMA controller around
379 let mut buf = [0; 2]; 406 */
380 assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); 407 dma.setup(vec![
381 408 TestCircularTransferRequest::PositionRequest(6),
382 assert_eq!(1, ctrl.complete_count); // The complete counter is not reset 409 TestCircularTransferRequest::GetCompleteCount(1),
410 TestCircularTransferRequest::PositionRequest(6),
411 TestCircularTransferRequest::GetCompleteCount(1),
412 ]);
413 let mut buf = [0; 6];
414 assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
415 assert_eq!(12, ringbuf.start);
383 } 416 }
384 417
385 #[test] 418 #[test]
386 fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() { 419 fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
420 let mut dma = TestCircularTransfer::new(16);
421
387 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 422 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
388 let mut ctrl = TestCtrl::new();
389 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); 423 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
390 ringbuf.first = 2;
391 ringbuf.ndtr = 6;
392
393 // The dma controller has written 6 + 3 bytes and has reloaded NDTR
394 ctrl.complete_count = 1;
395 ctrl.set_next_ndtr(13);
396
397 let mut buf = [0; 2];
398 assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
399 424
400 assert_eq!(1, ctrl.complete_count); // The complete counter is not reset 425 assert_eq!(0, ringbuf.start);
426 assert_eq!(16, ringbuf.len());
427
428 /*
429 Read a few bytes
430 */
431 dma.setup(vec![
432 TestCircularTransferRequest::PositionRequest(2),
433 TestCircularTransferRequest::PositionRequest(2),
434 TestCircularTransferRequest::GetCompleteCount(0),
435 ]);
436 let mut buf = [0; 6];
437 assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
438 assert_eq!(2, ringbuf.start);
439
440 /*
441 Now, overtake the reader
442 */
443 dma.setup(vec![
444 TestCircularTransferRequest::PositionRequest(4),
445 TestCircularTransferRequest::PositionRequest(6),
446 TestCircularTransferRequest::GetCompleteCount(1),
447 ]);
448 let mut buf = [0; 6];
449 assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
401 } 450 }
402 451
403 #[test] 452 #[test]
404 fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() { 453 fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
454 let mut dma = TestCircularTransfer::new(16);
455
405 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 456 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
406 let mut ctrl = TestCtrl::new();
407 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); 457 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
408 ringbuf.first = 12;
409 ringbuf.ndtr = 10;
410
411 // The dma controller has written 6 + 13 bytes and has reloaded NDTR
412 ctrl.complete_count = 1;
413 ctrl.set_next_ndtr(3);
414
415 let mut buf = [0; 2];
416 assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
417 458
418 assert_eq!(1, ctrl.complete_count); // The complete counter is not reset 459 assert_eq!(0, ringbuf.start);
460 assert_eq!(16, ringbuf.len());
461
462 /*
463 Read to close to the end of the buffer
464 */
465 dma.setup(vec![
466 TestCircularTransferRequest::PositionRequest(14),
467 TestCircularTransferRequest::PositionRequest(16),
468 TestCircularTransferRequest::GetCompleteCount(0),
469 ]);
470 let mut buf = [0; 14];
471 assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
472 assert_eq!(14, ringbuf.start);
473
474 /*
475 Now, overtake the reader
476 */
477 dma.setup(vec![
478 TestCircularTransferRequest::PositionRequest(8),
479 TestCircularTransferRequest::PositionRequest(10),
480 TestCircularTransferRequest::ResetCompleteCount(2),
481 ]);
482 let mut buf = [0; 6];
483 assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
419 } 484 }
420} 485}
diff --git a/embassy-stm32/src/lib.rs b/embassy-stm32/src/lib.rs
index c9df5c1b2..1258a65f1 100644
--- a/embassy-stm32/src/lib.rs
+++ b/embassy-stm32/src/lib.rs
@@ -1,4 +1,4 @@
1#![no_std] 1#![cfg_attr(not(test), no_std)]
2#![cfg_attr(feature = "nightly", feature(async_fn_in_trait, impl_trait_projections))] 2#![cfg_attr(feature = "nightly", feature(async_fn_in_trait, impl_trait_projections))]
3 3
4// This must go FIRST so that all the other modules see its macros. 4// This must go FIRST so that all the other modules see its macros.
diff --git a/embassy-stm32/src/usart/mod.rs b/embassy-stm32/src/usart/mod.rs
index 061c859a8..05ccb8749 100644
--- a/embassy-stm32/src/usart/mod.rs
+++ b/embassy-stm32/src/usart/mod.rs
@@ -13,6 +13,12 @@ use futures::future::{select, Either};
13use crate::dma::{NoDma, Transfer}; 13use crate::dma::{NoDma, Transfer};
14use crate::gpio::sealed::AFType; 14use crate::gpio::sealed::AFType;
15#[cfg(not(any(usart_v1, usart_v2)))] 15#[cfg(not(any(usart_v1, usart_v2)))]
16#[allow(unused_imports)]
17use crate::pac::usart::regs::Isr as Sr;
18#[cfg(any(usart_v1, usart_v2))]
19#[allow(unused_imports)]
20use crate::pac::usart::regs::Sr;
21#[cfg(not(any(usart_v1, usart_v2)))]
16use crate::pac::usart::Lpuart as Regs; 22use crate::pac::usart::Lpuart as Regs;
17#[cfg(any(usart_v1, usart_v2))] 23#[cfg(any(usart_v1, usart_v2))]
18use crate::pac::usart::Usart as Regs; 24use crate::pac::usart::Usart as Regs;
@@ -32,7 +38,6 @@ impl<T: BasicInstance> interrupt::Handler<T::Interrupt> for InterruptHandler<T>
32 38
33 let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) }; 39 let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) };
34 40
35 let mut wake = false;
36 let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie()); 41 let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie());
37 if has_errors { 42 if has_errors {
38 // clear all interrupts and DMA Rx Request 43 // clear all interrupts and DMA Rx Request
@@ -52,35 +57,24 @@ impl<T: BasicInstance> interrupt::Handler<T::Interrupt> for InterruptHandler<T>
52 w.set_dmar(false); 57 w.set_dmar(false);
53 }); 58 });
54 } 59 }
55 60 } else if cr1.idleie() && sr.idle() {
56 wake = true; 61 // IDLE detected: no more data will come
57 } else { 62 unsafe {
58 if cr1.idleie() && sr.idle() { 63 r.cr1().modify(|w| {
59 // IDLE detected: no more data will come 64 // disable idle line detection
60 unsafe { 65 w.set_idleie(false);
61 r.cr1().modify(|w| { 66 });
62 // disable idle line detection
63 w.set_idleie(false);
64 });
65 }
66
67 wake = true;
68 } 67 }
68 } else if cr1.rxneie() {
69 // We cannot check the RXNE flag as it is auto-cleared by the DMA controller
69 70
70 if cr1.rxneie() { 71 // It is up to the listener to determine if this in fact was a RX event and disable the RXNE detection
71 // We cannot check the RXNE flag as it is auto-cleared by the DMA controller 72 } else {
72 73 return;
73 // It is up to the listener to determine if this in fact was a RX event and disable the RXNE detection
74
75 wake = true;
76 }
77 } 74 }
78 75
79 if wake { 76 compiler_fence(Ordering::SeqCst);
80 compiler_fence(Ordering::SeqCst); 77 s.rx_waker.wake();
81
82 s.rx_waker.wake();
83 }
84 } 78 }
85} 79}
86 80
@@ -1109,9 +1103,9 @@ pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler;
1109mod buffered; 1103mod buffered;
1110 1104
1111#[cfg(not(gpdma))] 1105#[cfg(not(gpdma))]
1112mod rx_ringbuffered; 1106mod ringbuffered;
1113#[cfg(not(gpdma))] 1107#[cfg(not(gpdma))]
1114pub use rx_ringbuffered::RingBufferedUartRx; 1108pub use ringbuffered::RingBufferedUartRx;
1115 1109
1116use self::sealed::Kind; 1110use self::sealed::Kind;
1117 1111
diff --git a/embassy-stm32/src/usart/rx_ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs
index 33b750497..511b71c7f 100644
--- a/embassy-stm32/src/usart/rx_ringbuffered.rs
+++ b/embassy-stm32/src/usart/ringbuffered.rs
@@ -2,13 +2,12 @@ use core::future::poll_fn;
2use core::sync::atomic::{compiler_fence, Ordering}; 2use core::sync::atomic::{compiler_fence, Ordering};
3use core::task::Poll; 3use core::task::Poll;
4 4
5use embassy_hal_common::drop::OnDrop;
6use embassy_hal_common::PeripheralRef; 5use embassy_hal_common::PeripheralRef;
7use futures::future::{select, Either}; 6use futures::future::{select, Either};
8 7
9use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx}; 8use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx};
10use crate::dma::ringbuffer::OverrunError;
11use crate::dma::RingBuffer; 9use crate::dma::RingBuffer;
10use crate::usart::{Regs, Sr};
12 11
13pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> { 12pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> {
14 _peri: PeripheralRef<'d, T>, 13 _peri: PeripheralRef<'d, T>,
@@ -24,7 +23,9 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
24 23
25 let request = self.rx_dma.request(); 24 let request = self.rx_dma.request();
26 let opts = Default::default(); 25 let opts = Default::default();
26
27 let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) }; 27 let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) };
28
28 RingBufferedUartRx { 29 RingBufferedUartRx {
29 _peri: self._peri, 30 _peri: self._peri,
30 ring_buf, 31 ring_buf,
@@ -42,11 +43,18 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
42 Ok(()) 43 Ok(())
43 } 44 }
44 45
46 fn stop(&mut self, err: Error) -> Result<usize, Error> {
47 self.teardown_uart();
48
49 Err(err)
50 }
51
45 /// Start uart background receive 52 /// Start uart background receive
46 fn setup_uart(&mut self) { 53 fn setup_uart(&mut self) {
47 // fence before starting DMA. 54 // fence before starting DMA.
48 compiler_fence(Ordering::SeqCst); 55 compiler_fence(Ordering::SeqCst);
49 56
57 // start the dma controller
50 self.ring_buf.start(); 58 self.ring_buf.start();
51 59
52 let r = T::regs(); 60 let r = T::regs();
@@ -58,8 +66,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
58 w.set_rxneie(false); 66 w.set_rxneie(false);
59 // enable parity interrupt if not ParityNone 67 // enable parity interrupt if not ParityNone
60 w.set_peie(w.pce()); 68 w.set_peie(w.pce());
61 // disable idle line interrupt 69 // enable idle line interrupt
62 w.set_idleie(false); 70 w.set_idleie(true);
63 }); 71 });
64 r.cr3().modify(|w| { 72 r.cr3().modify(|w| {
65 // enable Error Interrupt: (Frame error, Noise error, Overrun error) 73 // enable Error Interrupt: (Frame error, Noise error, Overrun error)
@@ -72,6 +80,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
72 80
73 /// Stop uart background receive 81 /// Stop uart background receive
74 fn teardown_uart(&mut self) { 82 fn teardown_uart(&mut self) {
83 self.ring_buf.request_stop();
84
75 let r = T::regs(); 85 let r = T::regs();
76 // clear all interrupts and DMA Rx Request 86 // clear all interrupts and DMA Rx Request
77 // SAFETY: only clears Rx related flags 87 // SAFETY: only clears Rx related flags
@@ -93,9 +103,6 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
93 } 103 }
94 104
95 compiler_fence(Ordering::SeqCst); 105 compiler_fence(Ordering::SeqCst);
96
97 self.ring_buf.request_stop();
98 while self.ring_buf.is_running() {}
99 } 106 }
100 107
101 /// Read bytes that are readily available in the ring buffer. 108 /// Read bytes that are readily available in the ring buffer.
@@ -111,96 +118,49 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
111 118
112 // Start background receive if it was not already started 119 // Start background receive if it was not already started
113 // SAFETY: read only 120 // SAFETY: read only
114 let is_started = unsafe { r.cr3().read().dmar() }; 121 match unsafe { r.cr3().read().dmar() } {
115 if !is_started { 122 false => self.start()?,
116 self.start()?; 123 _ => {}
117 } 124 };
118 125
119 // SAFETY: read only and we only use Rx related flags 126 check_for_errors(clear_idle_flag(T::regs()))?;
120 let s = unsafe { sr(r).read() };
121 let has_errors = s.pe() || s.fe() || s.ne() || s.ore();
122 if has_errors {
123 self.teardown_uart();
124
125 if s.pe() {
126 return Err(Error::Parity);
127 } else if s.fe() {
128 return Err(Error::Framing);
129 } else if s.ne() {
130 return Err(Error::Noise);
131 } else {
132 return Err(Error::Overrun);
133 }
134 }
135
136 self.ring_buf.reload_position();
137 match self.ring_buf.read(buf) {
138 Ok(len) if len == 0 => {}
139 Ok(len) => {
140 assert!(len > 0);
141 return Ok(len);
142 }
143 Err(OverrunError) => {
144 // Stop any transfer from now on
145 // The user must re-start to receive any more data
146 self.teardown_uart();
147 return Err(Error::Overrun);
148 }
149 }
150 127
151 loop { 128 loop {
152 self.wait_for_data_or_idle().await?; 129 match self.ring_buf.read(buf) {
130 Ok((0, _)) => {}
131 Ok((len, _)) => {
132 return Ok(len);
133 }
134 Err(_) => {
135 return self.stop(Error::Overrun);
136 }
137 }
153 138
154 self.ring_buf.reload_position(); 139 match self.wait_for_data_or_idle().await {
155 if !self.ring_buf.is_empty() { 140 Ok(_) => {}
156 break; 141 Err(err) => {
142 return self.stop(err);
143 }
157 } 144 }
158 } 145 }
159
160 let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?;
161 assert!(len > 0);
162
163 Ok(len)
164 } 146 }
165 147
166 /// Wait for uart idle or dma half-full or full 148 /// Wait for uart idle or dma half-full or full
167 async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> { 149 async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
168 let r = T::regs();
169
170 // make sure USART state is restored to neutral state
171 let _on_drop = OnDrop::new(move || {
172 // SAFETY: only clears Rx related flags
173 unsafe {
174 r.cr1().modify(|w| {
175 // disable idle line interrupt
176 w.set_idleie(false);
177 });
178 }
179 });
180
181 // SAFETY: only sets Rx related flags
182 unsafe {
183 r.cr1().modify(|w| {
184 // enable idle line interrupt
185 w.set_idleie(true);
186 });
187 }
188
189 compiler_fence(Ordering::SeqCst); 150 compiler_fence(Ordering::SeqCst);
190 151
152 let mut dma_init = false;
191 // Future which completes when there is dma is half full or full 153 // Future which completes when there is dma is half full or full
192 let dma = poll_fn(|cx| { 154 let dma = poll_fn(|cx| {
193 self.ring_buf.set_waker(cx.waker()); 155 self.ring_buf.set_waker(cx.waker());
194 156
195 compiler_fence(Ordering::SeqCst); 157 let status = match dma_init {
158 false => Poll::Pending,
159 true => Poll::Ready(()),
160 };
196 161
197 self.ring_buf.reload_position(); 162 dma_init = true;
198 if !self.ring_buf.is_empty() { 163 status
199 // Some data is now available
200 Poll::Ready(())
201 } else {
202 Poll::Pending
203 }
204 }); 164 });
205 165
206 // Future which completes when idle line is detected 166 // Future which completes when idle line is detected
@@ -210,28 +170,11 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
210 170
211 compiler_fence(Ordering::SeqCst); 171 compiler_fence(Ordering::SeqCst);
212 172
213 // SAFETY: read only and we only use Rx related flags 173 // Critical section is needed so that IDLE isn't set after
214 let sr = unsafe { sr(r).read() }; 174 // our read but before we clear it.
215 175 let sr = critical_section::with(|_| clear_idle_flag(T::regs()));
216 // SAFETY: only clears Rx related flags
217 unsafe {
218 // This read also clears the error and idle interrupt flags on v1.
219 rdr(r).read_volatile();
220 clear_interrupt_flags(r, sr);
221 }
222 176
223 let has_errors = sr.pe() || sr.fe() || sr.ne() || sr.ore(); 177 check_for_errors(sr)?;
224 if has_errors {
225 if sr.pe() {
226 return Poll::Ready(Err(Error::Parity));
227 } else if sr.fe() {
228 return Poll::Ready(Err(Error::Framing));
229 } else if sr.ne() {
230 return Poll::Ready(Err(Error::Noise));
231 } else {
232 return Poll::Ready(Err(Error::Overrun));
233 }
234 }
235 178
236 if sr.idle() { 179 if sr.idle() {
237 // Idle line is detected 180 // Idle line is detected
@@ -243,11 +186,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
243 186
244 match select(dma, uart).await { 187 match select(dma, uart).await {
245 Either::Left(((), _)) => Ok(()), 188 Either::Left(((), _)) => Ok(()),
246 Either::Right((Ok(()), _)) => Ok(()), 189 Either::Right((result, _)) => result,
247 Either::Right((Err(e), _)) => {
248 self.teardown_uart();
249 Err(e)
250 }
251 } 190 }
252 } 191 }
253} 192}
@@ -257,6 +196,37 @@ impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T
257 self.teardown_uart(); 196 self.teardown_uart();
258 } 197 }
259} 198}
199/// Return an error result if the Sr register has errors
200fn check_for_errors(s: Sr) -> Result<(), Error> {
201 if s.pe() {
202 Err(Error::Parity)
203 } else if s.fe() {
204 Err(Error::Framing)
205 } else if s.ne() {
206 Err(Error::Noise)
207 } else if s.ore() {
208 Err(Error::Overrun)
209 } else {
210 Ok(())
211 }
212}
213
214/// Clear IDLE and return the Sr register
215fn clear_idle_flag(r: Regs) -> Sr {
216 unsafe {
217 // SAFETY: read only and we only use Rx related flags
218
219 let sr = sr(r).read();
220
221 // This read also clears the error and idle interrupt flags on v1.
222 rdr(r).read_volatile();
223 clear_interrupt_flags(r, sr);
224
225 r.cr1().modify(|w| w.set_idleie(true));
226
227 sr
228 }
229}
260 230
261#[cfg(all(feature = "unstable-traits", feature = "nightly"))] 231#[cfg(all(feature = "unstable-traits", feature = "nightly"))]
262mod eio { 232mod eio {