aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--embassy-stm32/src/dma/dma.rs174
-rw-r--r--embassy-stm32/src/dma/mod.rs1
-rw-r--r--embassy-stm32/src/dma/ringbuffer.rs433
-rw-r--r--embassy-stm32/src/usart/mod.rs43
-rw-r--r--embassy-stm32/src/usart/rx_ringbuffered.rs286
-rw-r--r--tests/stm32/Cargo.toml2
-rw-r--r--tests/stm32/src/bin/usart_rx_ringbuffered.rs188
-rw-r--r--tests/utils/Cargo.toml10
-rw-r--r--tests/utils/src/bin/saturate_serial.rs52
9 files changed, 1171 insertions, 18 deletions
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs
index ef1d27573..17d82fe2d 100644
--- a/embassy-stm32/src/dma/dma.rs
+++ b/embassy-stm32/src/dma/dma.rs
@@ -9,6 +9,7 @@ use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
9use embassy_sync::waitqueue::AtomicWaker; 9use embassy_sync::waitqueue::AtomicWaker;
10use pac::dma::regs; 10use pac::dma::regs;
11 11
12use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError};
12use super::word::{Word, WordSize}; 13use super::word::{Word, WordSize};
13use super::Dir; 14use super::Dir;
14use crate::_generated::DMA_CHANNEL_COUNT; 15use crate::_generated::DMA_CHANNEL_COUNT;
@@ -445,7 +446,6 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
445 446
446// ================================== 447// ==================================
447 448
448#[must_use = "futures do nothing unless you `.await` or poll them"]
449pub struct DoubleBuffered<'a, C: Channel, W: Word> { 449pub struct DoubleBuffered<'a, C: Channel, W: Word> {
450 channel: PeripheralRef<'a, C>, 450 channel: PeripheralRef<'a, C>,
451 _phantom: PhantomData<W>, 451 _phantom: PhantomData<W>,
@@ -578,18 +578,184 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
578 let ch = self.channel.regs().st(self.channel.num()); 578 let ch = self.channel.regs().st(self.channel.num());
579 unsafe { ch.ndtr().read() }.ndt() 579 unsafe { ch.ndtr().read() }.ndt()
580 } 580 }
581}
581 582
582 pub fn blocking_wait(mut self) { 583impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
584 fn drop(&mut self) {
585 self.request_stop();
583 while self.is_running() {} 586 while self.is_running() {}
584 587
585 // "Subsequent reads and writes cannot be moved ahead of preceding reads." 588 // "Subsequent reads and writes cannot be moved ahead of preceding reads."
586 fence(Ordering::SeqCst); 589 fence(Ordering::SeqCst);
590 }
591}
587 592
588 core::mem::forget(self); 593// ==============================
594
595impl<C: Channel> DmaCtrl for C {
596 fn tcif(&self) -> bool {
597 let channel_number = self.num();
598 let dma = self.regs();
599 let isrn = channel_number / 4;
600 let isrbit = channel_number % 4;
601
602 unsafe { dma.isr(isrn).read() }.tcif(isrbit)
603 }
604
605 fn clear_tcif(&mut self) {
606 let channel_number = self.num();
607 let dma = self.regs();
608 let isrn = channel_number / 4;
609 let isrbit = channel_number % 4;
610
611 unsafe {
612 dma.ifcr(isrn).write(|w| {
613 w.set_tcif(isrbit, true);
614 })
615 }
616 }
617
618 fn ndtr(&self) -> usize {
619 let ch = self.regs().st(self.num());
620 unsafe { ch.ndtr().read() }.ndt() as usize
589 } 621 }
590} 622}
591 623
592impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> { 624pub struct RingBuffer<'a, C: Channel, W: Word> {
625 cr: regs::Cr,
626 channel: PeripheralRef<'a, C>,
627 ringbuf: DmaRingBuffer<'a, W>,
628}
629
630impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
631 pub unsafe fn new_read(
632 channel: impl Peripheral<P = C> + 'a,
633 _request: Request,
634 peri_addr: *mut W,
635 buffer: &'a mut [W],
636 options: TransferOptions,
637 ) -> Self {
638 into_ref!(channel);
639
640 let len = buffer.len();
641 assert!(len > 0 && len <= 0xFFFF);
642
643 let dir = Dir::PeripheralToMemory;
644 let data_size = W::size();
645
646 let channel_number = channel.num();
647 let dma = channel.regs();
648
649 // "Preceding reads and writes cannot be moved past subsequent writes."
650 fence(Ordering::SeqCst);
651
652 let mut w = regs::Cr(0);
653 w.set_dir(dir.into());
654 w.set_msize(data_size.into());
655 w.set_psize(data_size.into());
656 w.set_pl(vals::Pl::VERYHIGH);
657 w.set_minc(vals::Inc::INCREMENTED);
658 w.set_pinc(vals::Inc::FIXED);
659 w.set_teie(true);
660 w.set_tcie(false);
661 w.set_circ(vals::Circ::ENABLED);
662 #[cfg(dma_v1)]
663 w.set_trbuff(true);
664 #[cfg(dma_v2)]
665 w.set_chsel(_request);
666 w.set_pburst(options.pburst.into());
667 w.set_mburst(options.mburst.into());
668 w.set_pfctrl(options.flow_ctrl.into());
669 w.set_en(true);
670
671 let buffer_ptr = buffer.as_mut_ptr();
672 let mut this = Self {
673 channel,
674 cr: w,
675 ringbuf: DmaRingBuffer::new(buffer),
676 };
677 this.clear_irqs();
678
679 #[cfg(dmamux)]
680 super::dmamux::configure_dmamux(&mut *this.channel, _request);
681
682 let ch = dma.st(channel_number);
683 ch.par().write_value(peri_addr as u32);
684 ch.m0ar().write_value(buffer_ptr as u32);
685 ch.ndtr().write_value(regs::Ndtr(len as _));
686 ch.fcr().write(|w| {
687 if let Some(fth) = options.fifo_threshold {
688 // FIFO mode
689 w.set_dmdis(vals::Dmdis::DISABLED);
690 w.set_fth(fth.into());
691 } else {
692 // Direct mode
693 w.set_dmdis(vals::Dmdis::ENABLED);
694 }
695 });
696
697 this
698 }
699
700 pub fn start(&mut self) {
701 let ch = self.channel.regs().st(self.channel.num());
702 unsafe { ch.cr().write_value(self.cr) }
703 }
704
705 pub fn clear(&mut self) {
706 self.ringbuf.clear();
707 }
708
709 /// Read bytes from the ring buffer
710 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
711 pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
712 self.ringbuf.read(&mut *self.channel, buf)
713 }
714
715 fn clear_irqs(&mut self) {
716 let channel_number = self.channel.num();
717 let dma = self.channel.regs();
718 let isrn = channel_number / 4;
719 let isrbit = channel_number % 4;
720
721 unsafe {
722 dma.ifcr(isrn).write(|w| {
723 w.set_tcif(isrbit, true);
724 w.set_teif(isrbit, true);
725 })
726 }
727 }
728
729 pub fn request_stop(&mut self) {
730 let ch = self.channel.regs().st(self.channel.num());
731
732 // Disable the channel. Keep the IEs enabled so the irqs still fire.
733 unsafe {
734 ch.cr().write(|w| {
735 w.set_teie(true);
736 w.set_tcie(true);
737 })
738 }
739 }
740
741 pub fn is_running(&mut self) -> bool {
742 let ch = self.channel.regs().st(self.channel.num());
743 unsafe { ch.cr().read() }.en()
744 }
745
746 /// Gets the total remaining transfers for the channel
747 /// Note: this will be zero for transfers that completed without cancellation.
748 pub fn get_remaining_transfers(&self) -> usize {
749 let ch = self.channel.regs().st(self.channel.num());
750 unsafe { ch.ndtr().read() }.ndt() as usize
751 }
752
753 pub fn set_ndtr(&mut self, ndtr: usize) {
754 self.ringbuf.ndtr = ndtr;
755 }
756}
757
758impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
593 fn drop(&mut self) { 759 fn drop(&mut self) {
594 self.request_stop(); 760 self.request_stop();
595 while self.is_running() {} 761 while self.is_running() {}
diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs
index 3312ca752..3ac0d1b3d 100644
--- a/embassy-stm32/src/dma/mod.rs
+++ b/embassy-stm32/src/dma/mod.rs
@@ -21,6 +21,7 @@ pub use gpdma::*;
21#[cfg(dmamux)] 21#[cfg(dmamux)]
22mod dmamux; 22mod dmamux;
23 23
24pub(crate) mod ringbuffer;
24pub mod word; 25pub mod word;
25 26
26use core::mem; 27use core::mem;
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs
new file mode 100644
index 000000000..f9ace6018
--- /dev/null
+++ b/embassy-stm32/src/dma/ringbuffer.rs
@@ -0,0 +1,433 @@
1use core::ops::Range;
2use core::sync::atomic::{compiler_fence, Ordering};
3
4use super::word::Word;
5
6/// A "read-only" ring-buffer to be used together with the DMA controller which
7/// writes in a circular way, "uncontrolled" to the buffer.
8///
9/// A snapshot of the ring buffer state can be attained by setting the `ndtr` field
10/// to the current register value. `ndtr` describes the current position of the DMA
11/// write.
12///
13/// # Safety
14///
15/// The ring buffer controls the TCIF (transfer completed interrupt flag) to
16/// detect buffer overruns, hence this interrupt must be disabled.
17/// The buffer can detect overruns up to one period, that is, for a X byte buffer,
18/// overruns can be detected if they happen from byte X+1 up to 2X. After this
19/// point, overruns may or may not be detected.
20///
21/// # Buffer layout
22///
23/// ```text
24/// Without wraparound: With wraparound:
25///
26/// + buf +--- NDTR ---+ + buf +---------- NDTR ----------+
27/// | | | | | |
28/// v v v v v v
29/// +-----------------------------------------+ +-----------------------------------------+
30/// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo| |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX|
31/// +-----------------------------------------+ +-----------------------------------------+
32/// ^ ^ ^ ^ ^ ^
33/// | | | | | |
34/// +- first --+ | +- end ------+ |
35/// | | | |
36/// +- end --------------------+ +- first ----------------+
37/// ```
38pub struct DmaRingBuffer<'a, W: Word> {
39 pub(crate) dma_buf: &'a mut [W],
40 first: usize,
41 pub ndtr: usize,
42 expect_next_read_to_wrap: bool,
43}
44
45#[derive(Debug, PartialEq)]
46pub struct OverrunError;
47
48pub trait DmaCtrl {
49 /// Get the NDTR register value, i.e. the space left in the underlying
50 /// buffer until the dma writer wraps.
51 fn ndtr(&self) -> usize;
52
53 /// Read the transfer completed interrupt flag
54 /// This flag is set by the dma controller when NDTR is reloaded,
55 /// i.e. when the writing wraps.
56 fn tcif(&self) -> bool;
57
58 /// Clear the transfer completed interrupt flag
59 fn clear_tcif(&mut self);
60}
61
62impl<'a, W: Word> DmaRingBuffer<'a, W> {
63 pub fn new(dma_buf: &'a mut [W]) -> Self {
64 let ndtr = dma_buf.len();
65 Self {
66 dma_buf,
67 first: 0,
68 ndtr,
69 expect_next_read_to_wrap: false,
70 }
71 }
72
73 /// Reset the ring buffer to its initial state
74 pub fn clear(&mut self) {
75 self.first = 0;
76 self.ndtr = self.dma_buf.len();
77 self.expect_next_read_to_wrap = false;
78 }
79
80 /// The buffer end position
81 fn end(&self) -> usize {
82 self.dma_buf.len() - self.ndtr
83 }
84
85 /// Returns whether the buffer is empty
86 #[allow(dead_code)]
87 pub fn is_empty(&self) -> bool {
88 self.first == self.end()
89 }
90
91 /// The current number of bytes in the buffer
92 /// This may change at any time if dma is currently active
93 #[allow(dead_code)]
94 pub fn len(&self) -> usize {
95 // Read out a stable end (the dma peripheral can change it at any time)
96 let end = self.end();
97 if self.first <= end {
98 // No wrap
99 end - self.first
100 } else {
101 self.dma_buf.len() - self.first + end
102 }
103 }
104
105 /// Read bytes from the ring buffer
106 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
107 pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<usize, OverrunError> {
108 let end = self.end();
109
110 compiler_fence(Ordering::SeqCst);
111
112 if self.first == end {
113 // The buffer is currently empty
114
115 if dma.tcif() {
116 // The dma controller has written such that the ring buffer now wraps
117 // This is the special case where exactly n*dma_buf.len(), n = 1,2,..., bytes was written,
118 // but where additional bytes are now written causing the ring buffer to wrap.
119 // This is only an error if the writing has passed the current unread region.
120 self.ndtr = dma.ndtr();
121 if self.end() > self.first {
122 dma.clear_tcif();
123 return Err(OverrunError);
124 }
125 }
126
127 self.expect_next_read_to_wrap = false;
128 Ok(0)
129 } else if self.first < end {
130 // The available, unread portion in the ring buffer DOES NOT wrap
131
132 if self.expect_next_read_to_wrap {
133 // The read was expected to wrap but it did not
134
135 dma.clear_tcif();
136 return Err(OverrunError);
137 }
138
139 // Copy out the bytes from the dma buffer
140 let len = self.copy_to(buf, self.first..end);
141
142 compiler_fence(Ordering::SeqCst);
143
144 if dma.tcif() {
145 // The dma controller has written such that the ring buffer now wraps
146
147 self.ndtr = dma.ndtr();
148 if self.end() > self.first {
149 // The bytes that we have copied out have overflowed
150 // as the writer has now both wrapped and is currently writing
151 // within the region that we have just copied out
152
153 // Clear transfer completed interrupt flag
154 dma.clear_tcif();
155 return Err(OverrunError);
156 }
157 }
158
159 self.first = (self.first + len) % self.dma_buf.len();
160 self.expect_next_read_to_wrap = false;
161 Ok(len)
162 } else {
163 // The available, unread portion in the ring buffer DOES wrap
164 // The dma controller has wrapped since we last read and is currently
165 // writing (or the next byte added will be) in the beginning of the ring buffer.
166
167 // If the unread portion wraps then the writer must also have wrapped,
168 // or it has wrapped and we already cleared the TCIF flag
169 assert!(dma.tcif() || self.expect_next_read_to_wrap);
170
171 // Clear transfer completed interrupt flag
172 dma.clear_tcif();
173
174 if self.first + buf.len() < self.dma_buf.len() {
175 // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.
176
177 // Copy out from the dma buffer
178 let len = self.copy_to(buf, self.first..self.dma_buf.len());
179
180 compiler_fence(Ordering::SeqCst);
181
182 // We have now copied out the data from dma_buf
183 // Make sure that the just read part was not overwritten during the copy
184 self.ndtr = dma.ndtr();
185 if self.end() > self.first || dma.tcif() {
186 // The writer has entered the data that we have just read since we read out `end` in the beginning and until now.
187 return Err(OverrunError);
188 }
189
190 self.first = (self.first + len) % self.dma_buf.len();
191 self.expect_next_read_to_wrap = true;
192 Ok(len)
193 } else {
194 // The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
195 // so the next read will not have any unread tail bytes in the ring buffer.
196
197 // Copy out from the dma buffer
198 let tail = self.copy_to(buf, self.first..self.dma_buf.len());
199 let head = self.copy_to(&mut buf[tail..], 0..end);
200
201 compiler_fence(Ordering::SeqCst);
202
203 // We have now copied out the data from dma_buf
204 // Make sure that the just read part was not overwritten during the copy
205 self.ndtr = dma.ndtr();
206 if self.end() > self.first || dma.tcif() {
207 return Err(OverrunError);
208 }
209
210 self.first = head;
211 self.expect_next_read_to_wrap = false;
212 Ok(tail + head)
213 }
214 }
215 }
216
217 /// Copy from the dma buffer at `data_range` into `buf`
218 fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
219 // Limit the number of bytes that can be copied
220 let length = usize::min(data_range.len(), buf.len());
221
222 // Copy from dma buffer into read buffer
223 // We need to do it like this instead of a simple copy_from_slice() because
224 // reading from a part of memory that may be simultaneously written to is unsafe
225 unsafe {
226 let dma_buf = self.dma_buf.as_ptr();
227
228 for i in 0..length {
229 buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize));
230 }
231 }
232
233 length
234 }
235}
236
237#[cfg(test)]
238mod tests {
239 use core::array;
240 use core::cell::RefCell;
241
242 use super::*;
243
244 struct TestCtrl {
245 next_ndtr: RefCell<Option<usize>>,
246 tcif: bool,
247 }
248
249 impl TestCtrl {
250 pub const fn new() -> Self {
251 Self {
252 next_ndtr: RefCell::new(None),
253 tcif: false,
254 }
255 }
256
257 pub fn set_next_ndtr(&mut self, ndtr: usize) {
258 self.next_ndtr.borrow_mut().replace(ndtr);
259 }
260 }
261
262 impl DmaCtrl for TestCtrl {
263 fn ndtr(&self) -> usize {
264 self.next_ndtr.borrow_mut().unwrap()
265 }
266
267 fn tcif(&self) -> bool {
268 self.tcif
269 }
270
271 fn clear_tcif(&mut self) {
272 self.tcif = false;
273 }
274 }
275
276 #[test]
277 fn empty() {
278 let mut dma_buf = [0u8; 16];
279 let ringbuf = DmaRingBuffer::new(&mut dma_buf);
280
281 assert!(ringbuf.is_empty());
282 assert_eq!(0, ringbuf.len());
283 }
284
285 #[test]
286 fn can_read() {
287 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
288 let mut ctrl = TestCtrl::new();
289 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
290 ringbuf.ndtr = 6;
291
292 assert!(!ringbuf.is_empty());
293 assert_eq!(10, ringbuf.len());
294
295 let mut buf = [0; 2];
296 assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
297 assert_eq!([0, 1], buf);
298 assert_eq!(8, ringbuf.len());
299
300 let mut buf = [0; 2];
301 assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
302 assert_eq!([2, 3], buf);
303 assert_eq!(6, ringbuf.len());
304
305 let mut buf = [0; 8];
306 assert_eq!(6, ringbuf.read(&mut ctrl, &mut buf).unwrap());
307 assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
308 assert_eq!(0, ringbuf.len());
309
310 let mut buf = [0; 2];
311 assert_eq!(0, ringbuf.read(&mut ctrl, &mut buf).unwrap());
312 }
313
314 #[test]
315 fn can_read_with_wrap() {
316 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
317 let mut ctrl = TestCtrl::new();
318 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
319 ringbuf.first = 12;
320 ringbuf.ndtr = 10;
321
322 // The dma controller has written 4 + 6 bytes and has reloaded NDTR
323 ctrl.tcif = true;
324 ctrl.set_next_ndtr(10);
325
326 assert!(!ringbuf.is_empty());
327 assert_eq!(6 + 4, ringbuf.len());
328
329 let mut buf = [0; 2];
330 assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
331 assert_eq!([12, 13], buf);
332 assert_eq!(6 + 2, ringbuf.len());
333
334 let mut buf = [0; 4];
335 assert_eq!(4, ringbuf.read(&mut ctrl, &mut buf).unwrap());
336 assert_eq!([14, 15, 0, 1], buf);
337 assert_eq!(4, ringbuf.len());
338 }
339
340 #[test]
341 fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
342 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
343 let mut ctrl = TestCtrl::new();
344 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
345 ringbuf.first = 2;
346 ringbuf.ndtr = 6;
347
348 // The dma controller has written 6 + 2 bytes and has reloaded NDTR
349 ctrl.tcif = true;
350 ctrl.set_next_ndtr(14);
351
352 let mut buf = [0; 2];
353 assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
354 assert_eq!([2, 3], buf);
355
356 assert_eq!(true, ctrl.tcif); // The interrupt flag IS NOT cleared
357 }
358
359 #[test]
360 fn can_read_when_dma_writer_is_wrapped_and_read_wraps() {
361 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
362 let mut ctrl = TestCtrl::new();
363 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
364 ringbuf.first = 12;
365 ringbuf.ndtr = 10;
366
367 // The dma controller has written 6 + 2 bytes and has reloaded NDTR
368 ctrl.tcif = true;
369 ctrl.set_next_ndtr(14);
370
371 let mut buf = [0; 10];
372 assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap());
373 assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf);
374
375 assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared
376 }
377
378 #[test]
379 fn cannot_read_when_dma_writer_wraps_with_same_ndtr() {
380 let mut dma_buf = [0u8; 16];
381 let mut ctrl = TestCtrl::new();
382 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
383 ringbuf.first = 6;
384 ringbuf.ndtr = 10;
385 ctrl.set_next_ndtr(9);
386
387 assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty
388
389 // The dma controller has written exactly 16 bytes
390 ctrl.tcif = true;
391
392 let mut buf = [0; 2];
393 assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
394
395 assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared
396 }
397
398 #[test]
399 fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
400 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
401 let mut ctrl = TestCtrl::new();
402 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
403 ringbuf.first = 2;
404 ringbuf.ndtr = 6;
405
406 // The dma controller has written 6 + 3 bytes and has reloaded NDTR
407 ctrl.tcif = true;
408 ctrl.set_next_ndtr(13);
409
410 let mut buf = [0; 2];
411 assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
412
413 assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared
414 }
415
416 #[test]
417 fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
418 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
419 let mut ctrl = TestCtrl::new();
420 let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
421 ringbuf.first = 12;
422 ringbuf.ndtr = 10;
423
424 // The dma controller has written 6 + 13 bytes and has reloaded NDTR
425 ctrl.tcif = true;
426 ctrl.set_next_ndtr(3);
427
428 let mut buf = [0; 2];
429 assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
430
431 assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared
432 }
433}
diff --git a/embassy-stm32/src/usart/mod.rs b/embassy-stm32/src/usart/mod.rs
index 266561659..fea0c5f11 100644
--- a/embassy-stm32/src/usart/mod.rs
+++ b/embassy-stm32/src/usart/mod.rs
@@ -283,8 +283,8 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
283 283
284 let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) }; 284 let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) };
285 285
286 let mut wake = false;
286 let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie()); 287 let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie());
287
288 if has_errors { 288 if has_errors {
289 // clear all interrupts and DMA Rx Request 289 // clear all interrupts and DMA Rx Request
290 unsafe { 290 unsafe {
@@ -304,22 +304,35 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
304 }); 304 });
305 } 305 }
306 306
307 compiler_fence(Ordering::SeqCst); 307 wake = true;
308 } else {
309 if cr1.idleie() && sr.idle() {
310 // IDLE detected: no more data will come
311 unsafe {
312 r.cr1().modify(|w| {
313 // disable idle line detection
314 w.set_idleie(false);
315 });
316
317 r.cr3().modify(|w| {
318 // disable DMA Rx Request
319 w.set_dmar(false);
320 });
321 }
308 322
309 s.rx_waker.wake(); 323 wake = true;
310 } else if cr1.idleie() && sr.idle() { 324 }
311 // IDLE detected: no more data will come
312 unsafe {
313 r.cr1().modify(|w| {
314 // disable idle line detection
315 w.set_idleie(false);
316 });
317 325
318 r.cr3().modify(|w| { 326 if cr1.rxneie() {
319 // disable DMA Rx Request 327 // We cannot check the RXNE flag as it is auto-cleared by the DMA controller
320 w.set_dmar(false); 328
321 }); 329 // It is up to the listener to determine if this in fact was a RX event and disable the RXNE detection
330
331 wake = true;
322 } 332 }
333 }
334
335 if wake {
323 compiler_fence(Ordering::SeqCst); 336 compiler_fence(Ordering::SeqCst);
324 337
325 s.rx_waker.wake(); 338 s.rx_waker.wake();
@@ -972,6 +985,8 @@ mod eio {
972pub use buffered::*; 985pub use buffered::*;
973#[cfg(feature = "nightly")] 986#[cfg(feature = "nightly")]
974mod buffered; 987mod buffered;
988mod rx_ringbuffered;
989pub use rx_ringbuffered::RingBufferedUartRx;
975 990
976#[cfg(usart_v1)] 991#[cfg(usart_v1)]
977fn tdr(r: crate::pac::usart::Usart) -> *mut u8 { 992fn tdr(r: crate::pac::usart::Usart) -> *mut u8 {
diff --git a/embassy-stm32/src/usart/rx_ringbuffered.rs b/embassy-stm32/src/usart/rx_ringbuffered.rs
new file mode 100644
index 000000000..0dc90ece7
--- /dev/null
+++ b/embassy-stm32/src/usart/rx_ringbuffered.rs
@@ -0,0 +1,286 @@
1use core::future::poll_fn;
2use core::sync::atomic::{compiler_fence, Ordering};
3use core::task::Poll;
4
5use embassy_hal_common::drop::OnDrop;
6use embassy_hal_common::PeripheralRef;
7
8use super::{rdr, sr, BasicInstance, Error, UartRx};
9use crate::dma::ringbuffer::OverrunError;
10use crate::dma::RingBuffer;
11
12pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> {
13 _peri: PeripheralRef<'d, T>,
14 ring_buf: RingBuffer<'d, RxDma, u8>,
15}
16
17impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
18 /// Turn the `UartRx` into a buffered uart which can continuously receive in the background
19 /// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the
20 /// DMA controller, and must be sufficiently large, such that it will not overflow.
21 pub fn into_ring_buffered(self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d, T, RxDma> {
22 assert!(dma_buf.len() > 0 && dma_buf.len() <= 0xFFFF);
23
24 let request = self.rx_dma.request();
25 let opts = Default::default();
26 let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) };
27 RingBufferedUartRx {
28 _peri: self._peri,
29 ring_buf,
30 }
31 }
32}
33
34impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
35 pub fn start(&mut self) -> Result<(), Error> {
36 // Clear the ring buffer so that it is ready to receive data
37 self.ring_buf.clear();
38
39 self.setup_uart();
40
41 Ok(())
42 }
43
44 /// Start uart background receive
45 fn setup_uart(&mut self) {
46 // fence before starting DMA.
47 compiler_fence(Ordering::SeqCst);
48
49 self.ring_buf.start();
50
51 let r = T::regs();
52 // clear all interrupts and DMA Rx Request
53 // SAFETY: only clears Rx related flags
54 unsafe {
55 r.cr1().modify(|w| {
56 // disable RXNE interrupt
57 w.set_rxneie(false);
58 // enable parity interrupt if not ParityNone
59 w.set_peie(w.pce());
60 // disable idle line interrupt
61 w.set_idleie(false);
62 });
63 r.cr3().modify(|w| {
64 // enable Error Interrupt: (Frame error, Noise error, Overrun error)
65 w.set_eie(true);
66 // enable DMA Rx Request
67 w.set_dmar(true);
68 });
69 }
70 }
71
72 /// Stop uart background receive
73 fn teardown_uart(&mut self) {
74 let r = T::regs();
75 // clear all interrupts and DMA Rx Request
76 // SAFETY: only clears Rx related flags
77 unsafe {
78 r.cr1().modify(|w| {
79 // disable RXNE interrupt
80 w.set_rxneie(false);
81 // disable parity interrupt
82 w.set_peie(false);
83 // disable idle line interrupt
84 w.set_idleie(false);
85 });
86 r.cr3().modify(|w| {
87 // disable Error Interrupt: (Frame error, Noise error, Overrun error)
88 w.set_eie(false);
89 // disable DMA Rx Request
90 w.set_dmar(false);
91 });
92 }
93
94 compiler_fence(Ordering::SeqCst);
95
96 self.ring_buf.request_stop();
97 while self.ring_buf.is_running() {}
98 }
99
100 /// Read bytes that are readily available in the ring buffer.
101 /// If no bytes are currently available in the buffer the call waits until data are received.
102 ///
103 /// Background receive is started if `start()` has not been previously called.
104 ///
105 /// Receive in the background is terminated if an error is returned.
106 /// It must then manually be started again by calling `start()` or by re-calling `read()`.
107 pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
108 let r = T::regs();
109
110 // SAFETY: read only
111 let is_started = unsafe { r.cr3().read().dmar() };
112
113 // Start background receive if it was not already started
114 if !is_started {
115 self.start()?;
116 }
117
118 // SAFETY: read only and we only use Rx related flags
119 let s = unsafe { sr(r).read() };
120 let has_errors = s.pe() || s.fe() || s.ne() || s.ore();
121 if has_errors {
122 self.teardown_uart();
123
124 if s.pe() {
125 return Err(Error::Parity);
126 } else if s.fe() {
127 return Err(Error::Framing);
128 } else if s.ne() {
129 return Err(Error::Noise);
130 } else {
131 return Err(Error::Overrun);
132 }
133 }
134
135 let ndtr = self.ring_buf.get_remaining_transfers();
136 self.ring_buf.set_ndtr(ndtr);
137 match self.ring_buf.read(buf) {
138 Ok(len) if len == 0 => {}
139 Ok(len) => {
140 assert!(len > 0);
141 return Ok(len);
142 }
143 Err(OverrunError) => {
144 // Stop any transfer from now on
145 // The user must re-start to receive any more data
146 self.teardown_uart();
147 return Err(Error::Overrun);
148 }
149 }
150
151 // Wait for any data since `ndtr`
152 self.wait_for_data(ndtr).await?;
153
154 // ndtr is now different than the value provided to `wait_for_data()`
155 // Re-sample ndtr now when it has changed.
156 self.ring_buf.set_ndtr(self.ring_buf.get_remaining_transfers());
157 let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?;
158 assert!(len > 0);
159 Ok(len)
160 }
161
162 /// Wait for uart data
163 async fn wait_for_data(&mut self, old_ndtr: usize) -> Result<(), Error> {
164 let r = T::regs();
165
166 // make sure USART state is restored to neutral state when this future is dropped
167 let _drop = OnDrop::new(move || {
168 // SAFETY: only clears Rx related flags
169 unsafe {
170 r.cr1().modify(|w| {
171 // disable RXNE interrupt
172 w.set_rxneie(false);
173 });
174 }
175 });
176
177 // SAFETY: only sets Rx related flags
178 unsafe {
179 r.cr1().modify(|w| {
180 // enable RXNE interrupt
181 w.set_rxneie(true);
182 });
183 }
184
185 // future which completes when RX "not empty" is detected,
186 // i.e. when there is data in uart rx register
187 let rxne = poll_fn(|cx| {
188 let s = T::state();
189
190 // Register waker to be woken when RXNE interrupt is received
191 s.rx_waker.register(cx.waker());
192
193 compiler_fence(Ordering::SeqCst);
194
195 // SAFETY: read only and we only use Rx related flags
196 let s = unsafe { sr(r).read() };
197 let has_errors = s.pe() || s.fe() || s.ne() || s.ore();
198 if has_errors {
199 if s.pe() {
200 return Poll::Ready(Err(Error::Parity));
201 } else if s.fe() {
202 return Poll::Ready(Err(Error::Framing));
203 } else if s.ne() {
204 return Poll::Ready(Err(Error::Noise));
205 } else {
206 return Poll::Ready(Err(Error::Overrun));
207 }
208 }
209
210 // Re-sample ndtr and determine if it has changed since we started
211 // waiting for data.
212 let new_ndtr = self.ring_buf.get_remaining_transfers();
213 if new_ndtr != old_ndtr {
214 // Some data was received as NDTR has changed
215 Poll::Ready(Ok(()))
216 } else {
217 // It may be that the DMA controller is currently busy consuming the
218 // RX data register. We therefore wait for the register to become empty.
219 while unsafe { sr(r).read().rxne() } {}
220
221 compiler_fence(Ordering::SeqCst);
222
223 // Re-get again: This time we know that the DMA controller has consumed
224 // the current read register if it was busy doing so
225 let new_ndtr = self.ring_buf.get_remaining_transfers();
226 if new_ndtr != old_ndtr {
227 // Some data was received as NDTR has changed
228 Poll::Ready(Ok(()))
229 } else {
230 Poll::Pending
231 }
232 }
233 });
234
235 compiler_fence(Ordering::SeqCst);
236
237 let new_ndtr = self.ring_buf.get_remaining_transfers();
238 if new_ndtr != old_ndtr {
239 // Fast path - NDTR has already changed, no reason to poll
240 Ok(())
241 } else {
242 // NDTR has not changed since we first read from the ring buffer
243 // Wait for RXNE interrupt...
244 match rxne.await {
245 Ok(()) => Ok(()),
246 Err(e) => {
247 self.teardown_uart();
248 Err(e)
249 }
250 }
251 }
252 }
253}
254
255impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T, RxDma> {
256 fn drop(&mut self) {
257 self.teardown_uart();
258 }
259}
260
261#[cfg(all(feature = "unstable-traits", feature = "nightly"))]
262mod eio {
263 use embedded_io::asynch::Read;
264 use embedded_io::Io;
265
266 use super::RingBufferedUartRx;
267 use crate::usart::{BasicInstance, Error, RxDma};
268
269 impl<T, Rx> Io for RingBufferedUartRx<'_, T, Rx>
270 where
271 T: BasicInstance,
272 Rx: RxDma<T>,
273 {
274 type Error = Error;
275 }
276
277 impl<T, Rx> Read for RingBufferedUartRx<'_, T, Rx>
278 where
279 T: BasicInstance,
280 Rx: RxDma<T>,
281 {
282 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
283 self.read(buf).await
284 }
285 }
286}
diff --git a/tests/stm32/Cargo.toml b/tests/stm32/Cargo.toml
index d10d01e29..240fad522 100644
--- a/tests/stm32/Cargo.toml
+++ b/tests/stm32/Cargo.toml
@@ -33,6 +33,8 @@ embedded-hal = "0.2.6"
33embedded-hal-1 = { package = "embedded-hal", version = "=1.0.0-alpha.10" } 33embedded-hal-1 = { package = "embedded-hal", version = "=1.0.0-alpha.10" }
34embedded-hal-async = { version = "=0.2.0-alpha.1" } 34embedded-hal-async = { version = "=0.2.0-alpha.1" }
35panic-probe = { version = "0.3.0", features = ["print-defmt"] } 35panic-probe = { version = "0.3.0", features = ["print-defmt"] }
36rand_core = { version = "0.6", default-features = false }
37rand_chacha = { version = "0.3", default-features = false }
36 38
37chrono = { version = "^0.4", default-features = false, optional = true} 39chrono = { version = "^0.4", default-features = false, optional = true}
38 40
diff --git a/tests/stm32/src/bin/usart_rx_ringbuffered.rs b/tests/stm32/src/bin/usart_rx_ringbuffered.rs
new file mode 100644
index 000000000..3ea8bfb7b
--- /dev/null
+++ b/tests/stm32/src/bin/usart_rx_ringbuffered.rs
@@ -0,0 +1,188 @@
1#![no_std]
2#![no_main]
3#![feature(type_alias_impl_trait)]
4
5#[path = "../example_common.rs"]
6mod example_common;
7use embassy_executor::Spawner;
8use embassy_stm32::interrupt;
9use embassy_stm32::usart::{Config, DataBits, Parity, RingBufferedUartRx, StopBits, Uart, UartTx};
10use embassy_time::{Duration, Timer};
11use example_common::*;
12use rand_chacha::ChaCha8Rng;
13use rand_core::{RngCore, SeedableRng};
14
// Per-target board mapping: each supported chip selects the concrete UART
// peripheral and the DMA channels used for TX and RX in this test. Exactly
// one of these modules is compiled in, driven by the enabled chip feature.
#[cfg(feature = "stm32f103c8")]
mod board {
    pub type Uart = embassy_stm32::peripherals::USART1;
    pub type TxDma = embassy_stm32::peripherals::DMA1_CH4;
    pub type RxDma = embassy_stm32::peripherals::DMA1_CH5;
}
#[cfg(feature = "stm32g491re")]
mod board {
    pub type Uart = embassy_stm32::peripherals::USART1;
    pub type TxDma = embassy_stm32::peripherals::DMA1_CH1;
    pub type RxDma = embassy_stm32::peripherals::DMA1_CH2;
}
#[cfg(feature = "stm32g071rb")]
mod board {
    pub type Uart = embassy_stm32::peripherals::USART1;
    pub type TxDma = embassy_stm32::peripherals::DMA1_CH1;
    pub type RxDma = embassy_stm32::peripherals::DMA1_CH2;
}
#[cfg(feature = "stm32f429zi")]
mod board {
    pub type Uart = embassy_stm32::peripherals::USART2;
    pub type TxDma = embassy_stm32::peripherals::DMA1_CH6;
    pub type RxDma = embassy_stm32::peripherals::DMA1_CH5;
}
#[cfg(feature = "stm32wb55rg")]
mod board {
    pub type Uart = embassy_stm32::peripherals::LPUART1;
    pub type TxDma = embassy_stm32::peripherals::DMA1_CH1;
    pub type RxDma = embassy_stm32::peripherals::DMA1_CH2;
}
#[cfg(feature = "stm32h755zi")]
mod board {
    pub type Uart = embassy_stm32::peripherals::USART1;
    pub type TxDma = embassy_stm32::peripherals::DMA1_CH0;
    pub type RxDma = embassy_stm32::peripherals::DMA1_CH1;
}
#[cfg(feature = "stm32u585ai")]
mod board {
    pub type Uart = embassy_stm32::peripherals::USART3;
    pub type TxDma = embassy_stm32::peripherals::GPDMA1_CH0;
    pub type RxDma = embassy_stm32::peripherals::GPDMA1_CH1;
}
57
/// Approximate wire time of one byte at 115200 baud, in microseconds.
/// An 8N1 frame is 10 bits on the wire (1 start + 8 data + 1 stop), so the
/// numerator is 10 bits * 1_000_000 us/s — the previous 9_000_000 counted
/// only 9 bits and underestimated the pacing delay by ~10%.
const ONE_BYTE_DURATION_US: u32 = 10_000_000 / 115200;
59
/// Hardware-in-the-loop test entry point for `RingBufferedUartRx`: selects
/// the board's UART pins/DMA channels, creates a ring-buffered receiver over
/// a 64-byte DMA buffer, and spawns a transmitter and a verifying receiver.
#[embassy_executor::main]
async fn main(spawner: Spawner) {
    let p = embassy_stm32::init(config());
    info!("Hello World!");

    // Arduino pins D0 and D1
    // They're connected together with a 1K resistor.
    #[cfg(feature = "stm32f103c8")]
    let (tx, rx, usart, irq, tx_dma, rx_dma) = (
        p.PA9,
        p.PA10,
        p.USART1,
        interrupt::take!(USART1),
        p.DMA1_CH4,
        p.DMA1_CH5,
    );
    #[cfg(feature = "stm32g491re")]
    let (tx, rx, usart, irq, tx_dma, rx_dma) =
        (p.PC4, p.PC5, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2);
    #[cfg(feature = "stm32g071rb")]
    let (tx, rx, usart, irq, tx_dma, rx_dma) =
        (p.PC4, p.PC5, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2);
    #[cfg(feature = "stm32f429zi")]
    let (tx, rx, usart, irq, tx_dma, rx_dma) =
        (p.PA2, p.PA3, p.USART2, interrupt::take!(USART2), p.DMA1_CH6, p.DMA1_CH5);
    #[cfg(feature = "stm32wb55rg")]
    let (tx, rx, usart, irq, tx_dma, rx_dma) = (
        p.PA2,
        p.PA3,
        p.LPUART1,
        interrupt::take!(LPUART1),
        p.DMA1_CH1,
        p.DMA1_CH2,
    );
    #[cfg(feature = "stm32h755zi")]
    let (tx, rx, usart, irq, tx_dma, rx_dma) =
        (p.PB6, p.PB7, p.USART1, interrupt::take!(USART1), p.DMA1_CH0, p.DMA1_CH1);
    #[cfg(feature = "stm32u585ai")]
    let (tx, rx, usart, irq, tx_dma, rx_dma) = (
        p.PD8,
        p.PD9,
        p.USART3,
        interrupt::take!(USART3),
        p.GPDMA1_CH0,
        p.GPDMA1_CH1,
    );

    // To run this test, use the saturate_serial test utility to saturate the serial port

    let mut config = Config::default();
    config.baudrate = 115200;
    config.data_bits = DataBits::DataBits8;
    config.stop_bits = StopBits::STOP1;
    config.parity = Parity::ParityNone;

    let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config);
    let (tx, rx) = usart.split();
    // DMA target buffer for the receive ring.
    // NOTE(review): `main` runs once, so this should be the only mutable
    // reference ever taken to DMA_BUF — confirm nothing else names it.
    static mut DMA_BUF: [u8; 64] = [0; 64];
    let dma_buf = unsafe { DMA_BUF.as_mut() };
    let rx = rx.into_ring_buffered(dma_buf);

    info!("Spawning tasks");
    spawner.spawn(transmit_task(tx)).unwrap();
    spawner.spawn(receive_task(rx)).unwrap();
}
125
126#[embassy_executor::task]
127async fn transmit_task(mut tx: UartTx<'static, board::Uart, board::TxDma>) {
128 let mut rng = ChaCha8Rng::seed_from_u64(1337);
129
130 info!("Starting random transmissions into void...");
131
132 let mut i: u8 = 0;
133 loop {
134 let mut buf = [0; 32];
135 let len = 1 + (rng.next_u32() as usize % (buf.len() - 1));
136 for b in &mut buf[..len] {
137 *b = i;
138 i = i.wrapping_add(1);
139 }
140
141 tx.write(&buf[..len]).await.unwrap();
142 Timer::after(Duration::from_micros((rng.next_u32() % 10000) as _)).await;
143
144 //i += 1;
145 //if i % 1000 == 0 {
146 // trace!("Wrote {} times", i);
147 //}
148 }
149}
150
151#[embassy_executor::task]
152async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::RxDma>) {
153 info!("Ready to receive...");
154
155 let mut rng = ChaCha8Rng::seed_from_u64(1337);
156
157 let mut i = 0;
158 let mut expected: Option<u8> = None;
159 loop {
160 let mut buf = [0; 100];
161 let max_len = 1 + (rng.next_u32() as usize % (buf.len() - 1));
162 let received = rx.read(&mut buf[..max_len]).await.unwrap();
163
164 if expected.is_none() {
165 info!("Test started");
166 expected = Some(buf[0]);
167 }
168
169 for byte in &buf[..received] {
170 if byte != &expected.unwrap() {
171 error!("Test fail! received {}, expected {}", *byte, expected.unwrap());
172 cortex_m::asm::bkpt();
173 return;
174 }
175 expected = Some(expected.unwrap().wrapping_add(1));
176 }
177
178 if received < max_len {
179 let byte_count = rng.next_u32() % 64;
180 Timer::after(Duration::from_micros((byte_count * ONE_BYTE_DURATION_US) as _)).await;
181 }
182
183 i += 1;
184 if i % 1000 == 0 {
185 trace!("Read {} times", i);
186 }
187 }
188}
diff --git a/tests/utils/Cargo.toml b/tests/utils/Cargo.toml
new file mode 100644
index 000000000..7d66fd586
--- /dev/null
+++ b/tests/utils/Cargo.toml
@@ -0,0 +1,10 @@
1[package]
2name = "test-utils"
3version = "0.1.0"
4edition = "2021"
5
6# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7
8[dependencies]
9rand = "0.8"
10serial = "0.4"
diff --git a/tests/utils/src/bin/saturate_serial.rs b/tests/utils/src/bin/saturate_serial.rs
new file mode 100644
index 000000000..28480516d
--- /dev/null
+++ b/tests/utils/src/bin/saturate_serial.rs
@@ -0,0 +1,52 @@
1use std::path::Path;
2use std::time::Duration;
3use std::{env, io, thread};
4
5use rand::random;
6use serial::SerialPort;
7
8pub fn main() {
9 if let Some(port_name) = env::args().nth(1) {
10 let sleep = env::args().position(|x| x == "--sleep").is_some();
11
12 println!("Saturating port {:?} with 115200 8N1", port_name);
13 println!("Sleep: {}", sleep);
14 let mut port = serial::open(&port_name).unwrap();
15 if saturate(&mut port, sleep).is_err() {
16 eprintln!("Unable to saturate port");
17 }
18 } else {
19 let path = env::args().next().unwrap();
20 let basepath = Path::new(&path).with_extension("");
21 let basename = basepath.file_name().unwrap();
22 eprintln!("USAGE: {} <port-name>", basename.to_string_lossy());
23 }
24}
25
26fn saturate<T: SerialPort>(port: &mut T, sleep: bool) -> io::Result<()> {
27 port.reconfigure(&|settings| {
28 settings.set_baud_rate(serial::Baud115200)?;
29 settings.set_char_size(serial::Bits8);
30 settings.set_parity(serial::ParityNone);
31 settings.set_stop_bits(serial::Stop1);
32 Ok(())
33 })?;
34
35 let mut written = 0;
36 loop {
37 let len = random::<usize>() % 0x1000;
38 let buf: Vec<u8> = (written..written + len).map(|x| x as u8).collect();
39
40 port.write_all(&buf)?;
41
42 if sleep {
43 let micros = (random::<usize>() % 1000) as u64;
44 println!("Sleeping {}us", micros);
45 port.flush().unwrap();
46 thread::sleep(Duration::from_micros(micros));
47 }
48
49 written += len;
50 println!("Written: {}", written);
51 }
52} \ No newline at end of file