diff options
| author | bors[bot] <26634292+bors[bot]@users.noreply.github.com> | 2023-05-01 21:36:10 +0000 |
|---|---|---|
| committer | GitHub <[email protected]> | 2023-05-01 21:36:10 +0000 |
| commit | 6096f0cf4b5ef45b97665166be41bfd490748f40 (patch) | |
| tree | 3e19fd1bacd3cb2d4276eefc224a81ed6c004ff0 | |
| parent | 855c0d1423cb1aacd4f4f45e255b02b442afde34 (diff) | |
| parent | a1d45303c336434929eb8eb7e55629c504a95b0e (diff) | |
Merge #1404
1404: feat(stm32): Add DMA based, ring-buffer based rx uart, v3 r=Dirbaio a=rmja
This PR replaces #1150. Comparing to that PR, this one has the following changes:
* The implementation now aligns with the new stm32 dma module, thanks `@Dirbaio!`
* Calls to `read()` now return on either 1) idle line, or 2) ring buffer is at most half full. This is different from the previous PR, which would return a lot of 1 byte reads. Thank you `@chemicstry` for making me realize that it was actually not what I wanted. This is accomplished using half-transfer completed and full-transfer completed interrupts. Both seem to be supported on both dma and bdma.
The implementation still has the issue mentioned here: https://github.com/embassy-rs/embassy/pull/1150#discussion_r1094627035
Regarding the todos here: https://github.com/embassy-rs/embassy/pull/1150#issuecomment-1513905925. I have removed the exposure of ndtr from `dma::RingBuffer` to the uart so that the uart now simply calls `ringbuf::reload_position()` to align the position within the ring buffer to that of the actual running dma controller. BDMA and GPDMA are not implemented. I do not have any chips with those dma controllers, so maybe someone else should do this so that it can be tested.
The `saturate_serial` test utility inside `tests/utils` has an `--idles` switch which can be used to saturate the uart from a pc, but with random idles.
Because embassy-stm32 now can have tests, we should probably run them in ci. I do this locally to test the DmaRingBuffer: `cargo test --no-default-features --features stm32f429ig`.
cc `@chemicstry` `@Dirbaio`
Co-authored-by: Rasmus Melchior Jacobsen <[email protected]>
Co-authored-by: Dario Nieuwenhuis <[email protected]>
| -rw-r--r-- | embassy-stm32/src/dma/bdma.rs | 189 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/dma.rs | 199 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/mod.rs | 1 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer.rs | 420 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/mod.rs | 41 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/rx_ringbuffered.rs | 286 | ||||
| -rw-r--r-- | tests/stm32/Cargo.toml | 25 | ||||
| -rw-r--r-- | tests/stm32/src/bin/usart_dma.rs | 27 | ||||
| -rw-r--r-- | tests/stm32/src/bin/usart_rx_ringbuffered.rs | 200 | ||||
| -rw-r--r-- | tests/stm32/src/example_common.rs | 11 | ||||
| -rw-r--r-- | tests/utils/Cargo.toml | 10 | ||||
| -rw-r--r-- | tests/utils/src/bin/saturate_serial.rs | 53 |
12 files changed, 1409 insertions, 53 deletions
diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs index a23bb8cd7..0202ec379 100644 --- a/embassy-stm32/src/dma/bdma.rs +++ b/embassy-stm32/src/dma/bdma.rs | |||
| @@ -3,18 +3,20 @@ | |||
| 3 | use core::future::Future; | 3 | use core::future::Future; |
| 4 | use core::pin::Pin; | 4 | use core::pin::Pin; |
| 5 | use core::sync::atomic::{fence, Ordering}; | 5 | use core::sync::atomic::{fence, Ordering}; |
| 6 | use core::task::{Context, Poll}; | 6 | use core::task::{Context, Poll, Waker}; |
| 7 | 7 | ||
| 8 | use atomic_polyfill::AtomicUsize; | ||
| 8 | use embassy_cortex_m::interrupt::Priority; | 9 | use embassy_cortex_m::interrupt::Priority; |
| 9 | use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; | 10 | use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; |
| 10 | use embassy_sync::waitqueue::AtomicWaker; | 11 | use embassy_sync::waitqueue::AtomicWaker; |
| 11 | 12 | ||
| 13 | use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError}; | ||
| 12 | use super::word::{Word, WordSize}; | 14 | use super::word::{Word, WordSize}; |
| 13 | use super::Dir; | 15 | use super::Dir; |
| 14 | use crate::_generated::BDMA_CHANNEL_COUNT; | 16 | use crate::_generated::BDMA_CHANNEL_COUNT; |
| 15 | use crate::interrupt::{Interrupt, InterruptExt}; | 17 | use crate::interrupt::{Interrupt, InterruptExt}; |
| 16 | use crate::pac; | 18 | use crate::pac; |
| 17 | use crate::pac::bdma::vals; | 19 | use crate::pac::bdma::{regs, vals}; |
| 18 | 20 | ||
| 19 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | 21 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] |
| 20 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | 22 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] |
| @@ -48,13 +50,16 @@ impl From<Dir> for vals::Dir { | |||
| 48 | 50 | ||
| 49 | struct State { | 51 | struct State { |
| 50 | ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT], | 52 | ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT], |
| 53 | complete_count: [AtomicUsize; BDMA_CHANNEL_COUNT], | ||
| 51 | } | 54 | } |
| 52 | 55 | ||
| 53 | impl State { | 56 | impl State { |
| 54 | const fn new() -> Self { | 57 | const fn new() -> Self { |
| 58 | const ZERO: AtomicUsize = AtomicUsize::new(0); | ||
| 55 | const AW: AtomicWaker = AtomicWaker::new(); | 59 | const AW: AtomicWaker = AtomicWaker::new(); |
| 56 | Self { | 60 | Self { |
| 57 | ch_wakers: [AW; BDMA_CHANNEL_COUNT], | 61 | ch_wakers: [AW; BDMA_CHANNEL_COUNT], |
| 62 | complete_count: [ZERO; BDMA_CHANNEL_COUNT], | ||
| 58 | } | 63 | } |
| 59 | } | 64 | } |
| 60 | } | 65 | } |
| @@ -105,8 +110,23 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index | |||
| 105 | if isr.teif(channel_num) { | 110 | if isr.teif(channel_num) { |
| 106 | panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num); | 111 | panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num); |
| 107 | } | 112 | } |
| 113 | |||
| 114 | let mut wake = false; | ||
| 115 | |||
| 116 | if isr.htif(channel_num) && cr.read().htie() { | ||
| 117 | // Acknowledge half transfer complete interrupt | ||
| 118 | dma.ifcr().write(|w| w.set_htif(channel_num, true)); | ||
| 119 | wake = true; | ||
| 120 | } | ||
| 121 | |||
| 108 | if isr.tcif(channel_num) && cr.read().tcie() { | 122 | if isr.tcif(channel_num) && cr.read().tcie() { |
| 109 | cr.write(|_| ()); // Disable channel interrupts with the default value. | 123 | // Acknowledge transfer complete interrupt |
| 124 | dma.ifcr().write(|w| w.set_tcif(channel_num, true)); | ||
| 125 | STATE.complete_count[index].fetch_add(1, Ordering::Release); | ||
| 126 | wake = true; | ||
| 127 | } | ||
| 128 | |||
| 129 | if wake { | ||
| 110 | STATE.ch_wakers[index].wake(); | 130 | STATE.ch_wakers[index].wake(); |
| 111 | } | 131 | } |
| 112 | } | 132 | } |
| @@ -252,6 +272,7 @@ impl<'a, C: Channel> Transfer<'a, C> { | |||
| 252 | 272 | ||
| 253 | let mut this = Self { channel }; | 273 | let mut this = Self { channel }; |
| 254 | this.clear_irqs(); | 274 | this.clear_irqs(); |
| 275 | STATE.complete_count[this.channel.index()].store(0, Ordering::Release); | ||
| 255 | 276 | ||
| 256 | #[cfg(dmamux)] | 277 | #[cfg(dmamux)] |
| 257 | super::dmamux::configure_dmamux(&mut *this.channel, _request); | 278 | super::dmamux::configure_dmamux(&mut *this.channel, _request); |
| @@ -299,7 +320,9 @@ impl<'a, C: Channel> Transfer<'a, C> { | |||
| 299 | 320 | ||
| 300 | pub fn is_running(&mut self) -> bool { | 321 | pub fn is_running(&mut self) -> bool { |
| 301 | let ch = self.channel.regs().ch(self.channel.num()); | 322 | let ch = self.channel.regs().ch(self.channel.num()); |
| 302 | unsafe { ch.cr().read() }.en() | 323 | let en = unsafe { ch.cr().read() }.en(); |
| 324 | let tcif = STATE.complete_count[self.channel.index()].load(Ordering::Acquire) != 0; | ||
| 325 | en && !tcif | ||
| 303 | } | 326 | } |
| 304 | 327 | ||
| 305 | /// Gets the total remaining transfers for the channel | 328 | /// Gets the total remaining transfers for the channel |
| @@ -342,3 +365,161 @@ impl<'a, C: Channel> Future for Transfer<'a, C> { | |||
| 342 | } | 365 | } |
| 343 | } | 366 | } |
| 344 | } | 367 | } |
| 368 | |||
| 369 | // ============================== | ||
| 370 | |||
| 371 | struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>); | ||
| 372 | |||
| 373 | impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { | ||
| 374 | fn ndtr(&self) -> usize { | ||
| 375 | let ch = self.0.regs().ch(self.0.num()); | ||
| 376 | unsafe { ch.ndtr().read() }.ndt() as usize | ||
| 377 | } | ||
| 378 | |||
| 379 | fn get_complete_count(&self) -> usize { | ||
| 380 | STATE.complete_count[self.0.index()].load(Ordering::Acquire) | ||
| 381 | } | ||
| 382 | |||
| 383 | fn reset_complete_count(&mut self) -> usize { | ||
| 384 | STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel) | ||
| 385 | } | ||
| 386 | } | ||
| 387 | |||
| 388 | pub struct RingBuffer<'a, C: Channel, W: Word> { | ||
| 389 | cr: regs::Cr, | ||
| 390 | channel: PeripheralRef<'a, C>, | ||
| 391 | ringbuf: DmaRingBuffer<'a, W>, | ||
| 392 | } | ||
| 393 | |||
| 394 | impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | ||
| 395 | pub unsafe fn new_read( | ||
| 396 | channel: impl Peripheral<P = C> + 'a, | ||
| 397 | _request: Request, | ||
| 398 | peri_addr: *mut W, | ||
| 399 | buffer: &'a mut [W], | ||
| 400 | _options: TransferOptions, | ||
| 401 | ) -> Self { | ||
| 402 | into_ref!(channel); | ||
| 403 | |||
| 404 | let len = buffer.len(); | ||
| 405 | assert!(len > 0 && len <= 0xFFFF); | ||
| 406 | |||
| 407 | let dir = Dir::PeripheralToMemory; | ||
| 408 | let data_size = W::size(); | ||
| 409 | |||
| 410 | let channel_number = channel.num(); | ||
| 411 | let dma = channel.regs(); | ||
| 412 | |||
| 413 | // "Preceding reads and writes cannot be moved past subsequent writes." | ||
| 414 | fence(Ordering::SeqCst); | ||
| 415 | |||
| 416 | #[cfg(bdma_v2)] | ||
| 417 | critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request))); | ||
| 418 | |||
| 419 | let mut w = regs::Cr(0); | ||
| 420 | w.set_psize(data_size.into()); | ||
| 421 | w.set_msize(data_size.into()); | ||
| 422 | w.set_minc(vals::Inc::ENABLED); | ||
| 423 | w.set_dir(dir.into()); | ||
| 424 | w.set_teie(true); | ||
| 425 | w.set_htie(true); | ||
| 426 | w.set_tcie(true); | ||
| 427 | w.set_circ(vals::Circ::ENABLED); | ||
| 428 | w.set_pl(vals::Pl::VERYHIGH); | ||
| 429 | w.set_en(true); | ||
| 430 | |||
| 431 | let buffer_ptr = buffer.as_mut_ptr(); | ||
| 432 | let mut this = Self { | ||
| 433 | channel, | ||
| 434 | cr: w, | ||
| 435 | ringbuf: DmaRingBuffer::new(buffer), | ||
| 436 | }; | ||
| 437 | this.clear_irqs(); | ||
| 438 | |||
| 439 | #[cfg(dmamux)] | ||
| 440 | super::dmamux::configure_dmamux(&mut *this.channel, _request); | ||
| 441 | |||
| 442 | let ch = dma.ch(channel_number); | ||
| 443 | ch.par().write_value(peri_addr as u32); | ||
| 444 | ch.mar().write_value(buffer_ptr as u32); | ||
| 445 | ch.ndtr().write(|w| w.set_ndt(len as u16)); | ||
| 446 | |||
| 447 | this | ||
| 448 | } | ||
| 449 | |||
| 450 | pub fn start(&mut self) { | ||
| 451 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 452 | unsafe { ch.cr().write_value(self.cr) } | ||
| 453 | } | ||
| 454 | |||
| 455 | pub fn clear(&mut self) { | ||
| 456 | self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow())); | ||
| 457 | } | ||
| 458 | |||
| 459 | /// Read bytes from the ring buffer | ||
| 460 | /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | ||
| 461 | pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> { | ||
| 462 | self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf) | ||
| 463 | } | ||
| 464 | |||
| 465 | pub fn is_empty(&self) -> bool { | ||
| 466 | self.ringbuf.is_empty() | ||
| 467 | } | ||
| 468 | |||
| 469 | pub fn len(&self) -> usize { | ||
| 470 | self.ringbuf.len() | ||
| 471 | } | ||
| 472 | |||
| 473 | pub fn capacity(&self) -> usize { | ||
| 474 | self.ringbuf.dma_buf.len() | ||
| 475 | } | ||
| 476 | |||
| 477 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 478 | STATE.ch_wakers[self.channel.index()].register(waker); | ||
| 479 | } | ||
| 480 | |||
| 481 | fn clear_irqs(&mut self) { | ||
| 482 | let dma = self.channel.regs(); | ||
| 483 | unsafe { | ||
| 484 | dma.ifcr().write(|w| { | ||
| 485 | w.set_htif(self.channel.num(), true); | ||
| 486 | w.set_tcif(self.channel.num(), true); | ||
| 487 | w.set_teif(self.channel.num(), true); | ||
| 488 | }) | ||
| 489 | } | ||
| 490 | } | ||
| 491 | |||
| 492 | pub fn request_stop(&mut self) { | ||
| 493 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 494 | |||
| 495 | // Disable the channel. Keep the IEs enabled so the irqs still fire. | ||
| 496 | unsafe { | ||
| 497 | ch.cr().write(|w| { | ||
| 498 | w.set_teie(true); | ||
| 499 | w.set_htie(true); | ||
| 500 | w.set_tcie(true); | ||
| 501 | }) | ||
| 502 | } | ||
| 503 | } | ||
| 504 | |||
| 505 | pub fn is_running(&mut self) -> bool { | ||
| 506 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 507 | unsafe { ch.cr().read() }.en() | ||
| 508 | } | ||
| 509 | |||
| 510 | /// Synchronize the position of the ring buffer to the actual DMA controller position | ||
| 511 | pub fn reload_position(&mut self) { | ||
| 512 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 513 | self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize; | ||
| 514 | } | ||
| 515 | } | ||
| 516 | |||
| 517 | impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | ||
| 518 | fn drop(&mut self) { | ||
| 519 | self.request_stop(); | ||
| 520 | while self.is_running() {} | ||
| 521 | |||
| 522 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 523 | fence(Ordering::SeqCst); | ||
| 524 | } | ||
| 525 | } | ||
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs index ef1d27573..7b17d9e49 100644 --- a/embassy-stm32/src/dma/dma.rs +++ b/embassy-stm32/src/dma/dma.rs | |||
| @@ -4,16 +4,17 @@ use core::pin::Pin; | |||
| 4 | use core::sync::atomic::{fence, Ordering}; | 4 | use core::sync::atomic::{fence, Ordering}; |
| 5 | use core::task::{Context, Poll, Waker}; | 5 | use core::task::{Context, Poll, Waker}; |
| 6 | 6 | ||
| 7 | use atomic_polyfill::AtomicUsize; | ||
| 7 | use embassy_cortex_m::interrupt::Priority; | 8 | use embassy_cortex_m::interrupt::Priority; |
| 8 | use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; | 9 | use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; |
| 9 | use embassy_sync::waitqueue::AtomicWaker; | 10 | use embassy_sync::waitqueue::AtomicWaker; |
| 10 | use pac::dma::regs; | ||
| 11 | 11 | ||
| 12 | use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError}; | ||
| 12 | use super::word::{Word, WordSize}; | 13 | use super::word::{Word, WordSize}; |
| 13 | use super::Dir; | 14 | use super::Dir; |
| 14 | use crate::_generated::DMA_CHANNEL_COUNT; | 15 | use crate::_generated::DMA_CHANNEL_COUNT; |
| 15 | use crate::interrupt::{Interrupt, InterruptExt}; | 16 | use crate::interrupt::{Interrupt, InterruptExt}; |
| 16 | use crate::pac::dma::vals; | 17 | use crate::pac::dma::{regs, vals}; |
| 17 | use crate::{interrupt, pac}; | 18 | use crate::{interrupt, pac}; |
| 18 | 19 | ||
| 19 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | 20 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] |
| @@ -128,13 +129,16 @@ impl From<FifoThreshold> for vals::Fth { | |||
| 128 | 129 | ||
| 129 | struct State { | 130 | struct State { |
| 130 | ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT], | 131 | ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT], |
| 132 | complete_count: [AtomicUsize; DMA_CHANNEL_COUNT], | ||
| 131 | } | 133 | } |
| 132 | 134 | ||
| 133 | impl State { | 135 | impl State { |
| 134 | const fn new() -> Self { | 136 | const fn new() -> Self { |
| 137 | const ZERO: AtomicUsize = AtomicUsize::new(0); | ||
| 135 | const AW: AtomicWaker = AtomicWaker::new(); | 138 | const AW: AtomicWaker = AtomicWaker::new(); |
| 136 | Self { | 139 | Self { |
| 137 | ch_wakers: [AW; DMA_CHANNEL_COUNT], | 140 | ch_wakers: [AW; DMA_CHANNEL_COUNT], |
| 141 | complete_count: [ZERO; DMA_CHANNEL_COUNT], | ||
| 138 | } | 142 | } |
| 139 | } | 143 | } |
| 140 | } | 144 | } |
| @@ -183,9 +187,22 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index: | |||
| 183 | panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num); | 187 | panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num); |
| 184 | } | 188 | } |
| 185 | 189 | ||
| 190 | let mut wake = false; | ||
| 191 | |||
| 192 | if isr.htif(channel_num % 4) && cr.read().htie() { | ||
| 193 | // Acknowledge half transfer complete interrupt | ||
| 194 | dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true)); | ||
| 195 | wake = true; | ||
| 196 | } | ||
| 197 | |||
| 186 | if isr.tcif(channel_num % 4) && cr.read().tcie() { | 198 | if isr.tcif(channel_num % 4) && cr.read().tcie() { |
| 187 | /* acknowledge transfer complete interrupt */ | 199 | // Acknowledge transfer complete interrupt |
| 188 | dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); | 200 | dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); |
| 201 | STATE.complete_count[index].fetch_add(1, Ordering::Release); | ||
| 202 | wake = true; | ||
| 203 | } | ||
| 204 | |||
| 205 | if wake { | ||
| 189 | STATE.ch_wakers[index].wake(); | 206 | STATE.ch_wakers[index].wake(); |
| 190 | } | 207 | } |
| 191 | } | 208 | } |
| @@ -445,7 +462,6 @@ impl<'a, C: Channel> Future for Transfer<'a, C> { | |||
| 445 | 462 | ||
| 446 | // ================================== | 463 | // ================================== |
| 447 | 464 | ||
| 448 | #[must_use = "futures do nothing unless you `.await` or poll them"] | ||
| 449 | pub struct DoubleBuffered<'a, C: Channel, W: Word> { | 465 | pub struct DoubleBuffered<'a, C: Channel, W: Word> { |
| 450 | channel: PeripheralRef<'a, C>, | 466 | channel: PeripheralRef<'a, C>, |
| 451 | _phantom: PhantomData<W>, | 467 | _phantom: PhantomData<W>, |
| @@ -530,6 +546,7 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> { | |||
| 530 | 546 | ||
| 531 | unsafe { | 547 | unsafe { |
| 532 | dma.ifcr(isrn).write(|w| { | 548 | dma.ifcr(isrn).write(|w| { |
| 549 | w.set_htif(isrbit, true); | ||
| 533 | w.set_tcif(isrbit, true); | 550 | w.set_tcif(isrbit, true); |
| 534 | w.set_teif(isrbit, true); | 551 | w.set_teif(isrbit, true); |
| 535 | }) | 552 | }) |
| @@ -578,18 +595,186 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> { | |||
| 578 | let ch = self.channel.regs().st(self.channel.num()); | 595 | let ch = self.channel.regs().st(self.channel.num()); |
| 579 | unsafe { ch.ndtr().read() }.ndt() | 596 | unsafe { ch.ndtr().read() }.ndt() |
| 580 | } | 597 | } |
| 598 | } | ||
| 581 | 599 | ||
| 582 | pub fn blocking_wait(mut self) { | 600 | impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> { |
| 601 | fn drop(&mut self) { | ||
| 602 | self.request_stop(); | ||
| 583 | while self.is_running() {} | 603 | while self.is_running() {} |
| 584 | 604 | ||
| 585 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | 605 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." |
| 586 | fence(Ordering::SeqCst); | 606 | fence(Ordering::SeqCst); |
| 607 | } | ||
| 608 | } | ||
| 587 | 609 | ||
| 588 | core::mem::forget(self); | 610 | // ============================== |
| 611 | |||
| 612 | struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>); | ||
| 613 | |||
| 614 | impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { | ||
| 615 | fn ndtr(&self) -> usize { | ||
| 616 | let ch = self.0.regs().st(self.0.num()); | ||
| 617 | unsafe { ch.ndtr().read() }.ndt() as usize | ||
| 618 | } | ||
| 619 | |||
| 620 | fn get_complete_count(&self) -> usize { | ||
| 621 | STATE.complete_count[self.0.index()].load(Ordering::Acquire) | ||
| 622 | } | ||
| 623 | |||
| 624 | fn reset_complete_count(&mut self) -> usize { | ||
| 625 | STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel) | ||
| 589 | } | 626 | } |
| 590 | } | 627 | } |
| 591 | 628 | ||
| 592 | impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> { | 629 | pub struct RingBuffer<'a, C: Channel, W: Word> { |
| 630 | cr: regs::Cr, | ||
| 631 | channel: PeripheralRef<'a, C>, | ||
| 632 | ringbuf: DmaRingBuffer<'a, W>, | ||
| 633 | } | ||
| 634 | |||
| 635 | impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | ||
| 636 | pub unsafe fn new_read( | ||
| 637 | channel: impl Peripheral<P = C> + 'a, | ||
| 638 | _request: Request, | ||
| 639 | peri_addr: *mut W, | ||
| 640 | buffer: &'a mut [W], | ||
| 641 | options: TransferOptions, | ||
| 642 | ) -> Self { | ||
| 643 | into_ref!(channel); | ||
| 644 | |||
| 645 | let len = buffer.len(); | ||
| 646 | assert!(len > 0 && len <= 0xFFFF); | ||
| 647 | |||
| 648 | let dir = Dir::PeripheralToMemory; | ||
| 649 | let data_size = W::size(); | ||
| 650 | |||
| 651 | let channel_number = channel.num(); | ||
| 652 | let dma = channel.regs(); | ||
| 653 | |||
| 654 | // "Preceding reads and writes cannot be moved past subsequent writes." | ||
| 655 | fence(Ordering::SeqCst); | ||
| 656 | |||
| 657 | let mut w = regs::Cr(0); | ||
| 658 | w.set_dir(dir.into()); | ||
| 659 | w.set_msize(data_size.into()); | ||
| 660 | w.set_psize(data_size.into()); | ||
| 661 | w.set_pl(vals::Pl::VERYHIGH); | ||
| 662 | w.set_minc(vals::Inc::INCREMENTED); | ||
| 663 | w.set_pinc(vals::Inc::FIXED); | ||
| 664 | w.set_teie(true); | ||
| 665 | w.set_htie(true); | ||
| 666 | w.set_tcie(true); | ||
| 667 | w.set_circ(vals::Circ::ENABLED); | ||
| 668 | #[cfg(dma_v1)] | ||
| 669 | w.set_trbuff(true); | ||
| 670 | #[cfg(dma_v2)] | ||
| 671 | w.set_chsel(_request); | ||
| 672 | w.set_pburst(options.pburst.into()); | ||
| 673 | w.set_mburst(options.mburst.into()); | ||
| 674 | w.set_pfctrl(options.flow_ctrl.into()); | ||
| 675 | w.set_en(true); | ||
| 676 | |||
| 677 | let buffer_ptr = buffer.as_mut_ptr(); | ||
| 678 | let mut this = Self { | ||
| 679 | channel, | ||
| 680 | cr: w, | ||
| 681 | ringbuf: DmaRingBuffer::new(buffer), | ||
| 682 | }; | ||
| 683 | this.clear_irqs(); | ||
| 684 | |||
| 685 | #[cfg(dmamux)] | ||
| 686 | super::dmamux::configure_dmamux(&mut *this.channel, _request); | ||
| 687 | |||
| 688 | let ch = dma.st(channel_number); | ||
| 689 | ch.par().write_value(peri_addr as u32); | ||
| 690 | ch.m0ar().write_value(buffer_ptr as u32); | ||
| 691 | ch.ndtr().write_value(regs::Ndtr(len as _)); | ||
| 692 | ch.fcr().write(|w| { | ||
| 693 | if let Some(fth) = options.fifo_threshold { | ||
| 694 | // FIFO mode | ||
| 695 | w.set_dmdis(vals::Dmdis::DISABLED); | ||
| 696 | w.set_fth(fth.into()); | ||
| 697 | } else { | ||
| 698 | // Direct mode | ||
| 699 | w.set_dmdis(vals::Dmdis::ENABLED); | ||
| 700 | } | ||
| 701 | }); | ||
| 702 | |||
| 703 | this | ||
| 704 | } | ||
| 705 | |||
| 706 | pub fn start(&mut self) { | ||
| 707 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 708 | unsafe { ch.cr().write_value(self.cr) } | ||
| 709 | } | ||
| 710 | |||
| 711 | pub fn clear(&mut self) { | ||
| 712 | self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow())); | ||
| 713 | } | ||
| 714 | |||
| 715 | /// Read bytes from the ring buffer | ||
| 716 | /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | ||
| 717 | pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> { | ||
| 718 | self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf) | ||
| 719 | } | ||
| 720 | |||
| 721 | pub fn is_empty(&self) -> bool { | ||
| 722 | self.ringbuf.is_empty() | ||
| 723 | } | ||
| 724 | |||
| 725 | pub fn len(&self) -> usize { | ||
| 726 | self.ringbuf.len() | ||
| 727 | } | ||
| 728 | |||
| 729 | pub fn capacity(&self) -> usize { | ||
| 730 | self.ringbuf.dma_buf.len() | ||
| 731 | } | ||
| 732 | |||
| 733 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 734 | STATE.ch_wakers[self.channel.index()].register(waker); | ||
| 735 | } | ||
| 736 | |||
| 737 | fn clear_irqs(&mut self) { | ||
| 738 | let channel_number = self.channel.num(); | ||
| 739 | let dma = self.channel.regs(); | ||
| 740 | let isrn = channel_number / 4; | ||
| 741 | let isrbit = channel_number % 4; | ||
| 742 | |||
| 743 | unsafe { | ||
| 744 | dma.ifcr(isrn).write(|w| { | ||
| 745 | w.set_htif(isrbit, true); | ||
| 746 | w.set_tcif(isrbit, true); | ||
| 747 | w.set_teif(isrbit, true); | ||
| 748 | }) | ||
| 749 | } | ||
| 750 | } | ||
| 751 | |||
| 752 | pub fn request_stop(&mut self) { | ||
| 753 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 754 | |||
| 755 | // Disable the channel. Keep the IEs enabled so the irqs still fire. | ||
| 756 | unsafe { | ||
| 757 | ch.cr().write(|w| { | ||
| 758 | w.set_teie(true); | ||
| 759 | w.set_htie(true); | ||
| 760 | w.set_tcie(true); | ||
| 761 | }) | ||
| 762 | } | ||
| 763 | } | ||
| 764 | |||
| 765 | pub fn is_running(&mut self) -> bool { | ||
| 766 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 767 | unsafe { ch.cr().read() }.en() | ||
| 768 | } | ||
| 769 | |||
| 770 | /// Synchronize the position of the ring buffer to the actual DMA controller position | ||
| 771 | pub fn reload_position(&mut self) { | ||
| 772 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 773 | self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize; | ||
| 774 | } | ||
| 775 | } | ||
| 776 | |||
| 777 | impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | ||
| 593 | fn drop(&mut self) { | 778 | fn drop(&mut self) { |
| 594 | self.request_stop(); | 779 | self.request_stop(); |
| 595 | while self.is_running() {} | 780 | while self.is_running() {} |
diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs index 3312ca752..3ac0d1b3d 100644 --- a/embassy-stm32/src/dma/mod.rs +++ b/embassy-stm32/src/dma/mod.rs | |||
| @@ -21,6 +21,7 @@ pub use gpdma::*; | |||
| 21 | #[cfg(dmamux)] | 21 | #[cfg(dmamux)] |
| 22 | mod dmamux; | 22 | mod dmamux; |
| 23 | 23 | ||
| 24 | pub(crate) mod ringbuffer; | ||
| 24 | pub mod word; | 25 | pub mod word; |
| 25 | 26 | ||
| 26 | use core::mem; | 27 | use core::mem; |
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs new file mode 100644 index 000000000..38cc87ae9 --- /dev/null +++ b/embassy-stm32/src/dma/ringbuffer.rs | |||
| @@ -0,0 +1,420 @@ | |||
| 1 | #![cfg_attr(gpdma, allow(unused))] | ||
| 2 | |||
| 3 | use core::ops::Range; | ||
| 4 | use core::sync::atomic::{compiler_fence, Ordering}; | ||
| 5 | |||
| 6 | use super::word::Word; | ||
| 7 | |||
| 8 | /// A "read-only" ring-buffer to be used together with the DMA controller which | ||
| 9 | /// writes in a circular way, "uncontrolled" to the buffer. | ||
| 10 | /// | ||
| 11 | /// A snapshot of the ring buffer state can be attained by setting the `ndtr` field | ||
| 12 | /// to the current register value. `ndtr` describes the current position of the DMA | ||
| 13 | /// write. | ||
| 14 | /// | ||
| 15 | /// # Buffer layout | ||
| 16 | /// | ||
| 17 | /// ```text | ||
| 18 | /// Without wraparound: With wraparound: | ||
| 19 | /// | ||
| 20 | /// + buf +--- NDTR ---+ + buf +---------- NDTR ----------+ | ||
| 21 | /// | | | | | | | ||
| 22 | /// v v v v v v | ||
| 23 | /// +-----------------------------------------+ +-----------------------------------------+ | ||
| 24 | /// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo| |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX| | ||
| 25 | /// +-----------------------------------------+ +-----------------------------------------+ | ||
| 26 | /// ^ ^ ^ ^ ^ ^ | ||
| 27 | /// | | | | | | | ||
| 28 | /// +- first --+ | +- end ------+ | | ||
| 29 | /// | | | | | ||
| 30 | /// +- end --------------------+ +- first ----------------+ | ||
| 31 | /// ``` | ||
| 32 | pub struct DmaRingBuffer<'a, W: Word> { | ||
| 33 | pub(crate) dma_buf: &'a mut [W], | ||
| 34 | first: usize, | ||
| 35 | pub ndtr: usize, | ||
| 36 | } | ||
| 37 | |||
| 38 | #[derive(Debug, PartialEq)] | ||
| 39 | pub struct OverrunError; | ||
| 40 | |||
| 41 | pub trait DmaCtrl { | ||
| 42 | /// Get the NDTR register value, i.e. the space left in the underlying | ||
| 43 | /// buffer until the dma writer wraps. | ||
| 44 | fn ndtr(&self) -> usize; | ||
| 45 | |||
| 46 | /// Get the transfer completed counter. | ||
| 47 | /// This counter is incremented by the dma controller when NDTR is reloaded, | ||
| 48 | /// i.e. when the writing wraps. | ||
| 49 | fn get_complete_count(&self) -> usize; | ||
| 50 | |||
| 51 | /// Reset the transfer completed counter to 0 and return the value just prior to the reset. | ||
| 52 | fn reset_complete_count(&mut self) -> usize; | ||
| 53 | } | ||
| 54 | |||
| 55 | impl<'a, W: Word> DmaRingBuffer<'a, W> { | ||
| 56 | pub fn new(dma_buf: &'a mut [W]) -> Self { | ||
| 57 | let ndtr = dma_buf.len(); | ||
| 58 | Self { | ||
| 59 | dma_buf, | ||
| 60 | first: 0, | ||
| 61 | ndtr, | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | /// Reset the ring buffer to its initial state | ||
| 66 | pub fn clear(&mut self, mut dma: impl DmaCtrl) { | ||
| 67 | self.first = 0; | ||
| 68 | self.ndtr = self.dma_buf.len(); | ||
| 69 | dma.reset_complete_count(); | ||
| 70 | } | ||
| 71 | |||
| 72 | /// The buffer end position | ||
| 73 | fn end(&self) -> usize { | ||
| 74 | self.dma_buf.len() - self.ndtr | ||
| 75 | } | ||
| 76 | |||
| 77 | /// Returns whether the buffer is empty | ||
| 78 | pub fn is_empty(&self) -> bool { | ||
| 79 | self.first == self.end() | ||
| 80 | } | ||
| 81 | |||
| 82 | /// The current number of bytes in the buffer | ||
| 83 | /// This may change at any time if dma is currently active | ||
| 84 | pub fn len(&self) -> usize { | ||
| 85 | // Read out a stable end (the dma periheral can change it at anytime) | ||
| 86 | let end = self.end(); | ||
| 87 | if self.first <= end { | ||
| 88 | // No wrap | ||
| 89 | end - self.first | ||
| 90 | } else { | ||
| 91 | self.dma_buf.len() - self.first + end | ||
| 92 | } | ||
| 93 | } | ||
| 94 | |||
    /// Read bytes from the ring buffer.
    ///
    /// Returns the number of elements copied into `buf` (0 if the ring buffer is currently empty).
    /// `OverrunError` is returned if the portion to be read was overwritten by the DMA controller,
    /// i.e. the DMA writer lapped the reader; the buffered data is then no longer trustworthy.
    pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<usize, OverrunError> {
        // Snapshot the current write position (computed from the last reloaded ndtr).
        let end = self.end();

        // Order the `end` snapshot before reading the buffer contents below.
        compiler_fence(Ordering::SeqCst);

        if self.first == end {
            // The buffer is currently empty

            if dma.get_complete_count() > 0 {
                // The DMA has written such that the ring buffer wraps at least once
                self.ndtr = dma.ndtr();
                if self.end() > self.first || dma.get_complete_count() > 1 {
                    // Either the writer has moved past `first` after wrapping, or it
                    // wrapped more than once: unread data was overwritten.
                    return Err(OverrunError);
                }
            }

            Ok(0)
        } else if self.first < end {
            // The available, unread portion in the ring buffer DOES NOT wrap

            if dma.get_complete_count() > 1 {
                return Err(OverrunError);
            }

            // Copy out the bytes from the dma buffer
            let len = self.copy_to(buf, self.first..end);

            // Order the copy before re-checking the DMA state below.
            compiler_fence(Ordering::SeqCst);

            match dma.get_complete_count() {
                0 => {
                    // The DMA writer has not wrapped before nor after the copy
                }
                1 => {
                    // The DMA writer has written such that the ring buffer now wraps
                    self.ndtr = dma.ndtr();
                    if self.end() > self.first || dma.get_complete_count() > 1 {
                        // The bytes that we have copied out have overflowed
                        // as the writer has now both wrapped and is currently writing
                        // within the region that we have just copied out
                        return Err(OverrunError);
                    }
                }
                _ => {
                    // Wrapped at least twice during the copy: definite overrun.
                    return Err(OverrunError);
                }
            }

            self.first = (self.first + len) % self.dma_buf.len();
            Ok(len)
        } else {
            // The available, unread portion in the ring buffer DOES wrap
            // The DMA writer has wrapped since we last read and is currently
            // writing (or the next byte added will be) in the beginning of the ring buffer.

            let complete_count = dma.get_complete_count();
            if complete_count > 1 {
                return Err(OverrunError);
            }

            // If the unread portion wraps then the writer must also have wrapped
            assert!(complete_count == 1);

            if self.first + buf.len() < self.dma_buf.len() {
                // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.

                // Copy out from the dma buffer
                let len = self.copy_to(buf, self.first..self.dma_buf.len());

                // Order the copy before re-checking the DMA state below.
                compiler_fence(Ordering::SeqCst);

                // We have now copied out the data from dma_buf
                // Make sure that the just read part was not overwritten during the copy
                self.ndtr = dma.ndtr();
                if self.end() > self.first || dma.get_complete_count() > 1 {
                    // The writer has entered the data that we have just read since we read out `end` in the beginning and until now.
                    return Err(OverrunError);
                }

                self.first = (self.first + len) % self.dma_buf.len();
                Ok(len)
            } else {
                // The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
                // so the next read will not have any unread tail bytes in the ring buffer.

                // Copy out from the dma buffer: first the tail (up to the physical end),
                // then the head (from index 0 up to the write position).
                let tail = self.copy_to(buf, self.first..self.dma_buf.len());
                let head = self.copy_to(&mut buf[tail..], 0..end);

                // Order the copy before re-checking the DMA state below.
                compiler_fence(Ordering::SeqCst);

                // We have now copied out the data from dma_buf
                // Reset complete counter and make sure that the just read part was not overwritten during the copy
                self.ndtr = dma.ndtr();
                let complete_count = dma.reset_complete_count();
                if self.end() > self.first || complete_count > 1 {
                    return Err(OverrunError);
                }

                self.first = head;
                Ok(tail + head)
            }
        }
    }
| 201 | |||
| 202 | /// Copy from the dma buffer at `data_range` into `buf` | ||
| 203 | fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize { | ||
| 204 | // Limit the number of bytes that can be copied | ||
| 205 | let length = usize::min(data_range.len(), buf.len()); | ||
| 206 | |||
| 207 | // Copy from dma buffer into read buffer | ||
| 208 | // We need to do it like this instead of a simple copy_from_slice() because | ||
| 209 | // reading from a part of memory that may be simultaneously written to is unsafe | ||
| 210 | unsafe { | ||
| 211 | let dma_buf = self.dma_buf.as_ptr(); | ||
| 212 | |||
| 213 | for i in 0..length { | ||
| 214 | buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize)); | ||
| 215 | } | ||
| 216 | } | ||
| 217 | |||
| 218 | length | ||
| 219 | } | ||
| 220 | } | ||
| 221 | |||
#[cfg(test)]
mod tests {
    use core::array;
    use core::cell::RefCell;

    use super::*;

    /// Test double standing in for the DMA controller state observed by the ring buffer.
    struct TestCtrl {
        // NDTR value returned by subsequent `ndtr()` calls; `None` means the test
        // does not expect `ndtr()` to be called.
        next_ndtr: RefCell<Option<usize>>,
        // Number of full-transfer completions since the last reset.
        complete_count: usize,
    }

    impl TestCtrl {
        pub const fn new() -> Self {
            Self {
                next_ndtr: RefCell::new(None),
                complete_count: 0,
            }
        }

        pub fn set_next_ndtr(&mut self, ndtr: usize) {
            self.next_ndtr.borrow_mut().replace(ndtr);
        }
    }

    impl DmaCtrl for &mut TestCtrl {
        fn ndtr(&self) -> usize {
            // Read-only access: an immutable `borrow()` suffices here; a
            // `borrow_mut()` would take an exclusive borrow for no reason.
            self.next_ndtr.borrow().unwrap()
        }

        fn get_complete_count(&self) -> usize {
            self.complete_count
        }

        fn reset_complete_count(&mut self) -> usize {
            let old = self.complete_count;
            self.complete_count = 0;
            old
        }
    }

    #[test]
    fn empty() {
        let mut dma_buf = [0u8; 16];
        let ringbuf = DmaRingBuffer::new(&mut dma_buf);

        assert!(ringbuf.is_empty());
        assert_eq!(0, ringbuf.len());
    }

    #[test]
    fn can_read() {
        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ctrl = TestCtrl::new();
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
        ringbuf.ndtr = 6;

        assert!(!ringbuf.is_empty());
        assert_eq!(10, ringbuf.len());

        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
        assert_eq!([0, 1], buf);
        assert_eq!(8, ringbuf.len());

        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
        assert_eq!([2, 3], buf);
        assert_eq!(6, ringbuf.len());

        let mut buf = [0; 8];
        assert_eq!(6, ringbuf.read(&mut ctrl, &mut buf).unwrap());
        assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
        assert_eq!(0, ringbuf.len());

        let mut buf = [0; 2];
        assert_eq!(0, ringbuf.read(&mut ctrl, &mut buf).unwrap());
    }

    #[test]
    fn can_read_with_wrap() {
        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ctrl = TestCtrl::new();
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
        ringbuf.first = 12;
        ringbuf.ndtr = 10;

        // The dma controller has written 4 + 6 bytes and has reloaded NDTR
        ctrl.complete_count = 1;
        ctrl.set_next_ndtr(10);

        assert!(!ringbuf.is_empty());
        assert_eq!(6 + 4, ringbuf.len());

        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
        assert_eq!([12, 13], buf);
        assert_eq!(6 + 2, ringbuf.len());

        let mut buf = [0; 4];
        assert_eq!(4, ringbuf.read(&mut ctrl, &mut buf).unwrap());
        assert_eq!([14, 15, 0, 1], buf);
        assert_eq!(4, ringbuf.len());
    }

    #[test]
    fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ctrl = TestCtrl::new();
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
        ringbuf.first = 2;
        ringbuf.ndtr = 6;

        // The dma controller has written 6 + 2 bytes and has reloaded NDTR
        ctrl.complete_count = 1;
        ctrl.set_next_ndtr(14);

        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
        assert_eq!([2, 3], buf);

        assert_eq!(1, ctrl.complete_count); // The interrupt flag IS NOT cleared
    }

    #[test]
    fn can_read_when_dma_writer_is_wrapped_and_read_wraps() {
        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ctrl = TestCtrl::new();
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
        ringbuf.first = 12;
        ringbuf.ndtr = 10;

        // The dma controller has written 6 + 2 bytes and has reloaded NDTR
        ctrl.complete_count = 1;
        ctrl.set_next_ndtr(14);

        let mut buf = [0; 10];
        assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap());
        assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf);

        assert_eq!(0, ctrl.complete_count); // The interrupt flag IS cleared
    }

    #[test]
    fn cannot_read_when_dma_writer_wraps_with_same_ndtr() {
        let mut dma_buf = [0u8; 16];
        let mut ctrl = TestCtrl::new();
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
        ringbuf.first = 6;
        ringbuf.ndtr = 10;
        ctrl.set_next_ndtr(9);

        assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty

        // The dma controller has written exactly 16 bytes
        ctrl.complete_count = 1;

        let mut buf = [0; 2];
        assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));

        assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
    }

    #[test]
    fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ctrl = TestCtrl::new();
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
        ringbuf.first = 2;
        ringbuf.ndtr = 6;

        // The dma controller has written 6 + 3 bytes and has reloaded NDTR
        ctrl.complete_count = 1;
        ctrl.set_next_ndtr(13);

        let mut buf = [0; 2];
        assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));

        assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
    }

    #[test]
    fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ctrl = TestCtrl::new();
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
        ringbuf.first = 12;
        ringbuf.ndtr = 10;

        // The dma controller has written 6 + 13 bytes and has reloaded NDTR
        ctrl.complete_count = 1;
        ctrl.set_next_ndtr(3);

        let mut buf = [0; 2];
        assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));

        assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
    }
}
diff --git a/embassy-stm32/src/usart/mod.rs b/embassy-stm32/src/usart/mod.rs index 266561659..ad450f2b3 100644 --- a/embassy-stm32/src/usart/mod.rs +++ b/embassy-stm32/src/usart/mod.rs | |||
| @@ -283,8 +283,8 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> { | |||
| 283 | 283 | ||
| 284 | let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) }; | 284 | let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) }; |
| 285 | 285 | ||
| 286 | let mut wake = false; | ||
| 286 | let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie()); | 287 | let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie()); |
| 287 | |||
| 288 | if has_errors { | 288 | if has_errors { |
| 289 | // clear all interrupts and DMA Rx Request | 289 | // clear all interrupts and DMA Rx Request |
| 290 | unsafe { | 290 | unsafe { |
| @@ -304,22 +304,30 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> { | |||
| 304 | }); | 304 | }); |
| 305 | } | 305 | } |
| 306 | 306 | ||
| 307 | compiler_fence(Ordering::SeqCst); | 307 | wake = true; |
| 308 | } else { | ||
| 309 | if cr1.idleie() && sr.idle() { | ||
| 310 | // IDLE detected: no more data will come | ||
| 311 | unsafe { | ||
| 312 | r.cr1().modify(|w| { | ||
| 313 | // disable idle line detection | ||
| 314 | w.set_idleie(false); | ||
| 315 | }); | ||
| 316 | } | ||
| 308 | 317 | ||
| 309 | s.rx_waker.wake(); | 318 | wake = true; |
| 310 | } else if cr1.idleie() && sr.idle() { | 319 | } |
| 311 | // IDLE detected: no more data will come | ||
| 312 | unsafe { | ||
| 313 | r.cr1().modify(|w| { | ||
| 314 | // disable idle line detection | ||
| 315 | w.set_idleie(false); | ||
| 316 | }); | ||
| 317 | 320 | ||
| 318 | r.cr3().modify(|w| { | 321 | if cr1.rxneie() { |
| 319 | // disable DMA Rx Request | 322 | // We cannot check the RXNE flag as it is auto-cleared by the DMA controller |
| 320 | w.set_dmar(false); | 323 | |
| 321 | }); | 324 | // It is up to the listener to determine if this in fact was a RX event and disable the RXNE detection |
| 325 | |||
| 326 | wake = true; | ||
| 322 | } | 327 | } |
| 328 | } | ||
| 329 | |||
| 330 | if wake { | ||
| 323 | compiler_fence(Ordering::SeqCst); | 331 | compiler_fence(Ordering::SeqCst); |
| 324 | 332 | ||
| 325 | s.rx_waker.wake(); | 333 | s.rx_waker.wake(); |
| @@ -973,6 +981,11 @@ pub use buffered::*; | |||
| 973 | #[cfg(feature = "nightly")] | 981 | #[cfg(feature = "nightly")] |
| 974 | mod buffered; | 982 | mod buffered; |
| 975 | 983 | ||
| 984 | #[cfg(not(gpdma))] | ||
| 985 | mod rx_ringbuffered; | ||
| 986 | #[cfg(not(gpdma))] | ||
| 987 | pub use rx_ringbuffered::RingBufferedUartRx; | ||
| 988 | |||
| 976 | #[cfg(usart_v1)] | 989 | #[cfg(usart_v1)] |
| 977 | fn tdr(r: crate::pac::usart::Usart) -> *mut u8 { | 990 | fn tdr(r: crate::pac::usart::Usart) -> *mut u8 { |
| 978 | r.dr().ptr() as _ | 991 | r.dr().ptr() as _ |
diff --git a/embassy-stm32/src/usart/rx_ringbuffered.rs b/embassy-stm32/src/usart/rx_ringbuffered.rs new file mode 100644 index 000000000..33b750497 --- /dev/null +++ b/embassy-stm32/src/usart/rx_ringbuffered.rs | |||
| @@ -0,0 +1,286 @@ | |||
| 1 | use core::future::poll_fn; | ||
| 2 | use core::sync::atomic::{compiler_fence, Ordering}; | ||
| 3 | use core::task::Poll; | ||
| 4 | |||
| 5 | use embassy_hal_common::drop::OnDrop; | ||
| 6 | use embassy_hal_common::PeripheralRef; | ||
| 7 | use futures::future::{select, Either}; | ||
| 8 | |||
| 9 | use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx}; | ||
| 10 | use crate::dma::ringbuffer::OverrunError; | ||
| 11 | use crate::dma::RingBuffer; | ||
| 12 | |||
/// Rx half of a UART that continuously receives into a DMA ring buffer in the
/// background. Created with [`UartRx::into_ring_buffered`].
pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> {
    // Keeps the UART peripheral alive/claimed for 'd.
    _peri: PeripheralRef<'d, T>,
    // DMA-backed ring buffer the controller writes received bytes into.
    ring_buf: RingBuffer<'d, RxDma, u8>,
}
| 17 | |||
impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
    /// Turn the `UartRx` into a buffered uart which can continuously receive in the background
    /// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the
    /// DMA controller, and must be sufficiently large, such that it will not overflow.
    ///
    /// # Panics
    /// Panics if `dma_buf` is empty or longer than 0xFFFF bytes.
    pub fn into_ring_buffered(self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d, T, RxDma> {
        // The DMA transfer length is a 16-bit quantity, hence the 0xFFFF upper bound.
        assert!(dma_buf.len() > 0 && dma_buf.len() <= 0xFFFF);

        let request = self.rx_dma.request();
        let opts = Default::default();
        let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) };
        RingBufferedUartRx {
            _peri: self._peri,
            ring_buf,
        }
    }
}
| 34 | |||
impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
    /// Clear the ring buffer and (re)start the background DMA receive.
    pub fn start(&mut self) -> Result<(), Error> {
        // Clear the ring buffer so that it is ready to receive data
        self.ring_buf.clear();

        self.setup_uart();

        Ok(())
    }

    /// Start uart background receive
    fn setup_uart(&mut self) {
        // fence before starting DMA.
        compiler_fence(Ordering::SeqCst);

        self.ring_buf.start();

        let r = T::regs();
        // clear all interrupts and DMA Rx Request
        // SAFETY: only clears Rx related flags
        unsafe {
            r.cr1().modify(|w| {
                // disable RXNE interrupt
                w.set_rxneie(false);
                // enable parity interrupt if not ParityNone
                w.set_peie(w.pce());
                // disable idle line interrupt
                w.set_idleie(false);
            });
            r.cr3().modify(|w| {
                // enable Error Interrupt: (Frame error, Noise error, Overrun error)
                w.set_eie(true);
                // enable DMA Rx Request
                w.set_dmar(true);
            });
        }
    }

    /// Stop uart background receive
    fn teardown_uart(&mut self) {
        let r = T::regs();
        // clear all interrupts and DMA Rx Request
        // SAFETY: only clears Rx related flags
        unsafe {
            r.cr1().modify(|w| {
                // disable RXNE interrupt
                w.set_rxneie(false);
                // disable parity interrupt
                w.set_peie(false);
                // disable idle line interrupt
                w.set_idleie(false);
            });
            r.cr3().modify(|w| {
                // disable Error Interrupt: (Frame error, Noise error, Overrun error)
                w.set_eie(false);
                // disable DMA Rx Request
                w.set_dmar(false);
            });
        }

        compiler_fence(Ordering::SeqCst);

        // Request the DMA to stop and busy-wait until the channel has actually
        // halted, so the ring buffer memory is no longer written after return.
        self.ring_buf.request_stop();
        while self.ring_buf.is_running() {}
    }

    /// Read bytes that are readily available in the ring buffer.
    /// If no bytes are currently available in the buffer the call waits until some
    /// bytes are available (at least one byte and at most half the buffer size)
    ///
    /// Background receive is started if `start()` has not been previously called.
    ///
    /// Receive in the background is terminated if an error is returned.
    /// It must then manually be started again by calling `start()` or by re-calling `read()`.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        let r = T::regs();

        // Start background receive if it was not already started
        // SAFETY: read only
        let is_started = unsafe { r.cr3().read().dmar() };
        if !is_started {
            self.start()?;
        }

        // Report any hardware error that occurred since the last read; on error
        // the background receive is torn down (see doc comment above).
        // SAFETY: read only and we only use Rx related flags
        let s = unsafe { sr(r).read() };
        let has_errors = s.pe() || s.fe() || s.ne() || s.ore();
        if has_errors {
            self.teardown_uart();

            if s.pe() {
                return Err(Error::Parity);
            } else if s.fe() {
                return Err(Error::Framing);
            } else if s.ne() {
                return Err(Error::Noise);
            } else {
                return Err(Error::Overrun);
            }
        }

        // Fast path: return immediately if the ring buffer already holds data.
        self.ring_buf.reload_position();
        match self.ring_buf.read(buf) {
            Ok(len) if len == 0 => {}
            Ok(len) => {
                assert!(len > 0);
                return Ok(len);
            }
            Err(OverrunError) => {
                // Stop any transfer from now on
                // The user must re-start to receive any more data
                self.teardown_uart();
                return Err(Error::Overrun);
            }
        }

        // Slow path: wait until either the DMA signals progress (half/full
        // transfer) or the uart detects an idle line, then re-check for data.
        loop {
            self.wait_for_data_or_idle().await?;

            self.ring_buf.reload_position();
            if !self.ring_buf.is_empty() {
                break;
            }
        }

        let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?;
        assert!(len > 0);

        Ok(len)
    }

    /// Wait for uart idle or dma half-full or full
    async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
        let r = T::regs();

        // make sure USART state is restored to neutral state
        let _on_drop = OnDrop::new(move || {
            // SAFETY: only clears Rx related flags
            unsafe {
                r.cr1().modify(|w| {
                    // disable idle line interrupt
                    w.set_idleie(false);
                });
            }
        });

        // SAFETY: only sets Rx related flags
        unsafe {
            r.cr1().modify(|w| {
                // enable idle line interrupt
                w.set_idleie(true);
            });
        }

        compiler_fence(Ordering::SeqCst);

        // Future which completes when the dma is half full or full
        let dma = poll_fn(|cx| {
            self.ring_buf.set_waker(cx.waker());

            compiler_fence(Ordering::SeqCst);

            self.ring_buf.reload_position();
            if !self.ring_buf.is_empty() {
                // Some data is now available
                Poll::Ready(())
            } else {
                Poll::Pending
            }
        });

        // Future which completes when idle line is detected
        let uart = poll_fn(|cx| {
            let s = T::state();
            s.rx_waker.register(cx.waker());

            compiler_fence(Ordering::SeqCst);

            // SAFETY: read only and we only use Rx related flags
            let sr = unsafe { sr(r).read() };

            // SAFETY: only clears Rx related flags
            unsafe {
                // This read also clears the error and idle interrupt flags on v1.
                rdr(r).read_volatile();
                clear_interrupt_flags(r, sr);
            }

            let has_errors = sr.pe() || sr.fe() || sr.ne() || sr.ore();
            if has_errors {
                if sr.pe() {
                    return Poll::Ready(Err(Error::Parity));
                } else if sr.fe() {
                    return Poll::Ready(Err(Error::Framing));
                } else if sr.ne() {
                    return Poll::Ready(Err(Error::Noise));
                } else {
                    return Poll::Ready(Err(Error::Overrun));
                }
            }

            if sr.idle() {
                // Idle line is detected
                Poll::Ready(Ok(()))
            } else {
                Poll::Pending
            }
        });

        match select(dma, uart).await {
            Either::Left(((), _)) => Ok(()),
            Either::Right((Ok(()), _)) => Ok(()),
            Either::Right((Err(e), _)) => {
                // A uart error terminates the background receive.
                self.teardown_uart();
                Err(e)
            }
        }
    }
}
| 254 | |||
impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T, RxDma> {
    fn drop(&mut self) {
        // Stop the DMA transfer and disable the Rx interrupts so the hardware
        // no longer writes into the ring buffer once it goes out of scope.
        self.teardown_uart();
    }
}
| 260 | |||
#[cfg(all(feature = "unstable-traits", feature = "nightly"))]
mod eio {
    //! `embedded-io` trait implementations for [`RingBufferedUartRx`].

    use embedded_io::asynch::Read;
    use embedded_io::Io;

    use super::RingBufferedUartRx;
    use crate::usart::{BasicInstance, Error, RxDma};

    impl<T, Rx> Io for RingBufferedUartRx<'_, T, Rx>
    where
        T: BasicInstance,
        Rx: RxDma<T>,
    {
        type Error = Error;
    }

    impl<T, Rx> Read for RingBufferedUartRx<'_, T, Rx>
    where
        T: BasicInstance,
        Rx: RxDma<T>,
    {
        // Delegates to the inherent `read` method.
        async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
            self.read(buf).await
        }
    }
}
diff --git a/tests/stm32/Cargo.toml b/tests/stm32/Cargo.toml index d10d01e29..5cd949661 100644 --- a/tests/stm32/Cargo.toml +++ b/tests/stm32/Cargo.toml | |||
| @@ -5,24 +5,26 @@ version = "0.1.0" | |||
| 5 | license = "MIT OR Apache-2.0" | 5 | license = "MIT OR Apache-2.0" |
| 6 | 6 | ||
| 7 | [features] | 7 | [features] |
| 8 | stm32f103c8 = ["embassy-stm32/stm32f103c8"] # Blue Pill | 8 | stm32f103c8 = ["embassy-stm32/stm32f103c8", "not-gpdma"] # Blue Pill |
| 9 | stm32f429zi = ["embassy-stm32/stm32f429zi", "sdmmc", "chrono"] # Nucleo | 9 | stm32f429zi = ["embassy-stm32/stm32f429zi", "sdmmc", "chrono", "not-gpdma"] # Nucleo |
| 10 | stm32g071rb = ["embassy-stm32/stm32g071rb"] # Nucleo | 10 | stm32g071rb = ["embassy-stm32/stm32g071rb", "not-gpdma"] # Nucleo |
| 11 | stm32c031c6 = ["embassy-stm32/stm32c031c6"] # Nucleo | 11 | stm32c031c6 = ["embassy-stm32/stm32c031c6", "not-gpdma"] # Nucleo |
| 12 | stm32g491re = ["embassy-stm32/stm32g491re"] # Nucleo | 12 | stm32g491re = ["embassy-stm32/stm32g491re", "not-gpdma"] # Nucleo |
| 13 | stm32h755zi = ["embassy-stm32/stm32h755zi-cm7"] # Nucleo | 13 | stm32h755zi = ["embassy-stm32/stm32h755zi-cm7", "not-gpdma"] # Nucleo |
| 14 | stm32wb55rg = ["embassy-stm32/stm32wb55rg"] # Nucleo | 14 | stm32wb55rg = ["embassy-stm32/stm32wb55rg", "not-gpdma"] # Nucleo |
| 15 | stm32h563zi = ["embassy-stm32/stm32h563zi"] # Nucleo | 15 | stm32h563zi = ["embassy-stm32/stm32h563zi"] # Nucleo |
| 16 | stm32u585ai = ["embassy-stm32/stm32u585ai"] # IoT board | 16 | stm32u585ai = ["embassy-stm32/stm32u585ai"] # IoT board |
| 17 | 17 | ||
| 18 | sdmmc = [] | 18 | sdmmc = [] |
| 19 | chrono = ["embassy-stm32/chrono", "dep:chrono"] | 19 | chrono = ["embassy-stm32/chrono", "dep:chrono"] |
| 20 | not-gpdma = [] | ||
| 20 | 21 | ||
| 21 | [dependencies] | 22 | [dependencies] |
| 22 | embassy-sync = { version = "0.2.0", path = "../../embassy-sync", features = ["defmt"] } | 23 | embassy-sync = { version = "0.2.0", path = "../../embassy-sync", features = ["defmt"] } |
| 23 | embassy-executor = { version = "0.2.0", path = "../../embassy-executor", features = ["arch-cortex-m", "executor-thread", "defmt", "integrated-timers"] } | 24 | embassy-executor = { version = "0.2.0", path = "../../embassy-executor", features = ["arch-cortex-m", "executor-thread", "defmt", "integrated-timers"] } |
| 24 | embassy-time = { version = "0.1.0", path = "../../embassy-time", features = ["defmt", "tick-hz-32_768"] } | 25 | embassy-time = { version = "0.1.0", path = "../../embassy-time", features = ["defmt", "tick-hz-32_768", "defmt-timestamp-uptime"] } |
| 25 | embassy-stm32 = { version = "0.1.0", path = "../../embassy-stm32", features = ["nightly", "defmt", "unstable-pac", "memory-x", "time-driver-any"] } | 26 | embassy-stm32 = { version = "0.1.0", path = "../../embassy-stm32", features = ["nightly", "defmt", "unstable-pac", "memory-x", "time-driver-any"] } |
| 27 | embassy-futures = { version = "0.1.0", path = "../../embassy-futures" } | ||
| 26 | 28 | ||
| 27 | defmt = "0.3.0" | 29 | defmt = "0.3.0" |
| 28 | defmt-rtt = "0.4" | 30 | defmt-rtt = "0.4" |
| @@ -33,6 +35,8 @@ embedded-hal = "0.2.6" | |||
| 33 | embedded-hal-1 = { package = "embedded-hal", version = "=1.0.0-alpha.10" } | 35 | embedded-hal-1 = { package = "embedded-hal", version = "=1.0.0-alpha.10" } |
| 34 | embedded-hal-async = { version = "=0.2.0-alpha.1" } | 36 | embedded-hal-async = { version = "=0.2.0-alpha.1" } |
| 35 | panic-probe = { version = "0.3.0", features = ["print-defmt"] } | 37 | panic-probe = { version = "0.3.0", features = ["print-defmt"] } |
| 38 | rand_core = { version = "0.6", default-features = false } | ||
| 39 | rand_chacha = { version = "0.3", default-features = false } | ||
| 36 | 40 | ||
| 37 | chrono = { version = "^0.4", default-features = false, optional = true} | 41 | chrono = { version = "^0.4", default-features = false, optional = true} |
| 38 | 42 | ||
| @@ -78,6 +82,11 @@ name = "usart_dma" | |||
| 78 | path = "src/bin/usart_dma.rs" | 82 | path = "src/bin/usart_dma.rs" |
| 79 | required-features = [] | 83 | required-features = [] |
| 80 | 84 | ||
| 85 | [[bin]] | ||
| 86 | name = "usart_rx_ringbuffered" | ||
| 87 | path = "src/bin/usart_rx_ringbuffered.rs" | ||
| 88 | required-features = [ "not-gpdma",] | ||
| 89 | |||
| 81 | # END TESTS | 90 | # END TESTS |
| 82 | 91 | ||
| 83 | [profile.dev] | 92 | [profile.dev] |
diff --git a/tests/stm32/src/bin/usart_dma.rs b/tests/stm32/src/bin/usart_dma.rs index d673df0f3..de6cd41d1 100644 --- a/tests/stm32/src/bin/usart_dma.rs +++ b/tests/stm32/src/bin/usart_dma.rs | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | mod example_common; | 6 | mod example_common; |
| 7 | use defmt::assert_eq; | 7 | use defmt::assert_eq; |
| 8 | use embassy_executor::Spawner; | 8 | use embassy_executor::Spawner; |
| 9 | use embassy_futures::join::join; | ||
| 9 | use embassy_stm32::interrupt; | 10 | use embassy_stm32::interrupt; |
| 10 | use embassy_stm32::usart::{Config, Uart}; | 11 | use embassy_stm32::usart::{Config, Uart}; |
| 11 | use example_common::*; | 12 | use example_common::*; |
| @@ -76,18 +77,26 @@ async fn main(_spawner: Spawner) { | |||
| 76 | (p.PB6, p.PB7, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2); | 77 | (p.PB6, p.PB7, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2); |
| 77 | 78 | ||
| 78 | let config = Config::default(); | 79 | let config = Config::default(); |
| 79 | let mut usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config); | 80 | let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config); |
| 80 | 81 | ||
| 81 | // We can't send too many bytes, they have to fit in the FIFO. | 82 | const LEN: usize = 128; |
| 82 | // This is because we aren't sending+receiving at the same time. | 83 | let mut tx_buf = [0; LEN]; |
| 83 | // For whatever reason, blocking works with 2 bytes but DMA only with 1?? | 84 | let mut rx_buf = [0; LEN]; |
| 85 | for i in 0..LEN { | ||
| 86 | tx_buf[i] = i as u8; | ||
| 87 | } | ||
| 84 | 88 | ||
| 85 | let data = [0x42]; | 89 | let (mut tx, mut rx) = usart.split(); |
| 86 | usart.write(&data).await.unwrap(); | ||
| 87 | 90 | ||
| 88 | let mut buf = [0; 1]; | 91 | let tx_fut = async { |
| 89 | usart.read(&mut buf).await.unwrap(); | 92 | tx.write(&tx_buf).await.unwrap(); |
| 90 | assert_eq!(buf, data); | 93 | }; |
| 94 | let rx_fut = async { | ||
| 95 | rx.read(&mut rx_buf).await.unwrap(); | ||
| 96 | }; | ||
| 97 | join(rx_fut, tx_fut).await; | ||
| 98 | |||
| 99 | assert_eq!(tx_buf, rx_buf); | ||
| 91 | 100 | ||
| 92 | info!("Test OK"); | 101 | info!("Test OK"); |
| 93 | cortex_m::asm::bkpt(); | 102 | cortex_m::asm::bkpt(); |
diff --git a/tests/stm32/src/bin/usart_rx_ringbuffered.rs b/tests/stm32/src/bin/usart_rx_ringbuffered.rs new file mode 100644 index 000000000..2c4a8fdf4 --- /dev/null +++ b/tests/stm32/src/bin/usart_rx_ringbuffered.rs | |||
| @@ -0,0 +1,200 @@ | |||
| 1 | // required-features: not-gpdma | ||
| 2 | |||
| 3 | #![no_std] | ||
| 4 | #![no_main] | ||
| 5 | #![feature(type_alias_impl_trait)] | ||
| 6 | |||
| 7 | #[path = "../example_common.rs"] | ||
| 8 | mod example_common; | ||
| 9 | use defmt::{assert_eq, panic}; | ||
| 10 | use embassy_executor::Spawner; | ||
| 11 | use embassy_stm32::interrupt; | ||
| 12 | use embassy_stm32::usart::{Config, DataBits, Parity, RingBufferedUartRx, StopBits, Uart, UartTx}; | ||
| 13 | use embassy_time::{Duration, Timer}; | ||
| 14 | use example_common::*; | ||
| 15 | use rand_chacha::ChaCha8Rng; | ||
| 16 | use rand_core::{RngCore, SeedableRng}; | ||
| 17 | |||
| 18 | #[cfg(feature = "stm32f103c8")] | ||
| 19 | mod board { | ||
| 20 | pub type Uart = embassy_stm32::peripherals::USART1; | ||
| 21 | pub type TxDma = embassy_stm32::peripherals::DMA1_CH4; | ||
| 22 | pub type RxDma = embassy_stm32::peripherals::DMA1_CH5; | ||
| 23 | } | ||
| 24 | #[cfg(feature = "stm32g491re")] | ||
| 25 | mod board { | ||
| 26 | pub type Uart = embassy_stm32::peripherals::USART1; | ||
| 27 | pub type TxDma = embassy_stm32::peripherals::DMA1_CH1; | ||
| 28 | pub type RxDma = embassy_stm32::peripherals::DMA1_CH2; | ||
| 29 | } | ||
| 30 | #[cfg(feature = "stm32g071rb")] | ||
| 31 | mod board { | ||
| 32 | pub type Uart = embassy_stm32::peripherals::USART1; | ||
| 33 | pub type TxDma = embassy_stm32::peripherals::DMA1_CH1; | ||
| 34 | pub type RxDma = embassy_stm32::peripherals::DMA1_CH2; | ||
| 35 | } | ||
| 36 | #[cfg(feature = "stm32f429zi")] | ||
| 37 | mod board { | ||
| 38 | pub type Uart = embassy_stm32::peripherals::USART6; | ||
| 39 | pub type TxDma = embassy_stm32::peripherals::DMA2_CH6; | ||
| 40 | pub type RxDma = embassy_stm32::peripherals::DMA2_CH1; | ||
| 41 | } | ||
| 42 | #[cfg(feature = "stm32wb55rg")] | ||
| 43 | mod board { | ||
| 44 | pub type Uart = embassy_stm32::peripherals::LPUART1; | ||
| 45 | pub type TxDma = embassy_stm32::peripherals::DMA1_CH1; | ||
| 46 | pub type RxDma = embassy_stm32::peripherals::DMA1_CH2; | ||
| 47 | } | ||
| 48 | #[cfg(feature = "stm32h755zi")] | ||
| 49 | mod board { | ||
| 50 | pub type Uart = embassy_stm32::peripherals::USART1; | ||
| 51 | pub type TxDma = embassy_stm32::peripherals::DMA1_CH0; | ||
| 52 | pub type RxDma = embassy_stm32::peripherals::DMA1_CH1; | ||
| 53 | } | ||
| 54 | #[cfg(feature = "stm32u585ai")] | ||
| 55 | mod board { | ||
| 56 | pub type Uart = embassy_stm32::peripherals::USART3; | ||
| 57 | pub type TxDma = embassy_stm32::peripherals::GPDMA1_CH0; | ||
| 58 | pub type RxDma = embassy_stm32::peripherals::GPDMA1_CH1; | ||
| 59 | } | ||
| 60 | #[cfg(feature = "stm32c031c6")] | ||
| 61 | mod board { | ||
| 62 | pub type Uart = embassy_stm32::peripherals::USART1; | ||
| 63 | pub type TxDma = embassy_stm32::peripherals::DMA1_CH1; | ||
| 64 | pub type RxDma = embassy_stm32::peripherals::DMA1_CH2; | ||
| 65 | } | ||
| 66 | |||
| 67 | const DMA_BUF_SIZE: usize = 256; | ||
| 68 | |||
| 69 | #[embassy_executor::main] | ||
| 70 | async fn main(spawner: Spawner) { | ||
| 71 | let p = embassy_stm32::init(config()); | ||
| 72 | info!("Hello World!"); | ||
| 73 | |||
| 74 | // Arduino pins D0 and D1 | ||
| 75 | // They're connected together with a 1K resistor. | ||
| 76 | #[cfg(feature = "stm32f103c8")] | ||
| 77 | let (tx, rx, usart, irq, tx_dma, rx_dma) = ( | ||
| 78 | p.PA9, | ||
| 79 | p.PA10, | ||
| 80 | p.USART1, | ||
| 81 | interrupt::take!(USART1), | ||
| 82 | p.DMA1_CH4, | ||
| 83 | p.DMA1_CH5, | ||
| 84 | ); | ||
| 85 | #[cfg(feature = "stm32g491re")] | ||
| 86 | let (tx, rx, usart, irq, tx_dma, rx_dma) = | ||
| 87 | (p.PC4, p.PC5, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2); | ||
| 88 | #[cfg(feature = "stm32g071rb")] | ||
| 89 | let (tx, rx, usart, irq, tx_dma, rx_dma) = | ||
| 90 | (p.PC4, p.PC5, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2); | ||
| 91 | #[cfg(feature = "stm32f429zi")] | ||
| 92 | let (tx, rx, usart, irq, tx_dma, rx_dma) = ( | ||
| 93 | p.PG14, | ||
| 94 | p.PG9, | ||
| 95 | p.USART6, | ||
| 96 | interrupt::take!(USART6), | ||
| 97 | p.DMA2_CH6, | ||
| 98 | p.DMA2_CH1, | ||
| 99 | ); | ||
| 100 | #[cfg(feature = "stm32wb55rg")] | ||
| 101 | let (tx, rx, usart, irq, tx_dma, rx_dma) = ( | ||
| 102 | p.PA2, | ||
| 103 | p.PA3, | ||
| 104 | p.LPUART1, | ||
| 105 | interrupt::take!(LPUART1), | ||
| 106 | p.DMA1_CH1, | ||
| 107 | p.DMA1_CH2, | ||
| 108 | ); | ||
| 109 | #[cfg(feature = "stm32h755zi")] | ||
| 110 | let (tx, rx, usart, irq, tx_dma, rx_dma) = | ||
| 111 | (p.PB6, p.PB7, p.USART1, interrupt::take!(USART1), p.DMA1_CH0, p.DMA1_CH1); | ||
| 112 | #[cfg(feature = "stm32u585ai")] | ||
| 113 | let (tx, rx, usart, irq, tx_dma, rx_dma) = ( | ||
| 114 | p.PD8, | ||
| 115 | p.PD9, | ||
| 116 | p.USART3, | ||
| 117 | interrupt::take!(USART3), | ||
| 118 | p.GPDMA1_CH0, | ||
| 119 | p.GPDMA1_CH1, | ||
| 120 | ); | ||
| 121 | #[cfg(feature = "stm32c031c6")] | ||
| 122 | let (tx, rx, usart, irq, tx_dma, rx_dma) = | ||
| 123 | (p.PB6, p.PB7, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2); | ||
| 124 | |||
 | 125 | // To run this test, use the saturate_serial test utility to saturate the serial port | ||
| 126 | |||
| 127 | let mut config = Config::default(); | ||
 | 128 | // this is the fastest we can go without tuning RCC | ||
 | 129 | // some chips have default pclk = 8 MHz, and uart can run at max pclk/16 | ||
| 130 | config.baudrate = 500_000; | ||
| 131 | config.data_bits = DataBits::DataBits8; | ||
| 132 | config.stop_bits = StopBits::STOP1; | ||
| 133 | config.parity = Parity::ParityNone; | ||
| 134 | |||
| 135 | let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config); | ||
| 136 | let (tx, rx) = usart.split(); | ||
| 137 | static mut DMA_BUF: [u8; DMA_BUF_SIZE] = [0; DMA_BUF_SIZE]; | ||
| 138 | let dma_buf = unsafe { DMA_BUF.as_mut() }; | ||
| 139 | let rx = rx.into_ring_buffered(dma_buf); | ||
| 140 | |||
| 141 | info!("Spawning tasks"); | ||
| 142 | spawner.spawn(transmit_task(tx)).unwrap(); | ||
| 143 | spawner.spawn(receive_task(rx)).unwrap(); | ||
| 144 | } | ||
| 145 | |||
| 146 | #[embassy_executor::task] | ||
| 147 | async fn transmit_task(mut tx: UartTx<'static, board::Uart, board::TxDma>) { | ||
| 148 | let mut rng = ChaCha8Rng::seed_from_u64(1337); | ||
| 149 | |||
| 150 | info!("Starting random transmissions into void..."); | ||
| 151 | |||
| 152 | let mut i: u8 = 0; | ||
| 153 | loop { | ||
| 154 | let mut buf = [0; 32]; | ||
| 155 | let len = 1 + (rng.next_u32() as usize % buf.len()); | ||
| 156 | for b in &mut buf[..len] { | ||
| 157 | *b = i; | ||
| 158 | i = i.wrapping_add(1); | ||
| 159 | } | ||
| 160 | |||
| 161 | tx.write(&buf[..len]).await.unwrap(); | ||
| 162 | Timer::after(Duration::from_micros((rng.next_u32() % 1000) as _)).await; | ||
| 163 | } | ||
| 164 | } | ||
| 165 | |||
| 166 | #[embassy_executor::task] | ||
| 167 | async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::RxDma>) { | ||
| 168 | info!("Ready to receive..."); | ||
| 169 | |||
| 170 | let mut rng = ChaCha8Rng::seed_from_u64(1337); | ||
| 171 | |||
| 172 | let mut i = 0; | ||
| 173 | let mut expected = 0; | ||
| 174 | loop { | ||
| 175 | let mut buf = [0; 100]; | ||
| 176 | let max_len = 1 + (rng.next_u32() as usize % buf.len()); | ||
| 177 | let received = match rx.read(&mut buf[..max_len]).await { | ||
| 178 | Ok(r) => r, | ||
| 179 | Err(e) => { | ||
| 180 | panic!("Test fail! read error: {:?}", e); | ||
| 181 | } | ||
| 182 | }; | ||
| 183 | |||
| 184 | for byte in &buf[..received] { | ||
| 185 | assert_eq!(*byte, expected); | ||
| 186 | expected = expected.wrapping_add(1); | ||
| 187 | } | ||
| 188 | |||
| 189 | if received < max_len { | ||
| 190 | Timer::after(Duration::from_micros((rng.next_u32() % 1000) as _)).await; | ||
| 191 | } | ||
| 192 | |||
| 193 | i += received; | ||
| 194 | |||
| 195 | if i > 100000 { | ||
| 196 | info!("Test OK!"); | ||
| 197 | cortex_m::asm::bkpt(); | ||
| 198 | } | ||
| 199 | } | ||
| 200 | } | ||
diff --git a/tests/stm32/src/example_common.rs b/tests/stm32/src/example_common.rs index c47ed75c4..a4f8668c7 100644 --- a/tests/stm32/src/example_common.rs +++ b/tests/stm32/src/example_common.rs | |||
| @@ -1,22 +1,11 @@ | |||
| 1 | #![macro_use] | 1 | #![macro_use] |
| 2 | 2 | ||
| 3 | use core::sync::atomic::{AtomicUsize, Ordering}; | ||
| 4 | |||
| 5 | pub use defmt::*; | 3 | pub use defmt::*; |
| 6 | #[allow(unused)] | 4 | #[allow(unused)] |
| 7 | use embassy_stm32::time::Hertz; | 5 | use embassy_stm32::time::Hertz; |
| 8 | use embassy_stm32::Config; | 6 | use embassy_stm32::Config; |
| 9 | use {defmt_rtt as _, panic_probe as _}; | 7 | use {defmt_rtt as _, panic_probe as _}; |
| 10 | 8 | ||
| 11 | defmt::timestamp! {"{=u64}", { | ||
| 12 | static COUNT: AtomicUsize = AtomicUsize::new(0); | ||
| 13 | // NOTE(no-CAS) `timestamps` runs with interrupts disabled | ||
| 14 | let n = COUNT.load(Ordering::Relaxed); | ||
| 15 | COUNT.store(n + 1, Ordering::Relaxed); | ||
| 16 | n as u64 | ||
| 17 | } | ||
| 18 | } | ||
| 19 | |||
| 20 | pub fn config() -> Config { | 9 | pub fn config() -> Config { |
| 21 | #[allow(unused_mut)] | 10 | #[allow(unused_mut)] |
| 22 | let mut config = Config::default(); | 11 | let mut config = Config::default(); |
diff --git a/tests/utils/Cargo.toml b/tests/utils/Cargo.toml new file mode 100644 index 000000000..7d66fd586 --- /dev/null +++ b/tests/utils/Cargo.toml | |||
| @@ -0,0 +1,10 @@ | |||
| 1 | [package] | ||
| 2 | name = "test-utils" | ||
| 3 | version = "0.1.0" | ||
| 4 | edition = "2021" | ||
| 5 | |||
| 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html | ||
| 7 | |||
| 8 | [dependencies] | ||
| 9 | rand = "0.8" | ||
| 10 | serial = "0.4" | ||
diff --git a/tests/utils/src/bin/saturate_serial.rs b/tests/utils/src/bin/saturate_serial.rs new file mode 100644 index 000000000..18ca12fb7 --- /dev/null +++ b/tests/utils/src/bin/saturate_serial.rs | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | use std::path::Path; | ||
| 2 | use std::time::Duration; | ||
| 3 | use std::{env, io, process, thread}; | ||
| 4 | |||
| 5 | use rand::random; | ||
| 6 | use serial::SerialPort; | ||
| 7 | |||
| 8 | pub fn main() { | ||
| 9 | if let Some(port_name) = env::args().nth(1) { | ||
| 10 | let idles = env::args().position(|x| x == "--idles").is_some(); | ||
| 11 | |||
| 12 | println!("Saturating port {:?} with 115200 8N1", port_name); | ||
| 13 | println!("Idles: {}", idles); | ||
| 14 | println!("Process ID: {}", process::id()); | ||
| 15 | let mut port = serial::open(&port_name).unwrap(); | ||
| 16 | if saturate(&mut port, idles).is_err() { | ||
| 17 | eprintln!("Unable to saturate port"); | ||
| 18 | } | ||
| 19 | } else { | ||
| 20 | let path = env::args().next().unwrap(); | ||
| 21 | let basepath = Path::new(&path).with_extension(""); | ||
| 22 | let basename = basepath.file_name().unwrap(); | ||
| 23 | eprintln!("USAGE: {} <port-name>", basename.to_string_lossy()); | ||
| 24 | } | ||
| 25 | } | ||
| 26 | |||
| 27 | fn saturate<T: SerialPort>(port: &mut T, idles: bool) -> io::Result<()> { | ||
| 28 | port.reconfigure(&|settings| { | ||
| 29 | settings.set_baud_rate(serial::Baud115200)?; | ||
| 30 | settings.set_char_size(serial::Bits8); | ||
| 31 | settings.set_parity(serial::ParityNone); | ||
| 32 | settings.set_stop_bits(serial::Stop1); | ||
| 33 | Ok(()) | ||
| 34 | })?; | ||
| 35 | |||
| 36 | let mut written = 0; | ||
| 37 | loop { | ||
| 38 | let len = random::<usize>() % 0x1000; | ||
| 39 | let buf: Vec<u8> = (written..written + len).map(|x| x as u8).collect(); | ||
| 40 | |||
| 41 | port.write_all(&buf)?; | ||
| 42 | |||
| 43 | if idles { | ||
| 44 | let micros = (random::<usize>() % 1000) as u64; | ||
| 45 | println!("Sleeping {}us", micros); | ||
| 46 | port.flush().unwrap(); | ||
| 47 | thread::sleep(Duration::from_micros(micros)); | ||
| 48 | } | ||
| 49 | |||
| 50 | written += len; | ||
| 51 | println!("Written: {}", written); | ||
| 52 | } | ||
| 53 | } | ||
