diff options
| -rwxr-xr-x | ci.sh | 5 | ||||
| -rw-r--r-- | embassy-stm32/Cargo.toml | 2 | ||||
| -rw-r--r-- | embassy-stm32/src/adc/ringbuffered_v2.rs | 9 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/dma_bdma.rs | 41 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer.rs | 668 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer/mod.rs | 305 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer/tests/mod.rs | 90 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs | 50 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs | 123 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs | 122 | ||||
| -rw-r--r-- | embassy-stm32/src/lib.rs | 3 | ||||
| -rw-r--r-- | embassy-stm32/src/sai/mod.rs | 10 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/ringbuffered.rs | 1 |
13 files changed, 732 insertions, 697 deletions
| @@ -305,11 +305,6 @@ rm out/tests/stm32f207zg/eth | |||
| 305 | # doesn't work, gives "noise error", no idea why. usart_dma does pass. | 305 | # doesn't work, gives "noise error", no idea why. usart_dma does pass. |
| 306 | rm out/tests/stm32u5a5zj/usart | 306 | rm out/tests/stm32u5a5zj/usart |
| 307 | 307 | ||
| 308 | # flaky, probably due to bad ringbuffered dma code. | ||
| 309 | rm out/tests/stm32l152re/usart_rx_ringbuffered | ||
| 310 | rm out/tests/stm32f207zg/usart_rx_ringbuffered | ||
| 311 | rm out/tests/stm32wl55jc/usart_rx_ringbuffered | ||
| 312 | |||
| 313 | if [[ -z "${TELEPROBE_TOKEN-}" ]]; then | 308 | if [[ -z "${TELEPROBE_TOKEN-}" ]]; then |
| 314 | echo No teleprobe token found, skipping running HIL tests | 309 | echo No teleprobe token found, skipping running HIL tests |
| 315 | exit | 310 | exit |
diff --git a/embassy-stm32/Cargo.toml b/embassy-stm32/Cargo.toml index 8fc8da006..53ec1b27f 100644 --- a/embassy-stm32/Cargo.toml +++ b/embassy-stm32/Cargo.toml | |||
| @@ -93,6 +93,8 @@ aligned = "0.4.1" | |||
| 93 | 93 | ||
| 94 | [dev-dependencies] | 94 | [dev-dependencies] |
| 95 | critical-section = { version = "1.1", features = ["std"] } | 95 | critical-section = { version = "1.1", features = ["std"] } |
| 96 | proptest = "1.5.0" | ||
| 97 | proptest-state-machine = "0.3.0" | ||
| 96 | 98 | ||
| 97 | [build-dependencies] | 99 | [build-dependencies] |
| 98 | proc-macro2 = "1.0.36" | 100 | proc-macro2 = "1.0.36" |
diff --git a/embassy-stm32/src/adc/ringbuffered_v2.rs b/embassy-stm32/src/adc/ringbuffered_v2.rs index 3b064044e..3f0c1a57a 100644 --- a/embassy-stm32/src/adc/ringbuffered_v2.rs +++ b/embassy-stm32/src/adc/ringbuffered_v2.rs | |||
| @@ -6,11 +6,13 @@ use embassy_hal_internal::{into_ref, Peripheral}; | |||
| 6 | use stm32_metapac::adc::vals::SampleTime; | 6 | use stm32_metapac::adc::vals::SampleTime; |
| 7 | 7 | ||
| 8 | use crate::adc::{Adc, AdcChannel, Instance, RxDma}; | 8 | use crate::adc::{Adc, AdcChannel, Instance, RxDma}; |
| 9 | use crate::dma::ringbuffer::OverrunError; | ||
| 10 | use crate::dma::{Priority, ReadableRingBuffer, TransferOptions}; | 9 | use crate::dma::{Priority, ReadableRingBuffer, TransferOptions}; |
| 11 | use crate::pac::adc::vals; | 10 | use crate::pac::adc::vals; |
| 12 | use crate::rcc; | 11 | use crate::rcc; |
| 13 | 12 | ||
| 13 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 14 | pub struct OverrunError; | ||
| 15 | |||
| 14 | fn clear_interrupt_flags(r: crate::pac::adc::Adc) { | 16 | fn clear_interrupt_flags(r: crate::pac::adc::Adc) { |
| 15 | r.sr().modify(|regs| { | 17 | r.sr().modify(|regs| { |
| 16 | regs.set_eoc(false); | 18 | regs.set_eoc(false); |
| @@ -226,9 +228,8 @@ impl<'d, T: Instance> RingBufferedAdc<'d, T> { | |||
| 226 | 228 | ||
| 227 | /// Turns on ADC if it is not already turned on and starts continuous DMA transfer. | 229 | /// Turns on ADC if it is not already turned on and starts continuous DMA transfer. |
| 228 | pub fn start(&mut self) -> Result<(), OverrunError> { | 230 | pub fn start(&mut self) -> Result<(), OverrunError> { |
| 229 | self.ring_buf.clear(); | ||
| 230 | |||
| 231 | self.setup_adc(); | 231 | self.setup_adc(); |
| 232 | self.ring_buf.clear(); | ||
| 232 | 233 | ||
| 233 | Ok(()) | 234 | Ok(()) |
| 234 | } | 235 | } |
| @@ -245,7 +246,7 @@ impl<'d, T: Instance> RingBufferedAdc<'d, T> { | |||
| 245 | /// [`start`]: #method.start | 246 | /// [`start`]: #method.start |
| 246 | pub fn teardown_adc(&mut self) { | 247 | pub fn teardown_adc(&mut self) { |
| 247 | // Stop the DMA transfer | 248 | // Stop the DMA transfer |
| 248 | self.ring_buf.request_stop(); | 249 | self.ring_buf.request_pause(); |
| 249 | 250 | ||
| 250 | let r = T::regs(); | 251 | let r = T::regs(); |
| 251 | 252 | ||
diff --git a/embassy-stm32/src/dma/dma_bdma.rs b/embassy-stm32/src/dma/dma_bdma.rs index d10b5554f..cdc603e2c 100644 --- a/embassy-stm32/src/dma/dma_bdma.rs +++ b/embassy-stm32/src/dma/dma_bdma.rs | |||
| @@ -6,7 +6,7 @@ use core::task::{Context, Poll, Waker}; | |||
| 6 | use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; | 6 | use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; |
| 7 | use embassy_sync::waitqueue::AtomicWaker; | 7 | use embassy_sync::waitqueue::AtomicWaker; |
| 8 | 8 | ||
| 9 | use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer}; | 9 | use super::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}; |
| 10 | use super::word::{Word, WordSize}; | 10 | use super::word::{Word, WordSize}; |
| 11 | use super::{AnyChannel, Channel, Dir, Request, STATE}; | 11 | use super::{AnyChannel, Channel, Dir, Request, STATE}; |
| 12 | use crate::interrupt::typelevel::Interrupt; | 12 | use crate::interrupt::typelevel::Interrupt; |
| @@ -299,7 +299,6 @@ impl AnyChannel { | |||
| 299 | } else { | 299 | } else { |
| 300 | return; | 300 | return; |
| 301 | } | 301 | } |
| 302 | |||
| 303 | state.waker.wake(); | 302 | state.waker.wake(); |
| 304 | } | 303 | } |
| 305 | #[cfg(bdma)] | 304 | #[cfg(bdma)] |
| @@ -763,10 +762,6 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> { | |||
| 763 | self.0.get_remaining_transfers() as _ | 762 | self.0.get_remaining_transfers() as _ |
| 764 | } | 763 | } |
| 765 | 764 | ||
| 766 | fn get_complete_count(&self) -> usize { | ||
| 767 | STATE[self.0.id as usize].complete_count.load(Ordering::Acquire) | ||
| 768 | } | ||
| 769 | |||
| 770 | fn reset_complete_count(&mut self) -> usize { | 765 | fn reset_complete_count(&mut self) -> usize { |
| 771 | let state = &STATE[self.0.id as usize]; | 766 | let state = &STATE[self.0.id as usize]; |
| 772 | #[cfg(not(armv6m))] | 767 | #[cfg(not(armv6m))] |
| @@ -832,27 +827,28 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { | |||
| 832 | /// | 827 | /// |
| 833 | /// You must call this after creating it for it to work. | 828 | /// You must call this after creating it for it to work. |
| 834 | pub fn start(&mut self) { | 829 | pub fn start(&mut self) { |
| 835 | self.channel.start() | 830 | self.channel.start(); |
| 831 | self.clear(); | ||
| 836 | } | 832 | } |
| 837 | 833 | ||
| 838 | /// Clear all data in the ring buffer. | 834 | /// Clear all data in the ring buffer. |
| 839 | pub fn clear(&mut self) { | 835 | pub fn clear(&mut self) { |
| 840 | self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow())); | 836 | self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); |
| 841 | } | 837 | } |
| 842 | 838 | ||
| 843 | /// Read elements from the ring buffer | 839 | /// Read elements from the ring buffer |
| 844 | /// Return a tuple of the length read and the length remaining in the buffer | 840 | /// Return a tuple of the length read and the length remaining in the buffer |
| 845 | /// If not all of the elements were read, then there will be some elements in the buffer remaining | 841 | /// If not all of the elements were read, then there will be some elements in the buffer remaining |
| 846 | /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read | 842 | /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read |
| 847 | /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | 843 | /// Error is returned if the portion to be read was overwritten by the DMA controller. |
| 848 | pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> { | 844 | pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> { |
| 849 | self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf) | 845 | self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf) |
| 850 | } | 846 | } |
| 851 | 847 | ||
| 852 | /// Read an exact number of elements from the ringbuffer. | 848 | /// Read an exact number of elements from the ringbuffer. |
| 853 | /// | 849 | /// |
| 854 | /// Returns the remaining number of elements available for immediate reading. | 850 | /// Returns the remaining number of elements available for immediate reading. |
| 855 | /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | 851 | /// Error is returned if the portion to be read was overwritten by the DMA controller. |
| 856 | /// | 852 | /// |
| 857 | /// Async/Wake Behavior: | 853 | /// Async/Wake Behavior: |
| 858 | /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, | 854 | /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, |
| @@ -860,12 +856,17 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { | |||
| 860 | /// ring buffer was created with a buffer of size 'N': | 856 | /// ring buffer was created with a buffer of size 'N': |
| 861 | /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. | 857 | /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. |
| 862 | /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. | 858 | /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. |
| 863 | pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> { | 859 | pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, Error> { |
| 864 | self.ringbuf | 860 | self.ringbuf |
| 865 | .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) | 861 | .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) |
| 866 | .await | 862 | .await |
| 867 | } | 863 | } |
| 868 | 864 | ||
| 865 | /// The current length of the ringbuffer | ||
| 866 | pub fn len(&mut self) -> Result<usize, Error> { | ||
| 867 | Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) | ||
| 868 | } | ||
| 869 | |||
| 869 | /// The capacity of the ringbuffer | 870 | /// The capacity of the ringbuffer |
| 870 | pub const fn capacity(&self) -> usize { | 871 | pub const fn capacity(&self) -> usize { |
| 871 | self.ringbuf.cap() | 872 | self.ringbuf.cap() |
| @@ -979,34 +980,40 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { | |||
| 979 | /// | 980 | /// |
| 980 | /// You must call this after creating it for it to work. | 981 | /// You must call this after creating it for it to work. |
| 981 | pub fn start(&mut self) { | 982 | pub fn start(&mut self) { |
| 982 | self.channel.start() | 983 | self.channel.start(); |
| 984 | self.clear(); | ||
| 983 | } | 985 | } |
| 984 | 986 | ||
| 985 | /// Clear all data in the ring buffer. | 987 | /// Clear all data in the ring buffer. |
| 986 | pub fn clear(&mut self) { | 988 | pub fn clear(&mut self) { |
| 987 | self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow())); | 989 | self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); |
| 988 | } | 990 | } |
| 989 | 991 | ||
| 990 | /// Write elements directly to the raw buffer. | 992 | /// Write elements directly to the raw buffer. |
| 991 | /// This can be used to fill the buffer before starting the DMA transfer. | 993 | /// This can be used to fill the buffer before starting the DMA transfer. |
| 992 | #[allow(dead_code)] | 994 | #[allow(dead_code)] |
| 993 | pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { | 995 | pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { |
| 994 | self.ringbuf.write_immediate(buf) | 996 | self.ringbuf.write_immediate(buf) |
| 995 | } | 997 | } |
| 996 | 998 | ||
| 997 | /// Write elements from the ring buffer | 999 | /// Write elements from the ring buffer |
| 998 | /// Return a tuple of the length written and the length remaining in the buffer | 1000 | /// Return a tuple of the length written and the length remaining in the buffer |
| 999 | pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { | 1001 | pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { |
| 1000 | self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf) | 1002 | self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf) |
| 1001 | } | 1003 | } |
| 1002 | 1004 | ||
| 1003 | /// Write an exact number of elements to the ringbuffer. | 1005 | /// Write an exact number of elements to the ringbuffer. |
| 1004 | pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> { | 1006 | pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> { |
| 1005 | self.ringbuf | 1007 | self.ringbuf |
| 1006 | .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) | 1008 | .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) |
| 1007 | .await | 1009 | .await |
| 1008 | } | 1010 | } |
| 1009 | 1011 | ||
| 1012 | /// The current length of the ringbuffer | ||
| 1013 | pub fn len(&mut self) -> Result<usize, Error> { | ||
| 1014 | Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) | ||
| 1015 | } | ||
| 1016 | |||
| 1010 | /// The capacity of the ringbuffer | 1017 | /// The capacity of the ringbuffer |
| 1011 | pub const fn capacity(&self) -> usize { | 1018 | pub const fn capacity(&self) -> usize { |
| 1012 | self.ringbuf.cap() | 1019 | self.ringbuf.cap() |
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs deleted file mode 100644 index 23f1d67d5..000000000 --- a/embassy-stm32/src/dma/ringbuffer.rs +++ /dev/null | |||
| @@ -1,668 +0,0 @@ | |||
| 1 | #![cfg_attr(gpdma, allow(unused))] | ||
| 2 | |||
| 3 | use core::future::poll_fn; | ||
| 4 | use core::ops::Range; | ||
| 5 | use core::sync::atomic::{compiler_fence, Ordering}; | ||
| 6 | use core::task::{Poll, Waker}; | ||
| 7 | |||
| 8 | use super::word::Word; | ||
| 9 | |||
| 10 | /// A "read-only" ring-buffer to be used together with the DMA controller which | ||
| 11 | /// writes in a circular way, "uncontrolled" to the buffer. | ||
| 12 | /// | ||
| 13 | /// A snapshot of the ring buffer state can be attained by setting the `ndtr` field | ||
| 14 | /// to the current register value. `ndtr` describes the current position of the DMA | ||
| 15 | /// write. | ||
| 16 | /// | ||
| 17 | /// # Buffer layout | ||
| 18 | /// | ||
| 19 | /// ```text | ||
| 20 | /// Without wraparound: With wraparound: | ||
| 21 | /// | ||
| 22 | /// + buf +--- NDTR ---+ + buf +---------- NDTR ----------+ | ||
| 23 | /// | | | | | | | ||
| 24 | /// v v v v v v | ||
| 25 | /// +-----------------------------------------+ +-----------------------------------------+ | ||
| 26 | /// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo| |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX| | ||
| 27 | /// +-----------------------------------------+ +-----------------------------------------+ | ||
| 28 | /// ^ ^ ^ ^ ^ ^ | ||
| 29 | /// | | | | | | | ||
| 30 | /// +- start --+ | +- end ------+ | | ||
| 31 | /// | | | | | ||
| 32 | /// +- end --------------------+ +- start ----------------+ | ||
| 33 | /// ``` | ||
| 34 | pub struct ReadableDmaRingBuffer<'a, W: Word> { | ||
| 35 | pub(crate) dma_buf: &'a mut [W], | ||
| 36 | start: usize, | ||
| 37 | } | ||
| 38 | |||
| 39 | #[derive(Debug, PartialEq)] | ||
| 40 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 41 | pub struct OverrunError; | ||
| 42 | |||
| 43 | pub trait DmaCtrl { | ||
| 44 | /// Get the NDTR register value, i.e. the space left in the underlying | ||
| 45 | /// buffer until the dma writer wraps. | ||
| 46 | fn get_remaining_transfers(&self) -> usize; | ||
| 47 | |||
| 48 | /// Get the transfer completed counter. | ||
| 49 | /// This counter is incremented by the dma controller when NDTR is reloaded, | ||
| 50 | /// i.e. when the writing wraps. | ||
| 51 | fn get_complete_count(&self) -> usize; | ||
| 52 | |||
| 53 | /// Reset the transfer completed counter to 0 and return the value just prior to the reset. | ||
| 54 | fn reset_complete_count(&mut self) -> usize; | ||
| 55 | |||
| 56 | /// Set the waker for a running poll_fn | ||
| 57 | fn set_waker(&mut self, waker: &Waker); | ||
| 58 | } | ||
| 59 | |||
| 60 | impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> { | ||
| 61 | pub fn new(dma_buf: &'a mut [W]) -> Self { | ||
| 62 | Self { dma_buf, start: 0 } | ||
| 63 | } | ||
| 64 | |||
| 65 | /// Reset the ring buffer to its initial state | ||
| 66 | pub fn clear(&mut self, dma: &mut impl DmaCtrl) { | ||
| 67 | self.start = 0; | ||
| 68 | dma.reset_complete_count(); | ||
| 69 | } | ||
| 70 | |||
| 71 | /// The capacity of the ringbuffer | ||
| 72 | pub const fn cap(&self) -> usize { | ||
| 73 | self.dma_buf.len() | ||
| 74 | } | ||
| 75 | |||
| 76 | /// The current position of the ringbuffer | ||
| 77 | fn pos(&self, dma: &mut impl DmaCtrl) -> usize { | ||
| 78 | self.cap() - dma.get_remaining_transfers() | ||
| 79 | } | ||
| 80 | |||
| 81 | /// Read an exact number of elements from the ringbuffer. | ||
| 82 | /// | ||
| 83 | /// Returns the remaining number of elements available for immediate reading. | ||
| 84 | /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | ||
| 85 | /// | ||
| 86 | /// Async/Wake Behavior: | ||
| 87 | /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, | ||
| 88 | /// and when it wraps around. This means that when called with a buffer of length 'M', when this | ||
| 89 | /// ring buffer was created with a buffer of size 'N': | ||
| 90 | /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. | ||
| 91 | /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. | ||
| 92 | pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, OverrunError> { | ||
| 93 | let mut read_data = 0; | ||
| 94 | let buffer_len = buffer.len(); | ||
| 95 | |||
| 96 | poll_fn(|cx| { | ||
| 97 | dma.set_waker(cx.waker()); | ||
| 98 | |||
| 99 | compiler_fence(Ordering::SeqCst); | ||
| 100 | |||
| 101 | match self.read(dma, &mut buffer[read_data..buffer_len]) { | ||
| 102 | Ok((len, remaining)) => { | ||
| 103 | read_data += len; | ||
| 104 | if read_data == buffer_len { | ||
| 105 | Poll::Ready(Ok(remaining)) | ||
| 106 | } else { | ||
| 107 | Poll::Pending | ||
| 108 | } | ||
| 109 | } | ||
| 110 | Err(e) => Poll::Ready(Err(e)), | ||
| 111 | } | ||
| 112 | }) | ||
| 113 | .await | ||
| 114 | } | ||
| 115 | |||
| 116 | /// Read elements from the ring buffer | ||
| 117 | /// Return a tuple of the length read and the length remaining in the buffer | ||
| 118 | /// If not all of the elements were read, then there will be some elements in the buffer remaining | ||
| 119 | /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read | ||
| 120 | /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | ||
| 121 | pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> { | ||
| 122 | /* | ||
| 123 | This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check | ||
| 124 | after we've done our work to see we have. This is because on stm32, an interrupt is not guaranteed | ||
| 125 | to fire in the same clock cycle that a register is read, so checking get_complete_count early does | ||
| 126 | not yield relevant information. | ||
| 127 | |||
| 128 | Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full | ||
| 129 | buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error | ||
| 130 | conditions. | ||
| 131 | |||
| 132 | After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that | ||
| 133 | the dma has not overrun within the data we could have copied. We check the data we could have copied | ||
| 134 | rather than the data we actually copied because it costs nothing and confirms an error condition | ||
| 135 | earlier. | ||
| 136 | */ | ||
| 137 | let end = self.pos(dma); | ||
| 138 | if self.start == end && dma.get_complete_count() == 0 { | ||
| 139 | // No elements are available in the buffer | ||
| 140 | Ok((0, self.cap())) | ||
| 141 | } else if self.start < end { | ||
| 142 | // The available, unread portion in the ring buffer DOES NOT wrap | ||
| 143 | // Copy out the elements from the dma buffer | ||
| 144 | let len = self.copy_to(buf, self.start..end); | ||
| 145 | |||
| 146 | compiler_fence(Ordering::SeqCst); | ||
| 147 | |||
| 148 | /* | ||
| 149 | first, check if the dma has wrapped at all if it's after end | ||
| 150 | or more than once if it's before start | ||
| 151 | |||
| 152 | this is in a critical section to try to reduce mushy behavior. | ||
| 153 | it's not ideal but it's the best we can do | ||
| 154 | |||
| 155 | then, get the current position of of the dma write and check | ||
| 156 | if it's inside data we could have copied | ||
| 157 | */ | ||
| 158 | let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count())); | ||
| 159 | if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 { | ||
| 160 | Err(OverrunError) | ||
| 161 | } else { | ||
| 162 | self.start = (self.start + len) % self.cap(); | ||
| 163 | |||
| 164 | Ok((len, self.cap() - self.start)) | ||
| 165 | } | ||
| 166 | } else if self.start + buf.len() < self.cap() { | ||
| 167 | // The available, unread portion in the ring buffer DOES wrap | ||
| 168 | // The DMA writer has wrapped since we last read and is currently | ||
| 169 | // writing (or the next byte added will be) in the beginning of the ring buffer. | ||
| 170 | |||
| 171 | // The provided read buffer is not large enough to include all elements from the tail of the dma buffer. | ||
| 172 | |||
| 173 | // Copy out from the dma buffer | ||
| 174 | let len = self.copy_to(buf, self.start..self.cap()); | ||
| 175 | |||
| 176 | compiler_fence(Ordering::SeqCst); | ||
| 177 | |||
| 178 | /* | ||
| 179 | first, check if the dma has wrapped around more than once | ||
| 180 | |||
| 181 | then, get the current position of of the dma write and check | ||
| 182 | if it's inside data we could have copied | ||
| 183 | */ | ||
| 184 | let pos = self.pos(dma); | ||
| 185 | if pos > self.start || pos < end || dma.get_complete_count() > 1 { | ||
| 186 | Err(OverrunError) | ||
| 187 | } else { | ||
| 188 | self.start = (self.start + len) % self.cap(); | ||
| 189 | |||
| 190 | Ok((len, self.start + end)) | ||
| 191 | } | ||
| 192 | } else { | ||
| 193 | // The available, unread portion in the ring buffer DOES wrap | ||
| 194 | // The DMA writer has wrapped since we last read and is currently | ||
| 195 | // writing (or the next byte added will be) in the beginning of the ring buffer. | ||
| 196 | |||
| 197 | // The provided read buffer is large enough to include all elements from the tail of the dma buffer, | ||
| 198 | // so the next read will not have any unread tail elements in the ring buffer. | ||
| 199 | |||
| 200 | // Copy out from the dma buffer | ||
| 201 | let tail = self.copy_to(buf, self.start..self.cap()); | ||
| 202 | let head = self.copy_to(&mut buf[tail..], 0..end); | ||
| 203 | |||
| 204 | compiler_fence(Ordering::SeqCst); | ||
| 205 | |||
| 206 | /* | ||
| 207 | first, check if the dma has wrapped around more than once | ||
| 208 | |||
| 209 | then, get the current position of of the dma write and check | ||
| 210 | if it's inside data we could have copied | ||
| 211 | */ | ||
| 212 | let pos = self.pos(dma); | ||
| 213 | if pos > self.start || pos < end || dma.reset_complete_count() > 1 { | ||
| 214 | Err(OverrunError) | ||
| 215 | } else { | ||
| 216 | self.start = head; | ||
| 217 | Ok((tail + head, self.cap() - self.start)) | ||
| 218 | } | ||
| 219 | } | ||
| 220 | } | ||
| 221 | /// Copy from the dma buffer at `data_range` into `buf` | ||
| 222 | fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize { | ||
| 223 | // Limit the number of elements that can be copied | ||
| 224 | let length = usize::min(data_range.len(), buf.len()); | ||
| 225 | |||
| 226 | // Copy from dma buffer into read buffer | ||
| 227 | // We need to do it like this instead of a simple copy_from_slice() because | ||
| 228 | // reading from a part of memory that may be simultaneously written to is unsafe | ||
| 229 | unsafe { | ||
| 230 | let dma_buf = self.dma_buf.as_ptr(); | ||
| 231 | |||
| 232 | for i in 0..length { | ||
| 233 | buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize)); | ||
| 234 | } | ||
| 235 | } | ||
| 236 | |||
| 237 | length | ||
| 238 | } | ||
| 239 | } | ||
| 240 | |||
| 241 | pub struct WritableDmaRingBuffer<'a, W: Word> { | ||
| 242 | pub(crate) dma_buf: &'a mut [W], | ||
| 243 | end: usize, | ||
| 244 | } | ||
| 245 | |||
| 246 | impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { | ||
| 247 | pub fn new(dma_buf: &'a mut [W]) -> Self { | ||
| 248 | Self { dma_buf, end: 0 } | ||
| 249 | } | ||
| 250 | |||
| 251 | /// Reset the ring buffer to its initial state | ||
| 252 | pub fn clear(&mut self, dma: &mut impl DmaCtrl) { | ||
| 253 | self.end = 0; | ||
| 254 | dma.reset_complete_count(); | ||
| 255 | } | ||
| 256 | |||
| 257 | /// The capacity of the ringbuffer | ||
| 258 | pub const fn cap(&self) -> usize { | ||
| 259 | self.dma_buf.len() | ||
| 260 | } | ||
| 261 | |||
| 262 | /// The current position of the ringbuffer | ||
| 263 | fn pos(&self, dma: &mut impl DmaCtrl) -> usize { | ||
| 264 | self.cap() - dma.get_remaining_transfers() | ||
| 265 | } | ||
| 266 | |||
| 267 | /// Write elements directly to the buffer. This must be done before the DMA is started | ||
| 268 | /// or after the buffer has been cleared using `clear()`. | ||
| 269 | pub fn write_immediate(&mut self, buffer: &[W]) -> Result<(usize, usize), OverrunError> { | ||
| 270 | if self.end != 0 { | ||
| 271 | return Err(OverrunError); | ||
| 272 | } | ||
| 273 | let written = self.copy_from(buffer, 0..self.cap()); | ||
| 274 | self.end = written % self.cap(); | ||
| 275 | Ok((written, self.cap() - written)) | ||
| 276 | } | ||
| 277 | |||
| 278 | /// Write an exact number of elements to the ringbuffer. | ||
| 279 | pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, OverrunError> { | ||
| 280 | let mut written_data = 0; | ||
| 281 | let buffer_len = buffer.len(); | ||
| 282 | |||
| 283 | poll_fn(|cx| { | ||
| 284 | dma.set_waker(cx.waker()); | ||
| 285 | |||
| 286 | compiler_fence(Ordering::SeqCst); | ||
| 287 | |||
| 288 | match self.write(dma, &buffer[written_data..buffer_len]) { | ||
| 289 | Ok((len, remaining)) => { | ||
| 290 | written_data += len; | ||
| 291 | if written_data == buffer_len { | ||
| 292 | Poll::Ready(Ok(remaining)) | ||
| 293 | } else { | ||
| 294 | Poll::Pending | ||
| 295 | } | ||
| 296 | } | ||
| 297 | Err(e) => Poll::Ready(Err(e)), | ||
| 298 | } | ||
| 299 | }) | ||
| 300 | .await | ||
| 301 | } | ||
| 302 | |||
| 303 | /// Write elements from the ring buffer | ||
| 304 | /// Return a tuple of the length written and the capacity remaining to be written in the buffer | ||
| 305 | pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> { | ||
| 306 | let start = self.pos(dma); | ||
| 307 | if start > self.end { | ||
| 308 | // The occupied portion in the ring buffer DOES wrap | ||
| 309 | let len = self.copy_from(buf, self.end..start); | ||
| 310 | |||
| 311 | compiler_fence(Ordering::SeqCst); | ||
| 312 | |||
| 313 | // Confirm that the DMA is not inside data we could have written | ||
| 314 | let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count())); | ||
| 315 | if (pos >= self.end && pos < start) || (complete_count > 0 && pos >= start) || complete_count > 1 { | ||
| 316 | Err(OverrunError) | ||
| 317 | } else { | ||
| 318 | self.end = (self.end + len) % self.cap(); | ||
| 319 | |||
| 320 | Ok((len, self.cap() - (start - self.end))) | ||
| 321 | } | ||
| 322 | } else if start == self.end && dma.get_complete_count() == 0 { | ||
| 323 | Ok((0, 0)) | ||
| 324 | } else if start <= self.end && self.end + buf.len() < self.cap() { | ||
| 325 | // The occupied portion in the ring buffer DOES NOT wrap | ||
| 326 | // and copying elements into the buffer WILL NOT cause it to | ||
| 327 | |||
| 328 | // Copy into the dma buffer | ||
| 329 | let len = self.copy_from(buf, self.end..self.cap()); | ||
| 330 | |||
| 331 | compiler_fence(Ordering::SeqCst); | ||
| 332 | |||
| 333 | // Confirm that the DMA is not inside data we could have written | ||
| 334 | let pos = self.pos(dma); | ||
| 335 | if pos > self.end || pos < start || dma.get_complete_count() > 1 { | ||
| 336 | Err(OverrunError) | ||
| 337 | } else { | ||
| 338 | self.end = (self.end + len) % self.cap(); | ||
| 339 | |||
| 340 | Ok((len, self.cap() - (self.end - start))) | ||
| 341 | } | ||
| 342 | } else { | ||
| 343 | // The occupied portion in the ring buffer DOES NOT wrap | ||
| 344 | // and copying elements into the buffer WILL cause it to | ||
| 345 | |||
| 346 | let tail = self.copy_from(buf, self.end..self.cap()); | ||
| 347 | let head = self.copy_from(&buf[tail..], 0..start); | ||
| 348 | |||
| 349 | compiler_fence(Ordering::SeqCst); | ||
| 350 | |||
| 351 | // Confirm that the DMA is not inside data we could have written | ||
| 352 | let pos = self.pos(dma); | ||
| 353 | if pos > self.end || pos < start || dma.reset_complete_count() > 1 { | ||
| 354 | Err(OverrunError) | ||
| 355 | } else { | ||
| 356 | self.end = head; | ||
| 357 | |||
| 358 | Ok((tail + head, self.cap() - (start - self.end))) | ||
| 359 | } | ||
| 360 | } | ||
| 361 | } | ||
| 362 | /// Copy into the dma buffer at `data_range` from `buf` | ||
| 363 | fn copy_from(&mut self, buf: &[W], data_range: Range<usize>) -> usize { | ||
| 364 | // Limit the number of elements that can be copied | ||
| 365 | let length = usize::min(data_range.len(), buf.len()); | ||
| 366 | |||
| 367 | // Copy into dma buffer from read buffer | ||
| 368 | // We need to do it like this instead of a simple copy_from_slice() because | ||
| 369 | // reading from a part of memory that may be simultaneously written to is unsafe | ||
| 370 | unsafe { | ||
| 371 | let dma_buf = self.dma_buf.as_mut_ptr(); | ||
| 372 | |||
| 373 | for i in 0..length { | ||
| 374 | core::ptr::write_volatile(dma_buf.offset((data_range.start + i) as isize), buf[i]); | ||
| 375 | } | ||
| 376 | } | ||
| 377 | |||
| 378 | length | ||
| 379 | } | ||
| 380 | } | ||
| 381 | #[cfg(test)] | ||
| 382 | mod tests { | ||
| 383 | use core::array; | ||
| 384 | use std::{cell, vec}; | ||
| 385 | |||
| 386 | use super::*; | ||
| 387 | |||
    /// Scripted responses the mock DMA hands back to the ring buffer under test,
    /// consumed in the order they were queued via `setup`.
    #[allow(dead_code)]
    #[derive(PartialEq, Debug)]
    enum TestCircularTransferRequest {
        /// Value to return from the next `get_complete_count` call.
        GetCompleteCount(usize),
        /// Value to return from the next `reset_complete_count` call.
        ResetCompleteCount(usize),
        /// Absolute DMA position answering the next `get_remaining_transfers` call.
        PositionRequest(usize),
    }
| 395 | |||
| 396 | struct TestCircularTransfer { | ||
| 397 | len: usize, | ||
| 398 | requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>, | ||
| 399 | } | ||
| 400 | |||
    impl DmaCtrl for TestCircularTransfer {
        fn get_remaining_transfers(&self) -> usize {
            // The next scripted request must be a PositionRequest; the absolute
            // position is converted into an NDTR-style remaining count.
            match self.requests.borrow_mut().pop().unwrap() {
                TestCircularTransferRequest::PositionRequest(pos) => {
                    let len = self.len;

                    assert!(len >= pos);

                    len - pos
                }
                _ => unreachable!(),
            }
        }

        fn get_complete_count(&self) -> usize {
            // The next scripted request must be a GetCompleteCount.
            match self.requests.borrow_mut().pop().unwrap() {
                TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count,
                _ => unreachable!(),
            }
        }

        fn reset_complete_count(&mut self) -> usize {
            // The next scripted request must be a ResetCompleteCount.
            match self.requests.get_mut().pop().unwrap() {
                TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
                _ => unreachable!(),
            }
        }

        // No-op: these tests poll synchronously and never need a wakeup.
        fn set_waker(&mut self, waker: &Waker) {}
    }
| 431 | |||
| 432 | impl TestCircularTransfer { | ||
| 433 | pub fn new(len: usize) -> Self { | ||
| 434 | Self { | ||
| 435 | requests: cell::RefCell::new(vec![]), | ||
| 436 | len, | ||
| 437 | } | ||
| 438 | } | ||
| 439 | |||
| 440 | pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) { | ||
| 441 | requests.reverse(); | ||
| 442 | self.requests.replace(requests); | ||
| 443 | } | ||
| 444 | } | ||
| 445 | |||
| 446 | #[test] | ||
| 447 | fn empty_and_read_not_started() { | ||
| 448 | let mut dma_buf = [0u8; 16]; | ||
| 449 | let ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); | ||
| 450 | |||
| 451 | assert_eq!(0, ringbuf.start); | ||
| 452 | } | ||
| 453 | |||
| 454 | #[test] | ||
| 455 | fn can_read() { | ||
| 456 | let mut dma = TestCircularTransfer::new(16); | ||
| 457 | |||
| 458 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||
| 459 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); | ||
| 460 | |||
| 461 | assert_eq!(0, ringbuf.start); | ||
| 462 | assert_eq!(16, ringbuf.cap()); | ||
| 463 | |||
| 464 | dma.setup(vec![ | ||
| 465 | TestCircularTransferRequest::PositionRequest(8), | ||
| 466 | TestCircularTransferRequest::PositionRequest(10), | ||
| 467 | TestCircularTransferRequest::GetCompleteCount(0), | ||
| 468 | ]); | ||
| 469 | let mut buf = [0; 2]; | ||
| 470 | assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 471 | assert_eq!([0, 1], buf); | ||
| 472 | assert_eq!(2, ringbuf.start); | ||
| 473 | |||
| 474 | dma.setup(vec![ | ||
| 475 | TestCircularTransferRequest::PositionRequest(10), | ||
| 476 | TestCircularTransferRequest::PositionRequest(12), | ||
| 477 | TestCircularTransferRequest::GetCompleteCount(0), | ||
| 478 | ]); | ||
| 479 | let mut buf = [0; 2]; | ||
| 480 | assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 481 | assert_eq!([2, 3], buf); | ||
| 482 | assert_eq!(4, ringbuf.start); | ||
| 483 | |||
| 484 | dma.setup(vec![ | ||
| 485 | TestCircularTransferRequest::PositionRequest(12), | ||
| 486 | TestCircularTransferRequest::PositionRequest(14), | ||
| 487 | TestCircularTransferRequest::GetCompleteCount(0), | ||
| 488 | ]); | ||
| 489 | let mut buf = [0; 8]; | ||
| 490 | assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 491 | assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]); | ||
| 492 | assert_eq!(12, ringbuf.start); | ||
| 493 | } | ||
| 494 | |||
| 495 | #[test] | ||
| 496 | fn can_read_with_wrap() { | ||
| 497 | let mut dma = TestCircularTransfer::new(16); | ||
| 498 | |||
| 499 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||
| 500 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); | ||
| 501 | |||
| 502 | assert_eq!(0, ringbuf.start); | ||
| 503 | assert_eq!(16, ringbuf.cap()); | ||
| 504 | |||
| 505 | /* | ||
| 506 | Read to close to the end of the buffer | ||
| 507 | */ | ||
| 508 | dma.setup(vec![ | ||
| 509 | TestCircularTransferRequest::PositionRequest(14), | ||
| 510 | TestCircularTransferRequest::PositionRequest(16), | ||
| 511 | TestCircularTransferRequest::GetCompleteCount(0), | ||
| 512 | ]); | ||
| 513 | let mut buf = [0; 14]; | ||
| 514 | assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 515 | assert_eq!(14, ringbuf.start); | ||
| 516 | |||
| 517 | /* | ||
| 518 | Now, read around the buffer | ||
| 519 | */ | ||
| 520 | dma.setup(vec![ | ||
| 521 | TestCircularTransferRequest::PositionRequest(6), | ||
| 522 | TestCircularTransferRequest::PositionRequest(8), | ||
| 523 | TestCircularTransferRequest::ResetCompleteCount(1), | ||
| 524 | ]); | ||
| 525 | let mut buf = [0; 6]; | ||
| 526 | assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 527 | assert_eq!(4, ringbuf.start); | ||
| 528 | } | ||
| 529 | |||
| 530 | #[test] | ||
| 531 | fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() { | ||
| 532 | let mut dma = TestCircularTransfer::new(16); | ||
| 533 | |||
| 534 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||
| 535 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); | ||
| 536 | |||
| 537 | assert_eq!(0, ringbuf.start); | ||
| 538 | assert_eq!(16, ringbuf.cap()); | ||
| 539 | |||
| 540 | /* | ||
| 541 | Read to close to the end of the buffer | ||
| 542 | */ | ||
| 543 | dma.setup(vec![ | ||
| 544 | TestCircularTransferRequest::PositionRequest(14), | ||
| 545 | TestCircularTransferRequest::PositionRequest(16), | ||
| 546 | TestCircularTransferRequest::GetCompleteCount(0), | ||
| 547 | ]); | ||
| 548 | let mut buf = [0; 14]; | ||
| 549 | assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 550 | assert_eq!(14, ringbuf.start); | ||
| 551 | |||
| 552 | /* | ||
| 553 | Now, read to the end of the buffer | ||
| 554 | */ | ||
| 555 | dma.setup(vec![ | ||
| 556 | TestCircularTransferRequest::PositionRequest(6), | ||
| 557 | TestCircularTransferRequest::PositionRequest(8), | ||
| 558 | TestCircularTransferRequest::ResetCompleteCount(1), | ||
| 559 | ]); | ||
| 560 | let mut buf = [0; 2]; | ||
| 561 | assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 562 | assert_eq!(0, ringbuf.start); | ||
| 563 | } | ||
| 564 | |||
| 565 | #[test] | ||
| 566 | fn can_read_when_dma_writer_wraps_once_with_same_ndtr() { | ||
| 567 | let mut dma = TestCircularTransfer::new(16); | ||
| 568 | |||
| 569 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||
| 570 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); | ||
| 571 | |||
| 572 | assert_eq!(0, ringbuf.start); | ||
| 573 | assert_eq!(16, ringbuf.cap()); | ||
| 574 | |||
| 575 | /* | ||
| 576 | Read to about the middle of the buffer | ||
| 577 | */ | ||
| 578 | dma.setup(vec![ | ||
| 579 | TestCircularTransferRequest::PositionRequest(6), | ||
| 580 | TestCircularTransferRequest::PositionRequest(6), | ||
| 581 | TestCircularTransferRequest::GetCompleteCount(0), | ||
| 582 | ]); | ||
| 583 | let mut buf = [0; 6]; | ||
| 584 | assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 585 | assert_eq!(6, ringbuf.start); | ||
| 586 | |||
| 587 | /* | ||
| 588 | Now, wrap the DMA controller around | ||
| 589 | */ | ||
| 590 | dma.setup(vec![ | ||
| 591 | TestCircularTransferRequest::PositionRequest(6), | ||
| 592 | TestCircularTransferRequest::GetCompleteCount(1), | ||
| 593 | TestCircularTransferRequest::PositionRequest(6), | ||
| 594 | TestCircularTransferRequest::GetCompleteCount(1), | ||
| 595 | ]); | ||
| 596 | let mut buf = [0; 6]; | ||
| 597 | assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 598 | assert_eq!(12, ringbuf.start); | ||
| 599 | } | ||
| 600 | |||
| 601 | #[test] | ||
| 602 | fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() { | ||
| 603 | let mut dma = TestCircularTransfer::new(16); | ||
| 604 | |||
| 605 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||
| 606 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); | ||
| 607 | |||
| 608 | assert_eq!(0, ringbuf.start); | ||
| 609 | assert_eq!(16, ringbuf.cap()); | ||
| 610 | |||
| 611 | /* | ||
| 612 | Read a few bytes | ||
| 613 | */ | ||
| 614 | dma.setup(vec![ | ||
| 615 | TestCircularTransferRequest::PositionRequest(2), | ||
| 616 | TestCircularTransferRequest::PositionRequest(2), | ||
| 617 | TestCircularTransferRequest::GetCompleteCount(0), | ||
| 618 | ]); | ||
| 619 | let mut buf = [0; 6]; | ||
| 620 | assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 621 | assert_eq!(2, ringbuf.start); | ||
| 622 | |||
| 623 | /* | ||
| 624 | Now, overtake the reader | ||
| 625 | */ | ||
| 626 | dma.setup(vec![ | ||
| 627 | TestCircularTransferRequest::PositionRequest(4), | ||
| 628 | TestCircularTransferRequest::PositionRequest(6), | ||
| 629 | TestCircularTransferRequest::GetCompleteCount(1), | ||
| 630 | ]); | ||
| 631 | let mut buf = [0; 6]; | ||
| 632 | assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err()); | ||
| 633 | } | ||
| 634 | |||
| 635 | #[test] | ||
| 636 | fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() { | ||
| 637 | let mut dma = TestCircularTransfer::new(16); | ||
| 638 | |||
| 639 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||
| 640 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); | ||
| 641 | |||
| 642 | assert_eq!(0, ringbuf.start); | ||
| 643 | assert_eq!(16, ringbuf.cap()); | ||
| 644 | |||
| 645 | /* | ||
| 646 | Read to close to the end of the buffer | ||
| 647 | */ | ||
| 648 | dma.setup(vec![ | ||
| 649 | TestCircularTransferRequest::PositionRequest(14), | ||
| 650 | TestCircularTransferRequest::PositionRequest(16), | ||
| 651 | TestCircularTransferRequest::GetCompleteCount(0), | ||
| 652 | ]); | ||
| 653 | let mut buf = [0; 14]; | ||
| 654 | assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||
| 655 | assert_eq!(14, ringbuf.start); | ||
| 656 | |||
| 657 | /* | ||
| 658 | Now, overtake the reader | ||
| 659 | */ | ||
| 660 | dma.setup(vec![ | ||
| 661 | TestCircularTransferRequest::PositionRequest(8), | ||
| 662 | TestCircularTransferRequest::PositionRequest(10), | ||
| 663 | TestCircularTransferRequest::ResetCompleteCount(2), | ||
| 664 | ]); | ||
| 665 | let mut buf = [0; 6]; | ||
| 666 | assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err()); | ||
| 667 | } | ||
| 668 | } | ||
diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs new file mode 100644 index 000000000..12d418414 --- /dev/null +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs | |||
| @@ -0,0 +1,305 @@ | |||
| 1 | #![cfg_attr(gpdma, allow(unused))] | ||
| 2 | |||
| 3 | use core::future::poll_fn; | ||
| 4 | use core::task::{Poll, Waker}; | ||
| 5 | |||
| 6 | use crate::dma::word::Word; | ||
| 7 | |||
/// Interface to the DMA channel state needed by the ring buffers: the current
/// transfer position, a wrap counter, and waker registration for async use.
pub trait DmaCtrl {
    /// Get the NDTR register value, i.e. the space left in the underlying
    /// buffer until the dma writer wraps.
    fn get_remaining_transfers(&self) -> usize;

    /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
    fn reset_complete_count(&mut self) -> usize;

    /// Set the waker for a running poll_fn
    fn set_waker(&mut self, waker: &Waker);
}
| 19 | |||
/// Errors reported by the DMA ring buffers.
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA controller lapped the CPU side: it overwrote data not yet read
    /// (reader), or consumed data not yet written (writer).
    Overrun,
    /// The CPU-side index ended up ahead of the DMA position, meaning the
    /// bookkeeping lost sync with the hardware.
    DmaUnsynced,
}
| 26 | |||
/// A logical position inside the circular DMA buffer.
///
/// The index is split into the number of completed buffer wraps
/// (`complete_count`) and the offset inside the buffer (`pos`), so two
/// indices remain comparable even after the hardware pointer has wrapped.
#[derive(Debug, Clone, Copy, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct DmaIndex {
    // How many times this index has wrapped around the buffer.
    complete_count: usize,
    // Offset within the buffer.
    pos: usize,
}

impl DmaIndex {
    /// Return to the start of the buffer with no completed wraps.
    fn reset(&mut self) {
        self.pos = 0;
        self.complete_count = 0;
    }

    /// Convert this index plus `offset` into a raw buffer index, wrapping at `cap`.
    fn as_index(&self, cap: usize, offset: usize) -> usize {
        (self.pos + offset) % cap
    }

    /// Pull the hardware DMA state into this index.
    fn dma_sync(&mut self, cap: usize, dma: &mut impl DmaCtrl) {
        // Important!
        // The ordering of the first two lines matters!
        // If changed, the code will detect a wrong +capacity
        // jump at wrap-around.
        let count_diff = dma.reset_complete_count();
        let pos = cap - dma.get_remaining_transfers();
        // If the position moved backwards with no wrap recorded, the DMA
        // presumably wrapped between the two reads above; clamp to the last
        // element so the index never goes backwards — the wrap is picked up
        // by the next sync. (NOTE(review): inferred from the ordering comment
        // above — confirm against the hardware race this guards.)
        self.pos = if pos < self.pos && count_diff == 0 {
            cap - 1
        } else {
            pos
        };

        self.complete_count += count_diff;
    }

    /// Advance by `steps` elements, carrying whole buffers into `complete_count`.
    fn advance(&mut self, cap: usize, steps: usize) {
        let next = self.pos + steps;
        self.complete_count += next / cap;
        self.pos = next % cap;
    }

    /// Subtract the shared number of completed wraps from both indices so the
    /// counters cannot grow without bound.
    fn normalize(lhs: &mut DmaIndex, rhs: &mut DmaIndex) {
        let min_count = lhs.complete_count.min(rhs.complete_count);
        lhs.complete_count -= min_count;
        rhs.complete_count -= min_count;
    }

    /// Signed distance in elements from `rhs` to `self`.
    fn diff(&self, cap: usize, rhs: &DmaIndex) -> isize {
        (self.complete_count * cap + self.pos) as isize - (rhs.complete_count * cap + rhs.pos) as isize
    }
}
| 76 | |||
/// Reader-side view of a circular DMA transfer: the DMA writes, the CPU reads.
pub struct ReadableDmaRingBuffer<'a, W: Word> {
    // Memory the DMA controller writes into.
    dma_buf: &'a mut [W],
    // Tracks where the DMA hardware is currently writing.
    write_index: DmaIndex,
    // Tracks how far the CPU has consumed.
    read_index: DmaIndex,
}
| 82 | |||
impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
    /// Construct an empty buffer.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self {
            dma_buf,
            write_index: Default::default(),
            read_index: Default::default(),
        }
    }

    /// Reset the ring buffer to its initial state.
    pub fn reset(&mut self, dma: &mut impl DmaCtrl) {
        dma.reset_complete_count();
        self.write_index.reset();
        self.write_index.dma_sync(self.cap(), dma);
        // Discard anything pending: the reader catches up to the DMA position.
        self.read_index = self.write_index;
    }

    /// Get the full ringbuffer capacity.
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// Get the available readable dma samples.
    pub fn len(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, Error> {
        self.write_index.dma_sync(self.cap(), dma);
        DmaIndex::normalize(&mut self.write_index, &mut self.read_index);

        let diff = self.write_index.diff(self.cap(), &self.read_index);

        if diff < 0 {
            // The reader is ahead of the DMA writer: bookkeeping lost sync.
            Err(Error::DmaUnsynced)
        } else if diff > self.cap() as isize {
            // The DMA lapped the reader, so unread data was overwritten.
            Err(Error::Overrun)
        } else {
            Ok(diff as usize)
        }
    }

    /// Read elements from the ring buffer.
    ///
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// Error is returned if the portion to be read was overwritten by the DMA controller,
    /// in which case the ringbuffer will automatically reset itself.
    pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), Error> {
        self.read_raw(dma, buf).inspect_err(|_e| {
            self.reset(dma);
        })
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// Error is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
    pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, Error> {
        let mut read_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            match self.read(dma, &mut buffer[read_data..buffer_len]) {
                Ok((len, remaining)) => {
                    read_data += len;
                    if read_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    // Copy out as much as is available (bounded by `buf`), then re-sync to make
    // sure the DMA did not overwrite the copied region mid-read.
    fn read_raw(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), Error> {
        let readable = self.len(dma)?.min(buf.len());
        for i in 0..readable {
            buf[i] = self.read_buf(i);
        }
        // Second sync: errors here mean the data just copied may be corrupt.
        let available = self.len(dma)?;
        self.read_index.advance(self.cap(), readable);
        // No underflow: the DMA only adds data, so `available >= readable`.
        Ok((readable, available - readable))
    }

    // Volatile read of the element `offset` past the read index (wrapped into
    // bounds by `as_index`); volatile because the DMA writes this memory concurrently.
    fn read_buf(&self, offset: usize) -> W {
        unsafe {
            core::ptr::read_volatile(
                self.dma_buf
                    .as_ptr()
                    .offset(self.read_index.as_index(self.cap(), offset) as isize),
            )
        }
    }
}
| 188 | |||
/// Writer-side view of a circular DMA transfer: the CPU writes, the DMA reads.
pub struct WritableDmaRingBuffer<'a, W: Word> {
    // Memory the DMA controller reads from.
    dma_buf: &'a mut [W],
    // Tracks where the DMA hardware is currently reading.
    read_index: DmaIndex,
    // Tracks how far the CPU has produced data.
    write_index: DmaIndex,
}
| 194 | |||
impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
    /// Construct a ringbuffer filled with the given buffer data.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        let len = dma_buf.len();
        Self {
            dma_buf,
            read_index: Default::default(),
            // Start the write index a full buffer ahead of the read index:
            // the ring buffer is treated as full until the DMA consumes data.
            write_index: DmaIndex {
                complete_count: 0,
                pos: len,
            },
        }
    }

    /// Reset the ring buffer to its initial state. The buffer after the reset will be full.
    pub fn reset(&mut self, dma: &mut impl DmaCtrl) {
        dma.reset_complete_count();
        self.read_index.reset();
        self.read_index.dma_sync(self.cap(), dma);
        self.write_index = self.read_index;
        // Re-establish the "full" invariant: writer one whole buffer ahead.
        self.write_index.advance(self.cap(), self.cap());
    }

    /// Get the remaining writable dma samples.
    pub fn len(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, Error> {
        self.read_index.dma_sync(self.cap(), dma);
        DmaIndex::normalize(&mut self.read_index, &mut self.write_index);

        let diff = self.write_index.diff(self.cap(), &self.read_index);

        if diff < 0 {
            // The DMA read pointer overtook the write pointer: it consumed
            // data that was never written.
            Err(Error::Overrun)
        } else if diff > self.cap() as isize {
            // Writer more than a full buffer ahead: bookkeeping lost sync.
            Err(Error::DmaUnsynced)
        } else {
            Ok(self.cap().saturating_sub(diff as usize))
        }
    }

    /// Get the full ringbuffer capacity.
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// Append data to the ring buffer.
    /// Returns a tuple of the data written and the remaining write capacity in the buffer.
    /// Error is returned if the portion to be written was previously read by the DMA controller.
    /// In this case, the ringbuffer will automatically reset itself, giving a full buffer worth of
    /// leeway between the write index and the DMA.
    pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), Error> {
        self.write_raw(dma, buf).inspect_err(|_e| {
            self.reset(dma);
        })
    }

    /// Write elements directly to the buffer.
    ///
    /// NOTE(review): this does not advance `write_index`, and if `buf` is longer
    /// than the buffer the stores wrap via `as_index` and overwrite earlier
    /// elements while `written` is clamped to the capacity — looks intended only
    /// for pre-filling the buffer before the transfer starts; confirm at call sites.
    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
        for (i, data) in buf.iter().enumerate() {
            self.write_buf(i, *data)
        }
        let written = buf.len().min(self.cap());
        Ok((written, self.cap() - written))
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, Error> {
        let mut written_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            match self.write(dma, &buffer[written_data..buffer_len]) {
                Ok((len, remaining)) => {
                    written_data += len;
                    if written_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    // Copy in as much as fits (bounded by `buf`), then re-sync so that the DMA
    // reading past the written region during the copy is detected.
    fn write_raw(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), Error> {
        let writable = self.len(dma)?.min(buf.len());
        for i in 0..writable {
            self.write_buf(i, buf[i]);
        }
        // Second sync: free space can only have grown, so no underflow below.
        let available = self.len(dma)?;
        self.write_index.advance(self.cap(), writable);
        Ok((writable, available - writable))
    }

    // Volatile store of `value` at `offset` past the write index (wrapped into
    // bounds by `as_index`); volatile because the DMA reads this memory concurrently.
    fn write_buf(&mut self, offset: usize, value: W) {
        unsafe {
            core::ptr::write_volatile(
                self.dma_buf
                    .as_mut_ptr()
                    .offset(self.write_index.as_index(self.cap(), offset) as isize),
                value,
            )
        }
    }
}
| 303 | |||
| 304 | #[cfg(test)] | ||
| 305 | mod tests; | ||
diff --git a/embassy-stm32/src/dma/ringbuffer/tests/mod.rs b/embassy-stm32/src/dma/ringbuffer/tests/mod.rs new file mode 100644 index 000000000..6fabedb83 --- /dev/null +++ b/embassy-stm32/src/dma/ringbuffer/tests/mod.rs | |||
| @@ -0,0 +1,90 @@ | |||
| 1 | use std::{cell, vec}; | ||
| 2 | |||
| 3 | use super::*; | ||
| 4 | |||
/// Scripted responses the mock DMA returns, consumed in the order queued via `setup`.
#[allow(unused)]
#[derive(PartialEq, Debug)]
enum TestCircularTransferRequest {
    /// Value returned by the next `reset_complete_count` call.
    ResetCompleteCount(usize),
    /// Absolute DMA position answering the next `get_remaining_transfers` call.
    PositionRequest(usize),
}
| 11 | |||
/// Mock DMA transfer that answers `DmaCtrl` calls from a pre-programmed request queue.
#[allow(unused)]
struct TestCircularTransfer {
    // Total length of the simulated circular transfer.
    len: usize,
    // Pending scripted requests; popped from the back, i.e. in setup order.
    requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
}
| 17 | |||
| 18 | impl DmaCtrl for TestCircularTransfer { | ||
| 19 | fn get_remaining_transfers(&self) -> usize { | ||
| 20 | match self.requests.borrow_mut().pop().unwrap() { | ||
| 21 | TestCircularTransferRequest::PositionRequest(pos) => { | ||
| 22 | let len = self.len; | ||
| 23 | |||
| 24 | assert!(len >= pos); | ||
| 25 | |||
| 26 | len - pos | ||
| 27 | } | ||
| 28 | _ => unreachable!(), | ||
| 29 | } | ||
| 30 | } | ||
| 31 | |||
| 32 | fn reset_complete_count(&mut self) -> usize { | ||
| 33 | match self.requests.get_mut().pop().unwrap() { | ||
| 34 | TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count, | ||
| 35 | _ => unreachable!(), | ||
| 36 | } | ||
| 37 | } | ||
| 38 | |||
| 39 | fn set_waker(&mut self, _waker: &Waker) {} | ||
| 40 | } | ||
| 41 | |||
impl TestCircularTransfer {
    /// Create a mock transfer of the given circular length with no scripted requests.
    #[allow(unused)]
    pub fn new(len: usize) -> Self {
        Self {
            requests: cell::RefCell::new(vec![]),
            len,
        }
    }

    /// Queue scripted requests; reversed so `pop()` yields them in setup order.
    #[allow(unused)]
    pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
        requests.reverse();
        self.requests.replace(requests);
    }
}
| 57 | |||
| 58 | const CAP: usize = 16; | ||
| 59 | |||
#[test]
fn dma_index_as_index_returns_index_mod_cap_by_default() {
    let index = DmaIndex::default();
    // A default index maps any offset straight to `offset % CAP`.
    for offset in 0..=4 {
        assert_eq!(index.as_index(CAP, offset), offset);
    }
    // Offsets at and past the capacity wrap around.
    assert_eq!(index.as_index(CAP, CAP), 0);
    assert_eq!(index.as_index(CAP, CAP + 1), 1);
}
| 71 | |||
#[test]
fn dma_index_advancing_increases_as_index() {
    let mut index = DmaIndex::default();
    assert_eq!(index.as_index(CAP, 0), 0);
    // Step forward one element at a time; the raw index follows.
    for expected in 1..=4 {
        index.advance(CAP, 1);
        assert_eq!(index.as_index(CAP, 0), expected);
    }
    // Jump exactly to the wrap-around point, then one element past it.
    index.advance(CAP, CAP - 4);
    assert_eq!(index.as_index(CAP, 0), 0);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 1);
}
| 89 | |||
| 90 | mod prop_test; | ||
diff --git a/embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs new file mode 100644 index 000000000..661fb1728 --- /dev/null +++ b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | use std::task::Waker; | ||
| 2 | |||
| 3 | use proptest::prop_oneof; | ||
| 4 | use proptest::strategy::{self, BoxedStrategy, Strategy as _}; | ||
| 5 | use proptest_state_machine::{prop_state_machine, ReferenceStateMachine, StateMachineTest}; | ||
| 6 | |||
| 7 | use super::*; | ||
| 8 | |||
| 9 | const CAP: usize = 128; | ||
| 10 | |||
/// In-memory stand-in for the DMA hardware used by the property tests.
#[derive(Debug, Default)]
struct DmaMock {
    // Current transfer position within the CAP-sized buffer.
    pos: usize,
    // Completed wraps since the last `reset_complete_count`.
    wraps: usize,
}
| 16 | |||
| 17 | impl DmaMock { | ||
| 18 | pub fn advance(&mut self, steps: usize) { | ||
| 19 | let next = self.pos + steps; | ||
| 20 | self.pos = next % CAP; | ||
| 21 | self.wraps += next / CAP; | ||
| 22 | } | ||
| 23 | } | ||
| 24 | |||
impl DmaCtrl for DmaMock {
    fn get_remaining_transfers(&self) -> usize {
        // NDTR-style: counts down from CAP to 0 as the transfer progresses.
        CAP - self.pos
    }

    fn reset_complete_count(&mut self) -> usize {
        // Hand back the accumulated wrap count, clearing it in the same step.
        core::mem::replace(&mut self.wraps, 0)
    }

    // No-op: the property tests drive the state machine synchronously.
    fn set_waker(&mut self, _waker: &Waker) {}
}
| 36 | |||
/// Reference-model state shared by the reader and writer state machines.
#[derive(Debug, Clone)]
enum Status {
    /// Number of elements currently available to the side under test.
    Available(usize),
    /// The model expects the next ring-buffer operation to fail.
    Failed,
}

impl Status {
    /// Initial state with the given number of available elements.
    pub fn new(capacity: usize) -> Self {
        Self::Available(capacity)
    }
}
| 48 | |||
| 49 | mod reader; | ||
| 50 | mod writer; | ||
diff --git a/embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs new file mode 100644 index 000000000..4f3957a68 --- /dev/null +++ b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs | |||
| @@ -0,0 +1,123 @@ | |||
| 1 | use core::fmt::Debug; | ||
| 2 | |||
| 3 | use super::*; | ||
| 4 | |||
/// Transitions of the reader model: the DMA produces, the test resets or reads.
#[derive(Debug, Clone)]
enum ReaderTransition {
    /// The DMA produces this many new elements.
    Write(usize),
    /// Reset the ring buffer, discarding pending data.
    Reset,
    /// Attempt to read at most this many elements.
    ReadUpTo(usize),
}
| 11 | |||
| 12 | struct ReaderSM; | ||
| 13 | |||
impl ReferenceStateMachine for ReaderSM {
    type State = Status;
    type Transition = ReaderTransition;

    fn init_state() -> BoxedStrategy<Self::State> {
        // The reader starts with an empty ring buffer.
        strategy::Just(Status::new(0)).boxed()
    }

    fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> {
        prop_oneof![
            (1..50_usize).prop_map(ReaderTransition::Write),
            (1..50_usize).prop_map(ReaderTransition::ReadUpTo),
            strategy::Just(ReaderTransition::Reset),
        ]
        .boxed()
    }

    fn apply(status: Self::State, transition: &Self::Transition) -> Self::State {
        match (status, transition) {
            // Reset always empties the buffer, regardless of prior state.
            (_, ReaderTransition::Reset) => Status::Available(0),
            (Status::Available(x), ReaderTransition::Write(y)) => {
                if x + y > CAP {
                    // The DMA lapped the reader: unread data was overwritten.
                    Status::Failed
                } else {
                    Status::Available(x + y)
                }
            }
            // Once overrun, further writes cannot recover the buffer.
            (Status::Failed, ReaderTransition::Write(_)) => Status::Failed,
            (Status::Available(x), ReaderTransition::ReadUpTo(y)) => Status::Available(x.saturating_sub(*y)),
            // A failed read resets the ring buffer, leaving it empty.
            (Status::Failed, ReaderTransition::ReadUpTo(_)) => Status::Available(0),
        }
    }
}
| 47 | |||
/// System under test: a real ring buffer reading from a mock DMA.
struct ReaderSut {
    // Reference-model state as of the previous transition.
    status: Status,
    // Raw pointer to the leaked backing storage; reclaimed in teardown().
    buffer: *mut [u8],
    // Mock DMA acting as the producer.
    producer: DmaMock,
    // Ring buffer under test, borrowing the leaked storage as 'static.
    consumer: ReadableDmaRingBuffer<'static, u8>,
}
| 54 | |||
| 55 | impl Debug for ReaderSut { | ||
| 56 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { | ||
| 57 | <DmaMock as Debug>::fmt(&self.producer, f) | ||
| 58 | } | ||
| 59 | } | ||
| 60 | |||
| 61 | struct ReaderTest; | ||
| 62 | |||
impl StateMachineTest for ReaderTest {
    type SystemUnderTest = ReaderSut;
    type Reference = ReaderSM;

    fn init_test(ref_status: &<Self::Reference as ReferenceStateMachine>::State) -> Self::SystemUnderTest {
        // Leak the backing storage to a raw pointer so the ring buffer can hold
        // a 'static borrow; teardown() reclaims it.
        let buffer = Box::into_raw(Box::new([0; CAP]));
        ReaderSut {
            status: ref_status.clone(),
            buffer,
            producer: DmaMock::default(),
            // SAFETY: `buffer` stays live until teardown() and is only borrowed here.
            consumer: ReadableDmaRingBuffer::new(unsafe { &mut *buffer }),
        }
    }

    fn teardown(state: Self::SystemUnderTest) {
        // SAFETY: `buffer` came from Box::into_raw in init_test and is freed
        // exactly once here.
        unsafe {
            let _ = Box::from_raw(state.buffer);
        };
    }

    fn apply(
        mut sut: Self::SystemUnderTest,
        ref_state: &<Self::Reference as ReferenceStateMachine>::State,
        transition: <Self::Reference as ReferenceStateMachine>::Transition,
    ) -> Self::SystemUnderTest {
        match transition {
            // Simulate the DMA producing `x` more elements.
            ReaderTransition::Write(x) => sut.producer.advance(x),
            ReaderTransition::Reset => {
                sut.consumer.reset(&mut sut.producer);
            }
            ReaderTransition::ReadUpTo(x) => {
                // `status` is the model state *before* this transition.
                let status = sut.status;
                let ReaderSut {
                    ref mut producer,
                    ref mut consumer,
                    ..
                } = sut;
                let mut buf = vec![0; x];
                let res = consumer.read(producer, &mut buf);
                match status {
                    Status::Available(n) => {
                        let readable = x.min(n);

                        assert_eq!(res.unwrap().0, readable);
                    }
                    // The model says the DMA lapped the reader: expect an error.
                    Status::Failed => assert!(res.is_err()),
                }
            }
        }

        // Carry the post-transition reference state into the SUT for the next step.
        ReaderSut {
            status: ref_state.clone(),
            ..sut
        }
    }
}
| 119 | |||
| 120 | prop_state_machine! { | ||
| 121 | #[test] | ||
| 122 | fn reader_state_test(sequential 1..20 => ReaderTest); | ||
| 123 | } | ||
diff --git a/embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs new file mode 100644 index 000000000..15433c0ee --- /dev/null +++ b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs | |||
| @@ -0,0 +1,122 @@ | |||
| 1 | use core::fmt::Debug; | ||
| 2 | |||
| 3 | use super::*; | ||
| 4 | |||
| 5 | #[derive(Debug, Clone)] | ||
| 6 | enum WriterTransition { | ||
| 7 | Read(usize), | ||
| 8 | WriteUpTo(usize), | ||
| 9 | Reset, | ||
| 10 | } | ||
| 11 | |||
| 12 | struct WriterSM; | ||
| 13 | |||
| 14 | impl ReferenceStateMachine for WriterSM { | ||
| 15 | type State = Status; | ||
| 16 | type Transition = WriterTransition; | ||
| 17 | |||
| 18 | fn init_state() -> BoxedStrategy<Self::State> { | ||
| 19 | strategy::Just(Status::new(CAP)).boxed() | ||
| 20 | } | ||
| 21 | |||
| 22 | fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> { | ||
| 23 | prop_oneof![ | ||
| 24 | (1..50_usize).prop_map(WriterTransition::Read), | ||
| 25 | (1..50_usize).prop_map(WriterTransition::WriteUpTo), | ||
| 26 | strategy::Just(WriterTransition::Reset), | ||
| 27 | ] | ||
| 28 | .boxed() | ||
| 29 | } | ||
| 30 | |||
| 31 | fn apply(status: Self::State, transition: &Self::Transition) -> Self::State { | ||
| 32 | match (status, transition) { | ||
| 33 | (_, WriterTransition::Reset) => Status::Available(CAP), | ||
| 34 | (Status::Available(x), WriterTransition::Read(y)) => { | ||
| 35 | if x < *y { | ||
| 36 | Status::Failed | ||
| 37 | } else { | ||
| 38 | Status::Available(x - y) | ||
| 39 | } | ||
| 40 | } | ||
| 41 | (Status::Failed, WriterTransition::Read(_)) => Status::Failed, | ||
| 42 | (Status::Available(x), WriterTransition::WriteUpTo(y)) => Status::Available((x + *y).min(CAP)), | ||
| 43 | (Status::Failed, WriterTransition::WriteUpTo(_)) => Status::Available(CAP), | ||
| 44 | } | ||
| 45 | } | ||
| 46 | } | ||
| 47 | |||
| 48 | struct WriterSut { | ||
| 49 | status: Status, | ||
| 50 | buffer: *mut [u8], | ||
| 51 | producer: WritableDmaRingBuffer<'static, u8>, | ||
| 52 | consumer: DmaMock, | ||
| 53 | } | ||
| 54 | |||
| 55 | impl Debug for WriterSut { | ||
| 56 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { | ||
| 57 | <DmaMock as Debug>::fmt(&self.consumer, f) | ||
| 58 | } | ||
| 59 | } | ||
| 60 | |||
| 61 | struct WriterTest; | ||
| 62 | |||
| 63 | impl StateMachineTest for WriterTest { | ||
| 64 | type SystemUnderTest = WriterSut; | ||
| 65 | type Reference = WriterSM; | ||
| 66 | |||
| 67 | fn init_test(ref_status: &<Self::Reference as ReferenceStateMachine>::State) -> Self::SystemUnderTest { | ||
| 68 | let buffer = Box::into_raw(Box::new([0; CAP])); | ||
| 69 | WriterSut { | ||
| 70 | status: ref_status.clone(), | ||
| 71 | buffer, | ||
| 72 | producer: WritableDmaRingBuffer::new(unsafe { &mut *buffer }), | ||
| 73 | consumer: DmaMock::default(), | ||
| 74 | } | ||
| 75 | } | ||
| 76 | |||
| 77 | fn teardown(state: Self::SystemUnderTest) { | ||
| 78 | unsafe { | ||
| 79 | let _ = Box::from_raw(state.buffer); | ||
| 80 | }; | ||
| 81 | } | ||
| 82 | |||
| 83 | fn apply( | ||
| 84 | mut sut: Self::SystemUnderTest, | ||
| 85 | ref_status: &<Self::Reference as ReferenceStateMachine>::State, | ||
| 86 | transition: <Self::Reference as ReferenceStateMachine>::Transition, | ||
| 87 | ) -> Self::SystemUnderTest { | ||
| 88 | match transition { | ||
| 89 | WriterTransition::Read(x) => sut.consumer.advance(x), | ||
| 90 | WriterTransition::Reset => { | ||
| 91 | sut.producer.reset(&mut sut.consumer); | ||
| 92 | } | ||
| 93 | WriterTransition::WriteUpTo(x) => { | ||
| 94 | let status = sut.status; | ||
| 95 | let WriterSut { | ||
| 96 | ref mut producer, | ||
| 97 | ref mut consumer, | ||
| 98 | .. | ||
| 99 | } = sut; | ||
| 100 | let mut buf = vec![0; x]; | ||
| 101 | let res = producer.write(consumer, &mut buf); | ||
| 102 | match status { | ||
| 103 | Status::Available(n) => { | ||
| 104 | let writable = x.min(CAP - n.min(CAP)); | ||
| 105 | assert_eq!(res.unwrap().0, writable); | ||
| 106 | } | ||
| 107 | Status::Failed => assert!(res.is_err()), | ||
| 108 | } | ||
| 109 | } | ||
| 110 | } | ||
| 111 | |||
| 112 | WriterSut { | ||
| 113 | status: ref_status.clone(), | ||
| 114 | ..sut | ||
| 115 | } | ||
| 116 | } | ||
| 117 | } | ||
| 118 | |||
| 119 | prop_state_machine! { | ||
| 120 | #[test] | ||
| 121 | fn writer_state_test(sequential 1..20 => WriterTest); | ||
| 122 | } | ||
diff --git a/embassy-stm32/src/lib.rs b/embassy-stm32/src/lib.rs index 451f595e0..5f103e652 100644 --- a/embassy-stm32/src/lib.rs +++ b/embassy-stm32/src/lib.rs | |||
| @@ -296,6 +296,9 @@ mod dual_core { | |||
| 296 | /// It cannot be initialized by the user. The intended use is: | 296 | /// It cannot be initialized by the user. The intended use is: |
| 297 | /// | 297 | /// |
| 298 | /// ``` | 298 | /// ``` |
| 299 | /// use core::mem::MaybeUninit; | ||
| 300 | /// use embassy_stm32::{init_secondary, SharedData}; | ||
| 301 | /// | ||
| 299 | /// #[link_section = ".ram_d3"] | 302 | /// #[link_section = ".ram_d3"] |
| 300 | /// static SHARED_DATA: MaybeUninit<SharedData> = MaybeUninit::uninit(); | 303 | /// static SHARED_DATA: MaybeUninit<SharedData> = MaybeUninit::uninit(); |
| 301 | /// | 304 | /// |
diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs index 63f48ace0..7d2f071de 100644 --- a/embassy-stm32/src/sai/mod.rs +++ b/embassy-stm32/src/sai/mod.rs | |||
| @@ -27,8 +27,14 @@ pub enum Error { | |||
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | #[cfg(not(gpdma))] | 29 | #[cfg(not(gpdma))] |
| 30 | impl From<ringbuffer::OverrunError> for Error { | 30 | impl From<ringbuffer::Error> for Error { |
| 31 | fn from(_: ringbuffer::OverrunError) -> Self { | 31 | fn from(#[allow(unused)] err: ringbuffer::Error) -> Self { |
| 32 | #[cfg(feature = "defmt")] | ||
| 33 | { | ||
| 34 | if err == ringbuffer::Error::DmaUnsynced { | ||
| 35 | defmt::error!("Ringbuffer broken invariants detected!"); | ||
| 36 | } | ||
| 37 | } | ||
| 32 | Self::Overrun | 38 | Self::Overrun |
| 33 | } | 39 | } |
| 34 | } | 40 | } |
diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index 75834bf37..eb2399d9c 100644 --- a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs | |||
| @@ -83,7 +83,6 @@ impl<'d> RingBufferedUartRx<'d> { | |||
| 83 | // Clear the buffer so that it is ready to receive data | 83 | // Clear the buffer so that it is ready to receive data |
| 84 | compiler_fence(Ordering::SeqCst); | 84 | compiler_fence(Ordering::SeqCst); |
| 85 | self.ring_buf.start(); | 85 | self.ring_buf.start(); |
| 86 | self.ring_buf.clear(); | ||
| 87 | 86 | ||
| 88 | let r = self.info.regs; | 87 | let r = self.info.regs; |
| 89 | // clear all interrupts and DMA Rx Request | 88 | // clear all interrupts and DMA Rx Request |
