aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--embassy-stm32/Cargo.toml2
-rw-r--r--embassy-stm32/src/dma/dma_bdma.rs4
-rw-r--r--embassy-stm32/src/dma/ringbuffer.rs668
-rw-r--r--embassy-stm32/src/dma/ringbuffer/mod.rs293
-rw-r--r--embassy-stm32/src/dma/ringbuffer/tests/mod.rs165
-rw-r--r--embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs50
-rw-r--r--embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs122
-rw-r--r--embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs121
8 files changed, 753 insertions, 672 deletions
diff --git a/embassy-stm32/Cargo.toml b/embassy-stm32/Cargo.toml
index 8fc8da006..53ec1b27f 100644
--- a/embassy-stm32/Cargo.toml
+++ b/embassy-stm32/Cargo.toml
@@ -93,6 +93,8 @@ aligned = "0.4.1"
93 93
94[dev-dependencies] 94[dev-dependencies]
95critical-section = { version = "1.1", features = ["std"] } 95critical-section = { version = "1.1", features = ["std"] }
96proptest = "1.5.0"
97proptest-state-machine = "0.3.0"
96 98
97[build-dependencies] 99[build-dependencies]
98proc-macro2 = "1.0.36" 100proc-macro2 = "1.0.36"
diff --git a/embassy-stm32/src/dma/dma_bdma.rs b/embassy-stm32/src/dma/dma_bdma.rs
index d10b5554f..a43137c6e 100644
--- a/embassy-stm32/src/dma/dma_bdma.rs
+++ b/embassy-stm32/src/dma/dma_bdma.rs
@@ -763,10 +763,6 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
763 self.0.get_remaining_transfers() as _ 763 self.0.get_remaining_transfers() as _
764 } 764 }
765 765
766 fn get_complete_count(&self) -> usize {
767 STATE[self.0.id as usize].complete_count.load(Ordering::Acquire)
768 }
769
770 fn reset_complete_count(&mut self) -> usize { 766 fn reset_complete_count(&mut self) -> usize {
771 let state = &STATE[self.0.id as usize]; 767 let state = &STATE[self.0.id as usize];
772 #[cfg(not(armv6m))] 768 #[cfg(not(armv6m))]
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs
deleted file mode 100644
index 23f1d67d5..000000000
--- a/embassy-stm32/src/dma/ringbuffer.rs
+++ /dev/null
@@ -1,668 +0,0 @@
1#![cfg_attr(gpdma, allow(unused))]
2
3use core::future::poll_fn;
4use core::ops::Range;
5use core::sync::atomic::{compiler_fence, Ordering};
6use core::task::{Poll, Waker};
7
8use super::word::Word;
9
10/// A "read-only" ring-buffer to be used together with the DMA controller which
11/// writes in a circular way, "uncontrolled" to the buffer.
12///
13/// A snapshot of the ring buffer state can be attained by setting the `ndtr` field
14/// to the current register value. `ndtr` describes the current position of the DMA
15/// write.
16///
17/// # Buffer layout
18///
19/// ```text
20/// Without wraparound: With wraparound:
21///
22/// + buf +--- NDTR ---+ + buf +---------- NDTR ----------+
23/// | | | | | |
24/// v v v v v v
25/// +-----------------------------------------+ +-----------------------------------------+
26/// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo| |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX|
27/// +-----------------------------------------+ +-----------------------------------------+
28/// ^ ^ ^ ^ ^ ^
29/// | | | | | |
30/// +- start --+ | +- end ------+ |
31/// | | | |
32/// +- end --------------------+ +- start ----------------+
33/// ```
34pub struct ReadableDmaRingBuffer<'a, W: Word> {
35 pub(crate) dma_buf: &'a mut [W],
36 start: usize,
37}
38
39#[derive(Debug, PartialEq)]
40#[cfg_attr(feature = "defmt", derive(defmt::Format))]
41pub struct OverrunError;
42
43pub trait DmaCtrl {
44 /// Get the NDTR register value, i.e. the space left in the underlying
45 /// buffer until the dma writer wraps.
46 fn get_remaining_transfers(&self) -> usize;
47
48 /// Get the transfer completed counter.
49 /// This counter is incremented by the dma controller when NDTR is reloaded,
50 /// i.e. when the writing wraps.
51 fn get_complete_count(&self) -> usize;
52
53 /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
54 fn reset_complete_count(&mut self) -> usize;
55
56 /// Set the waker for a running poll_fn
57 fn set_waker(&mut self, waker: &Waker);
58}
59
60impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
61 pub fn new(dma_buf: &'a mut [W]) -> Self {
62 Self { dma_buf, start: 0 }
63 }
64
65 /// Reset the ring buffer to its initial state
66 pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
67 self.start = 0;
68 dma.reset_complete_count();
69 }
70
71 /// The capacity of the ringbuffer
72 pub const fn cap(&self) -> usize {
73 self.dma_buf.len()
74 }
75
76 /// The current position of the ringbuffer
77 fn pos(&self, dma: &mut impl DmaCtrl) -> usize {
78 self.cap() - dma.get_remaining_transfers()
79 }
80
81 /// Read an exact number of elements from the ringbuffer.
82 ///
83 /// Returns the remaining number of elements available for immediate reading.
84 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
85 ///
86 /// Async/Wake Behavior:
87 /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
88 /// and when it wraps around. This means that when called with a buffer of length 'M', when this
89 /// ring buffer was created with a buffer of size 'N':
90 /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
91 /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
92 pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, OverrunError> {
93 let mut read_data = 0;
94 let buffer_len = buffer.len();
95
96 poll_fn(|cx| {
97 dma.set_waker(cx.waker());
98
99 compiler_fence(Ordering::SeqCst);
100
101 match self.read(dma, &mut buffer[read_data..buffer_len]) {
102 Ok((len, remaining)) => {
103 read_data += len;
104 if read_data == buffer_len {
105 Poll::Ready(Ok(remaining))
106 } else {
107 Poll::Pending
108 }
109 }
110 Err(e) => Poll::Ready(Err(e)),
111 }
112 })
113 .await
114 }
115
116 /// Read elements from the ring buffer
117 /// Return a tuple of the length read and the length remaining in the buffer
118 /// If not all of the elements were read, then there will be some elements in the buffer remaining
119 /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
120 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
121 pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
122 /*
123 This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check
124 after we've done our work to see we have. This is because on stm32, an interrupt is not guaranteed
125 to fire in the same clock cycle that a register is read, so checking get_complete_count early does
126 not yield relevant information.
127
128 Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full
129 buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error
130 conditions.
131
132 After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that
133 the dma has not overrun within the data we could have copied. We check the data we could have copied
134 rather than the data we actually copied because it costs nothing and confirms an error condition
135 earlier.
136 */
137 let end = self.pos(dma);
138 if self.start == end && dma.get_complete_count() == 0 {
139 // No elements are available in the buffer
140 Ok((0, self.cap()))
141 } else if self.start < end {
142 // The available, unread portion in the ring buffer DOES NOT wrap
143 // Copy out the elements from the dma buffer
144 let len = self.copy_to(buf, self.start..end);
145
146 compiler_fence(Ordering::SeqCst);
147
148 /*
149 first, check if the dma has wrapped at all if it's after end
150 or more than once if it's before start
151
152 this is in a critical section to try to reduce mushy behavior.
153 it's not ideal but it's the best we can do
154
155 then, get the current position of of the dma write and check
156 if it's inside data we could have copied
157 */
158 let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count()));
159 if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 {
160 Err(OverrunError)
161 } else {
162 self.start = (self.start + len) % self.cap();
163
164 Ok((len, self.cap() - self.start))
165 }
166 } else if self.start + buf.len() < self.cap() {
167 // The available, unread portion in the ring buffer DOES wrap
168 // The DMA writer has wrapped since we last read and is currently
169 // writing (or the next byte added will be) in the beginning of the ring buffer.
170
171 // The provided read buffer is not large enough to include all elements from the tail of the dma buffer.
172
173 // Copy out from the dma buffer
174 let len = self.copy_to(buf, self.start..self.cap());
175
176 compiler_fence(Ordering::SeqCst);
177
178 /*
179 first, check if the dma has wrapped around more than once
180
181 then, get the current position of of the dma write and check
182 if it's inside data we could have copied
183 */
184 let pos = self.pos(dma);
185 if pos > self.start || pos < end || dma.get_complete_count() > 1 {
186 Err(OverrunError)
187 } else {
188 self.start = (self.start + len) % self.cap();
189
190 Ok((len, self.start + end))
191 }
192 } else {
193 // The available, unread portion in the ring buffer DOES wrap
194 // The DMA writer has wrapped since we last read and is currently
195 // writing (or the next byte added will be) in the beginning of the ring buffer.
196
197 // The provided read buffer is large enough to include all elements from the tail of the dma buffer,
198 // so the next read will not have any unread tail elements in the ring buffer.
199
200 // Copy out from the dma buffer
201 let tail = self.copy_to(buf, self.start..self.cap());
202 let head = self.copy_to(&mut buf[tail..], 0..end);
203
204 compiler_fence(Ordering::SeqCst);
205
206 /*
207 first, check if the dma has wrapped around more than once
208
209 then, get the current position of of the dma write and check
210 if it's inside data we could have copied
211 */
212 let pos = self.pos(dma);
213 if pos > self.start || pos < end || dma.reset_complete_count() > 1 {
214 Err(OverrunError)
215 } else {
216 self.start = head;
217 Ok((tail + head, self.cap() - self.start))
218 }
219 }
220 }
221 /// Copy from the dma buffer at `data_range` into `buf`
222 fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
223 // Limit the number of elements that can be copied
224 let length = usize::min(data_range.len(), buf.len());
225
226 // Copy from dma buffer into read buffer
227 // We need to do it like this instead of a simple copy_from_slice() because
228 // reading from a part of memory that may be simultaneously written to is unsafe
229 unsafe {
230 let dma_buf = self.dma_buf.as_ptr();
231
232 for i in 0..length {
233 buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize));
234 }
235 }
236
237 length
238 }
239}
240
241pub struct WritableDmaRingBuffer<'a, W: Word> {
242 pub(crate) dma_buf: &'a mut [W],
243 end: usize,
244}
245
246impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
247 pub fn new(dma_buf: &'a mut [W]) -> Self {
248 Self { dma_buf, end: 0 }
249 }
250
251 /// Reset the ring buffer to its initial state
252 pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
253 self.end = 0;
254 dma.reset_complete_count();
255 }
256
257 /// The capacity of the ringbuffer
258 pub const fn cap(&self) -> usize {
259 self.dma_buf.len()
260 }
261
262 /// The current position of the ringbuffer
263 fn pos(&self, dma: &mut impl DmaCtrl) -> usize {
264 self.cap() - dma.get_remaining_transfers()
265 }
266
267 /// Write elements directly to the buffer. This must be done before the DMA is started
268 /// or after the buffer has been cleared using `clear()`.
269 pub fn write_immediate(&mut self, buffer: &[W]) -> Result<(usize, usize), OverrunError> {
270 if self.end != 0 {
271 return Err(OverrunError);
272 }
273 let written = self.copy_from(buffer, 0..self.cap());
274 self.end = written % self.cap();
275 Ok((written, self.cap() - written))
276 }
277
278 /// Write an exact number of elements to the ringbuffer.
279 pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, OverrunError> {
280 let mut written_data = 0;
281 let buffer_len = buffer.len();
282
283 poll_fn(|cx| {
284 dma.set_waker(cx.waker());
285
286 compiler_fence(Ordering::SeqCst);
287
288 match self.write(dma, &buffer[written_data..buffer_len]) {
289 Ok((len, remaining)) => {
290 written_data += len;
291 if written_data == buffer_len {
292 Poll::Ready(Ok(remaining))
293 } else {
294 Poll::Pending
295 }
296 }
297 Err(e) => Poll::Ready(Err(e)),
298 }
299 })
300 .await
301 }
302
303 /// Write elements from the ring buffer
304 /// Return a tuple of the length written and the capacity remaining to be written in the buffer
305 pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> {
306 let start = self.pos(dma);
307 if start > self.end {
308 // The occupied portion in the ring buffer DOES wrap
309 let len = self.copy_from(buf, self.end..start);
310
311 compiler_fence(Ordering::SeqCst);
312
313 // Confirm that the DMA is not inside data we could have written
314 let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count()));
315 if (pos >= self.end && pos < start) || (complete_count > 0 && pos >= start) || complete_count > 1 {
316 Err(OverrunError)
317 } else {
318 self.end = (self.end + len) % self.cap();
319
320 Ok((len, self.cap() - (start - self.end)))
321 }
322 } else if start == self.end && dma.get_complete_count() == 0 {
323 Ok((0, 0))
324 } else if start <= self.end && self.end + buf.len() < self.cap() {
325 // The occupied portion in the ring buffer DOES NOT wrap
326 // and copying elements into the buffer WILL NOT cause it to
327
328 // Copy into the dma buffer
329 let len = self.copy_from(buf, self.end..self.cap());
330
331 compiler_fence(Ordering::SeqCst);
332
333 // Confirm that the DMA is not inside data we could have written
334 let pos = self.pos(dma);
335 if pos > self.end || pos < start || dma.get_complete_count() > 1 {
336 Err(OverrunError)
337 } else {
338 self.end = (self.end + len) % self.cap();
339
340 Ok((len, self.cap() - (self.end - start)))
341 }
342 } else {
343 // The occupied portion in the ring buffer DOES NOT wrap
344 // and copying elements into the buffer WILL cause it to
345
346 let tail = self.copy_from(buf, self.end..self.cap());
347 let head = self.copy_from(&buf[tail..], 0..start);
348
349 compiler_fence(Ordering::SeqCst);
350
351 // Confirm that the DMA is not inside data we could have written
352 let pos = self.pos(dma);
353 if pos > self.end || pos < start || dma.reset_complete_count() > 1 {
354 Err(OverrunError)
355 } else {
356 self.end = head;
357
358 Ok((tail + head, self.cap() - (start - self.end)))
359 }
360 }
361 }
362 /// Copy into the dma buffer at `data_range` from `buf`
363 fn copy_from(&mut self, buf: &[W], data_range: Range<usize>) -> usize {
364 // Limit the number of elements that can be copied
365 let length = usize::min(data_range.len(), buf.len());
366
367 // Copy into dma buffer from read buffer
368 // We need to do it like this instead of a simple copy_from_slice() because
369 // reading from a part of memory that may be simultaneously written to is unsafe
370 unsafe {
371 let dma_buf = self.dma_buf.as_mut_ptr();
372
373 for i in 0..length {
374 core::ptr::write_volatile(dma_buf.offset((data_range.start + i) as isize), buf[i]);
375 }
376 }
377
378 length
379 }
380}
381#[cfg(test)]
382mod tests {
383 use core::array;
384 use std::{cell, vec};
385
386 use super::*;
387
388 #[allow(dead_code)]
389 #[derive(PartialEq, Debug)]
390 enum TestCircularTransferRequest {
391 GetCompleteCount(usize),
392 ResetCompleteCount(usize),
393 PositionRequest(usize),
394 }
395
396 struct TestCircularTransfer {
397 len: usize,
398 requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
399 }
400
401 impl DmaCtrl for TestCircularTransfer {
402 fn get_remaining_transfers(&self) -> usize {
403 match self.requests.borrow_mut().pop().unwrap() {
404 TestCircularTransferRequest::PositionRequest(pos) => {
405 let len = self.len;
406
407 assert!(len >= pos);
408
409 len - pos
410 }
411 _ => unreachable!(),
412 }
413 }
414
415 fn get_complete_count(&self) -> usize {
416 match self.requests.borrow_mut().pop().unwrap() {
417 TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count,
418 _ => unreachable!(),
419 }
420 }
421
422 fn reset_complete_count(&mut self) -> usize {
423 match self.requests.get_mut().pop().unwrap() {
424 TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
425 _ => unreachable!(),
426 }
427 }
428
429 fn set_waker(&mut self, waker: &Waker) {}
430 }
431
432 impl TestCircularTransfer {
433 pub fn new(len: usize) -> Self {
434 Self {
435 requests: cell::RefCell::new(vec![]),
436 len,
437 }
438 }
439
440 pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
441 requests.reverse();
442 self.requests.replace(requests);
443 }
444 }
445
446 #[test]
447 fn empty_and_read_not_started() {
448 let mut dma_buf = [0u8; 16];
449 let ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
450
451 assert_eq!(0, ringbuf.start);
452 }
453
454 #[test]
455 fn can_read() {
456 let mut dma = TestCircularTransfer::new(16);
457
458 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
459 let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
460
461 assert_eq!(0, ringbuf.start);
462 assert_eq!(16, ringbuf.cap());
463
464 dma.setup(vec![
465 TestCircularTransferRequest::PositionRequest(8),
466 TestCircularTransferRequest::PositionRequest(10),
467 TestCircularTransferRequest::GetCompleteCount(0),
468 ]);
469 let mut buf = [0; 2];
470 assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
471 assert_eq!([0, 1], buf);
472 assert_eq!(2, ringbuf.start);
473
474 dma.setup(vec![
475 TestCircularTransferRequest::PositionRequest(10),
476 TestCircularTransferRequest::PositionRequest(12),
477 TestCircularTransferRequest::GetCompleteCount(0),
478 ]);
479 let mut buf = [0; 2];
480 assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
481 assert_eq!([2, 3], buf);
482 assert_eq!(4, ringbuf.start);
483
484 dma.setup(vec![
485 TestCircularTransferRequest::PositionRequest(12),
486 TestCircularTransferRequest::PositionRequest(14),
487 TestCircularTransferRequest::GetCompleteCount(0),
488 ]);
489 let mut buf = [0; 8];
490 assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0);
491 assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
492 assert_eq!(12, ringbuf.start);
493 }
494
495 #[test]
496 fn can_read_with_wrap() {
497 let mut dma = TestCircularTransfer::new(16);
498
499 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
500 let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
501
502 assert_eq!(0, ringbuf.start);
503 assert_eq!(16, ringbuf.cap());
504
505 /*
506 Read to close to the end of the buffer
507 */
508 dma.setup(vec![
509 TestCircularTransferRequest::PositionRequest(14),
510 TestCircularTransferRequest::PositionRequest(16),
511 TestCircularTransferRequest::GetCompleteCount(0),
512 ]);
513 let mut buf = [0; 14];
514 assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
515 assert_eq!(14, ringbuf.start);
516
517 /*
518 Now, read around the buffer
519 */
520 dma.setup(vec![
521 TestCircularTransferRequest::PositionRequest(6),
522 TestCircularTransferRequest::PositionRequest(8),
523 TestCircularTransferRequest::ResetCompleteCount(1),
524 ]);
525 let mut buf = [0; 6];
526 assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
527 assert_eq!(4, ringbuf.start);
528 }
529
530 #[test]
531 fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
532 let mut dma = TestCircularTransfer::new(16);
533
534 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
535 let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
536
537 assert_eq!(0, ringbuf.start);
538 assert_eq!(16, ringbuf.cap());
539
540 /*
541 Read to close to the end of the buffer
542 */
543 dma.setup(vec![
544 TestCircularTransferRequest::PositionRequest(14),
545 TestCircularTransferRequest::PositionRequest(16),
546 TestCircularTransferRequest::GetCompleteCount(0),
547 ]);
548 let mut buf = [0; 14];
549 assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
550 assert_eq!(14, ringbuf.start);
551
552 /*
553 Now, read to the end of the buffer
554 */
555 dma.setup(vec![
556 TestCircularTransferRequest::PositionRequest(6),
557 TestCircularTransferRequest::PositionRequest(8),
558 TestCircularTransferRequest::ResetCompleteCount(1),
559 ]);
560 let mut buf = [0; 2];
561 assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
562 assert_eq!(0, ringbuf.start);
563 }
564
565 #[test]
566 fn can_read_when_dma_writer_wraps_once_with_same_ndtr() {
567 let mut dma = TestCircularTransfer::new(16);
568
569 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
570 let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
571
572 assert_eq!(0, ringbuf.start);
573 assert_eq!(16, ringbuf.cap());
574
575 /*
576 Read to about the middle of the buffer
577 */
578 dma.setup(vec![
579 TestCircularTransferRequest::PositionRequest(6),
580 TestCircularTransferRequest::PositionRequest(6),
581 TestCircularTransferRequest::GetCompleteCount(0),
582 ]);
583 let mut buf = [0; 6];
584 assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
585 assert_eq!(6, ringbuf.start);
586
587 /*
588 Now, wrap the DMA controller around
589 */
590 dma.setup(vec![
591 TestCircularTransferRequest::PositionRequest(6),
592 TestCircularTransferRequest::GetCompleteCount(1),
593 TestCircularTransferRequest::PositionRequest(6),
594 TestCircularTransferRequest::GetCompleteCount(1),
595 ]);
596 let mut buf = [0; 6];
597 assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
598 assert_eq!(12, ringbuf.start);
599 }
600
601 #[test]
602 fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
603 let mut dma = TestCircularTransfer::new(16);
604
605 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
606 let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
607
608 assert_eq!(0, ringbuf.start);
609 assert_eq!(16, ringbuf.cap());
610
611 /*
612 Read a few bytes
613 */
614 dma.setup(vec![
615 TestCircularTransferRequest::PositionRequest(2),
616 TestCircularTransferRequest::PositionRequest(2),
617 TestCircularTransferRequest::GetCompleteCount(0),
618 ]);
619 let mut buf = [0; 6];
620 assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
621 assert_eq!(2, ringbuf.start);
622
623 /*
624 Now, overtake the reader
625 */
626 dma.setup(vec![
627 TestCircularTransferRequest::PositionRequest(4),
628 TestCircularTransferRequest::PositionRequest(6),
629 TestCircularTransferRequest::GetCompleteCount(1),
630 ]);
631 let mut buf = [0; 6];
632 assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
633 }
634
635 #[test]
636 fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
637 let mut dma = TestCircularTransfer::new(16);
638
639 let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
640 let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
641
642 assert_eq!(0, ringbuf.start);
643 assert_eq!(16, ringbuf.cap());
644
645 /*
646 Read to close to the end of the buffer
647 */
648 dma.setup(vec![
649 TestCircularTransferRequest::PositionRequest(14),
650 TestCircularTransferRequest::PositionRequest(16),
651 TestCircularTransferRequest::GetCompleteCount(0),
652 ]);
653 let mut buf = [0; 14];
654 assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
655 assert_eq!(14, ringbuf.start);
656
657 /*
658 Now, overtake the reader
659 */
660 dma.setup(vec![
661 TestCircularTransferRequest::PositionRequest(8),
662 TestCircularTransferRequest::PositionRequest(10),
663 TestCircularTransferRequest::ResetCompleteCount(2),
664 ]);
665 let mut buf = [0; 6];
666 assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
667 }
668}
diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs
new file mode 100644
index 000000000..10a9ff975
--- /dev/null
+++ b/embassy-stm32/src/dma/ringbuffer/mod.rs
@@ -0,0 +1,293 @@
1#![cfg_attr(gpdma, allow(unused))]
2
3use core::future::poll_fn;
4use core::task::{Poll, Waker};
5
6use crate::dma::word::Word;
7
8pub trait DmaCtrl {
9 /// Get the NDTR register value, i.e. the space left in the underlying
10 /// buffer until the dma writer wraps.
11 fn get_remaining_transfers(&self) -> usize;
12
13 /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
14 fn reset_complete_count(&mut self) -> usize;
15
16 /// Set the waker for a running poll_fn
17 fn set_waker(&mut self, waker: &Waker);
18}
19
20#[derive(Debug, PartialEq)]
21#[cfg_attr(feature = "defmt", derive(defmt::Format))]
22pub struct OverrunError;
23
24#[derive(Debug, Clone, Copy, Default)]
25struct DmaIndex {
26 completion_count: usize,
27 pos: usize,
28}
29
30fn pos(cap: usize, dma: &impl DmaCtrl) -> usize {
31 cap - dma.get_remaining_transfers()
32}
33
34impl DmaIndex {
35 fn reset(&mut self) {
36 self.pos = 0;
37 self.completion_count = 0;
38 }
39
40 fn as_index(&self, cap: usize, offset: usize) -> usize {
41 (self.pos + offset) % cap
42 }
43
44 fn dma_sync(&mut self, cap: usize, dma: &mut impl DmaCtrl) {
45 let fst_pos = pos(cap, dma);
46 let fst_count = dma.reset_complete_count();
47 let pos = pos(cap, dma);
48
49 let wrap_count = if pos >= fst_pos {
50 fst_count
51 } else {
52 fst_count + dma.reset_complete_count()
53 };
54
55 self.pos = pos;
56 self.completion_count += wrap_count;
57 }
58
59 fn advance(&mut self, cap: usize, steps: usize) {
60 let next = self.pos + steps;
61 self.completion_count += next / cap;
62 self.pos = next % cap;
63 }
64
65 fn normalize(lhs: &mut DmaIndex, rhs: &mut DmaIndex) {
66 let min_count = lhs.completion_count.min(rhs.completion_count);
67 lhs.completion_count -= min_count;
68 rhs.completion_count -= min_count;
69 }
70
71 fn diff(&mut self, cap: usize, rhs: &mut DmaIndex) -> isize {
72 Self::normalize(self, rhs);
73 (self.completion_count * cap + self.pos) as isize - (rhs.completion_count * cap + rhs.pos) as isize
74 }
75}
76
77pub struct ReadableDmaRingBuffer<'a, W: Word> {
78 dma_buf: &'a mut [W],
79 write_index: DmaIndex,
80 read_index: DmaIndex,
81}
82
83impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
84 /// Construct an empty buffer.
85 pub fn new(dma_buf: &'a mut [W]) -> Self {
86 Self {
87 dma_buf,
88 write_index: Default::default(),
89 read_index: Default::default(),
90 }
91 }
92
93 /// Reset the ring buffer to its initial state
94 pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
95 dma.reset_complete_count();
96 self.write_index.reset();
97 self.update_dma_index(dma);
98 self.read_index = self.write_index;
99 }
100
101 /// The capacity of the ringbuffer
102 pub const fn cap(&self) -> usize {
103 self.dma_buf.len()
104 }
105
106 /// Read elements from the ring buffer
107 /// Return a tuple of the length read and the length remaining in the buffer
108 /// If not all of the elements were read, then there will be some elements in the buffer remaining
109 /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
110 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
111 pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
112 let readable = self.margin(dma)?.min(buf.len());
113 for i in 0..readable {
114 buf[i] = self.read_buf(i);
115 }
116 let available = self.margin(dma)?;
117 self.read_index.advance(self.cap(), readable);
118 Ok((readable, available - readable))
119 }
120
121 /// Read an exact number of elements from the ringbuffer.
122 ///
123 /// Returns the remaining number of elements available for immediate reading.
124 /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
125 ///
126 /// Async/Wake Behavior:
127 /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
128 /// and when it wraps around. This means that when called with a buffer of length 'M', when this
129 /// ring buffer was created with a buffer of size 'N':
130 /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
131 /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
132 pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, OverrunError> {
133 let mut read_data = 0;
134 let buffer_len = buffer.len();
135
136 poll_fn(|cx| {
137 dma.set_waker(cx.waker());
138
139 match self.read(dma, &mut buffer[read_data..buffer_len]) {
140 Ok((len, remaining)) => {
141 read_data += len;
142 if read_data == buffer_len {
143 Poll::Ready(Ok(remaining))
144 } else {
145 Poll::Pending
146 }
147 }
148 Err(e) => Poll::Ready(Err(e)),
149 }
150 })
151 .await
152 }
153
154 fn update_dma_index(&mut self, dma: &mut impl DmaCtrl) {
155 self.write_index.dma_sync(self.cap(), dma)
156 }
157
158 fn read_buf(&self, offset: usize) -> W {
159 unsafe {
160 core::ptr::read_volatile(
161 self.dma_buf
162 .as_ptr()
163 .offset(self.read_index.as_index(self.cap(), offset) as isize),
164 )
165 }
166 }
167
168 /// Returns available dma samples
169 fn margin(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, OverrunError> {
170 self.update_dma_index(dma);
171
172 let diff: usize = self
173 .write_index
174 .diff(self.cap(), &mut self.read_index)
175 .try_into()
176 .unwrap();
177
178 if diff > self.cap() {
179 Err(OverrunError)
180 } else {
181 Ok(diff)
182 }
183 }
184}
185
186pub struct WritableDmaRingBuffer<'a, W: Word> {
187 dma_buf: &'a mut [W],
188 read_index: DmaIndex,
189 write_index: DmaIndex,
190}
191
192impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
193 /// Construct a ringbuffer filled with the given buffer data.
194 pub fn new(dma_buf: &'a mut [W]) -> Self {
195 let len = dma_buf.len();
196 Self {
197 dma_buf,
198 read_index: Default::default(),
199 write_index: DmaIndex {
200 completion_count: 0,
201 pos: len,
202 },
203 }
204 }
205
206 /// Reset the ring buffer to its initial state. The buffer after the reset will be full.
207 pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
208 dma.reset_complete_count();
209 self.read_index.reset();
210 self.update_dma_index(dma);
211 self.write_index = self.read_index;
212 self.write_index.advance(self.cap(), self.cap());
213 }
214
215 /// Get the capacity of the ringbuffer.
216 pub const fn cap(&self) -> usize {
217 self.dma_buf.len()
218 }
219
220 /// Append data to the ring buffer.
221 /// Returns a tuple of the data written and the remaining write capacity in the buffer.
222 pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> {
223 let writable = self.margin(dma)?.min(buf.len());
224 for i in 0..writable {
225 self.write_buf(i, buf[i]);
226 }
227 let available = self.margin(dma)?;
228 self.write_index.advance(self.cap(), writable);
229 Ok((writable, available - writable))
230 }
231
232 /// Write elements directly to the buffer.
233 pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
234 for (i, data) in buf.iter().enumerate() {
235 self.write_buf(i, *data)
236 }
237 let written = buf.len().min(self.cap());
238 Ok((written, self.cap() - written))
239 }
240
241 /// Write an exact number of elements to the ringbuffer.
242 pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, OverrunError> {
243 let mut written_data = 0;
244 let buffer_len = buffer.len();
245
246 poll_fn(|cx| {
247 dma.set_waker(cx.waker());
248
249 match self.write(dma, &buffer[written_data..buffer_len]) {
250 Ok((len, remaining)) => {
251 written_data += len;
252 if written_data == buffer_len {
253 Poll::Ready(Ok(remaining))
254 } else {
255 Poll::Pending
256 }
257 }
258 Err(e) => Poll::Ready(Err(e)),
259 }
260 })
261 .await
262 }
263
264 fn update_dma_index(&mut self, dma: &mut impl DmaCtrl) {
265 self.read_index.dma_sync(self.cap(), dma);
266 }
267
268 fn write_buf(&mut self, offset: usize, value: W) {
269 unsafe {
270 core::ptr::write_volatile(
271 self.dma_buf
272 .as_mut_ptr()
273 .offset(self.write_index.as_index(self.cap(), offset) as isize),
274 value,
275 )
276 }
277 }
278
279 fn margin(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, OverrunError> {
280 self.update_dma_index(dma);
281
282 let diff = self.write_index.diff(self.cap(), &mut self.read_index);
283
284 if diff < 0 {
285 Err(OverrunError)
286 } else {
287 Ok(self.cap().saturating_sub(diff as usize))
288 }
289 }
290}
291
292#[cfg(test)]
293mod tests;
diff --git a/embassy-stm32/src/dma/ringbuffer/tests/mod.rs b/embassy-stm32/src/dma/ringbuffer/tests/mod.rs
new file mode 100644
index 000000000..9768e1df8
--- /dev/null
+++ b/embassy-stm32/src/dma/ringbuffer/tests/mod.rs
@@ -0,0 +1,165 @@
1use std::{cell, vec};
2
3use super::*;
4
/// One scripted response for `TestCircularTransfer`: either the value a
/// `reset_complete_count()` call should return, or the transfer position a
/// `get_remaining_transfers()` call should report.
#[allow(dead_code)]
#[derive(PartialEq, Debug)]
enum TestCircularTransferRequest {
    ResetCompleteCount(usize),
    PositionRequest(usize),
}
11
/// Mock `DmaCtrl` whose answers are driven by a pre-loaded script of
/// `TestCircularTransferRequest`s (see `setup`).
struct TestCircularTransfer {
    // Total length of the simulated circular transfer, in elements.
    len: usize,
    // Scripted responses, stored reversed so `pop()` yields them in setup order.
    requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
}
16
17impl DmaCtrl for TestCircularTransfer {
18 fn get_remaining_transfers(&self) -> usize {
19 match self.requests.borrow_mut().pop().unwrap() {
20 TestCircularTransferRequest::PositionRequest(pos) => {
21 let len = self.len;
22
23 assert!(len >= pos);
24
25 len - pos
26 }
27 _ => unreachable!(),
28 }
29 }
30
31 fn reset_complete_count(&mut self) -> usize {
32 match self.requests.get_mut().pop().unwrap() {
33 TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
34 _ => unreachable!(),
35 }
36 }
37
38 fn set_waker(&mut self, _waker: &Waker) {}
39}
40
41impl TestCircularTransfer {
42 pub fn new(len: usize) -> Self {
43 Self {
44 requests: cell::RefCell::new(vec![]),
45 len,
46 }
47 }
48
49 pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
50 requests.reverse();
51 self.requests.replace(requests);
52 }
53}
54
55const CAP: usize = 16;
56
#[test]
fn dma_index_dma_sync_syncs_position_to_last_read_if_sync_takes_place_on_same_dma_cycle() {
    let mut dma = TestCircularTransfer::new(CAP);
    // Script (served in this order): first position probe returns 4, the
    // completion counter reports no wrap in between, second probe returns 7.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(4),
        TestCircularTransferRequest::ResetCompleteCount(0),
        TestCircularTransferRequest::PositionRequest(7),
    ]);
    let mut index = DmaIndex::default();
    index.dma_sync(CAP, &mut dma);
    // Both probes landed on the same DMA cycle, so the sync adopts the later
    // position and the completion count is unchanged.
    assert_eq!(index.completion_count, 0);
    assert_eq!(index.pos, 7);
}
70
#[test]
fn dma_index_dma_sync_updates_completion_count_properly_if_sync_takes_place_on_same_dma_cycle() {
    let mut dma = TestCircularTransfer::new(CAP);
    // Same-cycle probes (4 then 7), but the DMA reports 2 completed wraps.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(4),
        TestCircularTransferRequest::ResetCompleteCount(2),
        TestCircularTransferRequest::PositionRequest(7),
    ]);
    let mut index = DmaIndex::default();
    index.completion_count = 1;
    index.dma_sync(CAP, &mut dma);
    // The 2 reported wraps accumulate onto the pre-existing count of 1.
    assert_eq!(index.completion_count, 3);
    assert_eq!(index.pos, 7);
}
85
#[test]
fn dma_index_dma_sync_syncs_to_last_position_if_reads_occur_on_different_dma_cycles() {
    let mut dma = TestCircularTransfer::new(CAP);
    // The second probe (5) is *behind* the first (10) and one wrap was
    // reported in between, so the probes straddle a cycle boundary; the final
    // ResetCompleteCount(0) confirms no further wrap happened afterwards.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::ResetCompleteCount(1),
        TestCircularTransferRequest::PositionRequest(5),
        TestCircularTransferRequest::ResetCompleteCount(0),
    ]);
    let mut index = DmaIndex::default();
    index.dma_sync(CAP, &mut dma);
    assert_eq!(index.completion_count, 1);
    assert_eq!(index.pos, 5);
}
100
#[test]
fn dma_index_dma_sync_detects_new_cycle_if_later_position_is_less_than_first_and_first_completion_count_occurs_on_first_cycle(
) {
    let mut dma = TestCircularTransfer::new(CAP);
    // Position goes backwards (10 -> 5) and each completion-count read
    // reports one wrap, i.e. a wrap is already visible on the first read.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::ResetCompleteCount(1),
        TestCircularTransferRequest::PositionRequest(5),
        TestCircularTransferRequest::ResetCompleteCount(1),
    ]);
    let mut index = DmaIndex::default();
    index.completion_count = 1;
    index.dma_sync(CAP, &mut dma);
    // 1 (initial) + 1 + 1 (scripted wraps) = 3 total completions.
    assert_eq!(index.completion_count, 3);
    assert_eq!(index.pos, 5);
}
117
#[test]
fn dma_index_dma_sync_detects_new_cycle_if_later_position_is_less_than_first_and_first_completion_count_occurs_on_later_cycle(
) {
    let mut dma = TestCircularTransfer::new(CAP);
    // Position goes backwards (10 -> 5); this time both wraps are reported
    // by the first completion-count read, and the second read reports none.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::ResetCompleteCount(2),
        TestCircularTransferRequest::PositionRequest(5),
        TestCircularTransferRequest::ResetCompleteCount(0),
    ]);
    let mut index = DmaIndex::default();
    index.completion_count = 1;
    index.dma_sync(CAP, &mut dma);
    // 1 (initial) + 2 (scripted wraps) = 3 total completions.
    assert_eq!(index.completion_count, 3);
    assert_eq!(index.pos, 5);
}
134
#[test]
fn dma_index_as_index_returns_index_mod_cap_by_default() {
    // A fresh index sits at position 0, so `as_index` resolves an offset to
    // plain `offset % CAP`.
    let index = DmaIndex::default();
    for offset in [0, 1, 2, 3, 4, CAP, CAP + 1] {
        assert_eq!(index.as_index(CAP, offset), offset % CAP);
    }
}
146
#[test]
fn dma_index_advancing_increases_as_index() {
    let mut index = DmaIndex::default();
    assert_eq!(index.as_index(CAP, 0), 0);

    // Four single-element advances move the resolved index forward one by one.
    for expected in 1..=4 {
        index.advance(CAP, 1);
        assert_eq!(index.as_index(CAP, 0), expected);
    }

    // Advancing the remaining distance wraps back to the buffer start.
    index.advance(CAP, CAP - 4);
    assert_eq!(index.as_index(CAP, 0), 0);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 1);
}
164
165mod prop_test;
diff --git a/embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs
new file mode 100644
index 000000000..661fb1728
--- /dev/null
+++ b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs
@@ -0,0 +1,50 @@
1use std::task::Waker;
2
3use proptest::prop_oneof;
4use proptest::strategy::{self, BoxedStrategy, Strategy as _};
5use proptest_state_machine::{prop_state_machine, ReferenceStateMachine, StateMachineTest};
6
7use super::*;
8
9const CAP: usize = 128;
10
/// Mock DMA engine for the property tests: tracks a transfer position inside
/// a `CAP`-element circular buffer plus how many times it has wrapped.
#[derive(Debug, Default)]
struct DmaMock {
    // Current transfer position, always in `0..CAP`.
    pos: usize,
    // Completed cycles since the last `reset_complete_count()`.
    wraps: usize,
}
16
17impl DmaMock {
18 pub fn advance(&mut self, steps: usize) {
19 let next = self.pos + steps;
20 self.pos = next % CAP;
21 self.wraps += next / CAP;
22 }
23}
24
25impl DmaCtrl for DmaMock {
26 fn get_remaining_transfers(&self) -> usize {
27 CAP - self.pos
28 }
29
30 fn reset_complete_count(&mut self) -> usize {
31 core::mem::replace(&mut self.wraps, 0)
32 }
33
34 fn set_waker(&mut self, _waker: &Waker) {}
35}
36
/// Reference-model state shared by the reader and writer state machines:
/// an element count while the model is healthy, or a sticky `Failed` state
/// once an overrun has been provoked.
// NOTE(review): the exact meaning of the count differs between the reader
// and writer machines — see each module's `apply` for how it evolves.
#[derive(Debug, Clone)]
enum Status {
    Available(usize),
    Failed,
}
42
impl Status {
    /// Starts in the `Available` state with the given initial count.
    pub fn new(capacity: usize) -> Self {
        Self::Available(capacity)
    }
}
48
49mod reader;
50mod writer;
diff --git a/embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs
new file mode 100644
index 000000000..6555ebfb0
--- /dev/null
+++ b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs
@@ -0,0 +1,122 @@
1use core::fmt::Debug;
2
3use super::*;
4
/// Transitions exercised against the readable ring buffer.
#[derive(Debug, Clone)]
enum ReaderTransition {
    // The mock DMA produced this many more elements.
    Write(usize),
    // Reset the ring buffer.
    Clear,
    // Attempt to read at most this many elements.
    ReadUpTo(usize),
}
11
12struct ReaderSM;
13
14impl ReferenceStateMachine for ReaderSM {
15 type State = Status;
16 type Transition = ReaderTransition;
17
18 fn init_state() -> BoxedStrategy<Self::State> {
19 strategy::Just(Status::new(0)).boxed()
20 }
21
22 fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> {
23 prop_oneof![
24 (1..50_usize).prop_map(ReaderTransition::Write),
25 (1..50_usize).prop_map(ReaderTransition::ReadUpTo),
26 strategy::Just(ReaderTransition::Clear),
27 ]
28 .boxed()
29 }
30
31 fn apply(status: Self::State, transition: &Self::Transition) -> Self::State {
32 match (status, transition) {
33 (_, ReaderTransition::Clear) => Status::Available(0),
34 (Status::Available(x), ReaderTransition::Write(y)) => {
35 if x + y > CAP {
36 Status::Failed
37 } else {
38 Status::Available(x + y)
39 }
40 }
41 (Status::Available(x), ReaderTransition::ReadUpTo(y)) => Status::Available(x.saturating_sub(*y)),
42 (Status::Failed, _) => Status::Failed,
43 }
44 }
45}
46
/// System under test: a readable ring buffer (consumer) fed by a mock DMA
/// (producer), plus the reference status observed before the current step.
struct ReaderSut {
    status: Status,
    // Raw, leaked backing storage; reclaimed in `teardown`.
    buffer: *mut [u8],
    producer: DmaMock,
    consumer: ReadableDmaRingBuffer<'static, u8>,
}
53
54impl Debug for ReaderSut {
55 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
56 <DmaMock as Debug>::fmt(&self.producer, f)
57 }
58}
59
60struct ReaderTest;
61
impl StateMachineTest for ReaderTest {
    type SystemUnderTest = ReaderSut;
    type Reference = ReaderSM;

    fn init_test(ref_status: &<Self::Reference as ReferenceStateMachine>::State) -> Self::SystemUnderTest {
        // Leak the backing array into a raw pointer so the ring buffer can
        // hold a 'static mutable reference while the test retains the pointer
        // for cleanup.
        let buffer = Box::into_raw(Box::new([0; CAP]));
        ReaderSut {
            status: ref_status.clone(),
            buffer,
            producer: DmaMock::default(),
            // SAFETY: `buffer` points at a live, uniquely-owned allocation
            // that is only reclaimed in `teardown`.
            consumer: ReadableDmaRingBuffer::new(unsafe { &mut *buffer }),
        }
    }

    fn teardown(state: Self::SystemUnderTest) {
        // SAFETY: reclaims the allocation leaked in `init_test`; the only
        // other user of the pointer (`state.consumer`) is dropped with `state`.
        unsafe {
            let _ = Box::from_raw(state.buffer);
        };
    }

    fn apply(
        mut sut: Self::SystemUnderTest,
        ref_state: &<Self::Reference as ReferenceStateMachine>::State,
        transition: <Self::Reference as ReferenceStateMachine>::Transition,
    ) -> Self::SystemUnderTest {
        match transition {
            // A "write" is simulated by advancing the mock DMA position.
            ReaderTransition::Write(x) => sut.producer.advance(x),
            ReaderTransition::Clear => {
                sut.consumer.clear(&mut sut.producer);
            }
            ReaderTransition::ReadUpTo(x) => {
                // `sut.status` is the reference state from *before* this step,
                // so it predicts how many elements should be readable.
                let status = sut.status;
                let ReaderSut {
                    ref mut producer,
                    ref mut consumer,
                    ..
                } = sut;
                let mut buf = vec![0; x];
                let res = consumer.read(producer, &mut buf);
                match status {
                    Status::Available(n) => {
                        // The read must return exactly min(requested, available).
                        let readable = x.min(n);

                        assert_eq!(res.unwrap().0, readable);
                    }
                    // An overrun in the model must surface as a read error.
                    Status::Failed => assert!(res.is_err()),
                }
            }
        }

        // Carry the post-transition reference state into the next step.
        ReaderSut {
            status: ref_state.clone(),
            ..sut
        }
    }
}
118
prop_state_machine! {
    // Run 1..20 sequential transitions per case against the reference model.
    #[test]
    fn reader_state_test(sequential 1..20 => ReaderTest);
}
diff --git a/embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs
new file mode 100644
index 000000000..15f54c672
--- /dev/null
+++ b/embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs
@@ -0,0 +1,121 @@
1use core::fmt::Debug;
2
3use super::*;
4
/// Transitions exercised against the writable ring buffer.
#[derive(Debug, Clone)]
enum WriterTransition {
    // The mock DMA consumed this many elements.
    Read(usize),
    // Attempt to write at most this many elements.
    WriteUpTo(usize),
    // Reset the ring buffer.
    Clear,
}
11
12struct WriterSM;
13
14impl ReferenceStateMachine for WriterSM {
15 type State = Status;
16 type Transition = WriterTransition;
17
18 fn init_state() -> BoxedStrategy<Self::State> {
19 strategy::Just(Status::new(CAP)).boxed()
20 }
21
22 fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> {
23 prop_oneof![
24 (1..50_usize).prop_map(WriterTransition::Read),
25 (1..50_usize).prop_map(WriterTransition::WriteUpTo),
26 strategy::Just(WriterTransition::Clear),
27 ]
28 .boxed()
29 }
30
31 fn apply(status: Self::State, transition: &Self::Transition) -> Self::State {
32 match (status, transition) {
33 (_, WriterTransition::Clear) => Status::Available(CAP),
34 (Status::Available(x), WriterTransition::Read(y)) => {
35 if x < *y {
36 Status::Failed
37 } else {
38 Status::Available(x - y)
39 }
40 }
41 (Status::Available(x), WriterTransition::WriteUpTo(y)) => Status::Available((x + *y).min(CAP)),
42 (Status::Failed, _) => Status::Failed,
43 }
44 }
45}
46
/// System under test: a writable ring buffer (producer) drained by a mock
/// DMA (consumer), plus the reference status observed before the current step.
struct WriterSut {
    status: Status,
    // Raw, leaked backing storage; reclaimed in `teardown`.
    buffer: *mut [u8],
    producer: WritableDmaRingBuffer<'static, u8>,
    consumer: DmaMock,
}
53
54impl Debug for WriterSut {
55 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
56 <DmaMock as Debug>::fmt(&self.consumer, f)
57 }
58}
59
60struct WriterTest;
61
impl StateMachineTest for WriterTest {
    type SystemUnderTest = WriterSut;
    type Reference = WriterSM;

    fn init_test(ref_status: &<Self::Reference as ReferenceStateMachine>::State) -> Self::SystemUnderTest {
        // Leak the backing array into a raw pointer so the ring buffer can
        // hold a 'static mutable reference while the test retains the pointer
        // for cleanup.
        let buffer = Box::into_raw(Box::new([0; CAP]));
        WriterSut {
            status: ref_status.clone(),
            buffer,
            // SAFETY: `buffer` points at a live, uniquely-owned allocation
            // that is only reclaimed in `teardown`.
            producer: WritableDmaRingBuffer::new(unsafe { &mut *buffer }),
            consumer: DmaMock::default(),
        }
    }

    fn teardown(state: Self::SystemUnderTest) {
        // SAFETY: reclaims the allocation leaked in `init_test`; the only
        // other user of the pointer (`state.producer`) is dropped with `state`.
        unsafe {
            let _ = Box::from_raw(state.buffer);
        };
    }

    fn apply(
        mut sut: Self::SystemUnderTest,
        ref_status: &<Self::Reference as ReferenceStateMachine>::State,
        transition: <Self::Reference as ReferenceStateMachine>::Transition,
    ) -> Self::SystemUnderTest {
        match transition {
            // A "read" is simulated by advancing the mock DMA consume position.
            WriterTransition::Read(x) => sut.consumer.advance(x),
            WriterTransition::Clear => {
                sut.producer.clear(&mut sut.consumer);
            }
            WriterTransition::WriteUpTo(x) => {
                // `sut.status` is the reference state from *before* this step.
                let status = sut.status;
                let WriterSut {
                    ref mut producer,
                    ref mut consumer,
                    ..
                } = sut;
                let mut buf = vec![0; x];
                let res = producer.write(consumer, &mut buf);
                match status {
                    Status::Available(n) => {
                        // The model predicts `CAP - n` writable elements
                        // (with `n` clamped to `CAP` to avoid underflow).
                        let writable = x.min(CAP - n.min(CAP));
                        assert_eq!(res.unwrap().0, writable);
                    }
                    // An overrun in the model must surface as a write error.
                    Status::Failed => assert!(res.is_err()),
                }
            }
        }

        // Carry the post-transition reference state into the next step.
        WriterSut {
            status: ref_status.clone(),
            ..sut
        }
    }
}
117
prop_state_machine! {
    // Run 1..20 sequential transitions per case against the reference model.
    #[test]
    fn writer_state_test(sequential 1..20 => WriterTest);
}