aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--embassy-stm32/src/dma/ringbuffer.rs93
1 file changed, 93 insertions(+), 0 deletions(-)
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs
index 92be3334f..db3672989 100644
--- a/embassy-stm32/src/dma/ringbuffer.rs
+++ b/embassy-stm32/src/dma/ringbuffer.rs
@@ -197,6 +197,99 @@ impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
197 length 197 length
198 } 198 }
199} 199}
200
/// Ring buffer that the CPU writes into and a DMA peripheral drains
/// (presumably the TX direction — the CPU side of this type only ever
/// copies *into* `dma_buf`; confirm against the peripheral driver).
pub struct WritableDmaRingBuffer<'a, W: Word> {
    // Backing storage shared with the DMA engine.
    pub(crate) dma_buf: &'a mut [W],
    // Index one past the last element written by the CPU — the next write
    // starts here; advanced modulo `dma_buf.len()` by `write()`.
    end: usize,
}
205
impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
    /// Create a ring buffer over `dma_buf`, with the CPU write position at
    /// the start of the buffer.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self { dma_buf, end: 0 }
    }

    /// Reset the ring buffer to its initial state: rewind the CPU write
    /// position and clear the DMA controller's transfer-complete counter.
    pub fn clear(&mut self, mut dma: impl DmaCtrl) {
        self.end = 0;
        dma.reset_complete_count();
    }

    /// The capacity of the ringbuffer (total number of elements it can hold).
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// The current DMA position within the ring buffer, derived from the
    /// controller's remaining-transfer count.
    ///
    /// NOTE(review): assumes `remaining_transfers <= cap()`; otherwise the
    /// subtraction underflows (usize) — confirm the DmaCtrl contract.
    fn pos(&self, remaining_transfers: usize) -> usize {
        self.cap() - remaining_transfers
    }

    /// Write elements from `buf` into the ring buffer behind the DMA's
    /// current position.
    ///
    /// Return a tuple of the length written and the capacity remaining to be
    /// written in the buffer, or `Err(OverrunError)` if the DMA is detected
    /// inside the region just written.
    ///
    /// NOTE(review): work in progress — three of the four branches below
    /// still end in `todo!()` and will panic if reached.
    pub fn write(&mut self, mut dma: impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        // Snapshot of the DMA position before we start copying.
        let start = self.pos(dma.get_remaining_transfers());
        if start < self.end && self.end + buf.len() < self.cap() {
            // The available, unwritten portion in the ring buffer DOES NOT wrap
            // and copying elements into the buffer will not cause it to

            // Copy into the dma buffer
            let len = self.copy_from(buf, self.end..self.cap());

            // Keep the volatile copy above from being reordered past the
            // position re-check below.
            compiler_fence(Ordering::SeqCst);

            // Confirm that the DMA is not inside data we could have written:
            // it must not have advanced past our write region nor lapped the
            // buffer more than once since the last reset.
            let pos = self.pos(dma.get_remaining_transfers());
            if pos > self.end || pos <= start || dma.get_complete_count() > 1 {
                Err(OverrunError)
            } else {
                // Advance the write position past the freshly copied elements.
                self.end = (self.end + len) % self.cap();

                Ok((len, self.cap() - (self.end - start)))
            }
        } else if self.end > start {
            // The available, unwritten portion in the ring buffer DOES wrap
            //
            // NOTE(review): `self.end..start` is an inverted range here
            // (end > start), and `Range::len()` of an inverted range is 0 —
            // so this copies nothing. The wrapped free region is
            // `end..cap()` plus `0..start`; this branch looks unfinished.
            let len = self.copy_from(buf, self.end..start);

            compiler_fence(Ordering::SeqCst);

            // NOTE(review): result discarded — presumably a placeholder for
            // the overrun re-check; verify intent before completing.
            dma.get_complete_count();

            todo!()
        } else if start < self.end && self.end + buf.len() >= self.cap() {
            // The available, unwritten portion in the ring buffer DOES NOT wrap
            // and copying elements into the buffer will cause it to

            // Split copy: fill to the end of the buffer, then wrap to the
            // head up to the DMA position. NOTE(review): `head` is unused —
            // the branch ends in `todo!()`.
            let tail = self.copy_from(buf, self.end..self.cap());
            let head = self.copy_from(&buf[tail..], 0..start);

            compiler_fence(Ordering::SeqCst);

            dma.reset_complete_count();

            todo!()
        } else {
            todo!()
        }
    }
    /// Copy into the dma buffer at `data_range` from `buf`, returning the
    /// number of elements actually copied (limited by whichever of the two
    /// is shorter).
    fn copy_from(&mut self, buf: &[W], data_range: Range<usize>) -> usize {
        // Limit the number of elements that can be copied
        let length = usize::min(data_range.len(), buf.len());

        // Copy into dma buffer from read buffer
        // We need to do it like this instead of a simple copy_from_slice() because
        // reading from a part of memory that may be simultaneously written to is unsafe
        // SAFETY: every index written is < data_range.end, and callers pass
        // ranges bounded by `dma_buf.len()` (presumably — the bound is not
        // checked here; verify callers). Volatile writes avoid compiler
        // assumptions about memory the DMA may touch concurrently.
        unsafe {
            let dma_buf = self.dma_buf.as_mut_ptr();

            for i in 0..length {
                core::ptr::write_volatile(dma_buf.offset((data_range.start + i) as isize), buf[i]);
            }
        }

        length
    }
}
200#[cfg(test)] 293#[cfg(test)]
201mod tests { 294mod tests {
202 use core::array; 295 use core::array;