diff options
| author | xoviat <[email protected]> | 2023-07-30 09:18:33 -0500 |
|---|---|---|
| committer | xoviat <[email protected]> | 2023-07-30 09:18:33 -0500 |
| commit | 603c4cb4fa5f3dc2d95c5e47f13149beaa227bf5 (patch) | |
| tree | a7ba035a832b9f6191a5c3fb39ae5b63ce77e761 /embassy-stm32/src/dma/ringbuffer.rs | |
| parent | 8064f4bfe07c407884d412ce4820153e607c68b4 (diff) | |
stm32/dma: complete initial ringbuf impl.
Diffstat (limited to 'embassy-stm32/src/dma/ringbuffer.rs')
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer.rs | 50 |
1 file changed, 30 insertions(+), 20 deletions(-)
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs index db3672989..e9d330219 100644 --- a/embassy-stm32/src/dma/ringbuffer.rs +++ b/embassy-stm32/src/dma/ringbuffer.rs | |||
| @@ -228,47 +228,57 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { | |||
| 228 | /// Return a tuple of the length written and the capacity remaining to be written in the buffer | 228 | /// Return a tuple of the length written and the capacity remaining to be written in the buffer |
| 229 | pub fn write(&mut self, mut dma: impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> { | 229 | pub fn write(&mut self, mut dma: impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> { |
| 230 | let start = self.pos(dma.get_remaining_transfers()); | 230 | let start = self.pos(dma.get_remaining_transfers()); |
| 231 | if start < self.end && self.end + buf.len() < self.cap() { | 231 | if start > self.end { |
| 232 | // The available, unwritten portion in the ring buffer DOES NOT wrap | 232 | // The occupied portion in the ring buffer DOES wrap |
| 233 | // and copying elements into the buffer will not cause it to | 233 | let len = self.copy_from(buf, self.end..start); |
| 234 | |||
| 235 | // Copy into the dma buffer | ||
| 236 | let len = self.copy_from(buf, self.end..self.cap()); | ||
| 237 | 234 | ||
| 238 | compiler_fence(Ordering::SeqCst); | 235 | compiler_fence(Ordering::SeqCst); |
| 239 | 236 | ||
| 240 | // Confirm that the DMA is not inside data we could have written | 237 | // Confirm that the DMA is not inside data we could have written |
| 241 | let pos = self.pos(dma.get_remaining_transfers()); | 238 | let pos = self.pos(dma.get_remaining_transfers()); |
| 242 | if pos > self.end || pos <= start || dma.get_complete_count() > 1 { | 239 | if (pos > self.end && pos <= start) || dma.get_complete_count() > 1 { |
| 243 | Err(OverrunError) | 240 | Err(OverrunError) |
| 244 | } else { | 241 | } else { |
| 245 | self.end = (self.end + len) % self.cap(); | 242 | self.end = (self.end + len) % self.cap(); |
| 246 | 243 | ||
| 247 | Ok((len, self.cap() - (self.end - start))) | 244 | Ok((len, self.cap() - (start - self.end))) |
| 248 | } | 245 | } |
| 249 | } else if self.end > start { | 246 | } else if start <= self.end && self.end + buf.len() < self.cap() { |
| 250 | // The available, unwritten portion in the ring buffer DOES wrap | 247 | // The occupied portion in the ring buffer DOES NOT wrap |
| 251 | let len = self.copy_from(buf, self.end..start); | 248 | // and copying elements into the buffer WILL NOT cause it to |
| 249 | |||
| 250 | // Copy into the dma buffer | ||
| 251 | let len = self.copy_from(buf, self.end..self.cap()); | ||
| 252 | 252 | ||
| 253 | compiler_fence(Ordering::SeqCst); | 253 | compiler_fence(Ordering::SeqCst); |
| 254 | 254 | ||
| 255 | dma.get_complete_count(); | 255 | // Confirm that the DMA is not inside data we could have written |
| 256 | let pos = self.pos(dma.get_remaining_transfers()); | ||
| 257 | if pos > self.end || pos < start || dma.get_complete_count() > 1 { | ||
| 258 | Err(OverrunError) | ||
| 259 | } else { | ||
| 260 | self.end = (self.end + len) % self.cap(); | ||
| 256 | 261 | ||
| 257 | todo!() | 262 | Ok((len, self.cap() - (self.end - start))) |
| 258 | } else if start < self.end && self.end + buf.len() >= self.cap() { | 263 | } |
| 259 | // The available, unwritten portion in the ring buffer DOES NOT wrap | 264 | } else { |
| 260 | // and copying elements into the buffer will cause it to | 265 | // The occupied portion in the ring buffer DOES NOT wrap |
| 266 | // and copying elements into the buffer WILL cause it to | ||
| 261 | 267 | ||
| 262 | let tail = self.copy_from(buf, self.end..self.cap()); | 268 | let tail = self.copy_from(buf, self.end..self.cap()); |
| 263 | let head = self.copy_from(&buf[tail..], 0..start); | 269 | let head = self.copy_from(&buf[tail..], 0..start); |
| 264 | 270 | ||
| 265 | compiler_fence(Ordering::SeqCst); | 271 | compiler_fence(Ordering::SeqCst); |
| 266 | 272 | ||
| 267 | dma.reset_complete_count(); | 273 | // Confirm that the DMA is not inside data we could have written |
| 274 | let pos = self.pos(dma.get_remaining_transfers()); | ||
| 275 | if pos > self.end || pos < start || dma.reset_complete_count() > 1 { | ||
| 276 | Err(OverrunError) | ||
| 277 | } else { | ||
| 278 | self.end = head; | ||
| 268 | 279 | ||
| 269 | todo!() | 280 | Ok((tail + head, self.cap() - (start - self.end))) |
| 270 | } else { | 281 | } |
| 271 | todo!() | ||
| 272 | } | 282 | } |
| 273 | } | 283 | } |
| 274 | /// Copy into the dma buffer at `data_range` from `buf` | 284 | /// Copy into the dma buffer at `data_range` from `buf` |
