diff options
| author | xoviat <[email protected]> | 2023-07-30 09:18:33 -0500 |
|---|---|---|
| committer | xoviat <[email protected]> | 2023-07-30 09:18:33 -0500 |
| commit | 603c4cb4fa5f3dc2d95c5e47f13149beaa227bf5 (patch) | |
| tree | a7ba035a832b9f6191a5c3fb39ae5b63ce77e761 /embassy-stm32 | |
| parent | 8064f4bfe07c407884d412ce4820153e607c68b4 (diff) | |
stm32/dma: complete initial ringbuf impl.
Diffstat (limited to 'embassy-stm32')
| -rw-r--r-- | embassy-stm32/src/dma/dma.rs | 171 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer.rs | 50 |
2 files changed, 200 insertions, 21 deletions
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs index 9157f72db..3c5c79fd8 100644 --- a/embassy-stm32/src/dma/dma.rs +++ b/embassy-stm32/src/dma/dma.rs | |||
| @@ -7,7 +7,7 @@ use core::task::{Context, Poll, Waker}; | |||
| 7 | use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; | 7 | use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; |
| 8 | use embassy_sync::waitqueue::AtomicWaker; | 8 | use embassy_sync::waitqueue::AtomicWaker; |
| 9 | 9 | ||
| 10 | use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer}; | 10 | use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer}; |
| 11 | use super::word::{Word, WordSize}; | 11 | use super::word::{Word, WordSize}; |
| 12 | use super::Dir; | 12 | use super::Dir; |
| 13 | use crate::_generated::DMA_CHANNEL_COUNT; | 13 | use crate::_generated::DMA_CHANNEL_COUNT; |
| @@ -806,3 +806,172 @@ impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> { | |||
| 806 | fence(Ordering::SeqCst); | 806 | fence(Ordering::SeqCst); |
| 807 | } | 807 | } |
| 808 | } | 808 | } |
| 809 | |||
| 810 | pub struct WritableRingBuffer<'a, C: Channel, W: Word> { | ||
| 811 | cr: regs::Cr, | ||
| 812 | channel: PeripheralRef<'a, C>, | ||
| 813 | ringbuf: WritableDmaRingBuffer<'a, W>, | ||
| 814 | } | ||
| 815 | |||
| 816 | impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { | ||
| 817 | pub unsafe fn new_read( | ||
| 818 | channel: impl Peripheral<P = C> + 'a, | ||
| 819 | _request: Request, | ||
| 820 | peri_addr: *mut W, | ||
| 821 | buffer: &'a mut [W], | ||
| 822 | options: TransferOptions, | ||
| 823 | ) -> Self { | ||
| 824 | into_ref!(channel); | ||
| 825 | |||
| 826 | let len = buffer.len(); | ||
| 827 | assert!(len > 0 && len <= 0xFFFF); | ||
| 828 | |||
| 829 | let dir = Dir::MemoryToPeripheral; | ||
| 830 | let data_size = W::size(); | ||
| 831 | |||
| 832 | let channel_number = channel.num(); | ||
| 833 | let dma = channel.regs(); | ||
| 834 | |||
| 835 | // "Preceding reads and writes cannot be moved past subsequent writes." | ||
| 836 | fence(Ordering::SeqCst); | ||
| 837 | |||
| 838 | let mut w = regs::Cr(0); | ||
| 839 | w.set_dir(dir.into()); | ||
| 840 | w.set_msize(data_size.into()); | ||
| 841 | w.set_psize(data_size.into()); | ||
| 842 | w.set_pl(vals::Pl::VERYHIGH); | ||
| 843 | w.set_minc(vals::Inc::INCREMENTED); | ||
| 844 | w.set_pinc(vals::Inc::FIXED); | ||
| 845 | w.set_teie(true); | ||
| 846 | w.set_htie(options.half_transfer_ir); | ||
| 847 | w.set_tcie(true); | ||
| 848 | w.set_circ(vals::Circ::ENABLED); | ||
| 849 | #[cfg(dma_v1)] | ||
| 850 | w.set_trbuff(true); | ||
| 851 | #[cfg(dma_v2)] | ||
| 852 | w.set_chsel(_request); | ||
| 853 | w.set_pburst(options.pburst.into()); | ||
| 854 | w.set_mburst(options.mburst.into()); | ||
| 855 | w.set_pfctrl(options.flow_ctrl.into()); | ||
| 856 | w.set_en(true); | ||
| 857 | |||
| 858 | let buffer_ptr = buffer.as_mut_ptr(); | ||
| 859 | let mut this = Self { | ||
| 860 | channel, | ||
| 861 | cr: w, | ||
| 862 | ringbuf: WritableDmaRingBuffer::new(buffer), | ||
| 863 | }; | ||
| 864 | this.clear_irqs(); | ||
| 865 | |||
| 866 | #[cfg(dmamux)] | ||
| 867 | super::dmamux::configure_dmamux(&mut *this.channel, _request); | ||
| 868 | |||
| 869 | let ch = dma.st(channel_number); | ||
| 870 | ch.par().write_value(peri_addr as u32); | ||
| 871 | ch.m0ar().write_value(buffer_ptr as u32); | ||
| 872 | ch.ndtr().write_value(regs::Ndtr(len as _)); | ||
| 873 | ch.fcr().write(|w| { | ||
| 874 | if let Some(fth) = options.fifo_threshold { | ||
| 875 | // FIFO mode | ||
| 876 | w.set_dmdis(vals::Dmdis::DISABLED); | ||
| 877 | w.set_fth(fth.into()); | ||
| 878 | } else { | ||
| 879 | // Direct mode | ||
| 880 | w.set_dmdis(vals::Dmdis::ENABLED); | ||
| 881 | } | ||
| 882 | }); | ||
| 883 | |||
| 884 | this | ||
| 885 | } | ||
| 886 | |||
| 887 | pub fn start(&mut self) { | ||
| 888 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 889 | ch.cr().write_value(self.cr); | ||
| 890 | } | ||
| 891 | |||
| 892 | pub fn clear(&mut self) { | ||
| 893 | self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow())); | ||
| 894 | } | ||
| 895 | |||
| 896 | /// Write elements to the ring buffer | ||
| 897 | /// Return a tuple of the length written and the length remaining in the buffer | ||
| 898 | pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { | ||
| 899 | self.ringbuf.write(DmaCtrlImpl(self.channel.reborrow()), buf) | ||
| 900 | } | ||
| 901 | |||
| 902 | /// Write an exact number of elements to the ringbuffer. | ||
| 903 | pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> { | ||
| 904 | use core::future::poll_fn; | ||
| 905 | use core::sync::atomic::compiler_fence; | ||
| 906 | |||
| 907 | let mut written_data = 0; | ||
| 908 | let buffer_len = buffer.len(); | ||
| 909 | |||
| 910 | poll_fn(|cx| { | ||
| 911 | self.set_waker(cx.waker()); | ||
| 912 | |||
| 913 | compiler_fence(Ordering::SeqCst); | ||
| 914 | |||
| 915 | match self.write(&buffer[written_data..buffer_len]) { | ||
| 916 | Ok((len, remaining)) => { | ||
| 917 | written_data += len; | ||
| 918 | if written_data == buffer_len { | ||
| 919 | Poll::Ready(Ok(remaining)) | ||
| 920 | } else { | ||
| 921 | Poll::Pending | ||
| 922 | } | ||
| 923 | } | ||
| 924 | Err(e) => Poll::Ready(Err(e)), | ||
| 925 | } | ||
| 926 | }) | ||
| 927 | .await | ||
| 928 | } | ||
| 929 | |||
| 930 | // The capacity of the ringbuffer | ||
| 931 | pub fn cap(&self) -> usize { | ||
| 932 | self.ringbuf.cap() | ||
| 933 | } | ||
| 934 | |||
| 935 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 936 | STATE.ch_wakers[self.channel.index()].register(waker); | ||
| 937 | } | ||
| 938 | |||
| 939 | fn clear_irqs(&mut self) { | ||
| 940 | let channel_number = self.channel.num(); | ||
| 941 | let dma = self.channel.regs(); | ||
| 942 | let isrn = channel_number / 4; | ||
| 943 | let isrbit = channel_number % 4; | ||
| 944 | |||
| 945 | dma.ifcr(isrn).write(|w| { | ||
| 946 | w.set_htif(isrbit, true); | ||
| 947 | w.set_tcif(isrbit, true); | ||
| 948 | w.set_teif(isrbit, true); | ||
| 949 | }); | ||
| 950 | } | ||
| 951 | |||
| 952 | pub fn request_stop(&mut self) { | ||
| 953 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 954 | |||
| 955 | // Disable the channel. Keep the IEs enabled so the irqs still fire. | ||
| 956 | ch.cr().write(|w| { | ||
| 957 | w.set_teie(true); | ||
| 958 | w.set_htie(true); | ||
| 959 | w.set_tcie(true); | ||
| 960 | }); | ||
| 961 | } | ||
| 962 | |||
| 963 | pub fn is_running(&mut self) -> bool { | ||
| 964 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 965 | ch.cr().read().en() | ||
| 966 | } | ||
| 967 | } | ||
| 968 | |||
| 969 | impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> { | ||
| 970 | fn drop(&mut self) { | ||
| 971 | self.request_stop(); | ||
| 972 | while self.is_running() {} | ||
| 973 | |||
| 974 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 975 | fence(Ordering::SeqCst); | ||
| 976 | } | ||
| 977 | } | ||
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs index db3672989..e9d330219 100644 --- a/embassy-stm32/src/dma/ringbuffer.rs +++ b/embassy-stm32/src/dma/ringbuffer.rs | |||
| @@ -228,47 +228,57 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { | |||
| 228 | /// Return a tuple of the length written and the capacity remaining to be written in the buffer | 228 | /// Return a tuple of the length written and the capacity remaining to be written in the buffer |
| 229 | pub fn write(&mut self, mut dma: impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> { | 229 | pub fn write(&mut self, mut dma: impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> { |
| 230 | let start = self.pos(dma.get_remaining_transfers()); | 230 | let start = self.pos(dma.get_remaining_transfers()); |
| 231 | if start < self.end && self.end + buf.len() < self.cap() { | 231 | if start > self.end { |
| 232 | // The available, unwritten portion in the ring buffer DOES NOT wrap | 232 | // The occupied portion in the ring buffer DOES wrap |
| 233 | // and copying elements into the buffer will not cause it to | 233 | let len = self.copy_from(buf, self.end..start); |
| 234 | |||
| 235 | // Copy into the dma buffer | ||
| 236 | let len = self.copy_from(buf, self.end..self.cap()); | ||
| 237 | 234 | ||
| 238 | compiler_fence(Ordering::SeqCst); | 235 | compiler_fence(Ordering::SeqCst); |
| 239 | 236 | ||
| 240 | // Confirm that the DMA is not inside data we could have written | 237 | // Confirm that the DMA is not inside data we could have written |
| 241 | let pos = self.pos(dma.get_remaining_transfers()); | 238 | let pos = self.pos(dma.get_remaining_transfers()); |
| 242 | if pos > self.end || pos <= start || dma.get_complete_count() > 1 { | 239 | if (pos > self.end && pos <= start) || dma.get_complete_count() > 1 { |
| 243 | Err(OverrunError) | 240 | Err(OverrunError) |
| 244 | } else { | 241 | } else { |
| 245 | self.end = (self.end + len) % self.cap(); | 242 | self.end = (self.end + len) % self.cap(); |
| 246 | 243 | ||
| 247 | Ok((len, self.cap() - (self.end - start))) | 244 | Ok((len, self.cap() - (start - self.end))) |
| 248 | } | 245 | } |
| 249 | } else if self.end > start { | 246 | } else if start <= self.end && self.end + buf.len() < self.cap() { |
| 250 | // The available, unwritten portion in the ring buffer DOES wrap | 247 | // The occupied portion in the ring buffer DOES NOT wrap |
| 251 | let len = self.copy_from(buf, self.end..start); | 248 | // and copying elements into the buffer WILL NOT cause it to |
| 249 | |||
| 250 | // Copy into the dma buffer | ||
| 251 | let len = self.copy_from(buf, self.end..self.cap()); | ||
| 252 | 252 | ||
| 253 | compiler_fence(Ordering::SeqCst); | 253 | compiler_fence(Ordering::SeqCst); |
| 254 | 254 | ||
| 255 | dma.get_complete_count(); | 255 | // Confirm that the DMA is not inside data we could have written |
| 256 | let pos = self.pos(dma.get_remaining_transfers()); | ||
| 257 | if pos > self.end || pos < start || dma.get_complete_count() > 1 { | ||
| 258 | Err(OverrunError) | ||
| 259 | } else { | ||
| 260 | self.end = (self.end + len) % self.cap(); | ||
| 256 | 261 | ||
| 257 | todo!() | 262 | Ok((len, self.cap() - (self.end - start))) |
| 258 | } else if start < self.end && self.end + buf.len() >= self.cap() { | 263 | } |
| 259 | // The available, unwritten portion in the ring buffer DOES NOT wrap | 264 | } else { |
| 260 | // and copying elements into the buffer will cause it to | 265 | // The occupied portion in the ring buffer DOES NOT wrap |
| 266 | // and copying elements into the buffer WILL cause it to | ||
| 261 | 267 | ||
| 262 | let tail = self.copy_from(buf, self.end..self.cap()); | 268 | let tail = self.copy_from(buf, self.end..self.cap()); |
| 263 | let head = self.copy_from(&buf[tail..], 0..start); | 269 | let head = self.copy_from(&buf[tail..], 0..start); |
| 264 | 270 | ||
| 265 | compiler_fence(Ordering::SeqCst); | 271 | compiler_fence(Ordering::SeqCst); |
| 266 | 272 | ||
| 267 | dma.reset_complete_count(); | 273 | // Confirm that the DMA is not inside data we could have written |
| 274 | let pos = self.pos(dma.get_remaining_transfers()); | ||
| 275 | if pos > self.end || pos < start || dma.reset_complete_count() > 1 { | ||
| 276 | Err(OverrunError) | ||
| 277 | } else { | ||
| 278 | self.end = head; | ||
| 268 | 279 | ||
| 269 | todo!() | 280 | Ok((tail + head, self.cap() - (start - self.end))) |
| 270 | } else { | 281 | } |
| 271 | todo!() | ||
| 272 | } | 282 | } |
| 273 | } | 283 | } |
| 274 | /// Copy into the dma buffer at `data_range` from `buf` | 284 | /// Copy into the dma buffer at `data_range` from `buf` |
