diff options
| author | xoviat <[email protected]> | 2023-07-31 22:57:30 +0000 |
|---|---|---|
| committer | GitHub <[email protected]> | 2023-07-31 22:57:30 +0000 |
| commit | a1fce1b554bea8cad7f87073748145aaeca634f8 (patch) | |
| tree | eb35732bd61e83443258286efb2ee77f1103fca5 | |
| parent | 4da97433173fcab5196e7c2a69c295edff3588eb (diff) | |
| parent | bbc8424a5b502187d62cb0ff607e1ac0a719c1c4 (diff) | |
Merge pull request #1714 from xoviat/dma
stm32/dma: add writable ringbuf
| -rw-r--r-- | embassy-stm32/src/dma/bdma.rs | 166 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/dma.rs | 181 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer.rs | 124 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/ringbuffered.rs | 6 |
4 files changed, 452 insertions, 25 deletions
diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs index d956047d5..60f4fbd09 100644 --- a/embassy-stm32/src/dma/bdma.rs +++ b/embassy-stm32/src/dma/bdma.rs | |||
| @@ -9,7 +9,7 @@ use atomic_polyfill::AtomicUsize; | |||
| 9 | use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; | 9 | use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; |
| 10 | use embassy_sync::waitqueue::AtomicWaker; | 10 | use embassy_sync::waitqueue::AtomicWaker; |
| 11 | 11 | ||
| 12 | use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError}; | 12 | use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer}; |
| 13 | use super::word::{Word, WordSize}; | 13 | use super::word::{Word, WordSize}; |
| 14 | use super::Dir; | 14 | use super::Dir; |
| 15 | use crate::_generated::BDMA_CHANNEL_COUNT; | 15 | use crate::_generated::BDMA_CHANNEL_COUNT; |
| @@ -395,13 +395,13 @@ impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { | |||
| 395 | } | 395 | } |
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | pub struct RingBuffer<'a, C: Channel, W: Word> { | 398 | pub struct ReadableRingBuffer<'a, C: Channel, W: Word> { |
| 399 | cr: regs::Cr, | 399 | cr: regs::Cr, |
| 400 | channel: PeripheralRef<'a, C>, | 400 | channel: PeripheralRef<'a, C>, |
| 401 | ringbuf: DmaRingBuffer<'a, W>, | 401 | ringbuf: ReadableDmaRingBuffer<'a, W>, |
| 402 | } | 402 | } |
| 403 | 403 | ||
| 404 | impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | 404 | impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> { |
| 405 | pub unsafe fn new_read( | 405 | pub unsafe fn new_read( |
| 406 | channel: impl Peripheral<P = C> + 'a, | 406 | channel: impl Peripheral<P = C> + 'a, |
| 407 | _request: Request, | 407 | _request: Request, |
| @@ -442,7 +442,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 442 | let mut this = Self { | 442 | let mut this = Self { |
| 443 | channel, | 443 | channel, |
| 444 | cr: w, | 444 | cr: w, |
| 445 | ringbuf: DmaRingBuffer::new(buffer), | 445 | ringbuf: ReadableDmaRingBuffer::new(buffer), |
| 446 | }; | 446 | }; |
| 447 | this.clear_irqs(); | 447 | this.clear_irqs(); |
| 448 | 448 | ||
| @@ -513,7 +513,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 513 | .await | 513 | .await |
| 514 | } | 514 | } |
| 515 | 515 | ||
| 516 | /// The capacity of the ringbuffer | 516 | /// The capacity of the ringbuffer. |
| 517 | pub fn cap(&self) -> usize { | 517 | pub fn cap(&self) -> usize { |
| 518 | self.ringbuf.cap() | 518 | self.ringbuf.cap() |
| 519 | } | 519 | } |
| @@ -550,7 +550,159 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 550 | } | 550 | } |
| 551 | } | 551 | } |
| 552 | 552 | ||
| 553 | impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | 553 | impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> { |
| 554 | fn drop(&mut self) { | ||
| 555 | self.request_stop(); | ||
| 556 | while self.is_running() {} | ||
| 557 | |||
| 558 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 559 | fence(Ordering::SeqCst); | ||
| 560 | } | ||
| 561 | } | ||
| 562 | |||
| 563 | pub struct WritableRingBuffer<'a, C: Channel, W: Word> { | ||
| 564 | cr: regs::Cr, | ||
| 565 | channel: PeripheralRef<'a, C>, | ||
| 566 | ringbuf: WritableDmaRingBuffer<'a, W>, | ||
| 567 | } | ||
| 568 | |||
| 569 | impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { | ||
| 570 | pub unsafe fn new_write( | ||
| 571 | channel: impl Peripheral<P = C> + 'a, | ||
| 572 | _request: Request, | ||
| 573 | peri_addr: *mut W, | ||
| 574 | buffer: &'a mut [W], | ||
| 575 | _options: TransferOptions, | ||
| 576 | ) -> Self { | ||
| 577 | into_ref!(channel); | ||
| 578 | |||
| 579 | let len = buffer.len(); | ||
| 580 | assert!(len > 0 && len <= 0xFFFF); | ||
| 581 | |||
| 582 | let dir = Dir::MemoryToPeripheral; | ||
| 583 | let data_size = W::size(); | ||
| 584 | |||
| 585 | let channel_number = channel.num(); | ||
| 586 | let dma = channel.regs(); | ||
| 587 | |||
| 588 | // "Preceding reads and writes cannot be moved past subsequent writes." | ||
| 589 | fence(Ordering::SeqCst); | ||
| 590 | |||
| 591 | #[cfg(bdma_v2)] | ||
| 592 | critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request))); | ||
| 593 | |||
| 594 | let mut w = regs::Cr(0); | ||
| 595 | w.set_psize(data_size.into()); | ||
| 596 | w.set_msize(data_size.into()); | ||
| 597 | w.set_minc(vals::Inc::ENABLED); | ||
| 598 | w.set_dir(dir.into()); | ||
| 599 | w.set_teie(true); | ||
| 600 | w.set_htie(true); | ||
| 601 | w.set_tcie(true); | ||
| 602 | w.set_circ(vals::Circ::ENABLED); | ||
| 603 | w.set_pl(vals::Pl::VERYHIGH); | ||
| 604 | w.set_en(true); | ||
| 605 | |||
| 606 | let buffer_ptr = buffer.as_mut_ptr(); | ||
| 607 | let mut this = Self { | ||
| 608 | channel, | ||
| 609 | cr: w, | ||
| 610 | ringbuf: WritableDmaRingBuffer::new(buffer), | ||
| 611 | }; | ||
| 612 | this.clear_irqs(); | ||
| 613 | |||
| 614 | #[cfg(dmamux)] | ||
| 615 | super::dmamux::configure_dmamux(&mut *this.channel, _request); | ||
| 616 | |||
| 617 | let ch = dma.ch(channel_number); | ||
| 618 | ch.par().write_value(peri_addr as u32); | ||
| 619 | ch.mar().write_value(buffer_ptr as u32); | ||
| 620 | ch.ndtr().write(|w| w.set_ndt(len as u16)); | ||
| 621 | |||
| 622 | this | ||
| 623 | } | ||
| 624 | |||
| 625 | pub fn start(&mut self) { | ||
| 626 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 627 | ch.cr().write_value(self.cr) | ||
| 628 | } | ||
| 629 | |||
| 630 | pub fn clear(&mut self) { | ||
| 631 | self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow())); | ||
| 632 | } | ||
| 633 | |||
| 634 | /// Write elements to the ring buffer | ||
| 635 | /// Return a tuple of the length written and the length remaining in the buffer | ||
| 636 | pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { | ||
| 637 | self.ringbuf.write(DmaCtrlImpl(self.channel.reborrow()), buf) | ||
| 638 | } | ||
| 639 | |||
| 640 | /// Write an exact number of elements to the ringbuffer. | ||
| 641 | pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> { | ||
| 642 | use core::future::poll_fn; | ||
| 643 | use core::sync::atomic::compiler_fence; | ||
| 644 | |||
| 645 | let mut written_data = 0; | ||
| 646 | let buffer_len = buffer.len(); | ||
| 647 | |||
| 648 | poll_fn(|cx| { | ||
| 649 | self.set_waker(cx.waker()); | ||
| 650 | |||
| 651 | compiler_fence(Ordering::SeqCst); | ||
| 652 | |||
| 653 | match self.write(&buffer[written_data..buffer_len]) { | ||
| 654 | Ok((len, remaining)) => { | ||
| 655 | written_data += len; | ||
| 656 | if written_data == buffer_len { | ||
| 657 | Poll::Ready(Ok(remaining)) | ||
| 658 | } else { | ||
| 659 | Poll::Pending | ||
| 660 | } | ||
| 661 | } | ||
| 662 | Err(e) => Poll::Ready(Err(e)), | ||
| 663 | } | ||
| 664 | }) | ||
| 665 | .await | ||
| 666 | } | ||
| 667 | |||
| 668 | /// The capacity of the ringbuffer. | ||
| 669 | pub fn cap(&self) -> usize { | ||
| 670 | self.ringbuf.cap() | ||
| 671 | } | ||
| 672 | |||
| 673 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 674 | STATE.ch_wakers[self.channel.index()].register(waker); | ||
| 675 | } | ||
| 676 | |||
| 677 | fn clear_irqs(&mut self) { | ||
| 678 | let dma = self.channel.regs(); | ||
| 679 | dma.ifcr().write(|w| { | ||
| 680 | w.set_htif(self.channel.num(), true); | ||
| 681 | w.set_tcif(self.channel.num(), true); | ||
| 682 | w.set_teif(self.channel.num(), true); | ||
| 683 | }); | ||
| 684 | } | ||
| 685 | |||
| 686 | pub fn request_stop(&mut self) { | ||
| 687 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 688 | |||
| 689 | // Disable the channel. Keep the IEs enabled so the irqs still fire. | ||
| 690 | // If the channel is enabled and transfer is not completed, we need to perform | ||
| 691 | // two separate write access to the CR register to disable the channel. | ||
| 692 | ch.cr().write(|w| { | ||
| 693 | w.set_teie(true); | ||
| 694 | w.set_htie(true); | ||
| 695 | w.set_tcie(true); | ||
| 696 | }); | ||
| 697 | } | ||
| 698 | |||
| 699 | pub fn is_running(&mut self) -> bool { | ||
| 700 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 701 | ch.cr().read().en() | ||
| 702 | } | ||
| 703 | } | ||
| 704 | |||
| 705 | impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> { | ||
| 554 | fn drop(&mut self) { | 706 | fn drop(&mut self) { |
| 555 | self.request_stop(); | 707 | self.request_stop(); |
| 556 | while self.is_running() {} | 708 | while self.is_running() {} |
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs index 219ef2eb0..9cd7aa8d5 100644 --- a/embassy-stm32/src/dma/dma.rs +++ b/embassy-stm32/src/dma/dma.rs | |||
| @@ -7,7 +7,7 @@ use core::task::{Context, Poll, Waker}; | |||
| 7 | use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; | 7 | use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; |
| 8 | use embassy_sync::waitqueue::AtomicWaker; | 8 | use embassy_sync::waitqueue::AtomicWaker; |
| 9 | 9 | ||
| 10 | use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError}; | 10 | use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer}; |
| 11 | use super::word::{Word, WordSize}; | 11 | use super::word::{Word, WordSize}; |
| 12 | use super::Dir; | 12 | use super::Dir; |
| 13 | use crate::_generated::DMA_CHANNEL_COUNT; | 13 | use crate::_generated::DMA_CHANNEL_COUNT; |
| @@ -625,13 +625,13 @@ impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { | |||
| 625 | } | 625 | } |
| 626 | } | 626 | } |
| 627 | 627 | ||
| 628 | pub struct RingBuffer<'a, C: Channel, W: Word> { | 628 | pub struct ReadableRingBuffer<'a, C: Channel, W: Word> { |
| 629 | cr: regs::Cr, | 629 | cr: regs::Cr, |
| 630 | channel: PeripheralRef<'a, C>, | 630 | channel: PeripheralRef<'a, C>, |
| 631 | ringbuf: DmaRingBuffer<'a, W>, | 631 | ringbuf: ReadableDmaRingBuffer<'a, W>, |
| 632 | } | 632 | } |
| 633 | 633 | ||
| 634 | impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | 634 | impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> { |
| 635 | pub unsafe fn new_read( | 635 | pub unsafe fn new_read( |
| 636 | channel: impl Peripheral<P = C> + 'a, | 636 | channel: impl Peripheral<P = C> + 'a, |
| 637 | _request: Request, | 637 | _request: Request, |
| @@ -677,7 +677,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 677 | let mut this = Self { | 677 | let mut this = Self { |
| 678 | channel, | 678 | channel, |
| 679 | cr: w, | 679 | cr: w, |
| 680 | ringbuf: DmaRingBuffer::new(buffer), | 680 | ringbuf: ReadableDmaRingBuffer::new(buffer), |
| 681 | }; | 681 | }; |
| 682 | this.clear_irqs(); | 682 | this.clear_irqs(); |
| 683 | 683 | ||
| @@ -797,7 +797,176 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||
| 797 | } | 797 | } |
| 798 | } | 798 | } |
| 799 | 799 | ||
| 800 | impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | 800 | impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> { |
| 801 | fn drop(&mut self) { | ||
| 802 | self.request_stop(); | ||
| 803 | while self.is_running() {} | ||
| 804 | |||
| 805 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 806 | fence(Ordering::SeqCst); | ||
| 807 | } | ||
| 808 | } | ||
| 809 | |||
| 810 | pub struct WritableRingBuffer<'a, C: Channel, W: Word> { | ||
| 811 | cr: regs::Cr, | ||
| 812 | channel: PeripheralRef<'a, C>, | ||
| 813 | ringbuf: WritableDmaRingBuffer<'a, W>, | ||
| 814 | } | ||
| 815 | |||
| 816 | impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { | ||
| 817 | pub unsafe fn new_write( | ||
| 818 | channel: impl Peripheral<P = C> + 'a, | ||
| 819 | _request: Request, | ||
| 820 | peri_addr: *mut W, | ||
| 821 | buffer: &'a mut [W], | ||
| 822 | options: TransferOptions, | ||
| 823 | ) -> Self { | ||
| 824 | into_ref!(channel); | ||
| 825 | |||
| 826 | let len = buffer.len(); | ||
| 827 | assert!(len > 0 && len <= 0xFFFF); | ||
| 828 | |||
| 829 | let dir = Dir::MemoryToPeripheral; | ||
| 830 | let data_size = W::size(); | ||
| 831 | |||
| 832 | let channel_number = channel.num(); | ||
| 833 | let dma = channel.regs(); | ||
| 834 | |||
| 835 | // "Preceding reads and writes cannot be moved past subsequent writes." | ||
| 836 | fence(Ordering::SeqCst); | ||
| 837 | |||
| 838 | let mut w = regs::Cr(0); | ||
| 839 | w.set_dir(dir.into()); | ||
| 840 | w.set_msize(data_size.into()); | ||
| 841 | w.set_psize(data_size.into()); | ||
| 842 | w.set_pl(vals::Pl::VERYHIGH); | ||
| 843 | w.set_minc(vals::Inc::INCREMENTED); | ||
| 844 | w.set_pinc(vals::Inc::FIXED); | ||
| 845 | w.set_teie(true); | ||
| 846 | w.set_htie(options.half_transfer_ir); | ||
| 847 | w.set_tcie(true); | ||
| 848 | w.set_circ(vals::Circ::ENABLED); | ||
| 849 | #[cfg(dma_v1)] | ||
| 850 | w.set_trbuff(true); | ||
| 851 | #[cfg(dma_v2)] | ||
| 852 | w.set_chsel(_request); | ||
| 853 | w.set_pburst(options.pburst.into()); | ||
| 854 | w.set_mburst(options.mburst.into()); | ||
| 855 | w.set_pfctrl(options.flow_ctrl.into()); | ||
| 856 | w.set_en(true); | ||
| 857 | |||
| 858 | let buffer_ptr = buffer.as_mut_ptr(); | ||
| 859 | let mut this = Self { | ||
| 860 | channel, | ||
| 861 | cr: w, | ||
| 862 | ringbuf: WritableDmaRingBuffer::new(buffer), | ||
| 863 | }; | ||
| 864 | this.clear_irqs(); | ||
| 865 | |||
| 866 | #[cfg(dmamux)] | ||
| 867 | super::dmamux::configure_dmamux(&mut *this.channel, _request); | ||
| 868 | |||
| 869 | let ch = dma.st(channel_number); | ||
| 870 | ch.par().write_value(peri_addr as u32); | ||
| 871 | ch.m0ar().write_value(buffer_ptr as u32); | ||
| 872 | ch.ndtr().write_value(regs::Ndtr(len as _)); | ||
| 873 | ch.fcr().write(|w| { | ||
| 874 | if let Some(fth) = options.fifo_threshold { | ||
| 875 | // FIFO mode | ||
| 876 | w.set_dmdis(vals::Dmdis::DISABLED); | ||
| 877 | w.set_fth(fth.into()); | ||
| 878 | } else { | ||
| 879 | // Direct mode | ||
| 880 | w.set_dmdis(vals::Dmdis::ENABLED); | ||
| 881 | } | ||
| 882 | }); | ||
| 883 | |||
| 884 | this | ||
| 885 | } | ||
| 886 | |||
| 887 | pub fn start(&mut self) { | ||
| 888 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 889 | ch.cr().write_value(self.cr); | ||
| 890 | } | ||
| 891 | |||
| 892 | pub fn clear(&mut self) { | ||
| 893 | self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow())); | ||
| 894 | } | ||
| 895 | |||
| 896 | /// Write elements to the ring buffer | ||
| 897 | /// Return a tuple of the length written and the length remaining in the buffer | ||
| 898 | pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { | ||
| 899 | self.ringbuf.write(DmaCtrlImpl(self.channel.reborrow()), buf) | ||
| 900 | } | ||
| 901 | |||
| 902 | /// Write an exact number of elements to the ringbuffer. | ||
| 903 | pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> { | ||
| 904 | use core::future::poll_fn; | ||
| 905 | use core::sync::atomic::compiler_fence; | ||
| 906 | |||
| 907 | let mut written_data = 0; | ||
| 908 | let buffer_len = buffer.len(); | ||
| 909 | |||
| 910 | poll_fn(|cx| { | ||
| 911 | self.set_waker(cx.waker()); | ||
| 912 | |||
| 913 | compiler_fence(Ordering::SeqCst); | ||
| 914 | |||
| 915 | match self.write(&buffer[written_data..buffer_len]) { | ||
| 916 | Ok((len, remaining)) => { | ||
| 917 | written_data += len; | ||
| 918 | if written_data == buffer_len { | ||
| 919 | Poll::Ready(Ok(remaining)) | ||
| 920 | } else { | ||
| 921 | Poll::Pending | ||
| 922 | } | ||
| 923 | } | ||
| 924 | Err(e) => Poll::Ready(Err(e)), | ||
| 925 | } | ||
| 926 | }) | ||
| 927 | .await | ||
| 928 | } | ||
| 929 | |||
| 930 | /// The capacity of the ringbuffer. | ||
| 931 | pub fn cap(&self) -> usize { | ||
| 932 | self.ringbuf.cap() | ||
| 933 | } | ||
| 934 | |||
| 935 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 936 | STATE.ch_wakers[self.channel.index()].register(waker); | ||
| 937 | } | ||
| 938 | |||
| 939 | fn clear_irqs(&mut self) { | ||
| 940 | let channel_number = self.channel.num(); | ||
| 941 | let dma = self.channel.regs(); | ||
| 942 | let isrn = channel_number / 4; | ||
| 943 | let isrbit = channel_number % 4; | ||
| 944 | |||
| 945 | dma.ifcr(isrn).write(|w| { | ||
| 946 | w.set_htif(isrbit, true); | ||
| 947 | w.set_tcif(isrbit, true); | ||
| 948 | w.set_teif(isrbit, true); | ||
| 949 | }); | ||
| 950 | } | ||
| 951 | |||
| 952 | pub fn request_stop(&mut self) { | ||
| 953 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 954 | |||
| 955 | // Disable the channel. Keep the IEs enabled so the irqs still fire. | ||
| 956 | ch.cr().write(|w| { | ||
| 957 | w.set_teie(true); | ||
| 958 | w.set_htie(true); | ||
| 959 | w.set_tcie(true); | ||
| 960 | }); | ||
| 961 | } | ||
| 962 | |||
| 963 | pub fn is_running(&mut self) -> bool { | ||
| 964 | let ch = self.channel.regs().st(self.channel.num()); | ||
| 965 | ch.cr().read().en() | ||
| 966 | } | ||
| 967 | } | ||
| 968 | |||
| 969 | impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> { | ||
| 801 | fn drop(&mut self) { | 970 | fn drop(&mut self) { |
| 802 | self.request_stop(); | 971 | self.request_stop(); |
| 803 | while self.is_running() {} | 972 | while self.is_running() {} |
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs index 190793974..945c7508c 100644 --- a/embassy-stm32/src/dma/ringbuffer.rs +++ b/embassy-stm32/src/dma/ringbuffer.rs | |||
| @@ -29,7 +29,7 @@ use super::word::Word; | |||
| 29 | /// | | | | | 29 | /// | | | | |
| 30 | /// +- end --------------------+ +- start ----------------+ | 30 | /// +- end --------------------+ +- start ----------------+ |
| 31 | /// ``` | 31 | /// ``` |
| 32 | pub struct DmaRingBuffer<'a, W: Word> { | 32 | pub struct ReadableDmaRingBuffer<'a, W: Word> { |
| 33 | pub(crate) dma_buf: &'a mut [W], | 33 | pub(crate) dma_buf: &'a mut [W], |
| 34 | start: usize, | 34 | start: usize, |
| 35 | } | 35 | } |
| @@ -51,7 +51,7 @@ pub trait DmaCtrl { | |||
| 51 | fn reset_complete_count(&mut self) -> usize; | 51 | fn reset_complete_count(&mut self) -> usize; |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | impl<'a, W: Word> DmaRingBuffer<'a, W> { | 54 | impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> { |
| 55 | pub fn new(dma_buf: &'a mut [W]) -> Self { | 55 | pub fn new(dma_buf: &'a mut [W]) -> Self { |
| 56 | Self { dma_buf, start: 0 } | 56 | Self { dma_buf, start: 0 } |
| 57 | } | 57 | } |
| @@ -197,6 +197,112 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { | |||
| 197 | length | 197 | length |
| 198 | } | 198 | } |
| 199 | } | 199 | } |
| 200 | |||
| 201 | pub struct WritableDmaRingBuffer<'a, W: Word> { | ||
| 202 | pub(crate) dma_buf: &'a mut [W], | ||
| 203 | end: usize, | ||
| 204 | } | ||
| 205 | |||
| 206 | impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { | ||
| 207 | pub fn new(dma_buf: &'a mut [W]) -> Self { | ||
| 208 | Self { dma_buf, end: 0 } | ||
| 209 | } | ||
| 210 | |||
| 211 | /// Reset the ring buffer to its initial state | ||
| 212 | pub fn clear(&mut self, mut dma: impl DmaCtrl) { | ||
| 213 | self.end = 0; | ||
| 214 | dma.reset_complete_count(); | ||
| 215 | } | ||
| 216 | |||
| 217 | /// The capacity of the ringbuffer | ||
| 218 | pub const fn cap(&self) -> usize { | ||
| 219 | self.dma_buf.len() | ||
| 220 | } | ||
| 221 | |||
| 222 | /// The current position of the ringbuffer | ||
| 223 | fn pos(&self, remaining_transfers: usize) -> usize { | ||
| 224 | self.cap() - remaining_transfers | ||
| 225 | } | ||
| 226 | |||
| 227 | /// Write elements to the ring buffer | ||
| 228 | /// Return a tuple of the length written and the capacity remaining to be written in the buffer | ||
| 229 | pub fn write(&mut self, mut dma: impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> { | ||
| 230 | let start = self.pos(dma.get_remaining_transfers()); | ||
| 231 | if start > self.end { | ||
| 232 | // The occupied portion in the ring buffer DOES wrap | ||
| 233 | let len = self.copy_from(buf, self.end..start); | ||
| 234 | |||
| 235 | compiler_fence(Ordering::SeqCst); | ||
| 236 | |||
| 237 | // Confirm that the DMA is not inside data we could have written | ||
| 238 | let (pos, complete_count) = | ||
| 239 | critical_section::with(|_| (self.pos(dma.get_remaining_transfers()), dma.get_complete_count())); | ||
| 240 | if (pos >= self.end && pos < start) || (complete_count > 0 && pos >= start) || complete_count > 1 { | ||
| 241 | Err(OverrunError) | ||
| 242 | } else { | ||
| 243 | self.end = (self.end + len) % self.cap(); | ||
| 244 | |||
| 245 | Ok((len, self.cap() - (start - self.end))) | ||
| 246 | } | ||
| 247 | } else if start == self.end && dma.get_complete_count() == 0 { | ||
| 248 | Ok((0, 0)) | ||
| 249 | } else if start <= self.end && self.end + buf.len() < self.cap() { | ||
| 250 | // The occupied portion in the ring buffer DOES NOT wrap | ||
| 251 | // and copying elements into the buffer WILL NOT cause it to wrap | ||
| 252 | |||
| 253 | // Copy into the dma buffer | ||
| 254 | let len = self.copy_from(buf, self.end..self.cap()); | ||
| 255 | |||
| 256 | compiler_fence(Ordering::SeqCst); | ||
| 257 | |||
| 258 | // Confirm that the DMA is not inside data we could have written | ||
| 259 | let pos = self.pos(dma.get_remaining_transfers()); | ||
| 260 | if pos > self.end || pos < start || dma.get_complete_count() > 1 { | ||
| 261 | Err(OverrunError) | ||
| 262 | } else { | ||
| 263 | self.end = (self.end + len) % self.cap(); | ||
| 264 | |||
| 265 | Ok((len, self.cap() - (self.end - start))) | ||
| 266 | } | ||
| 267 | } else { | ||
| 268 | // The occupied portion in the ring buffer DOES NOT wrap | ||
| 269 | // and copying elements into the buffer WILL cause it to wrap | ||
| 270 | |||
| 271 | let tail = self.copy_from(buf, self.end..self.cap()); | ||
| 272 | let head = self.copy_from(&buf[tail..], 0..start); | ||
| 273 | |||
| 274 | compiler_fence(Ordering::SeqCst); | ||
| 275 | |||
| 276 | // Confirm that the DMA is not inside data we could have written | ||
| 277 | let pos = self.pos(dma.get_remaining_transfers()); | ||
| 278 | if pos > self.end || pos < start || dma.reset_complete_count() > 1 { | ||
| 279 | Err(OverrunError) | ||
| 280 | } else { | ||
| 281 | self.end = head; | ||
| 282 | |||
| 283 | Ok((tail + head, self.cap() - (start - self.end))) | ||
| 284 | } | ||
| 285 | } | ||
| 286 | } | ||
| 287 | /// Copy into the dma buffer at `data_range` from `buf` | ||
| 288 | fn copy_from(&mut self, buf: &[W], data_range: Range<usize>) -> usize { | ||
| 289 | // Limit the number of elements that can be copied | ||
| 290 | let length = usize::min(data_range.len(), buf.len()); | ||
| 291 | |||
| 292 | // Copy into dma buffer from read buffer | ||
| 293 | // We need to do it like this instead of a simple copy_from_slice() because | ||
| 294 | // reading from a part of memory that may be simultaneously written to is unsafe | ||
| 295 | unsafe { | ||
| 296 | let dma_buf = self.dma_buf.as_mut_ptr(); | ||
| 297 | |||
| 298 | for i in 0..length { | ||
| 299 | core::ptr::write_volatile(dma_buf.offset((data_range.start + i) as isize), buf[i]); | ||
| 300 | } | ||
| 301 | } | ||
| 302 | |||
| 303 | length | ||
| 304 | } | ||
| 305 | } | ||
| 200 | #[cfg(test)] | 306 | #[cfg(test)] |
| 201 | mod tests { | 307 | mod tests { |
| 202 | use core::array; | 308 | use core::array; |
| @@ -263,7 +369,7 @@ mod tests { | |||
| 263 | #[test] | 369 | #[test] |
| 264 | fn empty_and_read_not_started() { | 370 | fn empty_and_read_not_started() { |
| 265 | let mut dma_buf = [0u8; 16]; | 371 | let mut dma_buf = [0u8; 16]; |
| 266 | let ringbuf = DmaRingBuffer::new(&mut dma_buf); | 372 | let ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); |
| 267 | 373 | ||
| 268 | assert_eq!(0, ringbuf.start); | 374 | assert_eq!(0, ringbuf.start); |
| 269 | } | 375 | } |
| @@ -273,7 +379,7 @@ mod tests { | |||
| 273 | let mut dma = TestCircularTransfer::new(16); | 379 | let mut dma = TestCircularTransfer::new(16); |
| 274 | 380 | ||
| 275 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | 381 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |
| 276 | let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | 382 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); |
| 277 | 383 | ||
| 278 | assert_eq!(0, ringbuf.start); | 384 | assert_eq!(0, ringbuf.start); |
| 279 | assert_eq!(16, ringbuf.cap()); | 385 | assert_eq!(16, ringbuf.cap()); |
| @@ -314,7 +420,7 @@ mod tests { | |||
| 314 | let mut dma = TestCircularTransfer::new(16); | 420 | let mut dma = TestCircularTransfer::new(16); |
| 315 | 421 | ||
| 316 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | 422 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |
| 317 | let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | 423 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); |
| 318 | 424 | ||
| 319 | assert_eq!(0, ringbuf.start); | 425 | assert_eq!(0, ringbuf.start); |
| 320 | assert_eq!(16, ringbuf.cap()); | 426 | assert_eq!(16, ringbuf.cap()); |
| @@ -349,7 +455,7 @@ mod tests { | |||
| 349 | let mut dma = TestCircularTransfer::new(16); | 455 | let mut dma = TestCircularTransfer::new(16); |
| 350 | 456 | ||
| 351 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | 457 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |
| 352 | let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | 458 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); |
| 353 | 459 | ||
| 354 | assert_eq!(0, ringbuf.start); | 460 | assert_eq!(0, ringbuf.start); |
| 355 | assert_eq!(16, ringbuf.cap()); | 461 | assert_eq!(16, ringbuf.cap()); |
| @@ -384,7 +490,7 @@ mod tests { | |||
| 384 | let mut dma = TestCircularTransfer::new(16); | 490 | let mut dma = TestCircularTransfer::new(16); |
| 385 | 491 | ||
| 386 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | 492 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |
| 387 | let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | 493 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); |
| 388 | 494 | ||
| 389 | assert_eq!(0, ringbuf.start); | 495 | assert_eq!(0, ringbuf.start); |
| 390 | assert_eq!(16, ringbuf.cap()); | 496 | assert_eq!(16, ringbuf.cap()); |
| @@ -420,7 +526,7 @@ mod tests { | |||
| 420 | let mut dma = TestCircularTransfer::new(16); | 526 | let mut dma = TestCircularTransfer::new(16); |
| 421 | 527 | ||
| 422 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | 528 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |
| 423 | let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | 529 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); |
| 424 | 530 | ||
| 425 | assert_eq!(0, ringbuf.start); | 531 | assert_eq!(0, ringbuf.start); |
| 426 | assert_eq!(16, ringbuf.cap()); | 532 | assert_eq!(16, ringbuf.cap()); |
| @@ -454,7 +560,7 @@ mod tests { | |||
| 454 | let mut dma = TestCircularTransfer::new(16); | 560 | let mut dma = TestCircularTransfer::new(16); |
| 455 | 561 | ||
| 456 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | 562 | let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |
| 457 | let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | 563 | let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf); |
| 458 | 564 | ||
| 459 | assert_eq!(0, ringbuf.start); | 565 | assert_eq!(0, ringbuf.start); |
| 460 | assert_eq!(16, ringbuf.cap()); | 566 | assert_eq!(16, ringbuf.cap()); |
diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index 80261d048..71077c070 100644 --- a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs | |||
| @@ -6,12 +6,12 @@ use embassy_hal_internal::PeripheralRef; | |||
| 6 | use futures::future::{select, Either}; | 6 | use futures::future::{select, Either}; |
| 7 | 7 | ||
| 8 | use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx}; | 8 | use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx}; |
| 9 | use crate::dma::RingBuffer; | 9 | use crate::dma::ReadableRingBuffer; |
| 10 | use crate::usart::{Regs, Sr}; | 10 | use crate::usart::{Regs, Sr}; |
| 11 | 11 | ||
| 12 | pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> { | 12 | pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> { |
| 13 | _peri: PeripheralRef<'d, T>, | 13 | _peri: PeripheralRef<'d, T>, |
| 14 | ring_buf: RingBuffer<'d, RxDma, u8>, | 14 | ring_buf: ReadableRingBuffer<'d, RxDma, u8>, |
| 15 | } | 15 | } |
| 16 | 16 | ||
| 17 | impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> { | 17 | impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> { |
| @@ -24,7 +24,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> { | |||
| 24 | let request = self.rx_dma.request(); | 24 | let request = self.rx_dma.request(); |
| 25 | let opts = Default::default(); | 25 | let opts = Default::default(); |
| 26 | 26 | ||
| 27 | let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) }; | 27 | let ring_buf = unsafe { ReadableRingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) }; |
| 28 | 28 | ||
| 29 | RingBufferedUartRx { | 29 | RingBufferedUartRx { |
| 30 | _peri: self._peri, | 30 | _peri: self._peri, |
