diff options
| author | Dario Nieuwenhuis <[email protected]> | 2025-09-05 13:07:48 +0000 |
|---|---|---|
| committer | GitHub <[email protected]> | 2025-09-05 13:07:48 +0000 |
| commit | f6414d8cd22d18eb1adc5dfa2780bc94a150be30 (patch) | |
| tree | 9934f127507e16e294e75e4175b13c03e60ced13 | |
| parent | fbe2c0d43b777067027ce1413946892cb7d12001 (diff) | |
| parent | 2e2562d8dc38844b9907e282f6c098e6ac2fd096 (diff) | |
Merge pull request #3923 from elagil/gpdma_ll_ringbuf_support
Add GPDMA linked-list + ringbuffer support
| -rw-r--r-- | embassy-stm32/CHANGELOG.md | 5 | ||||
| -rw-r--r-- | embassy-stm32/src/cryp/mod.rs | 3 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/dma_bdma.rs | 102 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/gpdma.rs | 339 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/gpdma/linked_list.rs | 267 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/gpdma/mod.rs | 699 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/gpdma/ringbuffered.rs | 332 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/mod.rs | 7 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/ringbuffer/mod.rs | 13 | ||||
| -rw-r--r-- | embassy-stm32/src/i2c/v2.rs | 2 | ||||
| -rw-r--r-- | embassy-stm32/src/sai/mod.rs | 9 | ||||
| -rw-r--r-- | embassy-stm32/src/spdifrx/mod.rs | 7 | ||||
| -rw-r--r-- | embassy-stm32/src/ucpd.rs | 4 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/mod.rs | 2 | ||||
| -rw-r--r-- | embassy-stm32/src/usart/ringbuffered.rs | 2 | ||||
| -rw-r--r-- | examples/stm32h5/src/bin/sai.rs | 52 |
16 files changed, 1429 insertions, 416 deletions
diff --git a/embassy-stm32/CHANGELOG.md b/embassy-stm32/CHANGELOG.md index 4cc48ed97..8ed4dbd65 100644 --- a/embassy-stm32/CHANGELOG.md +++ b/embassy-stm32/CHANGELOG.md | |||
| @@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 | |||
| 25 | - feat: stm32/adc/v3: allow DMA reads to loop through enable channels | 25 | - feat: stm32/adc/v3: allow DMA reads to loop through enable channels |
| 26 | - fix: Fix XSPI not disabling alternate bytes when they were previously enabled | 26 | - fix: Fix XSPI not disabling alternate bytes when they were previously enabled |
| 27 | - fix: Fix stm32h7rs init when using external flash via XSPI | 27 | - fix: Fix stm32h7rs init when using external flash via XSPI |
| 28 | - feat: Add GPDMA linked-list + ringbuffer support ([#3923](https://github.com/embassy-rs/embassy/pull/3923)) | ||
| 28 | 29 | ||
| 29 | ## 0.3.0 - 2025-08-12 | 30 | ## 0.3.0 - 2025-08-12 |
| 30 | 31 | ||
| @@ -135,7 +136,7 @@ GPIO: | |||
| 135 | - Refactor AfType ([#3031](https://github.com/embassy-rs/embassy/pull/3031)) | 136 | - Refactor AfType ([#3031](https://github.com/embassy-rs/embassy/pull/3031)) |
| 136 | - Gpiov1: Do not call set_speed for AFType::Input ([#2996](https://github.com/embassy-rs/embassy/pull/2996)) | 137 | - Gpiov1: Do not call set_speed for AFType::Input ([#2996](https://github.com/embassy-rs/embassy/pull/2996)) |
| 137 | 138 | ||
| 138 | UART: | 139 | UART: |
| 139 | - Add embedded-io impls ([#2739](https://github.com/embassy-rs/embassy/pull/2739)) | 140 | - Add embedded-io impls ([#2739](https://github.com/embassy-rs/embassy/pull/2739)) |
| 140 | - Add support for changing baud rate ([#3512](https://github.com/embassy-rs/embassy/pull/3512)) | 141 | - Add support for changing baud rate ([#3512](https://github.com/embassy-rs/embassy/pull/3512)) |
| 141 | - Add split_ref ([#3500](https://github.com/embassy-rs/embassy/pull/3500)) | 142 | - Add split_ref ([#3500](https://github.com/embassy-rs/embassy/pull/3500)) |
| @@ -159,7 +160,7 @@ UART: | |||
| 159 | - Wake receive task for each received byte ([#2722](https://github.com/embassy-rs/embassy/pull/2722)) | 160 | - Wake receive task for each received byte ([#2722](https://github.com/embassy-rs/embassy/pull/2722)) |
| 160 | - Fix dma and idle line detection in ringbuffereduartrx ([#3319](https://github.com/embassy-rs/embassy/pull/3319)) | 161 | - Fix dma and idle line detection in ringbuffereduartrx ([#3319](https://github.com/embassy-rs/embassy/pull/3319)) |
| 161 | 162 | ||
| 162 | SPI: | 163 | SPI: |
| 163 | - Add MISO pullup configuration option ([#2943](https://github.com/embassy-rs/embassy/pull/2943)) | 164 | - Add MISO pullup configuration option ([#2943](https://github.com/embassy-rs/embassy/pull/2943)) |
| 164 | - Add slew rate configuration options ([#3669](https://github.com/embassy-rs/embassy/pull/3669)) | 165 | - Add slew rate configuration options ([#3669](https://github.com/embassy-rs/embassy/pull/3669)) |
| 165 | - Fix blocking_write on nosck spi. ([#3035](https://github.com/embassy-rs/embassy/pull/3035)) | 166 | - Fix blocking_write on nosck spi. ([#3035](https://github.com/embassy-rs/embassy/pull/3035)) |
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 35d9f8cce..0173b2b5d 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs | |||
| @@ -1814,7 +1814,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> { | |||
| 1814 | // Configure DMA to transfer input to crypto core. | 1814 | // Configure DMA to transfer input to crypto core. |
| 1815 | let dst_ptr: *mut u32 = T::regs().din().as_ptr(); | 1815 | let dst_ptr: *mut u32 = T::regs().din().as_ptr(); |
| 1816 | let options = TransferOptions { | 1816 | let options = TransferOptions { |
| 1817 | #[cfg(not(gpdma))] | ||
| 1818 | priority: crate::dma::Priority::High, | 1817 | priority: crate::dma::Priority::High, |
| 1819 | ..Default::default() | 1818 | ..Default::default() |
| 1820 | }; | 1819 | }; |
| @@ -1834,7 +1833,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> { | |||
| 1834 | // Configure DMA to transfer input to crypto core. | 1833 | // Configure DMA to transfer input to crypto core. |
| 1835 | let dst_ptr: *mut u32 = T::regs().din().as_ptr(); | 1834 | let dst_ptr: *mut u32 = T::regs().din().as_ptr(); |
| 1836 | let options = TransferOptions { | 1835 | let options = TransferOptions { |
| 1837 | #[cfg(not(gpdma))] | ||
| 1838 | priority: crate::dma::Priority::High, | 1836 | priority: crate::dma::Priority::High, |
| 1839 | ..Default::default() | 1837 | ..Default::default() |
| 1840 | }; | 1838 | }; |
| @@ -1853,7 +1851,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> { | |||
| 1853 | // Configure DMA to get output from crypto core. | 1851 | // Configure DMA to get output from crypto core. |
| 1854 | let src_ptr = T::regs().dout().as_ptr(); | 1852 | let src_ptr = T::regs().dout().as_ptr(); |
| 1855 | let options = TransferOptions { | 1853 | let options = TransferOptions { |
| 1856 | #[cfg(not(gpdma))] | ||
| 1857 | priority: crate::dma::Priority::VeryHigh, | 1854 | priority: crate::dma::Priority::VeryHigh, |
| 1858 | ..Default::default() | 1855 | ..Default::default() |
| 1859 | }; | 1856 | }; |
diff --git a/embassy-stm32/src/dma/dma_bdma.rs b/embassy-stm32/src/dma/dma_bdma.rs index 464823bfc..73ecab070 100644 --- a/embassy-stm32/src/dma/dma_bdma.rs +++ b/embassy-stm32/src/dma/dma_bdma.rs | |||
| @@ -498,46 +498,52 @@ impl AnyChannel { | |||
| 498 | } | 498 | } |
| 499 | } | 499 | } |
| 500 | 500 | ||
| 501 | fn request_stop(&self) { | 501 | fn request_pause(&self) { |
| 502 | let info = self.info(); | 502 | let info = self.info(); |
| 503 | match self.info().dma { | 503 | match self.info().dma { |
| 504 | #[cfg(dma)] | 504 | #[cfg(dma)] |
| 505 | DmaInfo::Dma(r) => { | 505 | DmaInfo::Dma(r) => { |
| 506 | // Disable the channel. Keep the IEs enabled so the irqs still fire. | 506 | // Disable the channel without overwriting the existing configuration |
| 507 | r.st(info.num).cr().write(|w| { | 507 | r.st(info.num).cr().modify(|w| { |
| 508 | w.set_teie(true); | 508 | w.set_en(false); |
| 509 | w.set_tcie(true); | ||
| 510 | }); | 509 | }); |
| 511 | } | 510 | } |
| 512 | #[cfg(bdma)] | 511 | #[cfg(bdma)] |
| 513 | DmaInfo::Bdma(r) => { | 512 | DmaInfo::Bdma(r) => { |
| 514 | // Disable the channel. Keep the IEs enabled so the irqs still fire. | 513 | // Disable the channel without overwriting the existing configuration |
| 515 | r.ch(info.num).cr().write(|w| { | 514 | r.ch(info.num).cr().modify(|w| { |
| 516 | w.set_teie(true); | 515 | w.set_en(false); |
| 517 | w.set_tcie(true); | ||
| 518 | }); | 516 | }); |
| 519 | } | 517 | } |
| 520 | } | 518 | } |
| 521 | } | 519 | } |
| 522 | 520 | ||
| 523 | fn request_pause(&self) { | 521 | fn request_resume(&self) { |
| 522 | self.start() | ||
| 523 | } | ||
| 524 | |||
| 525 | fn request_reset(&self) { | ||
| 524 | let info = self.info(); | 526 | let info = self.info(); |
| 525 | match self.info().dma { | 527 | match self.info().dma { |
| 526 | #[cfg(dma)] | 528 | #[cfg(dma)] |
| 527 | DmaInfo::Dma(r) => { | 529 | DmaInfo::Dma(r) => { |
| 528 | // Disable the channel without overwriting the existing configuration | 530 | // Disable the channel. Keep the IEs enabled so the irqs still fire. |
| 529 | r.st(info.num).cr().modify(|w| { | 531 | r.st(info.num).cr().write(|w| { |
| 530 | w.set_en(false); | 532 | w.set_teie(true); |
| 533 | w.set_tcie(true); | ||
| 531 | }); | 534 | }); |
| 532 | } | 535 | } |
| 533 | #[cfg(bdma)] | 536 | #[cfg(bdma)] |
| 534 | DmaInfo::Bdma(r) => { | 537 | DmaInfo::Bdma(r) => { |
| 535 | // Disable the channel without overwriting the existing configuration | 538 | // Disable the channel. Keep the IEs enabled so the irqs still fire. |
| 536 | r.ch(info.num).cr().modify(|w| { | 539 | r.ch(info.num).cr().write(|w| { |
| 537 | w.set_en(false); | 540 | w.set_teie(true); |
| 541 | w.set_tcie(true); | ||
| 538 | }); | 542 | }); |
| 539 | } | 543 | } |
| 540 | } | 544 | } |
| 545 | |||
| 546 | while self.is_running() {} | ||
| 541 | } | 547 | } |
| 542 | 548 | ||
| 543 | fn is_running(&self) -> bool { | 549 | fn is_running(&self) -> bool { |
| @@ -710,27 +716,31 @@ impl<'a> Transfer<'a> { | |||
| 710 | Self { channel } | 716 | Self { channel } |
| 711 | } | 717 | } |
| 712 | 718 | ||
| 713 | /// Request the transfer to stop. | ||
| 714 | /// The configuration for this channel will **not be preserved**. If you need to restart the transfer | ||
| 715 | /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. | ||
| 716 | /// | ||
| 717 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. | ||
| 718 | pub fn request_stop(&mut self) { | ||
| 719 | self.channel.request_stop() | ||
| 720 | } | ||
| 721 | |||
| 722 | /// Request the transfer to pause, keeping the existing configuration for this channel. | 719 | /// Request the transfer to pause, keeping the existing configuration for this channel. |
| 723 | /// To restart the transfer, call [`start`](Self::start) again. | ||
| 724 | /// | 720 | /// |
| 721 | /// To resume the transfer, call [`request_resume`](Self::request_resume) again. | ||
| 725 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. | 722 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. |
| 726 | pub fn request_pause(&mut self) { | 723 | pub fn request_pause(&mut self) { |
| 727 | self.channel.request_pause() | 724 | self.channel.request_pause() |
| 728 | } | 725 | } |
| 729 | 726 | ||
| 727 | /// Request the transfer to resume after having been paused. | ||
| 728 | pub fn request_resume(&mut self) { | ||
| 729 | self.channel.request_resume() | ||
| 730 | } | ||
| 731 | |||
| 732 | /// Request the DMA to reset. | ||
| 733 | /// | ||
| 734 | /// The configuration for this channel will **not be preserved**. If you need to restart the transfer | ||
| 735 | /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. | ||
| 736 | pub fn request_reset(&mut self) { | ||
| 737 | self.channel.request_reset() | ||
| 738 | } | ||
| 739 | |||
| 730 | /// Return whether this transfer is still running. | 740 | /// Return whether this transfer is still running. |
| 731 | /// | 741 | /// |
| 732 | /// If this returns `false`, it can be because either the transfer finished, or | 742 | /// If this returns `false`, it can be because either the transfer finished, or |
| 733 | /// it was requested to stop early with [`request_stop`](Self::request_stop). | 743 | /// it was requested to stop early with [`request_pause`](Self::request_pause). |
| 734 | pub fn is_running(&mut self) -> bool { | 744 | pub fn is_running(&mut self) -> bool { |
| 735 | self.channel.is_running() | 745 | self.channel.is_running() |
| 736 | } | 746 | } |
| @@ -754,7 +764,7 @@ impl<'a> Transfer<'a> { | |||
| 754 | 764 | ||
| 755 | impl<'a> Drop for Transfer<'a> { | 765 | impl<'a> Drop for Transfer<'a> { |
| 756 | fn drop(&mut self) { | 766 | fn drop(&mut self) { |
| 757 | self.request_stop(); | 767 | self.request_reset(); |
| 758 | while self.is_running() {} | 768 | while self.is_running() {} |
| 759 | 769 | ||
| 760 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | 770 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." |
| @@ -901,15 +911,6 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { | |||
| 901 | DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); | 911 | DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); |
| 902 | } | 912 | } |
| 903 | 913 | ||
| 904 | /// Request the DMA to stop. | ||
| 905 | /// The configuration for this channel will **not be preserved**. If you need to restart the transfer | ||
| 906 | /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. | ||
| 907 | /// | ||
| 908 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. | ||
| 909 | pub fn request_stop(&mut self) { | ||
| 910 | self.channel.request_stop() | ||
| 911 | } | ||
| 912 | |||
| 913 | /// Request the transfer to pause, keeping the existing configuration for this channel. | 914 | /// Request the transfer to pause, keeping the existing configuration for this channel. |
| 914 | /// To restart the transfer, call [`start`](Self::start) again. | 915 | /// To restart the transfer, call [`start`](Self::start) again. |
| 915 | /// | 916 | /// |
| @@ -918,10 +919,23 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { | |||
| 918 | self.channel.request_pause() | 919 | self.channel.request_pause() |
| 919 | } | 920 | } |
| 920 | 921 | ||
| 922 | /// Request the transfer to resume after having been paused. | ||
| 923 | pub fn request_resume(&mut self) { | ||
| 924 | self.channel.request_resume() | ||
| 925 | } | ||
| 926 | |||
| 927 | /// Request the DMA to reset. | ||
| 928 | /// | ||
| 929 | /// The configuration for this channel will **not be preserved**. If you need to restart the transfer | ||
| 930 | /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. | ||
| 931 | pub fn request_reset(&mut self) { | ||
| 932 | self.channel.request_reset() | ||
| 933 | } | ||
| 934 | |||
| 921 | /// Return whether DMA is still running. | 935 | /// Return whether DMA is still running. |
| 922 | /// | 936 | /// |
| 923 | /// If this returns `false`, it can be because either the transfer finished, or | 937 | /// If this returns `false`, it can be because either the transfer finished, or |
| 924 | /// it was requested to stop early with [`request_stop`](Self::request_stop). | 938 | /// it was requested to stop early with [`request_reset`](Self::request_reset). |
| 925 | pub fn is_running(&mut self) -> bool { | 939 | pub fn is_running(&mut self) -> bool { |
| 926 | self.channel.is_running() | 940 | self.channel.is_running() |
| 927 | } | 941 | } |
| @@ -934,7 +948,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { | |||
| 934 | /// This is designed to be used with streaming input data such as the | 948 | /// This is designed to be used with streaming input data such as the |
| 935 | /// I2S/SAI or ADC. | 949 | /// I2S/SAI or ADC. |
| 936 | /// | 950 | /// |
| 937 | /// When using the UART, you probably want `request_stop()`. | 951 | /// When using the UART, you probably want `request_reset()`. |
| 938 | pub async fn stop(&mut self) { | 952 | pub async fn stop(&mut self) { |
| 939 | self.channel.disable_circular_mode(); | 953 | self.channel.disable_circular_mode(); |
| 940 | //wait until cr.susp reads as true | 954 | //wait until cr.susp reads as true |
| @@ -948,7 +962,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> { | |||
| 948 | 962 | ||
| 949 | impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { | 963 | impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { |
| 950 | fn drop(&mut self) { | 964 | fn drop(&mut self) { |
| 951 | self.request_stop(); | 965 | self.request_reset(); |
| 952 | while self.is_running() {} | 966 | while self.is_running() {} |
| 953 | 967 | ||
| 954 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | 968 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." |
| @@ -1058,8 +1072,8 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { | |||
| 1058 | /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. | 1072 | /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. |
| 1059 | /// | 1073 | /// |
| 1060 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. | 1074 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. |
| 1061 | pub fn request_stop(&mut self) { | 1075 | pub fn request_reset(&mut self) { |
| 1062 | self.channel.request_stop() | 1076 | self.channel.request_reset() |
| 1063 | } | 1077 | } |
| 1064 | 1078 | ||
| 1065 | /// Request the transfer to pause, keeping the existing configuration for this channel. | 1079 | /// Request the transfer to pause, keeping the existing configuration for this channel. |
| @@ -1073,7 +1087,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { | |||
| 1073 | /// Return whether DMA is still running. | 1087 | /// Return whether DMA is still running. |
| 1074 | /// | 1088 | /// |
| 1075 | /// If this returns `false`, it can be because either the transfer finished, or | 1089 | /// If this returns `false`, it can be because either the transfer finished, or |
| 1076 | /// it was requested to stop early with [`request_stop`](Self::request_stop). | 1090 | /// it was requested to stop early with [`request_reset`](Self::request_reset). |
| 1077 | pub fn is_running(&mut self) -> bool { | 1091 | pub fn is_running(&mut self) -> bool { |
| 1078 | self.channel.is_running() | 1092 | self.channel.is_running() |
| 1079 | } | 1093 | } |
| @@ -1098,7 +1112,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> { | |||
| 1098 | 1112 | ||
| 1099 | impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { | 1113 | impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { |
| 1100 | fn drop(&mut self) { | 1114 | fn drop(&mut self) { |
| 1101 | self.request_stop(); | 1115 | self.request_reset(); |
| 1102 | while self.is_running() {} | 1116 | while self.is_running() {} |
| 1103 | 1117 | ||
| 1104 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | 1118 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." |
diff --git a/embassy-stm32/src/dma/gpdma.rs b/embassy-stm32/src/dma/gpdma.rs deleted file mode 100644 index 151e4ab9f..000000000 --- a/embassy-stm32/src/dma/gpdma.rs +++ /dev/null | |||
| @@ -1,339 +0,0 @@ | |||
| 1 | #![macro_use] | ||
| 2 | |||
| 3 | use core::future::Future; | ||
| 4 | use core::pin::Pin; | ||
| 5 | use core::sync::atomic::{fence, Ordering}; | ||
| 6 | use core::task::{Context, Poll}; | ||
| 7 | |||
| 8 | use embassy_hal_internal::Peri; | ||
| 9 | use embassy_sync::waitqueue::AtomicWaker; | ||
| 10 | |||
| 11 | use super::word::{Word, WordSize}; | ||
| 12 | use super::{AnyChannel, Channel, Dir, Request, STATE}; | ||
| 13 | use crate::interrupt::typelevel::Interrupt; | ||
| 14 | use crate::interrupt::Priority; | ||
| 15 | use crate::pac; | ||
| 16 | use crate::pac::gpdma::vals; | ||
| 17 | |||
| 18 | pub(crate) struct ChannelInfo { | ||
| 19 | pub(crate) dma: pac::gpdma::Gpdma, | ||
| 20 | pub(crate) num: usize, | ||
| 21 | #[cfg(feature = "_dual-core")] | ||
| 22 | pub(crate) irq: pac::Interrupt, | ||
| 23 | } | ||
| 24 | |||
| 25 | /// GPDMA transfer options. | ||
| 26 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | ||
| 27 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | ||
| 28 | #[non_exhaustive] | ||
| 29 | pub struct TransferOptions {} | ||
| 30 | |||
| 31 | impl Default for TransferOptions { | ||
| 32 | fn default() -> Self { | ||
| 33 | Self {} | ||
| 34 | } | ||
| 35 | } | ||
| 36 | |||
| 37 | impl From<WordSize> for vals::Dw { | ||
| 38 | fn from(raw: WordSize) -> Self { | ||
| 39 | match raw { | ||
| 40 | WordSize::OneByte => Self::BYTE, | ||
| 41 | WordSize::TwoBytes => Self::HALF_WORD, | ||
| 42 | WordSize::FourBytes => Self::WORD, | ||
| 43 | } | ||
| 44 | } | ||
| 45 | } | ||
| 46 | |||
| 47 | pub(crate) struct ChannelState { | ||
| 48 | waker: AtomicWaker, | ||
| 49 | } | ||
| 50 | |||
| 51 | impl ChannelState { | ||
| 52 | pub(crate) const NEW: Self = Self { | ||
| 53 | waker: AtomicWaker::new(), | ||
| 54 | }; | ||
| 55 | } | ||
| 56 | |||
| 57 | /// safety: must be called only once | ||
| 58 | pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) { | ||
| 59 | foreach_interrupt! { | ||
| 60 | ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => { | ||
| 61 | crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority); | ||
| 62 | #[cfg(not(feature = "_dual-core"))] | ||
| 63 | crate::interrupt::typelevel::$irq::enable(); | ||
| 64 | }; | ||
| 65 | } | ||
| 66 | crate::_generated::init_gpdma(); | ||
| 67 | } | ||
| 68 | |||
| 69 | impl AnyChannel { | ||
| 70 | /// Safety: Must be called with a matching set of parameters for a valid dma channel | ||
| 71 | pub(crate) unsafe fn on_irq(&self) { | ||
| 72 | let info = self.info(); | ||
| 73 | #[cfg(feature = "_dual-core")] | ||
| 74 | { | ||
| 75 | use embassy_hal_internal::interrupt::InterruptExt as _; | ||
| 76 | info.irq.enable(); | ||
| 77 | } | ||
| 78 | |||
| 79 | let state = &STATE[self.id as usize]; | ||
| 80 | |||
| 81 | let ch = info.dma.ch(info.num); | ||
| 82 | let sr = ch.sr().read(); | ||
| 83 | |||
| 84 | if sr.dtef() { | ||
| 85 | panic!( | ||
| 86 | "DMA: data transfer error on DMA@{:08x} channel {}", | ||
| 87 | info.dma.as_ptr() as u32, | ||
| 88 | info.num | ||
| 89 | ); | ||
| 90 | } | ||
| 91 | if sr.usef() { | ||
| 92 | panic!( | ||
| 93 | "DMA: user settings error on DMA@{:08x} channel {}", | ||
| 94 | info.dma.as_ptr() as u32, | ||
| 95 | info.num | ||
| 96 | ); | ||
| 97 | } | ||
| 98 | |||
| 99 | if sr.suspf() || sr.tcf() { | ||
| 100 | // disable all xxIEs to prevent the irq from firing again. | ||
| 101 | ch.cr().write(|_| {}); | ||
| 102 | |||
| 103 | // Wake the future. It'll look at tcf and see it's set. | ||
| 104 | state.waker.wake(); | ||
| 105 | } | ||
| 106 | } | ||
| 107 | } | ||
| 108 | |||
| 109 | /// DMA transfer. | ||
| 110 | #[must_use = "futures do nothing unless you `.await` or poll them"] | ||
| 111 | pub struct Transfer<'a> { | ||
| 112 | channel: Peri<'a, AnyChannel>, | ||
| 113 | } | ||
| 114 | |||
| 115 | impl<'a> Transfer<'a> { | ||
| 116 | /// Create a new read DMA transfer (peripheral to memory). | ||
| 117 | pub unsafe fn new_read<W: Word>( | ||
| 118 | channel: Peri<'a, impl Channel>, | ||
| 119 | request: Request, | ||
| 120 | peri_addr: *mut W, | ||
| 121 | buf: &'a mut [W], | ||
| 122 | options: TransferOptions, | ||
| 123 | ) -> Self { | ||
| 124 | Self::new_read_raw(channel, request, peri_addr, buf, options) | ||
| 125 | } | ||
| 126 | |||
| 127 | /// Create a new read DMA transfer (peripheral to memory), using raw pointers. | ||
| 128 | pub unsafe fn new_read_raw<MW: Word, PW: Word>( | ||
| 129 | channel: Peri<'a, impl Channel>, | ||
| 130 | request: Request, | ||
| 131 | peri_addr: *mut PW, | ||
| 132 | buf: *mut [MW], | ||
| 133 | options: TransferOptions, | ||
| 134 | ) -> Self { | ||
| 135 | Self::new_inner( | ||
| 136 | channel.into(), | ||
| 137 | request, | ||
| 138 | Dir::PeripheralToMemory, | ||
| 139 | peri_addr as *const u32, | ||
| 140 | buf as *mut MW as *mut u32, | ||
| 141 | buf.len(), | ||
| 142 | true, | ||
| 143 | PW::size(), | ||
| 144 | MW::size(), | ||
| 145 | options, | ||
| 146 | ) | ||
| 147 | } | ||
| 148 | |||
| 149 | /// Create a new write DMA transfer (memory to peripheral). | ||
| 150 | pub unsafe fn new_write<MW: Word, PW: Word>( | ||
| 151 | channel: Peri<'a, impl Channel>, | ||
| 152 | request: Request, | ||
| 153 | buf: &'a [MW], | ||
| 154 | peri_addr: *mut PW, | ||
| 155 | options: TransferOptions, | ||
| 156 | ) -> Self { | ||
| 157 | Self::new_write_raw(channel, request, buf, peri_addr, options) | ||
| 158 | } | ||
| 159 | |||
| 160 | /// Create a new write DMA transfer (memory to peripheral), using raw pointers. | ||
| 161 | pub unsafe fn new_write_raw<MW: Word, PW: Word>( | ||
| 162 | channel: Peri<'a, impl Channel>, | ||
| 163 | request: Request, | ||
| 164 | buf: *const [MW], | ||
| 165 | peri_addr: *mut PW, | ||
| 166 | options: TransferOptions, | ||
| 167 | ) -> Self { | ||
| 168 | Self::new_inner( | ||
| 169 | channel.into(), | ||
| 170 | request, | ||
| 171 | Dir::MemoryToPeripheral, | ||
| 172 | peri_addr as *const u32, | ||
| 173 | buf as *const MW as *mut u32, | ||
| 174 | buf.len(), | ||
| 175 | true, | ||
| 176 | MW::size(), | ||
| 177 | PW::size(), | ||
| 178 | options, | ||
| 179 | ) | ||
| 180 | } | ||
| 181 | |||
| 182 | /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly. | ||
| 183 | pub unsafe fn new_write_repeated<MW: Word, PW: Word>( | ||
| 184 | channel: Peri<'a, impl Channel>, | ||
| 185 | request: Request, | ||
| 186 | repeated: &'a MW, | ||
| 187 | count: usize, | ||
| 188 | peri_addr: *mut PW, | ||
| 189 | options: TransferOptions, | ||
| 190 | ) -> Self { | ||
| 191 | Self::new_inner( | ||
| 192 | channel.into(), | ||
| 193 | request, | ||
| 194 | Dir::MemoryToPeripheral, | ||
| 195 | peri_addr as *const u32, | ||
| 196 | repeated as *const MW as *mut u32, | ||
| 197 | count, | ||
| 198 | false, | ||
| 199 | MW::size(), | ||
| 200 | PW::size(), | ||
| 201 | options, | ||
| 202 | ) | ||
| 203 | } | ||
| 204 | |||
| 205 | unsafe fn new_inner( | ||
| 206 | channel: Peri<'a, AnyChannel>, | ||
| 207 | request: Request, | ||
| 208 | dir: Dir, | ||
| 209 | peri_addr: *const u32, | ||
| 210 | mem_addr: *mut u32, | ||
| 211 | mem_len: usize, | ||
| 212 | incr_mem: bool, | ||
| 213 | data_size: WordSize, | ||
| 214 | dst_size: WordSize, | ||
| 215 | _options: TransferOptions, | ||
| 216 | ) -> Self { | ||
| 217 | // BNDT is specified as bytes, not as number of transfers. | ||
| 218 | let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else { | ||
| 219 | panic!("DMA transfers may not be larger than 65535 bytes."); | ||
| 220 | }; | ||
| 221 | |||
| 222 | let info = channel.info(); | ||
| 223 | let ch = info.dma.ch(info.num); | ||
| 224 | |||
| 225 | // "Preceding reads and writes cannot be moved past subsequent writes." | ||
| 226 | fence(Ordering::SeqCst); | ||
| 227 | |||
| 228 | let this = Self { channel }; | ||
| 229 | |||
| 230 | ch.cr().write(|w| w.set_reset(true)); | ||
| 231 | ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs | ||
| 232 | ch.llr().write(|_| {}); // no linked list | ||
| 233 | ch.tr1().write(|w| { | ||
| 234 | w.set_sdw(data_size.into()); | ||
| 235 | w.set_ddw(dst_size.into()); | ||
| 236 | w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem); | ||
| 237 | w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem); | ||
| 238 | }); | ||
| 239 | ch.tr2().write(|w| { | ||
| 240 | w.set_dreq(match dir { | ||
| 241 | Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL, | ||
| 242 | Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL, | ||
| 243 | }); | ||
| 244 | w.set_reqsel(request); | ||
| 245 | }); | ||
| 246 | ch.tr3().write(|_| {}); // no address offsets. | ||
| 247 | ch.br1().write(|w| w.set_bndt(bndt)); | ||
| 248 | |||
| 249 | match dir { | ||
| 250 | Dir::MemoryToPeripheral => { | ||
| 251 | ch.sar().write_value(mem_addr as _); | ||
| 252 | ch.dar().write_value(peri_addr as _); | ||
| 253 | } | ||
| 254 | Dir::PeripheralToMemory => { | ||
| 255 | ch.sar().write_value(peri_addr as _); | ||
| 256 | ch.dar().write_value(mem_addr as _); | ||
| 257 | } | ||
| 258 | } | ||
| 259 | |||
| 260 | ch.cr().write(|w| { | ||
| 261 | // Enable interrupts | ||
| 262 | w.set_tcie(true); | ||
| 263 | w.set_useie(true); | ||
| 264 | w.set_dteie(true); | ||
| 265 | w.set_suspie(true); | ||
| 266 | |||
| 267 | // Start it | ||
| 268 | w.set_en(true); | ||
| 269 | }); | ||
| 270 | |||
| 271 | this | ||
| 272 | } | ||
| 273 | |||
| 274 | /// Request the transfer to stop. | ||
| 275 | /// | ||
| 276 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. | ||
| 277 | pub fn request_stop(&mut self) { | ||
| 278 | let info = self.channel.info(); | ||
| 279 | let ch = info.dma.ch(info.num); | ||
| 280 | |||
| 281 | ch.cr().modify(|w| w.set_susp(true)) | ||
| 282 | } | ||
| 283 | |||
| 284 | /// Return whether this transfer is still running. | ||
| 285 | /// | ||
| 286 | /// If this returns `false`, it can be because either the transfer finished, or | ||
| 287 | /// it was requested to stop early with [`request_stop`](Self::request_stop). | ||
| 288 | pub fn is_running(&mut self) -> bool { | ||
| 289 | let info = self.channel.info(); | ||
| 290 | let ch = info.dma.ch(info.num); | ||
| 291 | |||
| 292 | let sr = ch.sr().read(); | ||
| 293 | !sr.tcf() && !sr.suspf() | ||
| 294 | } | ||
| 295 | |||
| 296 | /// Gets the total remaining transfers for the channel | ||
| 297 | /// Note: this will be zero for transfers that completed without cancellation. | ||
| 298 | pub fn get_remaining_transfers(&self) -> u16 { | ||
| 299 | let info = self.channel.info(); | ||
| 300 | let ch = info.dma.ch(info.num); | ||
| 301 | |||
| 302 | ch.br1().read().bndt() | ||
| 303 | } | ||
| 304 | |||
| 305 | /// Blocking wait until the transfer finishes. | ||
| 306 | pub fn blocking_wait(mut self) { | ||
| 307 | while self.is_running() {} | ||
| 308 | |||
| 309 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 310 | fence(Ordering::SeqCst); | ||
| 311 | |||
| 312 | core::mem::forget(self); | ||
| 313 | } | ||
| 314 | } | ||
| 315 | |||
| 316 | impl<'a> Drop for Transfer<'a> { | ||
| 317 | fn drop(&mut self) { | ||
| 318 | self.request_stop(); | ||
| 319 | while self.is_running() {} | ||
| 320 | |||
| 321 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 322 | fence(Ordering::SeqCst); | ||
| 323 | } | ||
| 324 | } | ||
| 325 | |||
| 326 | impl<'a> Unpin for Transfer<'a> {} | ||
| 327 | impl<'a> Future for Transfer<'a> { | ||
| 328 | type Output = (); | ||
| 329 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { | ||
| 330 | let state = &STATE[self.channel.id as usize]; | ||
| 331 | state.waker.register(cx.waker()); | ||
| 332 | |||
| 333 | if self.is_running() { | ||
| 334 | Poll::Pending | ||
| 335 | } else { | ||
| 336 | Poll::Ready(()) | ||
| 337 | } | ||
| 338 | } | ||
| 339 | } | ||
diff --git a/embassy-stm32/src/dma/gpdma/linked_list.rs b/embassy-stm32/src/dma/gpdma/linked_list.rs new file mode 100644 index 000000000..f7c1fbbed --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/linked_list.rs | |||
| @@ -0,0 +1,267 @@ | |||
| 1 | //! Implementation of the GPDMA linked list and linked list items. | ||
| 2 | #![macro_use] | ||
| 3 | |||
| 4 | use stm32_metapac::gpdma::regs; | ||
| 5 | use stm32_metapac::gpdma::vals::Dreq; | ||
| 6 | |||
| 7 | use crate::dma::word::{Word, WordSize}; | ||
| 8 | use crate::dma::{Dir, Request}; | ||
| 9 | |||
/// The mode in which to run the linked list.
///
/// Determines how [`Table::link`] wires the items together before the
/// transfer is started.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RunMode {
    /// List items are not linked together.
    Unlinked,
    /// The list is linked sequentially and only run once.
    Once,
    /// The list is linked sequentially, and the end of the list is linked to the beginning.
    Circular,
}
| 20 | |||
/// A linked-list item for linear GPDMA transfers.
///
/// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities.
///
/// NOTE(review): the field order (TR1, TR2, BR1, SAR, DAR, LLR) with `repr(C)`
/// presumably matches the register-update order the hardware expects when
/// fetching a linked-list item — confirm against the reference manual.
#[derive(Debug, Copy, Clone, Default)]
#[repr(C)]
pub struct LinearItem {
    /// Transfer register 1.
    pub tr1: regs::ChTr1,
    /// Transfer register 2.
    pub tr2: regs::ChTr2,
    /// Block register 1.
    pub br1: regs::ChBr1,
    /// Source address register.
    pub sar: u32,
    /// Destination address register.
    pub dar: u32,
    /// Linked-list address register.
    pub llr: regs::ChLlr,
}
| 40 | |||
impl LinearItem {
    /// Create a new read DMA transfer (peripheral to memory).
    ///
    /// # Safety
    ///
    /// `peri_addr` must be a valid peripheral data-register address for the
    /// given `request`, and `buf` must stay valid (and unmoved) while the
    /// hardware uses this item.
    pub unsafe fn new_read<'d, W: Word>(request: Request, peri_addr: *mut W, buf: &'d mut [W]) -> Self {
        Self::new_inner(
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            buf as *mut [W] as *mut W as *mut u32,
            buf.len(),
            true,
            W::size(),
            W::size(),
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
    ///
    /// # Safety
    ///
    /// `peri_addr` must be a valid peripheral data-register address for the
    /// given `request`, and `buf` must stay valid (and unmoved) while the
    /// hardware uses this item.
    pub unsafe fn new_write<'d, MW: Word, PW: Word>(request: Request, buf: &'d [MW], peri_addr: *mut PW) -> Self {
        Self::new_inner(
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            buf as *const [MW] as *const MW as *mut u32,
            buf.len(),
            true,
            MW::size(),
            PW::size(),
        )
    }

    /// Build the register images shared by read and write items.
    ///
    /// `data_size` is the memory-side (source for writes) word width used to
    /// compute the byte count; `dst_size` is programmed into DDW.
    ///
    /// # Panics
    ///
    /// Panics if the transfer does not fit the 16-bit BNDT byte counter.
    unsafe fn new_inner(
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        dst_size: WordSize,
    ) -> Self {
        // BNDT is specified as bytes, not as number of transfers.
        let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
            panic!("DMA transfers may not be larger than 65535 bytes.");
        };

        let mut br1 = regs::ChBr1(0);
        br1.set_bndt(bndt);

        let mut tr1 = regs::ChTr1(0);
        tr1.set_sdw(data_size.into());
        tr1.set_ddw(dst_size.into());
        // Only the memory-side address increments; the peripheral data register is fixed.
        tr1.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
        tr1.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);

        let mut tr2 = regs::ChTr2(0);
        // The hardware request line drives whichever side is the peripheral.
        tr2.set_dreq(match dir {
            Dir::MemoryToPeripheral => Dreq::DESTINATION_PERIPHERAL,
            Dir::PeripheralToMemory => Dreq::SOURCE_PERIPHERAL,
        });
        tr2.set_reqsel(request);

        let (sar, dar) = match dir {
            Dir::MemoryToPeripheral => (mem_addr as _, peri_addr as _),
            Dir::PeripheralToMemory => (peri_addr as _, mem_addr as _),
        };

        // No link by default; `link_to` fills this in later.
        let llr = regs::ChLlr(0);

        Self {
            tr1,
            tr2,
            br1,
            sar,
            dar,
            llr,
        }
    }

    /// Link to the next linear item at the given address.
    ///
    /// Enables channel update bits.
    fn link_to(&mut self, next: u16) {
        let mut llr = regs::ChLlr(0);

        // Update every channel register (TR1, TR2, BR1, SAR, DAR, LLR) from the
        // next item when the hardware follows this link.
        llr.set_ut1(true);
        llr.set_ut2(true);
        llr.set_ub1(true);
        llr.set_usa(true);
        llr.set_uda(true);
        llr.set_ull(true);

        // Lower two bits are ignored: 32 bit aligned.
        llr.set_la(next >> 2);

        self.llr = llr;
    }

    /// Unlink the next linear item.
    ///
    /// Disables channel update bits.
    fn unlink(&mut self) {
        // An all-zero LLR means "no next item": the list ends here.
        self.llr = regs::ChLlr(0);
    }

    /// The item's transfer count in number of words.
    ///
    /// NOTE(review): computed as BNDT (bytes) divided by the destination width
    /// (DDW). For writes where memory and peripheral widths differ, confirm
    /// this is the intended unit.
    fn transfer_count(&self) -> usize {
        let word_size: WordSize = self.tr1.ddw().into();
        self.br1.bndt() as usize / word_size.bytes()
    }
}
| 150 | |||
/// A table of linked list items.
///
/// The items are stored contiguously so the hardware can address each one
/// relative to the table's base address; the table must not move while a
/// transfer referencing it is active.
#[repr(C)]
pub struct Table<const ITEM_COUNT: usize> {
    /// The items.
    pub items: [LinearItem; ITEM_COUNT],
}
| 157 | |||
| 158 | impl<const ITEM_COUNT: usize> Table<ITEM_COUNT> { | ||
| 159 | /// Create a new table. | ||
| 160 | pub fn new(items: [LinearItem; ITEM_COUNT]) -> Self { | ||
| 161 | assert!(!items.is_empty()); | ||
| 162 | |||
| 163 | Self { items } | ||
| 164 | } | ||
| 165 | |||
| 166 | /// Create a ping-pong linked-list table. | ||
| 167 | /// | ||
| 168 | /// This uses two linked-list items, one for each half of the buffer. | ||
| 169 | pub unsafe fn new_ping_pong<W: Word>( | ||
| 170 | request: Request, | ||
| 171 | peri_addr: *mut W, | ||
| 172 | buffer: &mut [W], | ||
| 173 | direction: Dir, | ||
| 174 | ) -> Table<2> { | ||
| 175 | // Buffer halves should be the same length. | ||
| 176 | let half_len = buffer.len() / 2; | ||
| 177 | assert_eq!(half_len * 2, buffer.len()); | ||
| 178 | |||
| 179 | let items = match direction { | ||
| 180 | Dir::MemoryToPeripheral => [ | ||
| 181 | LinearItem::new_write(request, &mut buffer[..half_len], peri_addr), | ||
| 182 | LinearItem::new_write(request, &mut buffer[half_len..], peri_addr), | ||
| 183 | ], | ||
| 184 | Dir::PeripheralToMemory => [ | ||
| 185 | LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]), | ||
| 186 | LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]), | ||
| 187 | ], | ||
| 188 | }; | ||
| 189 | |||
| 190 | Table::new(items) | ||
| 191 | } | ||
| 192 | |||
| 193 | /// Link the table as given by the run mode. | ||
| 194 | pub fn link(&mut self, run_mode: RunMode) { | ||
| 195 | if matches!(run_mode, RunMode::Once | RunMode::Circular) { | ||
| 196 | self.link_sequential(); | ||
| 197 | } | ||
| 198 | |||
| 199 | if matches!(run_mode, RunMode::Circular) { | ||
| 200 | self.link_repeat(); | ||
| 201 | } | ||
| 202 | } | ||
| 203 | |||
| 204 | /// The number of linked list items. | ||
| 205 | pub fn len(&self) -> usize { | ||
| 206 | self.items.len() | ||
| 207 | } | ||
| 208 | |||
| 209 | /// The total transfer count of the table in number of words. | ||
| 210 | pub fn transfer_count(&self) -> usize { | ||
| 211 | let mut count = 0; | ||
| 212 | for item in self.items { | ||
| 213 | count += item.transfer_count() as usize | ||
| 214 | } | ||
| 215 | |||
| 216 | count | ||
| 217 | } | ||
| 218 | |||
| 219 | /// Link items of given indices together: first -> second. | ||
| 220 | pub fn link_indices(&mut self, first: usize, second: usize) { | ||
| 221 | assert!(first < self.len()); | ||
| 222 | assert!(second < self.len()); | ||
| 223 | |||
| 224 | let second_item = self.offset_address(second); | ||
| 225 | self.items[first].link_to(second_item); | ||
| 226 | } | ||
| 227 | |||
| 228 | /// Link items sequentially. | ||
| 229 | pub fn link_sequential(&mut self) { | ||
| 230 | if self.len() > 1 { | ||
| 231 | for index in 0..(self.items.len() - 1) { | ||
| 232 | let next = self.offset_address(index + 1); | ||
| 233 | self.items[index].link_to(next); | ||
| 234 | } | ||
| 235 | } | ||
| 236 | } | ||
| 237 | |||
| 238 | /// Link last to first item. | ||
| 239 | pub fn link_repeat(&mut self) { | ||
| 240 | let first_address = self.offset_address(0); | ||
| 241 | self.items.last_mut().unwrap().link_to(first_address); | ||
| 242 | } | ||
| 243 | |||
| 244 | /// Unlink all items. | ||
| 245 | pub fn unlink(&mut self) { | ||
| 246 | for item in self.items.iter_mut() { | ||
| 247 | item.unlink(); | ||
| 248 | } | ||
| 249 | } | ||
| 250 | |||
| 251 | /// Linked list base address (upper 16 address bits). | ||
| 252 | pub fn base_address(&self) -> u16 { | ||
| 253 | ((&raw const self.items as u32) >> 16) as _ | ||
| 254 | } | ||
| 255 | |||
| 256 | /// Linked list offset address (lower 16 address bits) at the selected index. | ||
| 257 | pub fn offset_address(&self, index: usize) -> u16 { | ||
| 258 | assert!(self.items.len() > index); | ||
| 259 | |||
| 260 | let address = &raw const self.items[index] as _; | ||
| 261 | |||
| 262 | // Ensure 32 bit address alignment. | ||
| 263 | assert_eq!(address & 0b11, 0); | ||
| 264 | |||
| 265 | address | ||
| 266 | } | ||
| 267 | } | ||
diff --git a/embassy-stm32/src/dma/gpdma/mod.rs b/embassy-stm32/src/dma/gpdma/mod.rs new file mode 100644 index 000000000..4a14c2a8e --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/mod.rs | |||
| @@ -0,0 +1,699 @@ | |||
| 1 | #![macro_use] | ||
| 2 | |||
| 3 | use core::future::Future; | ||
| 4 | use core::pin::Pin; | ||
| 5 | use core::sync::atomic::{fence, AtomicUsize, Ordering}; | ||
| 6 | use core::task::{Context, Poll}; | ||
| 7 | |||
| 8 | use embassy_hal_internal::Peri; | ||
| 9 | use embassy_sync::waitqueue::AtomicWaker; | ||
| 10 | use linked_list::Table; | ||
| 11 | |||
| 12 | use super::word::{Word, WordSize}; | ||
| 13 | use super::{AnyChannel, Channel, Dir, Request, STATE}; | ||
| 14 | use crate::interrupt::typelevel::Interrupt; | ||
| 15 | use crate::pac; | ||
| 16 | use crate::pac::gpdma::vals; | ||
| 17 | |||
| 18 | pub mod linked_list; | ||
| 19 | pub mod ringbuffered; | ||
| 20 | |||
/// Static description of one GPDMA channel: the owning DMA block and the
/// channel's index within it.
pub(crate) struct ChannelInfo {
    // The GPDMA instance this channel belongs to.
    pub(crate) dma: pac::gpdma::Gpdma,
    // Channel index within the GPDMA instance.
    pub(crate) num: usize,
    // On dual-core parts, the interrupt to (re-)enable on the handling core.
    #[cfg(feature = "_dual-core")]
    pub(crate) irq: pac::Interrupt,
}
| 27 | |||
/// DMA request priority
///
/// Mapped onto the hardware priority/weight levels by the `From` impl below.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
    /// Low Priority
    Low,
    /// Medium Priority
    Medium,
    /// High Priority
    High,
    /// Very High Priority
    VeryHigh,
}
| 41 | |||
| 42 | impl From<Priority> for pac::gpdma::vals::Prio { | ||
| 43 | fn from(value: Priority) -> Self { | ||
| 44 | match value { | ||
| 45 | Priority::Low => pac::gpdma::vals::Prio::LOW_WITH_LOWH_WEIGHT, | ||
| 46 | Priority::Medium => pac::gpdma::vals::Prio::LOW_WITH_MID_WEIGHT, | ||
| 47 | Priority::High => pac::gpdma::vals::Prio::LOW_WITH_HIGH_WEIGHT, | ||
| 48 | Priority::VeryHigh => pac::gpdma::vals::Prio::HIGH, | ||
| 49 | } | ||
| 50 | } | ||
| 51 | } | ||
| 52 | |||
/// GPDMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Request priority level.
    pub priority: Priority,
    /// Enable half transfer interrupt.
    pub half_transfer_ir: bool,
    /// Enable transfer complete interrupt.
    pub complete_transfer_ir: bool,
}
| 65 | |||
| 66 | impl Default for TransferOptions { | ||
| 67 | fn default() -> Self { | ||
| 68 | Self { | ||
| 69 | priority: Priority::VeryHigh, | ||
| 70 | half_transfer_ir: false, | ||
| 71 | complete_transfer_ir: true, | ||
| 72 | } | ||
| 73 | } | ||
| 74 | } | ||
| 75 | |||
| 76 | impl From<WordSize> for vals::Dw { | ||
| 77 | fn from(raw: WordSize) -> Self { | ||
| 78 | match raw { | ||
| 79 | WordSize::OneByte => Self::BYTE, | ||
| 80 | WordSize::TwoBytes => Self::HALF_WORD, | ||
| 81 | WordSize::FourBytes => Self::WORD, | ||
| 82 | } | ||
| 83 | } | ||
| 84 | } | ||
| 85 | |||
| 86 | impl From<vals::Dw> for WordSize { | ||
| 87 | fn from(raw: vals::Dw) -> Self { | ||
| 88 | match raw { | ||
| 89 | vals::Dw::BYTE => Self::OneByte, | ||
| 90 | vals::Dw::HALF_WORD => Self::TwoBytes, | ||
| 91 | vals::Dw::WORD => Self::FourBytes, | ||
| 92 | _ => panic!("Invalid word size"), | ||
| 93 | } | ||
| 94 | } | ||
| 95 | } | ||
| 96 | |||
/// Per-channel bookkeeping for an active linked-list transfer, updated from
/// the interrupt handler.
pub(crate) struct LLiState {
    /// The number of linked-list items.
    count: AtomicUsize,
    /// The index of the current linked-list item.
    index: AtomicUsize,
    /// The total transfer count of all linked-list items in number of words.
    transfer_count: AtomicUsize,
}
| 105 | |||
/// Runtime state for one DMA channel, shared between the interrupt handler
/// and the futures/ringbuffers built on top of it.
pub(crate) struct ChannelState {
    // Woken from the irq on any status change (completion, suspension, ...).
    waker: AtomicWaker,
    // Number of full transfer completions; for linked lists, a completion is
    // one full pass over the whole list.
    complete_count: AtomicUsize,
    // Linked-list progress tracking; zeroed for plain transfers.
    lli_state: LLiState,
}
| 111 | |||
impl ChannelState {
    // Const initializer so the static STATE array can be built at compile time.
    pub(crate) const NEW: Self = Self {
        waker: AtomicWaker::new(),
        complete_count: AtomicUsize::new(0),

        lli_state: LLiState {
            count: AtomicUsize::new(0),
            index: AtomicUsize::new(0),
            transfer_count: AtomicUsize::new(0),
        },
    };
}
| 124 | |||
/// Set up priorities and enable all GPDMA interrupts, then run generated init.
///
/// On dual-core parts the interrupt is enabled lazily from `on_irq` instead,
/// on whichever core handles it.
///
/// safety: must be called only once
pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: crate::interrupt::Priority) {
    foreach_interrupt! {
        ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
            crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
            #[cfg(not(feature = "_dual-core"))]
            crate::interrupt::typelevel::$irq::enable();
        };
    }
    crate::_generated::init_gpdma();
}
| 136 | |||
impl AnyChannel {
    /// Safety: Must be called with a matching set of parameters for a valid dma channel
    pub(crate) unsafe fn on_irq(&self) {
        let info = self.info();
        #[cfg(feature = "_dual-core")]
        {
            use embassy_hal_internal::interrupt::InterruptExt as _;
            info.irq.enable();
        }

        let state = &STATE[self.id as usize];

        let ch = info.dma.ch(info.num);
        let sr = ch.sr().read();

        // Hardware error flags are not recoverable: panic identifying the channel.
        if sr.dtef() {
            panic!(
                "DMA: data transfer error on DMA@{:08x} channel {}",
                info.dma.as_ptr() as u32,
                info.num
            );
        }
        if sr.usef() {
            panic!(
                "DMA: user settings error on DMA@{:08x} channel {}",
                info.dma.as_ptr() as u32,
                info.num
            );
        }
        if sr.ulef() {
            panic!(
                "DMA: link transfer error on DMA@{:08x} channel {}",
                info.dma.as_ptr() as u32,
                info.num
            );
        }

        // Half-transfer: just acknowledge; listeners are notified via the waker below.
        if sr.htf() {
            ch.fcr().write(|w| w.set_htf(true));
        }

        if sr.tcf() {
            ch.fcr().write(|w| w.set_tcf(true));

            // For linked-list transfers TCF fires once per list item. Advance the
            // item index and count a full completion only when the last item of
            // the list finished (wrapping the index for circular lists).
            let lli_count = state.lli_state.count.load(Ordering::Acquire);
            let complete = if lli_count > 0 {
                let next_lli_index = state.lli_state.index.load(Ordering::Acquire) + 1;
                let complete = next_lli_index >= lli_count;

                state
                    .lli_state
                    .index
                    .store(if complete { 0 } else { next_lli_index }, Ordering::Release);

                complete
            } else {
                // Plain transfer: every TCF is a full completion.
                true
            };

            if complete {
                state.complete_count.fetch_add(1, Ordering::Release);
            }
        }

        if sr.suspf() {
            // Disable all xxIEs to prevent the irq from firing again.
            ch.cr().write(|_| {});
        }
        state.waker.wake();
    }

    /// Remaining transfers of the current block, in words.
    ///
    /// NOTE(review): BNDT is in bytes and is divided by the destination width
    /// (DDW); confirm this is the intended unit when memory and peripheral
    /// widths differ (writes).
    fn get_remaining_transfers(&self) -> u16 {
        let info = self.info();
        let ch = info.dma.ch(info.num);
        let word_size: WordSize = ch.tr1().read().ddw().into();

        ch.br1().read().bndt() / word_size.bytes() as u16
    }

    /// Configure a plain (non-linked-list) transfer. Does not start the channel.
    ///
    /// `data_size` is programmed as the source width (SDW), `dst_size` as the
    /// destination width (DDW).
    ///
    /// # Safety
    ///
    /// `peri_addr` and `mem_addr` must be valid for the direction and word
    /// sizes, and must stay valid while the channel runs.
    unsafe fn configure(
        &self,
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        dst_size: WordSize,
        options: TransferOptions,
    ) {
        // BNDT is specified as bytes, not as number of transfers.
        let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
            panic!("DMA transfers may not be larger than 65535 bytes.");
        };

        let info = self.info();
        let ch = info.dma.ch(info.num);

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::SeqCst);

        // Reset the channel to a known state before programming it.
        ch.cr().write(|w| w.set_reset(true));
        ch.fcr().write(|w| {
            // Clear all irqs
            w.set_dtef(true);
            w.set_htf(true);
            w.set_suspf(true);
            w.set_tcf(true);
            w.set_tof(true);
            w.set_ulef(true);
            w.set_usef(true);
        });
        ch.llr().write(|_| {}); // no linked list
        ch.tr1().write(|w| {
            w.set_sdw(data_size.into());
            w.set_ddw(dst_size.into());
            // Only the memory side increments; the peripheral register is fixed.
            w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
            w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
        });
        ch.tr2().write(|w| {
            w.set_dreq(match dir {
                Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
                Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
            });
            w.set_reqsel(request);
        });
        ch.tr3().write(|_| {}); // no address offsets.
        ch.br1().write(|w| w.set_bndt(bndt));

        match dir {
            Dir::MemoryToPeripheral => {
                ch.sar().write_value(mem_addr as _);
                ch.dar().write_value(peri_addr as _);
            }
            Dir::PeripheralToMemory => {
                ch.sar().write_value(peri_addr as _);
                ch.dar().write_value(mem_addr as _);
            }
        }

        // Interrupt enables: errors and suspension always, HT/TC per options.
        ch.cr().write(|w| {
            w.set_prio(options.priority.into());
            w.set_htie(options.half_transfer_ir);
            w.set_tcie(options.complete_transfer_ir);
            w.set_useie(true);
            w.set_dteie(true);
            w.set_suspie(true);
        });

        // Plain transfer: clear any stale linked-list bookkeeping.
        let state = &STATE[self.id as usize];
        state.lli_state.count.store(0, Ordering::Relaxed);
        state.lli_state.index.store(0, Ordering::Relaxed);
        state.lli_state.transfer_count.store(0, Ordering::Relaxed)
    }

    /// Configure a linked-list transfer.
    ///
    /// The channel's own registers act as an empty LLI0 whose LLR points at
    /// the first real item; the hardware then loads everything from the table.
    ///
    /// # Safety
    ///
    /// `table` (and the buffers its items reference) must stay valid and
    /// unmoved while the channel runs.
    unsafe fn configure_linked_list<const ITEM_COUNT: usize>(
        &self,
        table: &Table<ITEM_COUNT>,
        options: TransferOptions,
    ) {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::SeqCst);

        // Reset the channel to a known state before programming it.
        ch.cr().write(|w| w.set_reset(true));
        ch.fcr().write(|w| {
            // Clear all irqs
            w.set_dtef(true);
            w.set_htf(true);
            w.set_suspf(true);
            w.set_tcf(true);
            w.set_tof(true);
            w.set_ulef(true);
            w.set_usef(true);
        });
        // All item addresses are resolved relative to this base.
        ch.lbar().write(|reg| reg.set_lba(table.base_address()));

        // Empty LLI0.
        ch.br1().write(|w| w.set_bndt(0));

        // Enable all linked-list field updates.
        ch.llr().write(|w| {
            w.set_ut1(true);
            w.set_ut2(true);
            w.set_ub1(true);
            w.set_usa(true);
            w.set_uda(true);
            w.set_ull(true);

            // Lower two bits are ignored: 32 bit aligned.
            w.set_la(table.offset_address(0) >> 2);
        });

        ch.tr3().write(|_| {}); // no address offsets.

        // Interrupt enables: errors, link errors and suspension always, HT/TC per options.
        ch.cr().write(|w| {
            w.set_prio(options.priority.into());
            w.set_htie(options.half_transfer_ir);
            w.set_tcie(options.complete_transfer_ir);
            w.set_useie(true);
            w.set_uleie(true);
            w.set_dteie(true);
            w.set_suspie(true);
        });

        // Seed the bookkeeping the irq handler uses to track list progress.
        let state = &STATE[self.id as usize];
        state.lli_state.count.store(ITEM_COUNT, Ordering::Relaxed);
        state.lli_state.index.store(0, Ordering::Relaxed);
        state
            .lli_state
            .transfer_count
            .store(table.transfer_count(), Ordering::Relaxed)
    }

    /// Enable the channel, starting the configured transfer.
    fn start(&self) {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        ch.cr().modify(|w| w.set_en(true));
    }

    /// Request suspension; the channel configuration is preserved.
    fn request_pause(&self) {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        ch.cr().modify(|w| w.set_susp(true))
    }

    /// Clear the suspend request, resuming a paused channel.
    fn request_resume(&self) {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        ch.cr().modify(|w| w.set_susp(false));
    }

    /// Suspend the channel, wait for it to stop, then reset it.
    /// The channel configuration is lost.
    fn request_reset(&self) {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        self.request_pause();
        while self.is_running() {}

        ch.cr().modify(|w| w.set_reset(true));
    }

    /// Whether the channel is actively transferring (neither suspended nor idle).
    fn is_running(&self) -> bool {
        let info = self.info();
        let ch = info.dma.ch(info.num);

        let sr = ch.sr().read();

        !sr.suspf() && !sr.idlef()
    }

    /// Poll-style check used while waiting for the channel to stop.
    fn poll_stop(&self) -> Poll<()> {
        use core::sync::atomic::compiler_fence;
        compiler_fence(Ordering::SeqCst);

        if !self.is_running() {
            Poll::Ready(())
        } else {
            Poll::Pending
        }
    }
}
| 406 | |||
/// Linked-list DMA transfer.
///
/// Completes when the channel stops running; dropping it resets the channel.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> {
    channel: Peri<'a, AnyChannel>,
}
| 412 | |||
impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> {
    /// Create a new linked-list transfer.
    ///
    /// NOTE(review): the const parameter `N` is never used, yet callers must
    /// spell it out since it cannot be inferred — consider removing it.
    ///
    /// # Safety
    ///
    /// The buffers referenced by `table`'s items must stay valid for the
    /// duration of the transfer.
    pub unsafe fn new_linked_list<const N: usize>(
        channel: Peri<'a, impl Channel>,
        table: Table<ITEM_COUNT>,
        options: TransferOptions,
    ) -> Self {
        Self::new_inner_linked_list(channel.into(), table, options)
    }

    // Configure the channel from the table and start it.
    //
    // NOTE(review): `table` is taken by value and dropped here, while the
    // hardware keeps the addresses of its items (LBAR/LLR) — confirm the
    // table is kept alive elsewhere (e.g. by the ringbuffer wrapper), or
    // the list fetches read freed stack memory.
    unsafe fn new_inner_linked_list(
        channel: Peri<'a, AnyChannel>,
        table: Table<ITEM_COUNT>,
        options: TransferOptions,
    ) -> Self {
        channel.configure_linked_list(&table, options);
        channel.start();

        Self { channel }
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    ///
    /// To resume the transfer, call [`request_resume`](Self::request_resume) again.
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause()
    }

    /// Request the transfer to resume after having been paused.
    pub fn request_resume(&mut self) {
        self.channel.request_resume()
    }

    /// Request the DMA to reset.
    ///
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    pub fn request_reset(&mut self) {
        self.channel.request_reset()
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_pause`](Self::request_pause).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Gets the total remaining transfers for the channel
    /// Note: this will be zero for transfers that completed without cancellation.
    pub fn get_remaining_transfers(&self) -> u16 {
        self.channel.get_remaining_transfers()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        // The transfer finished; skip Drop's reset.
        core::mem::forget(self);
    }
}
| 479 | |||
| 480 | impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> { | ||
| 481 | fn drop(&mut self) { | ||
| 482 | self.request_reset(); | ||
| 483 | |||
| 484 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 485 | fence(Ordering::SeqCst); | ||
| 486 | } | ||
| 487 | } | ||
| 488 | |||
| 489 | impl<'a, const ITEM_COUNT: usize> Unpin for LinkedListTransfer<'a, ITEM_COUNT> {} | ||
| 490 | impl<'a, const ITEM_COUNT: usize> Future for LinkedListTransfer<'a, ITEM_COUNT> { | ||
| 491 | type Output = (); | ||
| 492 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { | ||
| 493 | let state = &STATE[self.channel.id as usize]; | ||
| 494 | state.waker.register(cx.waker()); | ||
| 495 | |||
| 496 | if self.is_running() { | ||
| 497 | Poll::Pending | ||
| 498 | } else { | ||
| 499 | Poll::Ready(()) | ||
| 500 | } | ||
| 501 | } | ||
| 502 | } | ||
| 503 | |||
/// DMA transfer.
///
/// Completes when the channel stops running; dropping it pauses the channel
/// and waits for it to stop.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    channel: Peri<'a, AnyChannel>,
}
| 509 | |||
impl<'a> Transfer<'a> {
    /// Create a new read DMA transfer (peripheral to memory).
    ///
    /// # Safety
    ///
    /// `peri_addr` must be a valid peripheral data-register address for this request.
    pub unsafe fn new_read<W: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        peri_addr: *mut W,
        buf: &'a mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_read_raw(channel, request, peri_addr, buf, options)
    }

    /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
    ///
    /// # Safety
    ///
    /// `peri_addr` and `buf` must stay valid for the whole duration of the transfer.
    pub unsafe fn new_read_raw<MW: Word, PW: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        peri_addr: *mut PW,
        buf: *mut [MW],
        options: TransferOptions,
    ) -> Self {
        Self::new_inner(
            channel.into(),
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            buf as *mut MW as *mut u32,
            buf.len(),
            true,
            // Source width first (peripheral for reads), then destination width.
            PW::size(),
            MW::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
    ///
    /// # Safety
    ///
    /// `peri_addr` must be a valid peripheral data-register address for this request.
    pub unsafe fn new_write<MW: Word, PW: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        buf: &'a [MW],
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        Self::new_write_raw(channel, request, buf, peri_addr, options)
    }

    /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
    ///
    /// # Safety
    ///
    /// `peri_addr` and `buf` must stay valid for the whole duration of the transfer.
    pub unsafe fn new_write_raw<MW: Word, PW: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        buf: *const [MW],
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        Self::new_inner(
            channel.into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            buf as *const MW as *mut u32,
            buf.len(),
            true,
            // Source width first (memory for writes), then destination width.
            MW::size(),
            PW::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
    ///
    /// # Safety
    ///
    /// `peri_addr` and `repeated` must stay valid for the whole duration of the transfer.
    pub unsafe fn new_write_repeated<MW: Word, PW: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        repeated: &'a MW,
        count: usize,
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        Self::new_inner(
            channel.into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            repeated as *const MW as *mut u32,
            count,
            // Do not increment the memory address: re-send the same word.
            false,
            MW::size(),
            PW::size(),
            options,
        )
    }

    // Configure the channel and start it.
    //
    // `configure` additionally enforces the 65535-byte BNDT limit; the assert
    // here rejects empty transfers and word counts beyond 16 bits early.
    unsafe fn new_inner(
        channel: Peri<'a, AnyChannel>,
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        peripheral_size: WordSize,
        options: TransferOptions,
    ) -> Self {
        assert!(mem_len > 0 && mem_len <= 0xFFFF);

        channel.configure(
            request,
            dir,
            peri_addr,
            mem_addr,
            mem_len,
            incr_mem,
            data_size,
            peripheral_size,
            options,
        );
        channel.start();

        Self { channel }
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`start`](Self::start) again.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause()
    }

    /// Request the transfer to resume after being suspended.
    pub fn request_resume(&mut self) {
        self.channel.request_resume()
    }

    /// Request the DMA to reset.
    ///
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    pub fn request_reset(&mut self) {
        self.channel.request_reset()
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_pause`](Self::request_pause).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Gets the total remaining transfers for the channel
    /// Note: this will be zero for transfers that completed without cancellation.
    pub fn get_remaining_transfers(&self) -> u16 {
        self.channel.get_remaining_transfers()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        // The transfer finished; skip Drop's pause-and-wait.
        core::mem::forget(self);
    }
}
| 675 | |||
| 676 | impl<'a> Drop for Transfer<'a> { | ||
| 677 | fn drop(&mut self) { | ||
| 678 | self.request_pause(); | ||
| 679 | while self.is_running() {} | ||
| 680 | |||
| 681 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 682 | fence(Ordering::SeqCst); | ||
| 683 | } | ||
| 684 | } | ||
| 685 | |||
| 686 | impl<'a> Unpin for Transfer<'a> {} | ||
| 687 | impl<'a> Future for Transfer<'a> { | ||
| 688 | type Output = (); | ||
| 689 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { | ||
| 690 | let state = &STATE[self.channel.id as usize]; | ||
| 691 | state.waker.register(cx.waker()); | ||
| 692 | |||
| 693 | if self.is_running() { | ||
| 694 | Poll::Pending | ||
| 695 | } else { | ||
| 696 | Poll::Ready(()) | ||
| 697 | } | ||
| 698 | } | ||
| 699 | } | ||
diff --git a/embassy-stm32/src/dma/gpdma/ringbuffered.rs b/embassy-stm32/src/dma/gpdma/ringbuffered.rs new file mode 100644 index 000000000..9ee52193b --- /dev/null +++ b/embassy-stm32/src/dma/gpdma/ringbuffered.rs | |||
| @@ -0,0 +1,332 @@ | |||
| 1 | //! GPDMA ring buffer implementation. | ||
| 2 | //! | ||
| 3 | //! FIXME: Add request_pause functionality? | ||
| 4 | //! FIXME: Stop the DMA, if a user does not queue new transfers (chain of linked-list items ends automatically). | ||
| 5 | use core::future::poll_fn; | ||
| 6 | use core::sync::atomic::{fence, Ordering}; | ||
| 7 | use core::task::Waker; | ||
| 8 | |||
| 9 | use embassy_hal_internal::Peri; | ||
| 10 | |||
| 11 | use super::{AnyChannel, TransferOptions, STATE}; | ||
| 12 | use crate::dma::gpdma::linked_list::{RunMode, Table}; | ||
| 13 | use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer}; | ||
| 14 | use crate::dma::word::Word; | ||
| 15 | use crate::dma::{Channel, Dir, Request}; | ||
| 16 | |||
| 17 | struct DmaCtrlImpl<'a>(Peri<'a, AnyChannel>); | ||
| 18 | |||
| 19 | impl<'a> DmaCtrl for DmaCtrlImpl<'a> { | ||
| 20 | fn get_remaining_transfers(&self) -> usize { | ||
| 21 | let state = &STATE[self.0.id as usize]; | ||
| 22 | let current_remaining = self.0.get_remaining_transfers() as usize; | ||
| 23 | |||
| 24 | let lli_count = state.lli_state.count.load(Ordering::Acquire); | ||
| 25 | |||
| 26 | if lli_count > 0 { | ||
| 27 | // In linked-list mode, the remaining transfers are the sum of the full lengths of LLIs that follow, | ||
| 28 | // and the remaining transfers for the current LLI. | ||
| 29 | let lli_index = state.lli_state.index.load(Ordering::Acquire); | ||
| 30 | let single_transfer_count = state.lli_state.transfer_count.load(Ordering::Acquire) / lli_count; | ||
| 31 | |||
| 32 | (lli_count - lli_index - 1) * single_transfer_count + current_remaining | ||
| 33 | } else { | ||
| 34 | // No linked-list mode. | ||
| 35 | current_remaining | ||
| 36 | } | ||
| 37 | } | ||
| 38 | |||
| 39 | fn reset_complete_count(&mut self) -> usize { | ||
| 40 | let state = &STATE[self.0.id as usize]; | ||
| 41 | |||
| 42 | state.complete_count.swap(0, Ordering::AcqRel) | ||
| 43 | } | ||
| 44 | |||
| 45 | fn set_waker(&mut self, waker: &Waker) { | ||
| 46 | STATE[self.0.id as usize].waker.register(waker); | ||
| 47 | } | ||
| 48 | } | ||
| 49 | |||
| 50 | /// Ringbuffer for receiving data using GPDMA linked-list mode. | ||
| 51 | pub struct ReadableRingBuffer<'a, W: Word> { | ||
| 52 | channel: Peri<'a, AnyChannel>, | ||
| 53 | ringbuf: ReadableDmaRingBuffer<'a, W>, | ||
| 54 | table: Table<2>, | ||
| 55 | options: TransferOptions, | ||
| 56 | } | ||
| 57 | |||
| 58 | impl<'a, W: Word> ReadableRingBuffer<'a, W> { | ||
| 59 | /// Create a new ring buffer. | ||
| 60 | /// | ||
| 61 | /// Transfer options are applied to the individual linked list items. | ||
| 62 | pub unsafe fn new( | ||
| 63 | channel: Peri<'a, impl Channel>, | ||
| 64 | request: Request, | ||
| 65 | peri_addr: *mut W, | ||
| 66 | buffer: &'a mut [W], | ||
| 67 | options: TransferOptions, | ||
| 68 | ) -> Self { | ||
| 69 | let channel: Peri<'a, AnyChannel> = channel.into(); | ||
| 70 | let table = Table::<2>::new_ping_pong::<W>(request, peri_addr, buffer, Dir::PeripheralToMemory); | ||
| 71 | |||
| 72 | Self { | ||
| 73 | channel, | ||
| 74 | ringbuf: ReadableDmaRingBuffer::new(buffer), | ||
| 75 | table, | ||
| 76 | options, | ||
| 77 | } | ||
| 78 | } | ||
| 79 | |||
| 80 | /// Start the ring buffer operation. | ||
| 81 | pub fn start(&mut self) { | ||
| 82 | // Apply the default configuration to the channel. | ||
| 83 | unsafe { self.channel.configure_linked_list(&self.table, self.options) }; | ||
| 84 | self.table.link(RunMode::Circular); | ||
| 85 | self.channel.start(); | ||
| 86 | } | ||
| 87 | |||
| 88 | /// Clear all data in the ring buffer. | ||
| 89 | pub fn clear(&mut self) { | ||
| 90 | self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); | ||
| 91 | } | ||
| 92 | |||
| 93 | /// Read elements from the ring buffer | ||
| 94 | /// Return a tuple of the length read and the length remaining in the buffer | ||
| 95 | /// If not all of the elements were read, then there will be some elements in the buffer remaining | ||
| 96 | /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read | ||
| 97 | /// Error is returned if the portion to be read was overwritten by the DMA controller. | ||
| 98 | pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> { | ||
| 99 | self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf) | ||
| 100 | } | ||
| 101 | |||
| 102 | /// Read an exact number of elements from the ringbuffer. | ||
| 103 | /// | ||
| 104 | /// Returns the remaining number of elements available for immediate reading. | ||
| 105 | /// Error is returned if the portion to be read was overwritten by the DMA controller. | ||
| 106 | /// | ||
| 107 | /// Async/Wake Behavior: | ||
| 108 | /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, | ||
| 109 | /// and when it wraps around. This means that when called with a buffer of length 'M', when this | ||
| 110 | /// ring buffer was created with a buffer of size 'N': | ||
| 111 | /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. | ||
| 112 | /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. | ||
| 113 | pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, Error> { | ||
| 114 | self.ringbuf | ||
| 115 | .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) | ||
| 116 | .await | ||
| 117 | } | ||
| 118 | |||
| 119 | /// The current length of the ringbuffer | ||
| 120 | pub fn len(&mut self) -> Result<usize, Error> { | ||
| 121 | Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) | ||
| 122 | } | ||
| 123 | |||
| 124 | /// The capacity of the ringbuffer | ||
| 125 | pub const fn capacity(&self) -> usize { | ||
| 126 | self.ringbuf.cap() | ||
| 127 | } | ||
| 128 | |||
| 129 | /// Set a waker to be woken when at least one byte is received. | ||
| 130 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 131 | DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); | ||
| 132 | } | ||
| 133 | |||
| 134 | /// Request the transfer to pause, keeping the existing configuration for this channel. | ||
| 135 | /// | ||
| 136 | /// To resume the transfer, call [`request_resume`](Self::request_resume). | ||
| 137 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. | ||
| 138 | pub fn request_pause(&mut self) { | ||
| 139 | self.channel.request_pause() | ||
| 140 | } | ||
| 141 | |||
| 142 | /// Request the transfer to resume after having been paused. | ||
| 143 | pub fn request_resume(&mut self) { | ||
| 144 | self.channel.request_resume() | ||
| 145 | } | ||
| 146 | |||
| 147 | /// Request the DMA to reset. | ||
| 148 | /// | ||
| 149 | /// The configuration for this channel will **not be preserved**. If you need to restart the transfer | ||
| 150 | /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. | ||
| 151 | pub fn request_reset(&mut self) { | ||
| 152 | self.channel.request_reset() | ||
| 153 | } | ||
| 154 | |||
| 155 | /// Return whether this transfer is still running. | ||
| 156 | /// | ||
| 157 | /// If this returns `false`, it can be because either the transfer finished, or | ||
| 158 | /// it was requested to stop early with [`request_pause`](Self::request_pause). | ||
| 159 | pub fn is_running(&mut self) -> bool { | ||
| 160 | self.channel.is_running() | ||
| 161 | } | ||
| 162 | |||
| 163 | /// Stop the DMA transfer and await until the buffer is full. | ||
| 164 | /// | ||
| 165 | /// This disables the DMA transfer's circular mode so that the transfer | ||
| 166 | /// stops when the buffer is full. | ||
| 167 | /// | ||
| 168 | /// This is designed to be used with streaming input data such as the | ||
| 169 | /// I2S/SAI or ADC. | ||
| 170 | pub async fn stop(&mut self) { | ||
| 171 | // wait until cr.susp reads as true | ||
| 172 | poll_fn(|cx| { | ||
| 173 | self.set_waker(cx.waker()); | ||
| 174 | self.channel.poll_stop() | ||
| 175 | }) | ||
| 176 | .await | ||
| 177 | } | ||
| 178 | } | ||
| 179 | |||
| 180 | impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> { | ||
| 181 | fn drop(&mut self) { | ||
| 182 | self.request_pause(); | ||
| 183 | while self.is_running() {} | ||
| 184 | |||
| 185 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 186 | fence(Ordering::SeqCst); | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 190 | /// Ringbuffer for writing data using GPDMA linked-list mode. | ||
| 191 | pub struct WritableRingBuffer<'a, W: Word> { | ||
| 192 | channel: Peri<'a, AnyChannel>, | ||
| 193 | ringbuf: WritableDmaRingBuffer<'a, W>, | ||
| 194 | table: Table<2>, | ||
| 195 | options: TransferOptions, | ||
| 196 | } | ||
| 197 | |||
| 198 | impl<'a, W: Word> WritableRingBuffer<'a, W> { | ||
| 199 | /// Create a new ring buffer. | ||
| 200 | /// | ||
| 201 | /// Transfer options are applied to the individual linked list items. | ||
| 202 | pub unsafe fn new( | ||
| 203 | channel: Peri<'a, impl Channel>, | ||
| 204 | request: Request, | ||
| 205 | peri_addr: *mut W, | ||
| 206 | buffer: &'a mut [W], | ||
| 207 | options: TransferOptions, | ||
| 208 | ) -> Self { | ||
| 209 | let channel: Peri<'a, AnyChannel> = channel.into(); | ||
| 210 | let table = Table::<2>::new_ping_pong::<W>(request, peri_addr, buffer, Dir::MemoryToPeripheral); | ||
| 211 | |||
| 212 | Self { | ||
| 213 | channel, | ||
| 214 | ringbuf: WritableDmaRingBuffer::new(buffer), | ||
| 215 | table, | ||
| 216 | options, | ||
| 217 | } | ||
| 218 | } | ||
| 219 | |||
| 220 | /// Start the ring buffer operation. | ||
| 221 | pub fn start(&mut self) { | ||
| 222 | // Apply the default configuration to the channel. | ||
| 223 | unsafe { self.channel.configure_linked_list(&self.table, self.options) }; | ||
| 224 | self.table.link(RunMode::Circular); | ||
| 225 | |||
| 226 | self.channel.start(); | ||
| 227 | } | ||
| 228 | |||
| 229 | /// Clear all data in the ring buffer. | ||
| 230 | pub fn clear(&mut self) { | ||
| 231 | self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow())); | ||
| 232 | } | ||
| 233 | |||
| 234 | /// Write elements directly to the raw buffer. | ||
| 235 | /// This can be used to fill the buffer before starting the DMA transfer. | ||
| 236 | pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { | ||
| 237 | self.ringbuf.write_immediate(buf) | ||
| 238 | } | ||
| 239 | |||
| 240 | /// Write elements from the ring buffer | ||
| 241 | /// Return a tuple of the length written and the length remaining in the buffer | ||
| 242 | pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> { | ||
| 243 | self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf) | ||
| 244 | } | ||
| 245 | |||
| 246 | /// Write an exact number of elements to the ringbuffer. | ||
| 247 | pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> { | ||
| 248 | self.ringbuf | ||
| 249 | .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer) | ||
| 250 | .await | ||
| 251 | } | ||
| 252 | |||
| 253 | /// Wait for any ring buffer write error. | ||
| 254 | pub async fn wait_write_error(&mut self) -> Result<usize, Error> { | ||
| 255 | self.ringbuf | ||
| 256 | .wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow())) | ||
| 257 | .await | ||
| 258 | } | ||
| 259 | |||
| 260 | /// The current length of the ringbuffer | ||
| 261 | pub fn len(&mut self) -> Result<usize, Error> { | ||
| 262 | Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?) | ||
| 263 | } | ||
| 264 | |||
| 265 | /// The capacity of the ringbuffer | ||
| 266 | pub const fn capacity(&self) -> usize { | ||
| 267 | self.ringbuf.cap() | ||
| 268 | } | ||
| 269 | |||
| 270 | /// Set a waker to be woken when at least one byte can be written. | ||
| 271 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 272 | DmaCtrlImpl(self.channel.reborrow()).set_waker(waker); | ||
| 273 | } | ||
| 274 | |||
| 275 | /// Request the DMA to suspend. | ||
| 276 | /// | ||
| 277 | /// To resume the transfer, call [`request_resume`](Self::request_resume). | ||
| 278 | /// | ||
| 279 | /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. | ||
| 280 | pub fn request_pause(&mut self) { | ||
| 281 | self.channel.request_pause() | ||
| 282 | } | ||
| 283 | |||
| 284 | /// Request the DMA to resume transfers after being suspended. | ||
| 285 | pub fn request_resume(&mut self) { | ||
| 286 | self.channel.request_resume() | ||
| 287 | } | ||
| 288 | |||
| 289 | /// Request the DMA to reset. | ||
| 290 | /// | ||
| 291 | /// The configuration for this channel will **not be preserved**. If you need to restart the transfer | ||
| 292 | /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead. | ||
| 293 | pub fn request_reset(&mut self) { | ||
| 294 | self.channel.request_reset() | ||
| 295 | } | ||
| 296 | |||
| 297 | /// Return whether DMA is still running. | ||
| 298 | /// | ||
| 299 | /// If this returns `false`, it can be because either the transfer finished, or | ||
| 300 | /// it was requested to stop early with [`request_pause`](Self::request_pause). | ||
| 301 | pub fn is_running(&mut self) -> bool { | ||
| 302 | self.channel.is_running() | ||
| 303 | } | ||
| 304 | |||
| 305 | /// Stop the DMA transfer and await until the remaining buffered data has been written out. | ||
| 306 | /// | ||
| 307 | /// This disables the DMA transfer's circular mode so that the transfer | ||
| 308 | /// stops once the buffered data has been drained. | ||
| 309 | /// | ||
| 310 | /// This is designed to be used with streaming output data such as the | ||
| 311 | /// I2S/SAI. | ||
| 312 | /// | ||
| 313 | /// When using the UART, you probably want `request_pause()`. | ||
| 314 | pub async fn stop(&mut self) { | ||
| 315 | // wait until cr.susp reads as true | ||
| 316 | poll_fn(|cx| { | ||
| 317 | self.set_waker(cx.waker()); | ||
| 318 | self.channel.poll_stop() | ||
| 319 | }) | ||
| 320 | .await | ||
| 321 | } | ||
| 322 | } | ||
| 323 | |||
| 324 | impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> { | ||
| 325 | fn drop(&mut self) { | ||
| 326 | self.request_pause(); | ||
| 327 | while self.is_running() {} | ||
| 328 | |||
| 329 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 330 | fence(Ordering::SeqCst); | ||
| 331 | } | ||
| 332 | } | ||
diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs index d3b070a6d..5989bfd7c 100644 --- a/embassy-stm32/src/dma/mod.rs +++ b/embassy-stm32/src/dma/mod.rs | |||
| @@ -9,6 +9,8 @@ pub use dma_bdma::*; | |||
| 9 | #[cfg(gpdma)] | 9 | #[cfg(gpdma)] |
| 10 | pub(crate) mod gpdma; | 10 | pub(crate) mod gpdma; |
| 11 | #[cfg(gpdma)] | 11 | #[cfg(gpdma)] |
| 12 | pub use gpdma::ringbuffered::*; | ||
| 13 | #[cfg(gpdma)] | ||
| 12 | pub use gpdma::*; | 14 | pub use gpdma::*; |
| 13 | 15 | ||
| 14 | #[cfg(dmamux)] | 16 | #[cfg(dmamux)] |
| @@ -26,10 +28,13 @@ use embassy_hal_internal::{impl_peripheral, PeripheralType}; | |||
| 26 | 28 | ||
| 27 | use crate::interrupt; | 29 | use crate::interrupt; |
| 28 | 30 | ||
| 31 | /// The direction of a DMA transfer. | ||
| 29 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | 32 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] |
| 30 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | 33 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] |
| 31 | enum Dir { | 34 | pub enum Dir { |
| 35 | /// Transfer from memory to a peripheral. | ||
| 32 | MemoryToPeripheral, | 36 | MemoryToPeripheral, |
| 37 | /// Transfer from a peripheral to memory. | ||
| 33 | PeripheralToMemory, | 38 | PeripheralToMemory, |
| 34 | } | 39 | } |
| 35 | 40 | ||
diff --git a/embassy-stm32/src/dma/ringbuffer/mod.rs b/embassy-stm32/src/dma/ringbuffer/mod.rs index 44ea497fe..659ffa9e5 100644 --- a/embassy-stm32/src/dma/ringbuffer/mod.rs +++ b/embassy-stm32/src/dma/ringbuffer/mod.rs | |||
| @@ -1,5 +1,3 @@ | |||
| 1 | #![cfg_attr(gpdma, allow(unused))] | ||
| 2 | |||
| 3 | use core::future::poll_fn; | 1 | use core::future::poll_fn; |
| 4 | use core::task::{Poll, Waker}; | 2 | use core::task::{Poll, Waker}; |
| 5 | 3 | ||
| @@ -285,17 +283,20 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { | |||
| 285 | } | 283 | } |
| 286 | 284 | ||
| 287 | /// Write an exact number of elements to the ringbuffer. | 285 | /// Write an exact number of elements to the ringbuffer. |
| 286 | /// | ||
| 287 | /// Returns the remaining write capacity in the buffer. | ||
| 288 | #[allow(dead_code)] | ||
| 288 | pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, Error> { | 289 | pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, Error> { |
| 289 | let mut written_data = 0; | 290 | let mut written_len = 0; |
| 290 | let buffer_len = buffer.len(); | 291 | let buffer_len = buffer.len(); |
| 291 | 292 | ||
| 292 | poll_fn(|cx| { | 293 | poll_fn(|cx| { |
| 293 | dma.set_waker(cx.waker()); | 294 | dma.set_waker(cx.waker()); |
| 294 | 295 | ||
| 295 | match self.write(dma, &buffer[written_data..buffer_len]) { | 296 | match self.write(dma, &buffer[written_len..buffer_len]) { |
| 296 | Ok((len, remaining)) => { | 297 | Ok((len, remaining)) => { |
| 297 | written_data += len; | 298 | written_len += len; |
| 298 | if written_data == buffer_len { | 299 | if written_len == buffer_len { |
| 299 | Poll::Ready(Ok(remaining)) | 300 | Poll::Ready(Ok(remaining)) |
| 300 | } else { | 301 | } else { |
| 301 | Poll::Pending | 302 | Poll::Pending |
diff --git a/embassy-stm32/src/i2c/v2.rs b/embassy-stm32/src/i2c/v2.rs index 6b20a601b..0bfc795ac 100644 --- a/embassy-stm32/src/i2c/v2.rs +++ b/embassy-stm32/src/i2c/v2.rs | |||
| @@ -1283,7 +1283,7 @@ impl<'d> I2c<'d, Async, MultiMaster> { | |||
| 1283 | } else if isr.stopf() { | 1283 | } else if isr.stopf() { |
| 1284 | self.info.regs.icr().write(|reg| reg.set_stopcf(true)); | 1284 | self.info.regs.icr().write(|reg| reg.set_stopcf(true)); |
| 1285 | if remaining_len > 0 { | 1285 | if remaining_len > 0 { |
| 1286 | dma_transfer.request_stop(); | 1286 | dma_transfer.request_pause(); |
| 1287 | Poll::Ready(Ok(SendStatus::LeftoverBytes(remaining_len as usize))) | 1287 | Poll::Ready(Ok(SendStatus::LeftoverBytes(remaining_len as usize))) |
| 1288 | } else { | 1288 | } else { |
| 1289 | Poll::Ready(Ok(SendStatus::Done)) | 1289 | Poll::Ready(Ok(SendStatus::Done)) |
diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs index 4965f8b04..88cc225dd 100644 --- a/embassy-stm32/src/sai/mod.rs +++ b/embassy-stm32/src/sai/mod.rs | |||
| @@ -1,13 +1,11 @@ | |||
| 1 | //! Serial Audio Interface (SAI) | 1 | //! Serial Audio Interface (SAI) |
| 2 | #![macro_use] | 2 | #![macro_use] |
| 3 | #![cfg_attr(gpdma, allow(unused))] | ||
| 4 | 3 | ||
| 5 | use core::marker::PhantomData; | 4 | use core::marker::PhantomData; |
| 6 | 5 | ||
| 7 | use embassy_hal_internal::PeripheralType; | 6 | use embassy_hal_internal::PeripheralType; |
| 8 | 7 | ||
| 9 | pub use crate::dma::word; | 8 | pub use crate::dma::word; |
| 10 | #[cfg(not(gpdma))] | ||
| 11 | use crate::dma::{ringbuffer, Channel, ReadableRingBuffer, Request, TransferOptions, WritableRingBuffer}; | 9 | use crate::dma::{ringbuffer, Channel, ReadableRingBuffer, Request, TransferOptions, WritableRingBuffer}; |
| 12 | use crate::gpio::{AfType, AnyPin, OutputType, Pull, SealedPin as _, Speed}; | 10 | use crate::gpio::{AfType, AnyPin, OutputType, Pull, SealedPin as _, Speed}; |
| 13 | use crate::pac::sai::{vals, Sai as Regs}; | 11 | use crate::pac::sai::{vals, Sai as Regs}; |
| @@ -26,7 +24,6 @@ pub enum Error { | |||
| 26 | Overrun, | 24 | Overrun, |
| 27 | } | 25 | } |
| 28 | 26 | ||
| 29 | #[cfg(not(gpdma))] | ||
| 30 | impl From<ringbuffer::Error> for Error { | 27 | impl From<ringbuffer::Error> for Error { |
| 31 | fn from(#[allow(unused)] err: ringbuffer::Error) -> Self { | 28 | fn from(#[allow(unused)] err: ringbuffer::Error) -> Self { |
| 32 | #[cfg(feature = "defmt")] | 29 | #[cfg(feature = "defmt")] |
| @@ -652,7 +649,6 @@ impl Config { | |||
| 652 | } | 649 | } |
| 653 | } | 650 | } |
| 654 | 651 | ||
| 655 | #[cfg(not(gpdma))] | ||
| 656 | enum RingBuffer<'d, W: word::Word> { | 652 | enum RingBuffer<'d, W: word::Word> { |
| 657 | Writable(WritableRingBuffer<'d, W>), | 653 | Writable(WritableRingBuffer<'d, W>), |
| 658 | Readable(ReadableRingBuffer<'d, W>), | 654 | Readable(ReadableRingBuffer<'d, W>), |
| @@ -679,7 +675,6 @@ fn get_af_types(mode: Mode, tx_rx: TxRx) -> (AfType, AfType) { | |||
| 679 | ) | 675 | ) |
| 680 | } | 676 | } |
| 681 | 677 | ||
| 682 | #[cfg(not(gpdma))] | ||
| 683 | fn get_ring_buffer<'d, T: Instance, W: word::Word>( | 678 | fn get_ring_buffer<'d, T: Instance, W: word::Word>( |
| 684 | dma: Peri<'d, impl Channel>, | 679 | dma: Peri<'d, impl Channel>, |
| 685 | dma_buf: &'d mut [W], | 680 | dma_buf: &'d mut [W], |
| @@ -750,14 +745,10 @@ pub struct Sai<'d, T: Instance, W: word::Word> { | |||
| 750 | fs: Option<Peri<'d, AnyPin>>, | 745 | fs: Option<Peri<'d, AnyPin>>, |
| 751 | sck: Option<Peri<'d, AnyPin>>, | 746 | sck: Option<Peri<'d, AnyPin>>, |
| 752 | mclk: Option<Peri<'d, AnyPin>>, | 747 | mclk: Option<Peri<'d, AnyPin>>, |
| 753 | #[cfg(gpdma)] | ||
| 754 | ring_buffer: PhantomData<W>, | ||
| 755 | #[cfg(not(gpdma))] | ||
| 756 | ring_buffer: RingBuffer<'d, W>, | 748 | ring_buffer: RingBuffer<'d, W>, |
| 757 | sub_block: WhichSubBlock, | 749 | sub_block: WhichSubBlock, |
| 758 | } | 750 | } |
| 759 | 751 | ||
| 760 | #[cfg(not(gpdma))] | ||
| 761 | impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> { | 752 | impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> { |
| 762 | /// Create a new SAI driver in asynchronous mode with MCLK. | 753 | /// Create a new SAI driver in asynchronous mode with MCLK. |
| 763 | /// | 754 | /// |
diff --git a/embassy-stm32/src/spdifrx/mod.rs b/embassy-stm32/src/spdifrx/mod.rs index 9c42217f0..466639e83 100644 --- a/embassy-stm32/src/spdifrx/mod.rs +++ b/embassy-stm32/src/spdifrx/mod.rs | |||
| @@ -8,9 +8,7 @@ use embassy_sync::waitqueue::AtomicWaker; | |||
| 8 | 8 | ||
| 9 | use crate::dma::ringbuffer::Error as RingbufferError; | 9 | use crate::dma::ringbuffer::Error as RingbufferError; |
| 10 | pub use crate::dma::word; | 10 | pub use crate::dma::word; |
| 11 | #[cfg(not(gpdma))] | 11 | use crate::dma::{Channel, ReadableRingBuffer, TransferOptions}; |
| 12 | use crate::dma::ReadableRingBuffer; | ||
| 13 | use crate::dma::{Channel, TransferOptions}; | ||
| 14 | use crate::gpio::{AfType, AnyPin, Pull, SealedPin as _}; | 12 | use crate::gpio::{AfType, AnyPin, Pull, SealedPin as _}; |
| 15 | use crate::interrupt::typelevel::Interrupt; | 13 | use crate::interrupt::typelevel::Interrupt; |
| 16 | use crate::pac::spdifrx::Spdifrx as Regs; | 14 | use crate::pac::spdifrx::Spdifrx as Regs; |
| @@ -58,7 +56,6 @@ macro_rules! impl_spdifrx_pin { | |||
| 58 | /// Ring-buffered SPDIFRX driver. | 56 | /// Ring-buffered SPDIFRX driver. |
| 59 | /// | 57 | /// |
| 60 | /// Data is read by DMAs and stored in a ring buffer. | 58 | /// Data is read by DMAs and stored in a ring buffer. |
| 61 | #[cfg(not(gpdma))] | ||
| 62 | pub struct Spdifrx<'d, T: Instance> { | 59 | pub struct Spdifrx<'d, T: Instance> { |
| 63 | _peri: Peri<'d, T>, | 60 | _peri: Peri<'d, T>, |
| 64 | spdifrx_in: Option<Peri<'d, AnyPin>>, | 61 | spdifrx_in: Option<Peri<'d, AnyPin>>, |
| @@ -118,7 +115,6 @@ impl Default for Config { | |||
| 118 | } | 115 | } |
| 119 | } | 116 | } |
| 120 | 117 | ||
| 121 | #[cfg(not(gpdma))] | ||
| 122 | impl<'d, T: Instance> Spdifrx<'d, T> { | 118 | impl<'d, T: Instance> Spdifrx<'d, T> { |
| 123 | fn dma_opts() -> TransferOptions { | 119 | fn dma_opts() -> TransferOptions { |
| 124 | TransferOptions { | 120 | TransferOptions { |
| @@ -236,7 +232,6 @@ impl<'d, T: Instance> Spdifrx<'d, T> { | |||
| 236 | } | 232 | } |
| 237 | } | 233 | } |
| 238 | 234 | ||
| 239 | #[cfg(not(gpdma))] | ||
| 240 | impl<'d, T: Instance> Drop for Spdifrx<'d, T> { | 235 | impl<'d, T: Instance> Drop for Spdifrx<'d, T> { |
| 241 | fn drop(&mut self) { | 236 | fn drop(&mut self) { |
| 242 | T::info().regs.cr().modify(|cr| cr.set_spdifen(0x00)); | 237 | T::info().regs.cr().modify(|cr| cr.set_spdifen(0x00)); |
diff --git a/embassy-stm32/src/ucpd.rs b/embassy-stm32/src/ucpd.rs index 0a80adb8f..18aff4fbd 100644 --- a/embassy-stm32/src/ucpd.rs +++ b/embassy-stm32/src/ucpd.rs | |||
| @@ -490,14 +490,14 @@ impl<'d, T: Instance> PdPhy<'d, T> { | |||
| 490 | let sr = r.sr().read(); | 490 | let sr = r.sr().read(); |
| 491 | 491 | ||
| 492 | if sr.rxhrstdet() { | 492 | if sr.rxhrstdet() { |
| 493 | dma.request_stop(); | 493 | dma.request_pause(); |
| 494 | 494 | ||
| 495 | // Clean and re-enable hard reset receive interrupt. | 495 | // Clean and re-enable hard reset receive interrupt. |
| 496 | r.icr().write(|w| w.set_rxhrstdetcf(true)); | 496 | r.icr().write(|w| w.set_rxhrstdetcf(true)); |
| 497 | r.imr().modify(|w| w.set_rxhrstdetie(true)); | 497 | r.imr().modify(|w| w.set_rxhrstdetie(true)); |
| 498 | Poll::Ready(Err(RxError::HardReset)) | 498 | Poll::Ready(Err(RxError::HardReset)) |
| 499 | } else if sr.rxmsgend() { | 499 | } else if sr.rxmsgend() { |
| 500 | dma.request_stop(); | 500 | dma.request_pause(); |
| 501 | // Should be read immediately on interrupt. | 501 | // Should be read immediately on interrupt. |
| 502 | rxpaysz = r.rx_payszr().read().rxpaysz().into(); | 502 | rxpaysz = r.rx_payszr().read().rxpaysz().into(); |
| 503 | 503 | ||
diff --git a/embassy-stm32/src/usart/mod.rs b/embassy-stm32/src/usart/mod.rs index 5bece6d66..3d95de897 100644 --- a/embassy-stm32/src/usart/mod.rs +++ b/embassy-stm32/src/usart/mod.rs | |||
| @@ -1965,9 +1965,7 @@ pub use buffered::*; | |||
| 1965 | pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler; | 1965 | pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler; |
| 1966 | mod buffered; | 1966 | mod buffered; |
| 1967 | 1967 | ||
| 1968 | #[cfg(not(gpdma))] | ||
| 1969 | mod ringbuffered; | 1968 | mod ringbuffered; |
| 1970 | #[cfg(not(gpdma))] | ||
| 1971 | pub use ringbuffered::RingBufferedUartRx; | 1969 | pub use ringbuffered::RingBufferedUartRx; |
| 1972 | 1970 | ||
| 1973 | #[cfg(any(usart_v1, usart_v2))] | 1971 | #[cfg(any(usart_v1, usart_v2))] |
diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs index 1d4a44896..5f4e87834 100644 --- a/embassy-stm32/src/usart/ringbuffered.rs +++ b/embassy-stm32/src/usart/ringbuffered.rs | |||
| @@ -381,7 +381,7 @@ impl ReadReady for RingBufferedUartRx<'_> { | |||
| 381 | crate::dma::ringbuffer::Error::Overrun => Self::Error::Overrun, | 381 | crate::dma::ringbuffer::Error::Overrun => Self::Error::Overrun, |
| 382 | crate::dma::ringbuffer::Error::DmaUnsynced => { | 382 | crate::dma::ringbuffer::Error::DmaUnsynced => { |
| 383 | error!( | 383 | error!( |
| 384 | "Ringbuffer error: DmaUNsynced, driver implementation is | 384 | "Ringbuffer error: DmaUNsynced, driver implementation is |
| 385 | probably bugged please open an issue" | 385 | probably bugged please open an issue" |
| 386 | ); | 386 | ); |
| 387 | // we report this as overrun since its recoverable in the same way | 387 | // we report this as overrun since its recoverable in the same way |
diff --git a/examples/stm32h5/src/bin/sai.rs b/examples/stm32h5/src/bin/sai.rs new file mode 100644 index 000000000..0e182f9cf --- /dev/null +++ b/examples/stm32h5/src/bin/sai.rs | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | #![no_std] | ||
| 2 | #![no_main] | ||
| 3 | |||
| 4 | use defmt::info; | ||
| 5 | use embassy_executor::Spawner; | ||
| 6 | use embassy_stm32::{sai, Config}; | ||
| 7 | use {defmt_rtt as _, panic_probe as _}; | ||
| 8 | |||
| 9 | #[embassy_executor::main] | ||
| 10 | async fn main(_spawner: Spawner) { | ||
| 11 | info!("Hello world."); | ||
| 12 | |||
| 13 | let mut config = Config::default(); | ||
| 14 | { | ||
| 15 | use embassy_stm32::rcc::*; | ||
| 16 | |||
| 17 | config.rcc.pll2 = Some(Pll { | ||
| 18 | source: PllSource::HSI, | ||
| 19 | prediv: PllPreDiv::DIV16, | ||
| 20 | mul: PllMul::MUL32, | ||
| 21 | divp: Some(PllDiv::DIV16), // 8 MHz SAI clock | ||
| 22 | divq: None, | ||
| 23 | divr: None, | ||
| 24 | }); | ||
| 25 | |||
| 26 | config.rcc.mux.sai1sel = mux::Saisel::PLL2_P; | ||
| 27 | } | ||
| 28 | let p = embassy_stm32::init(config); | ||
| 29 | |||
| 30 | let mut write_buffer = [0u16; 1024]; | ||
| 31 | let (_, sai_b) = sai::split_subblocks(p.SAI1); | ||
| 32 | |||
| 33 | let mut sai_b = sai::Sai::new_asynchronous( | ||
| 34 | sai_b, | ||
| 35 | p.PF8, | ||
| 36 | p.PE3, | ||
| 37 | p.PF9, | ||
| 38 | p.GPDMA1_CH0, | ||
| 39 | &mut write_buffer, | ||
| 40 | Default::default(), | ||
| 41 | ); | ||
| 42 | |||
| 43 | // Populate arbitrary data. | ||
| 44 | let mut data = [0u16; 256]; | ||
| 45 | for (index, sample) in data.iter_mut().enumerate() { | ||
| 46 | *sample = index as u16; | ||
| 47 | } | ||
| 48 | |||
| 49 | loop { | ||
| 50 | sai_b.write(&data).await.unwrap(); | ||
| 51 | } | ||
| 52 | } | ||
