diff options
| author | Dario Nieuwenhuis <[email protected]> | 2023-05-01 18:14:53 +0200 |
|---|---|---|
| committer | Dario Nieuwenhuis <[email protected]> | 2023-05-01 22:42:36 +0200 |
| commit | 25864ae4dc3e5a765f6a3e2bb52bceb4df2e0199 (patch) | |
| tree | 380899cd4ffa68dc2b5aec79716759db0f18ed0e | |
| parent | 14e0090cb1e60ce477ae4f080d0deb724dbc3b9e (diff) | |
stm32/bdma: add ringbuffer support.
| -rw-r--r-- | embassy-stm32/src/dma/bdma.rs | 187 |
1 file changed, 183 insertions(+), 4 deletions(-)
diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs index a23bb8cd7..88df76ba7 100644 --- a/embassy-stm32/src/dma/bdma.rs +++ b/embassy-stm32/src/dma/bdma.rs | |||
| @@ -3,18 +3,20 @@ | |||
| 3 | use core::future::Future; | 3 | use core::future::Future; |
| 4 | use core::pin::Pin; | 4 | use core::pin::Pin; |
| 5 | use core::sync::atomic::{fence, Ordering}; | 5 | use core::sync::atomic::{fence, Ordering}; |
| 6 | use core::task::{Context, Poll}; | 6 | use core::task::{Context, Poll, Waker}; |
| 7 | 7 | ||
| 8 | use atomic_polyfill::AtomicUsize; | ||
| 8 | use embassy_cortex_m::interrupt::Priority; | 9 | use embassy_cortex_m::interrupt::Priority; |
| 9 | use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; | 10 | use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; |
| 10 | use embassy_sync::waitqueue::AtomicWaker; | 11 | use embassy_sync::waitqueue::AtomicWaker; |
| 11 | 12 | ||
| 13 | use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError}; | ||
| 12 | use super::word::{Word, WordSize}; | 14 | use super::word::{Word, WordSize}; |
| 13 | use super::Dir; | 15 | use super::Dir; |
| 14 | use crate::_generated::BDMA_CHANNEL_COUNT; | 16 | use crate::_generated::BDMA_CHANNEL_COUNT; |
| 15 | use crate::interrupt::{Interrupt, InterruptExt}; | 17 | use crate::interrupt::{Interrupt, InterruptExt}; |
| 16 | use crate::pac; | 18 | use crate::pac; |
| 17 | use crate::pac::bdma::vals; | 19 | use crate::pac::bdma::{regs, vals}; |
| 18 | 20 | ||
| 19 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] | 21 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] |
| 20 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] | 22 | #[cfg_attr(feature = "defmt", derive(defmt::Format))] |
| @@ -48,13 +50,16 @@ impl From<Dir> for vals::Dir { | |||
| 48 | 50 | ||
| 49 | struct State { | 51 | struct State { |
| 50 | ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT], | 52 | ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT], |
| 53 | complete_count: [AtomicUsize; BDMA_CHANNEL_COUNT], | ||
| 51 | } | 54 | } |
| 52 | 55 | ||
| 53 | impl State { | 56 | impl State { |
| 54 | const fn new() -> Self { | 57 | const fn new() -> Self { |
| 58 | const ZERO: AtomicUsize = AtomicUsize::new(0); | ||
| 55 | const AW: AtomicWaker = AtomicWaker::new(); | 59 | const AW: AtomicWaker = AtomicWaker::new(); |
| 56 | Self { | 60 | Self { |
| 57 | ch_wakers: [AW; BDMA_CHANNEL_COUNT], | 61 | ch_wakers: [AW; BDMA_CHANNEL_COUNT], |
| 62 | complete_count: [ZERO; BDMA_CHANNEL_COUNT], | ||
| 58 | } | 63 | } |
| 59 | } | 64 | } |
| 60 | } | 65 | } |
| @@ -105,8 +110,23 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index | |||
| 105 | if isr.teif(channel_num) { | 110 | if isr.teif(channel_num) { |
| 106 | panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num); | 111 | panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num); |
| 107 | } | 112 | } |
| 113 | |||
| 114 | let mut wake = false; | ||
| 115 | |||
| 116 | if isr.htif(channel_num) && cr.read().htie() { | ||
| 117 | // Acknowledge half transfer complete interrupt | ||
| 118 | dma.ifcr().write(|w| w.set_htif(channel_num, true)); | ||
| 119 | wake = true; | ||
| 120 | } | ||
| 121 | |||
| 108 | if isr.tcif(channel_num) && cr.read().tcie() { | 122 | if isr.tcif(channel_num) && cr.read().tcie() { |
| 109 | cr.write(|_| ()); // Disable channel interrupts with the default value. | 123 | // Acknowledge transfer complete interrupt |
| 124 | dma.ifcr().write(|w| w.set_tcif(channel_num, true)); | ||
| 125 | STATE.complete_count[index].fetch_add(1, Ordering::Release); | ||
| 126 | wake = true; | ||
| 127 | } | ||
| 128 | |||
| 129 | if wake { | ||
| 110 | STATE.ch_wakers[index].wake(); | 130 | STATE.ch_wakers[index].wake(); |
| 111 | } | 131 | } |
| 112 | } | 132 | } |
| @@ -252,6 +272,7 @@ impl<'a, C: Channel> Transfer<'a, C> { | |||
| 252 | 272 | ||
| 253 | let mut this = Self { channel }; | 273 | let mut this = Self { channel }; |
| 254 | this.clear_irqs(); | 274 | this.clear_irqs(); |
| 275 | STATE.complete_count[this.channel.index()].store(0, Ordering::Release); | ||
| 255 | 276 | ||
| 256 | #[cfg(dmamux)] | 277 | #[cfg(dmamux)] |
| 257 | super::dmamux::configure_dmamux(&mut *this.channel, _request); | 278 | super::dmamux::configure_dmamux(&mut *this.channel, _request); |
| @@ -299,7 +320,9 @@ impl<'a, C: Channel> Transfer<'a, C> { | |||
| 299 | 320 | ||
| 300 | pub fn is_running(&mut self) -> bool { | 321 | pub fn is_running(&mut self) -> bool { |
| 301 | let ch = self.channel.regs().ch(self.channel.num()); | 322 | let ch = self.channel.regs().ch(self.channel.num()); |
| 302 | unsafe { ch.cr().read() }.en() | 323 | let en = unsafe { ch.cr().read() }.en(); |
| 324 | let tcif = STATE.complete_count[self.channel.index()].load(Ordering::Acquire) != 0; | ||
| 325 | en && !tcif | ||
| 303 | } | 326 | } |
| 304 | 327 | ||
| 305 | /// Gets the total remaining transfers for the channel | 328 | /// Gets the total remaining transfers for the channel |
| @@ -342,3 +365,159 @@ impl<'a, C: Channel> Future for Transfer<'a, C> { | |||
| 342 | } | 365 | } |
| 343 | } | 366 | } |
| 344 | } | 367 | } |
| 368 | |||
| 369 | // ============================== | ||
| 370 | |||
| 371 | impl<C: Channel> DmaCtrl for C { | ||
| 372 | fn ndtr(&self) -> usize { | ||
| 373 | let ch = self.regs().ch(self.num()); | ||
| 374 | unsafe { ch.ndtr().read() }.ndt() as usize | ||
| 375 | } | ||
| 376 | |||
| 377 | fn get_complete_count(&self) -> usize { | ||
| 378 | STATE.complete_count[self.index()].load(Ordering::Acquire) | ||
| 379 | } | ||
| 380 | |||
| 381 | fn reset_complete_count(&mut self) -> usize { | ||
| 382 | STATE.complete_count[self.index()].swap(0, Ordering::AcqRel) | ||
| 383 | } | ||
| 384 | } | ||
| 385 | |||
| 386 | pub struct RingBuffer<'a, C: Channel, W: Word> { | ||
| 387 | cr: regs::Cr, | ||
| 388 | channel: PeripheralRef<'a, C>, | ||
| 389 | ringbuf: DmaRingBuffer<'a, W>, | ||
| 390 | } | ||
| 391 | |||
| 392 | impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | ||
| 393 | pub unsafe fn new_read( | ||
| 394 | channel: impl Peripheral<P = C> + 'a, | ||
| 395 | _request: Request, | ||
| 396 | peri_addr: *mut W, | ||
| 397 | buffer: &'a mut [W], | ||
| 398 | _options: TransferOptions, | ||
| 399 | ) -> Self { | ||
| 400 | into_ref!(channel); | ||
| 401 | |||
| 402 | let len = buffer.len(); | ||
| 403 | assert!(len > 0 && len <= 0xFFFF); | ||
| 404 | |||
| 405 | let dir = Dir::PeripheralToMemory; | ||
| 406 | let data_size = W::size(); | ||
| 407 | |||
| 408 | let channel_number = channel.num(); | ||
| 409 | let dma = channel.regs(); | ||
| 410 | |||
| 411 | // "Preceding reads and writes cannot be moved past subsequent writes." | ||
| 412 | fence(Ordering::SeqCst); | ||
| 413 | |||
| 414 | #[cfg(bdma_v2)] | ||
| 415 | critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request))); | ||
| 416 | |||
| 417 | let mut w = regs::Cr(0); | ||
| 418 | w.set_psize(data_size.into()); | ||
| 419 | w.set_msize(data_size.into()); | ||
| 420 | w.set_minc(vals::Inc::ENABLED); | ||
| 421 | w.set_dir(dir.into()); | ||
| 422 | w.set_teie(true); | ||
| 423 | w.set_htie(true); | ||
| 424 | w.set_tcie(true); | ||
| 425 | w.set_circ(vals::Circ::ENABLED); | ||
| 426 | w.set_pl(vals::Pl::VERYHIGH); | ||
| 427 | w.set_en(true); | ||
| 428 | |||
| 429 | let buffer_ptr = buffer.as_mut_ptr(); | ||
| 430 | let mut this = Self { | ||
| 431 | channel, | ||
| 432 | cr: w, | ||
| 433 | ringbuf: DmaRingBuffer::new(buffer), | ||
| 434 | }; | ||
| 435 | this.clear_irqs(); | ||
| 436 | |||
| 437 | #[cfg(dmamux)] | ||
| 438 | super::dmamux::configure_dmamux(&mut *this.channel, _request); | ||
| 439 | |||
| 440 | let ch = dma.ch(channel_number); | ||
| 441 | ch.par().write_value(peri_addr as u32); | ||
| 442 | ch.mar().write_value(buffer_ptr as u32); | ||
| 443 | ch.ndtr().write(|w| w.set_ndt(len as u16)); | ||
| 444 | |||
| 445 | this | ||
| 446 | } | ||
| 447 | |||
| 448 | pub fn start(&mut self) { | ||
| 449 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 450 | unsafe { ch.cr().write_value(self.cr) } | ||
| 451 | } | ||
| 452 | |||
| 453 | pub fn clear(&mut self) { | ||
| 454 | self.ringbuf.clear(&mut *self.channel); | ||
| 455 | } | ||
| 456 | |||
| 457 | /// Read bytes from the ring buffer | ||
| 458 | /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | ||
| 459 | pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> { | ||
| 460 | self.ringbuf.read(&mut *self.channel, buf) | ||
| 461 | } | ||
| 462 | |||
| 463 | pub fn is_empty(&self) -> bool { | ||
| 464 | self.ringbuf.is_empty() | ||
| 465 | } | ||
| 466 | |||
| 467 | pub fn len(&self) -> usize { | ||
| 468 | self.ringbuf.len() | ||
| 469 | } | ||
| 470 | |||
| 471 | pub fn capacity(&self) -> usize { | ||
| 472 | self.ringbuf.dma_buf.len() | ||
| 473 | } | ||
| 474 | |||
| 475 | pub fn set_waker(&mut self, waker: &Waker) { | ||
| 476 | STATE.ch_wakers[self.channel.index()].register(waker); | ||
| 477 | } | ||
| 478 | |||
| 479 | fn clear_irqs(&mut self) { | ||
| 480 | let dma = self.channel.regs(); | ||
| 481 | unsafe { | ||
| 482 | dma.ifcr().write(|w| { | ||
| 483 | w.set_htif(self.channel.num(), true); | ||
| 484 | w.set_tcif(self.channel.num(), true); | ||
| 485 | w.set_teif(self.channel.num(), true); | ||
| 486 | }) | ||
| 487 | } | ||
| 488 | } | ||
| 489 | |||
| 490 | pub fn request_stop(&mut self) { | ||
| 491 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 492 | |||
| 493 | // Disable the channel. Keep the IEs enabled so the irqs still fire. | ||
| 494 | unsafe { | ||
| 495 | ch.cr().write(|w| { | ||
| 496 | w.set_teie(true); | ||
| 497 | w.set_htie(true); | ||
| 498 | w.set_tcie(true); | ||
| 499 | }) | ||
| 500 | } | ||
| 501 | } | ||
| 502 | |||
| 503 | pub fn is_running(&mut self) -> bool { | ||
| 504 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 505 | unsafe { ch.cr().read() }.en() | ||
| 506 | } | ||
| 507 | |||
| 508 | /// Synchronize the position of the ring buffer to the actual DMA controller position | ||
| 509 | pub fn reload_position(&mut self) { | ||
| 510 | let ch = self.channel.regs().ch(self.channel.num()); | ||
| 511 | self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize; | ||
| 512 | } | ||
| 513 | } | ||
| 514 | |||
| 515 | impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | ||
| 516 | fn drop(&mut self) { | ||
| 517 | self.request_stop(); | ||
| 518 | while self.is_running() {} | ||
| 519 | |||
| 520 | // "Subsequent reads and writes cannot be moved ahead of preceding reads." | ||
| 521 | fence(Ordering::SeqCst); | ||
| 522 | } | ||
| 523 | } | ||
