diff options
| author | Dario Nieuwenhuis <[email protected]> | 2024-02-24 02:38:31 +0100 |
|---|---|---|
| committer | Dario Nieuwenhuis <[email protected]> | 2024-02-24 02:41:41 +0100 |
| commit | e67dfcb04f79aebed52a357b867d418e0ff476af (patch) | |
| tree | 82d1fb2b40b71a6dd62bfbe79596d61cbb6ce7c8 /embassy-stm32/src/dcmi.rs | |
| parent | f77d59500e9bbc0282f1ba4b6b27507f83f9d974 (diff) | |
stm32/dma: add AnyChannel, add support for BDMA on H7.
Diffstat (limited to 'embassy-stm32/src/dcmi.rs')
| -rw-r--r-- | embassy-stm32/src/dcmi.rs | 122 |
1 file changed, 0 insertions, 122 deletions
diff --git a/embassy-stm32/src/dcmi.rs b/embassy-stm32/src/dcmi.rs index 4d02284b2..826b04a4b 100644 --- a/embassy-stm32/src/dcmi.rs +++ b/embassy-stm32/src/dcmi.rs | |||
| @@ -394,19 +394,7 @@ where | |||
| 394 | 394 | ||
| 395 | /// This method starts the capture and finishes when both the dma transfer and DCMI finish the frame transfer. | 395 | /// This method starts the capture and finishes when both the dma transfer and DCMI finish the frame transfer. |
| 396 | /// The implication is that the input buffer size must be exactly the size of the captured frame. | 396 | /// The implication is that the input buffer size must be exactly the size of the captured frame. |
| 397 | /// | ||
| 398 | /// Note that when `buffer.len() > 0xffff` the capture future requires some real-time guarantees to be upheld | ||
| 399 | /// (must be polled fast enough so the buffers get switched before data is overwritten). | ||
| 400 | /// It is therefore recommended that it is run on higher priority executor. | ||
| 401 | pub async fn capture(&mut self, buffer: &mut [u32]) -> Result<(), Error> { | 397 | pub async fn capture(&mut self, buffer: &mut [u32]) -> Result<(), Error> { |
| 402 | if buffer.len() <= 0xffff { | ||
| 403 | return self.capture_small(buffer).await; | ||
| 404 | } else { | ||
| 405 | return self.capture_giant(buffer).await; | ||
| 406 | } | ||
| 407 | } | ||
| 408 | |||
| 409 | async fn capture_small(&mut self, buffer: &mut [u32]) -> Result<(), Error> { | ||
| 410 | let r = self.inner.regs(); | 398 | let r = self.inner.regs(); |
| 411 | let src = r.dr().as_ptr() as *mut u32; | 399 | let src = r.dr().as_ptr() as *mut u32; |
| 412 | let request = self.dma.request(); | 400 | let request = self.dma.request(); |
| @@ -441,116 +429,6 @@ where | |||
| 441 | 429 | ||
| 442 | result | 430 | result |
| 443 | } | 431 | } |
| 444 | |||
| 445 | #[cfg(not(dma))] | ||
| 446 | async fn capture_giant(&mut self, _buffer: &mut [u32]) -> Result<(), Error> { | ||
| 447 | panic!("capturing to buffers larger than 0xffff is only supported on DMA for now, not on BDMA or GPDMA."); | ||
| 448 | } | ||
| 449 | |||
| 450 | #[cfg(dma)] | ||
| 451 | async fn capture_giant(&mut self, buffer: &mut [u32]) -> Result<(), Error> { | ||
| 452 | use crate::dma::TransferOptions; | ||
| 453 | |||
| 454 | let data_len = buffer.len(); | ||
| 455 | let chunk_estimate = data_len / 0xffff; | ||
| 456 | |||
| 457 | let mut chunks = chunk_estimate + 1; | ||
| 458 | while data_len % chunks != 0 { | ||
| 459 | chunks += 1; | ||
| 460 | } | ||
| 461 | |||
| 462 | let chunk_size = data_len / chunks; | ||
| 463 | |||
| 464 | let mut remaining_chunks = chunks - 2; | ||
| 465 | |||
| 466 | let mut m0ar = buffer.as_mut_ptr(); | ||
| 467 | let mut m1ar = unsafe { buffer.as_mut_ptr().add(chunk_size) }; | ||
| 468 | |||
| 469 | let channel = &mut self.dma; | ||
| 470 | let request = channel.request(); | ||
| 471 | |||
| 472 | let r = self.inner.regs(); | ||
| 473 | let src = r.dr().as_ptr() as *mut u32; | ||
| 474 | |||
| 475 | let mut transfer = unsafe { | ||
| 476 | crate::dma::DoubleBuffered::new_read( | ||
| 477 | &mut self.dma, | ||
| 478 | request, | ||
| 479 | src, | ||
| 480 | m0ar, | ||
| 481 | m1ar, | ||
| 482 | chunk_size, | ||
| 483 | TransferOptions::default(), | ||
| 484 | ) | ||
| 485 | }; | ||
| 486 | |||
| 487 | let mut last_chunk_set_for_transfer = false; | ||
| 488 | let mut buffer0_last_accessible = false; | ||
| 489 | let dma_result = poll_fn(|cx| { | ||
| 490 | transfer.set_waker(cx.waker()); | ||
| 491 | |||
| 492 | let buffer0_currently_accessible = transfer.is_buffer0_accessible(); | ||
| 493 | |||
| 494 | // check if the accessible buffer changed since last poll | ||
| 495 | if buffer0_last_accessible == buffer0_currently_accessible { | ||
| 496 | return Poll::Pending; | ||
| 497 | } | ||
| 498 | buffer0_last_accessible = !buffer0_last_accessible; | ||
| 499 | |||
| 500 | if remaining_chunks != 0 { | ||
| 501 | if remaining_chunks % 2 == 0 && buffer0_currently_accessible { | ||
| 502 | m0ar = unsafe { m0ar.add(2 * chunk_size) }; | ||
| 503 | unsafe { transfer.set_buffer0(m0ar) } | ||
| 504 | remaining_chunks -= 1; | ||
| 505 | } else if !buffer0_currently_accessible { | ||
| 506 | m1ar = unsafe { m1ar.add(2 * chunk_size) }; | ||
| 507 | unsafe { transfer.set_buffer1(m1ar) }; | ||
| 508 | remaining_chunks -= 1; | ||
| 509 | } | ||
| 510 | } else { | ||
| 511 | if buffer0_currently_accessible { | ||
| 512 | unsafe { transfer.set_buffer0(buffer.as_mut_ptr()) } | ||
| 513 | } else { | ||
| 514 | unsafe { transfer.set_buffer1(buffer.as_mut_ptr()) } | ||
| 515 | } | ||
| 516 | if last_chunk_set_for_transfer { | ||
| 517 | transfer.request_stop(); | ||
| 518 | return Poll::Ready(()); | ||
| 519 | } | ||
| 520 | last_chunk_set_for_transfer = true; | ||
| 521 | } | ||
| 522 | Poll::Pending | ||
| 523 | }); | ||
| 524 | |||
| 525 | Self::clear_interrupt_flags(); | ||
| 526 | Self::enable_irqs(); | ||
| 527 | |||
| 528 | let result = poll_fn(|cx| { | ||
| 529 | STATE.waker.register(cx.waker()); | ||
| 530 | |||
| 531 | let ris = crate::pac::DCMI.ris().read(); | ||
| 532 | if ris.err_ris() { | ||
| 533 | crate::pac::DCMI.icr().write(|r| r.set_err_isc(true)); | ||
| 534 | Poll::Ready(Err(Error::PeripheralError)) | ||
| 535 | } else if ris.ovr_ris() { | ||
| 536 | crate::pac::DCMI.icr().write(|r| r.set_ovr_isc(true)); | ||
| 537 | Poll::Ready(Err(Error::Overrun)) | ||
| 538 | } else if ris.frame_ris() { | ||
| 539 | crate::pac::DCMI.icr().write(|r| r.set_frame_isc(true)); | ||
| 540 | Poll::Ready(Ok(())) | ||
| 541 | } else { | ||
| 542 | Poll::Pending | ||
| 543 | } | ||
| 544 | }); | ||
| 545 | |||
| 546 | Self::toggle(true); | ||
| 547 | |||
| 548 | let (_, result) = embassy_futures::join::join(dma_result, result).await; | ||
| 549 | |||
| 550 | Self::toggle(false); | ||
| 551 | |||
| 552 | result | ||
| 553 | } | ||
| 554 | } | 432 | } |
| 555 | 433 | ||
| 556 | mod sealed { | 434 | mod sealed { |
