diff options
| author | Matous Hybl <[email protected]> | 2022-04-12 14:06:53 +0200 |
|---|---|---|
| committer | Matous Hybl <[email protected]> | 2022-04-25 14:30:43 +0200 |
| commit | 945fa0871f03a1db9a9be16e92346d62825decdd (patch) | |
| tree | ca867531d8a4670b39f24e7afa6bd9c739cf7225 | |
| parent | a1746f4ddac734cd8912007b6155ca340df89f19 (diff) | |
Implement giant (chunked) DMA transfers for DCMI.
| -rw-r--r-- | embassy-stm32/src/dcmi.rs | 132 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/bdma.rs | 27 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/dma.rs | 164 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/mod.rs | 19 | ||||
| -rw-r--r-- | examples/stm32h7/memory.x | 4 |
5 files changed, 333 insertions, 13 deletions
diff --git a/embassy-stm32/src/dcmi.rs b/embassy-stm32/src/dcmi.rs index bf78b631f..b1e557ae1 100644 --- a/embassy-stm32/src/dcmi.rs +++ b/embassy-stm32/src/dcmi.rs | |||
| @@ -379,7 +379,23 @@ where | |||
| 379 | 379 | ||
| 380 | /// This method starts the capture and finishes when both the dma transfer and DCMI finish the frame transfer. | 380 | /// This method starts the capture and finishes when both the dma transfer and DCMI finish the frame transfer. |
| 381 | /// The implication is that the input buffer size must be exactly the size of the captured frame. | 381 | /// The implication is that the input buffer size must be exactly the size of the captured frame. |
| 382 | /// | ||
| 383 | /// Note that when `buffer.len() > 0xffff` the capture future requires some real-time guarantees to be upheld | ||
| 384 | /// (must be polled fast enough so the buffers get switched before data is overwritten). | ||
| 385 | /// It is therefore recommended that it be run on a higher-priority executor. | ||
| 382 | pub async fn capture(&mut self, buffer: &mut [u32]) -> Result<(), Error> { | 386 | pub async fn capture(&mut self, buffer: &mut [u32]) -> Result<(), Error> { |
| 387 | if buffer.len() <= 0xffff { | ||
| 388 | return self.capture_small(buffer).await; | ||
| 389 | } else { | ||
| 390 | #[cfg(feature = "unsafe-double-buffered-dma")] | ||
| 391 | return self.capture_giant(buffer).await; | ||
| 392 | |||
| 393 | #[cfg(not(feature = "unsafe-double-buffered-dma"))] | ||
| 394 | panic!("For DCMI transfers with length > 0xffff, the `unsafe-double-buffered-dma` feature must be enabled."); | ||
| 395 | } | ||
| 396 | } | ||
| 397 | |||
| 398 | async fn capture_small(&mut self, buffer: &mut [u32]) -> Result<(), Error> { | ||
| 383 | let channel = &mut self.dma; | 399 | let channel = &mut self.dma; |
| 384 | let request = channel.request(); | 400 | let request = channel.request(); |
| 385 | 401 | ||
| @@ -428,6 +444,122 @@ where | |||
| 428 | 444 | ||
| 429 | result | 445 | result |
| 430 | } | 446 | } |
| 447 | |||
| 448 | #[cfg(feature = "unsafe-double-buffered-dma")] | ||
| 449 | async fn capture_giant(&mut self, buffer: &mut [u32]) -> Result<(), Error> { | ||
| 450 | use crate::dma::TransferOptions; | ||
| 451 | |||
| 452 | let data_len = buffer.len(); | ||
| 453 | let chunk_estimate = data_len / 0xffff; | ||
| 454 | |||
| 455 | let mut chunks = chunk_estimate + 1; | ||
| 456 | while data_len % chunks != 0 { | ||
| 457 | chunks += 1; | ||
| 458 | } | ||
| 459 | |||
| 460 | let chunk_size = data_len / chunks; | ||
| 461 | |||
| 462 | let mut remaining_chunks = chunks - 2; | ||
| 463 | |||
| 464 | let mut m0ar = buffer.as_mut_ptr(); | ||
| 465 | let mut m1ar = unsafe { buffer.as_mut_ptr().add(chunk_size) }; | ||
| 466 | |||
| 467 | let channel = &mut self.dma; | ||
| 468 | let request = channel.request(); | ||
| 469 | |||
| 470 | let r = self.inner.regs(); | ||
| 471 | let src = r.dr().ptr() as *mut u32; | ||
| 472 | |||
| 473 | unsafe { | ||
| 474 | channel.start_double_buffered_read( | ||
| 475 | request, | ||
| 476 | src, | ||
| 477 | m0ar, | ||
| 478 | m1ar, | ||
| 479 | chunk_size, | ||
| 480 | TransferOptions::default(), | ||
| 481 | ); | ||
| 482 | } | ||
| 483 | |||
| 484 | let mut last_chunk_set_for_transfer = false; | ||
| 485 | let mut buffer0_last_accessible = false; | ||
| 486 | let dma_result = poll_fn(|cx| { | ||
| 487 | channel.set_waker(cx.waker()); | ||
| 488 | |||
| 489 | let buffer0_currently_accessible = unsafe { channel.is_buffer0_accessible() }; | ||
| 490 | |||
| 491 | // check if the accessible buffer changed since last poll | ||
| 492 | if buffer0_last_accessible == buffer0_currently_accessible { | ||
| 493 | return Poll::Pending; | ||
| 494 | } | ||
| 495 | buffer0_last_accessible = !buffer0_last_accessible; | ||
| 496 | |||
| 497 | if remaining_chunks != 0 { | ||
| 498 | if remaining_chunks % 2 == 0 && buffer0_currently_accessible { | ||
| 499 | m0ar = unsafe { m0ar.add(2 * chunk_size) }; | ||
| 500 | unsafe { channel.set_buffer0(m0ar) } | ||
| 501 | remaining_chunks -= 1; | ||
| 502 | } else if !buffer0_currently_accessible { | ||
| 503 | m1ar = unsafe { m1ar.add(2 * chunk_size) }; | ||
| 504 | unsafe { channel.set_buffer1(m1ar) }; | ||
| 505 | remaining_chunks -= 1; | ||
| 506 | } | ||
| 507 | } else { | ||
| 508 | if buffer0_currently_accessible { | ||
| 509 | unsafe { channel.set_buffer0(buffer.as_mut_ptr()) } | ||
| 510 | } else { | ||
| 511 | unsafe { channel.set_buffer1(buffer.as_mut_ptr()) } | ||
| 512 | } | ||
| 513 | if last_chunk_set_for_transfer { | ||
| 514 | channel.request_stop(); | ||
| 515 | return Poll::Ready(()); | ||
| 516 | } | ||
| 517 | last_chunk_set_for_transfer = true; | ||
| 518 | } | ||
| 519 | Poll::Pending | ||
| 520 | }); | ||
| 521 | |||
| 522 | Self::clear_interrupt_flags(); | ||
| 523 | Self::enable_irqs(); | ||
| 524 | |||
| 525 | let result = poll_fn(|cx| { | ||
| 526 | STATE.waker.register(cx.waker()); | ||
| 527 | |||
| 528 | let ris = unsafe { crate::pac::DCMI.ris().read() }; | ||
| 529 | if ris.err_ris() { | ||
| 530 | unsafe { | ||
| 531 | crate::pac::DCMI.icr().write(|r| { | ||
| 532 | r.set_err_isc(true); | ||
| 533 | }) | ||
| 534 | }; | ||
| 535 | Poll::Ready(Err(Error::PeripheralError)) | ||
| 536 | } else if ris.ovr_ris() { | ||
| 537 | unsafe { | ||
| 538 | crate::pac::DCMI.icr().write(|r| { | ||
| 539 | r.set_ovr_isc(true); | ||
| 540 | }) | ||
| 541 | }; | ||
| 542 | Poll::Ready(Err(Error::Overrun)) | ||
| 543 | } else if ris.frame_ris() { | ||
| 544 | unsafe { | ||
| 545 | crate::pac::DCMI.icr().write(|r| { | ||
| 546 | r.set_frame_isc(true); | ||
| 547 | }) | ||
| 548 | }; | ||
| 549 | Poll::Ready(Ok(())) | ||
| 550 | } else { | ||
| 551 | Poll::Pending | ||
| 552 | } | ||
| 553 | }); | ||
| 554 | |||
| 555 | unsafe { Self::toggle(true) }; | ||
| 556 | |||
| 557 | let (_, result) = futures::future::join(dma_result, result).await; | ||
| 558 | |||
| 559 | unsafe { Self::toggle(false) }; | ||
| 560 | |||
| 561 | result | ||
| 562 | } | ||
| 431 | } | 563 | } |
| 432 | 564 | ||
| 433 | mod sealed { | 565 | mod sealed { |
diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs index 30d2a0b97..413285320 100644 --- a/embassy-stm32/src/dma/bdma.rs +++ b/embassy-stm32/src/dma/bdma.rs | |||
| @@ -76,7 +76,6 @@ foreach_dma_channel! { | |||
| 76 | ); | 76 | ); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | |||
| 80 | unsafe fn start_write_repeated<W: Word>(&mut self, _request: Request, repeated: W, count: usize, reg_addr: *mut W, options: TransferOptions) { | 79 | unsafe fn start_write_repeated<W: Word>(&mut self, _request: Request, repeated: W, count: usize, reg_addr: *mut W, options: TransferOptions) { |
| 81 | let buf = [repeated]; | 80 | let buf = [repeated]; |
| 82 | low_level_api::start_transfer( | 81 | low_level_api::start_transfer( |
| @@ -119,6 +118,30 @@ foreach_dma_channel! { | |||
| 119 | ); | 118 | ); |
| 120 | } | 119 | } |
| 121 | 120 | ||
| 121 | unsafe fn start_double_buffered_read<W: super::Word>( | ||
| 122 | &mut self, | ||
| 123 | _request: Request, | ||
| 124 | _reg_addr: *const W, | ||
| 125 | _buffer0: *mut W, | ||
| 126 | _buffer1: *mut W, | ||
| 127 | _buffer_len: usize, | ||
| 128 | _options: TransferOptions, | ||
| 129 | ) { | ||
| 130 | panic!("Unsafe double buffered mode is unavailable on BDMA"); | ||
| 131 | } | ||
| 132 | |||
| 133 | unsafe fn set_buffer0<W: super::Word>(&mut self, _buffer: *mut W) { | ||
| 134 | panic!("Unsafe double buffered mode is unavailable on BDMA"); | ||
| 135 | } | ||
| 136 | |||
| 137 | unsafe fn set_buffer1<W: super::Word>(&mut self, _buffer: *mut W) { | ||
| 138 | panic!("Unsafe double buffered mode is unavailable on BDMA"); | ||
| 139 | } | ||
| 140 | |||
| 141 | unsafe fn is_buffer0_accessible(&mut self) -> bool { | ||
| 142 | panic!("Unsafe double buffered mode is unavailable on BDMA"); | ||
| 143 | } | ||
| 144 | |||
| 122 | fn request_stop(&mut self){ | 145 | fn request_stop(&mut self){ |
| 123 | unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);} | 146 | unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);} |
| 124 | } | 147 | } |
| @@ -232,7 +255,7 @@ mod low_level_api { | |||
| 232 | // get a handle on the channel itself | 255 | // get a handle on the channel itself |
| 233 | let ch = dma.ch(ch as _); | 256 | let ch = dma.ch(ch as _); |
| 234 | // read the remaining transfer count. If this is zero, the transfer completed fully. | 257 | // read the remaining transfer count. If this is zero, the transfer completed fully. |
| 235 | ch.ndtr().read().ndt() | 258 | ch.ndtr().read().ndt() as u16 |
| 236 | } | 259 | } |
| 237 | 260 | ||
| 238 | /// Sets the waker for the specified DMA channel | 261 | /// Sets the waker for the specified DMA channel |
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs index 0bce37e48..bc796c541 100644 --- a/embassy-stm32/src/dma/dma.rs +++ b/embassy-stm32/src/dma/dma.rs | |||
| @@ -41,15 +41,27 @@ impl From<FlowControl> for vals::Pfctrl { | |||
| 41 | } | 41 | } |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | struct ChannelState { | ||
| 45 | waker: AtomicWaker, | ||
| 46 | } | ||
| 47 | |||
| 48 | impl ChannelState { | ||
| 49 | const fn new() -> Self { | ||
| 50 | Self { | ||
| 51 | waker: AtomicWaker::new(), | ||
| 52 | } | ||
| 53 | } | ||
| 54 | } | ||
| 55 | |||
| 44 | struct State { | 56 | struct State { |
| 45 | ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT], | 57 | channels: [ChannelState; DMA_CHANNEL_COUNT], |
| 46 | } | 58 | } |
| 47 | 59 | ||
| 48 | impl State { | 60 | impl State { |
| 49 | const fn new() -> Self { | 61 | const fn new() -> Self { |
| 50 | const AW: AtomicWaker = AtomicWaker::new(); | 62 | const CH: ChannelState = ChannelState::new(); |
| 51 | Self { | 63 | Self { |
| 52 | ch_wakers: [AW; DMA_CHANNEL_COUNT], | 64 | channels: [CH; DMA_CHANNEL_COUNT], |
| 53 | } | 65 | } |
| 54 | } | 66 | } |
| 55 | } | 67 | } |
| @@ -129,6 +141,46 @@ foreach_dma_channel! { | |||
| 129 | ); | 141 | ); |
| 130 | } | 142 | } |
| 131 | 143 | ||
| 144 | unsafe fn start_double_buffered_read<W: Word>( | ||
| 145 | &mut self, | ||
| 146 | request: Request, | ||
| 147 | reg_addr: *const W, | ||
| 148 | buffer0: *mut W, | ||
| 149 | buffer1: *mut W, | ||
| 150 | buffer_len: usize, | ||
| 151 | options: TransferOptions, | ||
| 152 | ) { | ||
| 153 | low_level_api::start_dbm_transfer( | ||
| 154 | pac::$dma_peri, | ||
| 155 | $channel_num, | ||
| 156 | request, | ||
| 157 | vals::Dir::PERIPHERALTOMEMORY, | ||
| 158 | reg_addr as *const u32, | ||
| 159 | buffer0 as *mut u32, | ||
| 160 | buffer1 as *mut u32, | ||
| 161 | buffer_len, | ||
| 162 | true, | ||
| 163 | vals::Size::from(W::bits()), | ||
| 164 | options, | ||
| 165 | #[cfg(dmamux)] | ||
| 166 | <Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS, | ||
| 167 | #[cfg(dmamux)] | ||
| 168 | <Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM, | ||
| 169 | ); | ||
| 170 | } | ||
| 171 | |||
| 172 | unsafe fn set_buffer0<W: Word>(&mut self, buffer: *mut W) { | ||
| 173 | low_level_api::set_dbm_buffer0(pac::$dma_peri, $channel_num, buffer as *mut u32); | ||
| 174 | } | ||
| 175 | |||
| 176 | unsafe fn set_buffer1<W: Word>(&mut self, buffer: *mut W) { | ||
| 177 | low_level_api::set_dbm_buffer1(pac::$dma_peri, $channel_num, buffer as *mut u32); | ||
| 178 | } | ||
| 179 | |||
| 180 | unsafe fn is_buffer0_accessible(&mut self) -> bool { | ||
| 181 | low_level_api::is_buffer0_accessible(pac::$dma_peri, $channel_num) | ||
| 182 | } | ||
| 183 | |||
| 132 | fn request_stop(&mut self) { | 184 | fn request_stop(&mut self) { |
| 133 | unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);} | 185 | unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);} |
| 134 | } | 186 | } |
| @@ -151,7 +203,6 @@ foreach_dma_channel! { | |||
| 151 | } | 203 | } |
| 152 | } | 204 | } |
| 153 | } | 205 | } |
| 154 | |||
| 155 | impl crate::dma::Channel for crate::peripherals::$channel_peri { } | 206 | impl crate::dma::Channel for crate::peripherals::$channel_peri { } |
| 156 | }; | 207 | }; |
| 157 | } | 208 | } |
| @@ -212,6 +263,94 @@ mod low_level_api { | |||
| 212 | }); | 263 | }); |
| 213 | } | 264 | } |
| 214 | 265 | ||
| 266 | pub unsafe fn start_dbm_transfer( | ||
| 267 | dma: pac::dma::Dma, | ||
| 268 | channel_number: u8, | ||
| 269 | request: Request, | ||
| 270 | dir: vals::Dir, | ||
| 271 | peri_addr: *const u32, | ||
| 272 | mem0_addr: *mut u32, | ||
| 273 | mem1_addr: *mut u32, | ||
| 274 | mem_len: usize, | ||
| 275 | incr_mem: bool, | ||
| 276 | data_size: vals::Size, | ||
| 277 | options: TransferOptions, | ||
| 278 | #[cfg(dmamux)] dmamux_regs: pac::dmamux::Dmamux, | ||
| 279 | #[cfg(dmamux)] dmamux_ch_num: u8, | ||
| 280 | ) { | ||
| 281 | #[cfg(dmamux)] | ||
| 282 | super::super::dmamux::configure_dmamux(dmamux_regs, dmamux_ch_num, request); | ||
| 283 | |||
| 284 | trace!( | ||
| 285 | "Starting DBM transfer with 0: 0x{:x}, 1: 0x{:x}, len: 0x{:x}", | ||
| 286 | mem0_addr as u32, | ||
| 287 | mem1_addr as u32, | ||
| 288 | mem_len | ||
| 289 | ); | ||
| 290 | |||
| 291 | // "Preceding reads and writes cannot be moved past subsequent writes." | ||
| 292 | fence(Ordering::SeqCst); | ||
| 293 | |||
| 294 | reset_status(dma, channel_number); | ||
| 295 | |||
| 296 | let ch = dma.st(channel_number as _); | ||
| 297 | ch.par().write_value(peri_addr as u32); | ||
| 298 | ch.m0ar().write_value(mem0_addr as u32); | ||
| 299 | // configures the second buffer for DBM | ||
| 300 | ch.m1ar().write_value(mem1_addr as u32); | ||
| 301 | ch.ndtr().write_value(regs::Ndtr(mem_len as _)); | ||
| 302 | ch.cr().write(|w| { | ||
| 303 | w.set_dir(dir); | ||
| 304 | w.set_msize(data_size); | ||
| 305 | w.set_psize(data_size); | ||
| 306 | w.set_pl(vals::Pl::VERYHIGH); | ||
| 307 | if incr_mem { | ||
| 308 | w.set_minc(vals::Inc::INCREMENTED); | ||
| 309 | } else { | ||
| 310 | w.set_minc(vals::Inc::FIXED); | ||
| 311 | } | ||
| 312 | w.set_pinc(vals::Inc::FIXED); | ||
| 313 | w.set_teie(true); | ||
| 314 | w.set_tcie(true); | ||
| 315 | |||
| 316 | #[cfg(dma_v1)] | ||
| 317 | w.set_trbuff(true); | ||
| 318 | |||
| 319 | #[cfg(dma_v2)] | ||
| 320 | w.set_chsel(request); | ||
| 321 | |||
| 322 | // enable double buffered mode | ||
| 323 | w.set_dbm(vals::Dbm::ENABLED); | ||
| 324 | |||
| 325 | w.set_pburst(options.pburst.into()); | ||
| 326 | w.set_mburst(options.mburst.into()); | ||
| 327 | w.set_pfctrl(options.flow_ctrl.into()); | ||
| 328 | |||
| 329 | w.set_en(true); | ||
| 330 | }); | ||
| 331 | } | ||
| 332 | |||
| 333 | pub unsafe fn set_dbm_buffer0(dma: pac::dma::Dma, channel_number: u8, mem_addr: *mut u32) { | ||
| 334 | // get a handle on the channel itself | ||
| 335 | let ch = dma.st(channel_number as _); | ||
| 336 | // change M0AR to the new address | ||
| 337 | ch.m0ar().write_value(mem_addr as _); | ||
| 338 | } | ||
| 339 | |||
| 340 | pub unsafe fn set_dbm_buffer1(dma: pac::dma::Dma, channel_number: u8, mem_addr: *mut u32) { | ||
| 341 | // get a handle on the channel itself | ||
| 342 | let ch = dma.st(channel_number as _); | ||
| 343 | // change M1AR to the new address | ||
| 344 | ch.m1ar().write_value(mem_addr as _); | ||
| 345 | } | ||
| 346 | |||
| 347 | pub unsafe fn is_buffer0_accessible(dma: pac::dma::Dma, channel_number: u8) -> bool { | ||
| 348 | // get a handle on the channel itself | ||
| 349 | let ch = dma.st(channel_number as _); | ||
| 350 | // check the current target register value | ||
| 351 | ch.cr().read().ct() == vals::Ct::MEMORY1 | ||
| 352 | } | ||
| 353 | |||
| 215 | /// Stops the DMA channel. | 354 | /// Stops the DMA channel. |
| 216 | pub unsafe fn request_stop(dma: pac::dma::Dma, channel_number: u8) { | 355 | pub unsafe fn request_stop(dma: pac::dma::Dma, channel_number: u8) { |
| 217 | // get a handle on the channel itself | 356 | // get a handle on the channel itself |
| @@ -246,7 +385,7 @@ mod low_level_api { | |||
| 246 | 385 | ||
| 247 | /// Sets the waker for the specified DMA channel | 386 | /// Sets the waker for the specified DMA channel |
| 248 | pub unsafe fn set_waker(state_number: usize, waker: &Waker) { | 387 | pub unsafe fn set_waker(state_number: usize, waker: &Waker) { |
| 249 | STATE.ch_wakers[state_number].register(waker); | 388 | STATE.channels[state_number].waker.register(waker); |
| 250 | } | 389 | } |
| 251 | 390 | ||
| 252 | pub unsafe fn reset_status(dma: pac::dma::Dma, channel_number: u8) { | 391 | pub unsafe fn reset_status(dma: pac::dma::Dma, channel_number: u8) { |
| @@ -260,9 +399,9 @@ mod low_level_api { | |||
| 260 | } | 399 | } |
| 261 | 400 | ||
| 262 | /// Safety: Must be called with a matching set of parameters for a valid dma channel | 401 | /// Safety: Must be called with a matching set of parameters for a valid dma channel |
| 263 | pub unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: u8, index: u8) { | 402 | pub unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: u8, state_index: u8) { |
| 264 | let channel_num = channel_num as usize; | 403 | let channel_num = channel_num as usize; |
| 265 | let index = index as usize; | 404 | let state_index = state_index as usize; |
| 266 | 405 | ||
| 267 | let cr = dma.st(channel_num).cr(); | 406 | let cr = dma.st(channel_num).cr(); |
| 268 | let isr = dma.isr(channel_num / 4).read(); | 407 | let isr = dma.isr(channel_num / 4).read(); |
| @@ -273,9 +412,16 @@ mod low_level_api { | |||
| 273 | dma.0 as u32, channel_num | 412 | dma.0 as u32, channel_num |
| 274 | ); | 413 | ); |
| 275 | } | 414 | } |
| 415 | |||
| 276 | if isr.tcif(channel_num % 4) && cr.read().tcie() { | 416 | if isr.tcif(channel_num % 4) && cr.read().tcie() { |
| 277 | cr.write(|_| ()); // Disable channel interrupts with the default value. | 417 | if cr.read().dbm() == vals::Dbm::DISABLED { |
| 278 | STATE.ch_wakers[index].wake(); | 418 | cr.write(|_| ()); // Disable channel with the default value. |
| 419 | } else { | ||
| 420 | // for double buffered mode, clear TCIF flag but do not stop the transfer | ||
| 421 | dma.ifcr(channel_num / 4) | ||
| 422 | .write(|w| w.set_tcif(channel_num % 4, true)); | ||
| 423 | } | ||
| 424 | STATE.channels[state_index].waker.wake(); | ||
| 279 | } | 425 | } |
| 280 | } | 426 | } |
| 281 | } | 427 | } |
diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs index 8e9823772..f96ccbf6e 100644 --- a/embassy-stm32/src/dma/mod.rs +++ b/embassy-stm32/src/dma/mod.rs | |||
| @@ -76,6 +76,25 @@ pub(crate) mod sealed { | |||
| 76 | options: TransferOptions, | 76 | options: TransferOptions, |
| 77 | ); | 77 | ); |
| 78 | 78 | ||
| 79 | /// DMA double-buffered mode is unsafe as UB can happen when the hardware writes to a buffer currently owned by the software | ||
| 80 | /// more information can be found here: https://github.com/embassy-rs/embassy/issues/702 | ||
| 81 | /// This feature is now used solely for the purposes of implementing giant DMA transfers required for DCMI | ||
| 82 | unsafe fn start_double_buffered_read<W: super::Word>( | ||
| 83 | &mut self, | ||
| 84 | request: Request, | ||
| 85 | reg_addr: *const W, | ||
| 86 | buffer0: *mut W, | ||
| 87 | buffer1: *mut W, | ||
| 88 | buffer_len: usize, | ||
| 89 | options: TransferOptions, | ||
| 90 | ); | ||
| 91 | |||
| 92 | unsafe fn set_buffer0<W: super::Word>(&mut self, buffer: *mut W); | ||
| 93 | |||
| 94 | unsafe fn set_buffer1<W: super::Word>(&mut self, buffer: *mut W); | ||
| 95 | |||
| 96 | unsafe fn is_buffer0_accessible(&mut self) -> bool; | ||
| 97 | |||
| 79 | /// Requests the channel to stop. | 98 | /// Requests the channel to stop. |
| 80 | /// NOTE: The channel does not immediately stop, you have to wait | 99 | /// NOTE: The channel does not immediately stop, you have to wait |
| 81 | /// for `is_running() = false`. | 100 | /// for `is_running() = false`. |
diff --git a/examples/stm32h7/memory.x b/examples/stm32h7/memory.x index c23c397a4..026b14b9b 100644 --- a/examples/stm32h7/memory.x +++ b/examples/stm32h7/memory.x | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | MEMORY | 1 | MEMORY |
| 2 | { | 2 | { |
| 3 | FLASH : ORIGIN = 0x8000000, LENGTH = 1024K | 3 | FLASH : ORIGIN = 0x8000000, LENGTH = 1024K |
| 4 | RAM : ORIGIN = 0x24000000, LENGTH = 128K | 4 | RAM : ORIGIN = 0x24000000, LENGTH = 384K |
| 5 | } \ No newline at end of file | 5 | } |
