diff options
| author | Timo Kröger <[email protected]> | 2024-03-14 22:05:22 +0100 |
|---|---|---|
| committer | Timo Kröger <[email protected]> | 2024-03-14 22:05:22 +0100 |
| commit | 57ca072dc3807a868415d91b39f814c30c2e844b (patch) | |
| tree | 54ef6f16b488cbd6c1b3ea9ed48d1892ab4c59f6 | |
| parent | 62b0410e865044080781765d9ccee06202a82dff (diff) | |
[UCPD] Enable RX PHY only when receiving
| -rw-r--r-- | embassy-stm32/src/ucpd.rs | 61 |
1 file changed, 23 insertions, 38 deletions
diff --git a/embassy-stm32/src/ucpd.rs b/embassy-stm32/src/ucpd.rs index dcc4454d3..2d119c973 100644 --- a/embassy-stm32/src/ucpd.rs +++ b/embassy-stm32/src/ucpd.rs | |||
| @@ -163,10 +163,7 @@ impl<'d, T: Instance> Ucpd<'d, T> { | |||
| 163 | r.tx_ordsetr().write(|w| w.set_txordset(0b10001_11000_11000_11000)); | 163 | r.tx_ordsetr().write(|w| w.set_txordset(0b10001_11000_11000_11000)); |
| 164 | 164 | ||
| 165 | // Enable the receiver on one of the two CC lines. | 165 | // Enable the receiver on one of the two CC lines. |
| 166 | r.cr().modify(|w| { | 166 | r.cr().modify(|w| w.set_phyccsel(cc_sel)); |
| 167 | w.set_phyccsel(cc_sel); | ||
| 168 | w.set_phyrxen(true); | ||
| 169 | }); | ||
| 170 | 167 | ||
| 171 | // Enable hard reset receive interrupt. | 168 | // Enable hard reset receive interrupt. |
| 172 | r.imr().modify(|w| w.set_rxhrstdetie(true)); | 169 | r.imr().modify(|w| w.set_rxhrstdetie(true)); |
| @@ -319,15 +316,12 @@ pub struct PdPhy<'d, T: Instance> { | |||
| 319 | 316 | ||
| 320 | impl<'d, T: Instance> Drop for PdPhy<'d, T> { | 317 | impl<'d, T: Instance> Drop for PdPhy<'d, T> { |
| 321 | fn drop(&mut self) { | 318 | fn drop(&mut self) { |
| 322 | let r = T::REGS; | ||
| 323 | r.cr().modify(|w| w.set_phyrxen(false)); | ||
| 324 | |||
| 325 | // Check if the Type-C part was dropped already. | 319 | // Check if the Type-C part was dropped already. |
| 326 | let drop_not_ready = &T::state().drop_not_ready; | 320 | let drop_not_ready = &T::state().drop_not_ready; |
| 327 | if drop_not_ready.load(Ordering::Relaxed) { | 321 | if drop_not_ready.load(Ordering::Relaxed) { |
| 328 | drop_not_ready.store(true, Ordering::Relaxed); | 322 | drop_not_ready.store(true, Ordering::Relaxed); |
| 329 | } else { | 323 | } else { |
| 330 | r.cfgr1().write(|w| w.set_ucpden(false)); | 324 | T::REGS.cfgr1().write(|w| w.set_ucpden(false)); |
| 331 | T::disable(); | 325 | T::disable(); |
| 332 | T::Interrupt::disable(); | 326 | T::Interrupt::disable(); |
| 333 | } | 327 | } |
| @@ -341,16 +335,6 @@ impl<'d, T: Instance> PdPhy<'d, T> { | |||
| 341 | pub async fn receive(&mut self, buf: &mut [u8]) -> Result<usize, RxError> { | 335 | pub async fn receive(&mut self, buf: &mut [u8]) -> Result<usize, RxError> { |
| 342 | let r = T::REGS; | 336 | let r = T::REGS; |
| 343 | 337 | ||
| 344 | // Check if a message is already being received. If yes, wait until its | ||
| 345 | // done, ignore errors and try to receive the next message. | ||
| 346 | if r.sr().read().rxorddet() { | ||
| 347 | if let Err(RxError::HardReset) = self.wait_rx_done().await { | ||
| 348 | return Err(RxError::HardReset); | ||
| 349 | } | ||
| 350 | r.rxdr().read(); // Clear the RX buffer. | ||
| 351 | } | ||
| 352 | |||
| 353 | // Keep the DMA transfer alive so its drop code does not stop it right away. | ||
| 354 | let dma = unsafe { | 338 | let dma = unsafe { |
| 355 | Transfer::new_read( | 339 | Transfer::new_read( |
| 356 | &self.rx_dma_ch, | 340 | &self.rx_dma_ch, |
| @@ -361,22 +345,20 @@ impl<'d, T: Instance> PdPhy<'d, T> { | |||
| 361 | ) | 345 | ) |
| 362 | }; | 346 | }; |
| 363 | 347 | ||
| 364 | self.wait_rx_done().await?; | 348 | // Clear interrupt flags (possibly set from last receive). |
| 365 | 349 | r.icr().write(|w| { | |
| 366 | // Make sure the the last byte to byte was fetched by DMA. | 350 | w.set_rxorddetcf(true); |
| 367 | while r.sr().read().rxne() { | 351 | w.set_rxovrcf(true); |
| 368 | if dma.get_remaining_transfers() == 0 { | 352 | w.set_rxmsgendcf(true); |
| 369 | return Err(RxError::Overrun); | 353 | }); |
| 370 | } | ||
| 371 | } | ||
| 372 | 354 | ||
| 373 | Ok(r.rx_payszr().read().rxpaysz().into()) | 355 | r.cr().modify(|w| w.set_phyrxen(true)); |
| 374 | } | 356 | let _on_drop = OnDrop::new(|| { |
| 357 | r.cr().modify(|w| w.set_phyrxen(false)); | ||
| 358 | self.enable_rx_interrupt(false); | ||
| 359 | }); | ||
| 375 | 360 | ||
| 376 | async fn wait_rx_done(&self) -> Result<(), RxError> { | ||
| 377 | let _on_drop = OnDrop::new(|| self.enable_rx_interrupt(false)); | ||
| 378 | poll_fn(|cx| { | 361 | poll_fn(|cx| { |
| 379 | let r = T::REGS; | ||
| 380 | let sr = r.sr().read(); | 362 | let sr = r.sr().read(); |
| 381 | if sr.rxhrstdet() { | 363 | if sr.rxhrstdet() { |
| 382 | // Clean and re-enable hard reset receive interrupt. | 364 | // Clean and re-enable hard reset receive interrupt. |
| @@ -391,12 +373,6 @@ impl<'d, T: Instance> PdPhy<'d, T> { | |||
| 391 | } else { | 373 | } else { |
| 392 | Ok(()) | 374 | Ok(()) |
| 393 | }; | 375 | }; |
| 394 | // Message received, clear interrupt flags. | ||
| 395 | r.icr().write(|w| { | ||
| 396 | w.set_rxorddetcf(true); | ||
| 397 | w.set_rxovrcf(true); | ||
| 398 | w.set_rxmsgendcf(true); | ||
| 399 | }); | ||
| 400 | Poll::Ready(ret) | 376 | Poll::Ready(ret) |
| 401 | } else { | 377 | } else { |
| 402 | T::state().waker.register(cx.waker()); | 378 | T::state().waker.register(cx.waker()); |
| @@ -404,7 +380,16 @@ impl<'d, T: Instance> PdPhy<'d, T> { | |||
| 404 | Poll::Pending | 380 | Poll::Pending |
| 405 | } | 381 | } |
| 406 | }) | 382 | }) |
| 407 | .await | 383 | .await?; |
| 384 | |||
| 385 | // Make sure that the last byte was fetched by DMA. | ||
| 386 | while r.sr().read().rxne() { | ||
| 387 | if dma.get_remaining_transfers() == 0 { | ||
| 388 | return Err(RxError::Overrun); | ||
| 389 | } | ||
| 390 | } | ||
| 391 | |||
| 392 | Ok(r.rx_payszr().read().rxpaysz().into()) | ||
| 408 | } | 393 | } |
| 409 | 394 | ||
| 410 | fn enable_rx_interrupt(&self, enable: bool) { | 395 | fn enable_rx_interrupt(&self, enable: bool) { |
