aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--embassy-stm32/src/ucpd.rs99
1 file changed, 97 insertions(+), 2 deletions(-)
diff --git a/embassy-stm32/src/ucpd.rs b/embassy-stm32/src/ucpd.rs
index 96cd92764..f3f225d0c 100644
--- a/embassy-stm32/src/ucpd.rs
+++ b/embassy-stm32/src/ucpd.rs
@@ -22,7 +22,7 @@ use embassy_hal_internal::drop::OnDrop;
22use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; 22use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
23use embassy_sync::waitqueue::AtomicWaker; 23use embassy_sync::waitqueue::AtomicWaker;
24 24
25use crate::dma::AnyChannel; 25use crate::dma::{AnyChannel, Request, Transfer, TransferOptions};
26use crate::interrupt; 26use crate::interrupt;
27use crate::pac::ucpd::vals::{Anamode, Ccenable, PscUsbpdclk}; 27use crate::pac::ucpd::vals::{Anamode, Ccenable, PscUsbpdclk};
28pub use crate::pac::ucpd::vals::{Phyccsel as CcSel, TypecVstateCc as CcVState}; 28pub use crate::pac::ucpd::vals::{Phyccsel as CcSel, TypecVstateCc as CcVState};
@@ -99,7 +99,7 @@ impl<'d, T: Instance> Ucpd<'d, T> {
99 // 1.75us * 17 = ~30us 99 // 1.75us * 17 = ~30us
100 w.set_ifrgap(17 - 1); 100 w.set_ifrgap(17 - 1);
101 101
102 // TODO: Only receive SOP messages 102 // TODO: Currently only SOP messages are supported.
103 w.set_rxordseten(0x1); 103 w.set_rxordseten(0x1);
104 104
105 // Enable DMA and the peripheral 105 // Enable DMA and the peripheral
@@ -186,6 +186,14 @@ impl<'d, T: Instance> Ucpd<'d, T> {
186 tx_dma: impl Peripheral<P = impl TxDma<T>> + 'd, 186 tx_dma: impl Peripheral<P = impl TxDma<T>> + 'd,
187 cc_sel: CcSel, 187 cc_sel: CcSel,
188 ) -> (PdRx<'_, T>, PdTx<'_, T>) { 188 ) -> (PdRx<'_, T>, PdTx<'_, T>) {
189 let r = T::REGS;
190
191 // Enable the receiver on one of the two CC lines.
192 r.cr().modify(|w| {
193 w.set_phyccsel(cc_sel);
194 w.set_phyrxen(true);
195 });
196
189 into_ref!(rx_dma, tx_dma); 197 into_ref!(rx_dma, tx_dma);
190 let rx_dma_req = rx_dma.request(); 198 let rx_dma_req = rx_dma.request();
191 let tx_dma_req = tx_dma.request(); 199 let tx_dma_req = tx_dma.request();
@@ -204,6 +212,16 @@ impl<'d, T: Instance> Ucpd<'d, T> {
204 } 212 }
205} 213}
206 214
/// Receive Error.
///
/// Returned by [`PdRx::receive`] when a message could not be received
/// correctly.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RxError {
    /// Incorrect CRC or truncated message (a line becoming static before EOP is met).
    Crc,

    /// Provided buffer was too small for the received message.
    Overrun,
}
224
207/// Power Delivery (PD) Receiver. 225/// Power Delivery (PD) Receiver.
208pub struct PdRx<'d, T: Instance> { 226pub struct PdRx<'d, T: Instance> {
209 _ucpd: &'d Ucpd<'d, T>, 227 _ucpd: &'d Ucpd<'d, T>,
@@ -217,6 +235,79 @@ impl<'d, T: Instance> Drop for PdRx<'d, T> {
217 } 235 }
218} 236}
219 237
impl<'d, T: Instance> PdRx<'d, T> {
    /// Receives a PD message into the provided buffer.
    ///
    /// Returns the number of received bytes or an error.
    ///
    /// If a reception is already in progress when this is called (RXORDDET
    /// set), that message is drained and discarded first, then this call
    /// waits for the next message.
    pub async fn receive(&mut self, buf: &mut [u8]) -> Result<usize, RxError> {
        let r = T::REGS;

        // Check if a message is already being received. If yes, wait until it's
        // done, ignore errors and try to receive the next message.
        if r.sr().read().rxorddet() {
            let _ = self.wait_rx_done().await;
            r.rxdr().read(); // Clear the RX buffer.
        }

        // Keep the DMA transfer alive so its drop code does not stop it right away.
        // It must outlive the await below: the UCPD receiver fills `buf` through
        // this transfer while we wait for RXMSGEND.
        let dma = unsafe {
            // Disable the DMA complete interrupt because the end of packet is
            // signaled by the UCPD receiver. When the DMA buffer is too short
            // DMA stops by itself and the overrun RXOVR flag of UCPD is set.
            let mut transfer_options = TransferOptions::default();
            transfer_options.complete_transfer_ir = false;

            Transfer::new_read(
                &self.dma_ch,
                self.dma_req,
                r.rxdr().as_ptr() as *mut u8,
                buf,
                transfer_options,
            )
        };

        self.wait_rx_done().await?;

        // Make sure the last byte was fetched by DMA before reporting the
        // payload size. If RXNE is still set but the DMA has no transfers
        // left, the buffer was too small for the message.
        while r.sr().read().rxne() {
            if dma.get_remaining_transfers() == 0 {
                return Err(RxError::Overrun);
            }
        }

        // NOTE(review): assumes RX_PAYSZR reflects the byte count of the
        // message just completed — consistent with reading it only after
        // RXMSGEND; confirm against the UCPD reference manual.
        Ok(r.rx_payszr().read().rxpaysz().into())
    }

    /// Waits until the current reception finishes (RXMSGEND), then maps the
    /// status flags to a result: RXOVR -> `Overrun`, RXERR -> `Crc`.
    ///
    /// Clears the RX interrupt flags before returning so the next call starts
    /// from a clean state.
    async fn wait_rx_done(&self) -> Result<(), RxError> {
        poll_fn(|cx| {
            let r = T::REGS;
            let sr = r.sr().read();
            if sr.rxmsgend() {
                // Overrun takes priority over CRC error when both are set.
                let ret = if sr.rxovr() {
                    Err(RxError::Overrun)
                } else if sr.rxerr() {
                    Err(RxError::Crc)
                } else {
                    Ok(())
                };
                // Message received, clear interrupt flags.
                r.icr().write(|w| {
                    w.set_rxorddetcf(true);
                    w.set_rxovrcf(true);
                    w.set_rxmsgendcf(true);
                });
                Poll::Ready(ret)
            } else {
                // Enable receiver interrupt. Register the waker first so a
                // wake between enabling the interrupt and returning Pending
                // is not lost; the ISR disables RXMSGENDIE and wakes us.
                T::waker().register(cx.waker());
                r.imr().modify(|w| w.set_rxmsgendie(true));
                Poll::Pending
            }
        })
        .await
    }
}
310
220/// Power Delivery (PD) Transmitter. 311/// Power Delivery (PD) Transmitter.
221pub struct PdTx<'d, T: Instance> { 312pub struct PdTx<'d, T: Instance> {
222 _ucpd: &'d Ucpd<'d, T>, 313 _ucpd: &'d Ucpd<'d, T>,
@@ -241,6 +332,10 @@ impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandl
241 }); 332 });
242 } 333 }
243 334
335 if sr.rxmsgend() {
336 r.imr().modify(|w| w.set_rxmsgendie(false));
337 }
338
244 // Wake the task to clear and re-enabled interrupts. 339 // Wake the task to clear and re-enabled interrupts.
245 T::waker().wake(); 340 T::waker().wake();
246 } 341 }