aboutsummaryrefslogtreecommitdiff
path: root/embassy-nrf/src/buffered_uarte/v2.rs
diff options
context:
space:
mode:
authorRaul Alimbekov <[email protected]>2025-12-16 09:05:22 +0300
committerGitHub <[email protected]>2025-12-16 09:05:22 +0300
commitc9a04b4b732b7a3b696eb8223664c1a7942b1875 (patch)
tree6dbe5c02e66eed8d8762f13f95afd24f8db2b38c /embassy-nrf/src/buffered_uarte/v2.rs
parentcde24a3ef1117653ba5ed4184102b33f745782fb (diff)
parent5ae6e060ec1c90561719aabdc29d5b6e7b8b0a82 (diff)
Merge branch 'main' into main
Diffstat (limited to 'embassy-nrf/src/buffered_uarte/v2.rs')
-rw-r--r--embassy-nrf/src/buffered_uarte/v2.rs687
1 files changed, 687 insertions, 0 deletions
diff --git a/embassy-nrf/src/buffered_uarte/v2.rs b/embassy-nrf/src/buffered_uarte/v2.rs
new file mode 100644
index 000000000..d0d2d97d1
--- /dev/null
+++ b/embassy-nrf/src/buffered_uarte/v2.rs
@@ -0,0 +1,687 @@
1//! Async buffered UART driver.
2//!
3//! Note that discarding a future from a read or write operation may lead to losing
4//! data. For example, when using `futures_util::future::select` and completion occurs
5//! on the "other" future, you should capture the incomplete future and continue to use
6//! it for the next read or write. This pattern is a consideration for all IO, and not
7//! just serial communications.
8//!
9//! Please also see [crate::uarte] to understand when [BufferedUarte] should be used.
10//!
11//! The code is based on the generic buffered_uarte implementation but uses the nrf54l
12//! frame timeout event to correctly determine the size of transferred data.
13//! Counting of rxrdy events, used in the generic implementation, cannot be applied
14//! to nrf54l chips, as they buffer up to 4 bytes in a single DMA transaction.
15//! The only reliable way to find the number of bytes received is to stop the transfer,
16//! wait for the DMA stopped event, and read the value in the rx.dma.amount register.
17//! This also flushes all in-flight data to RAM.
18
19use core::cmp::min;
20use core::future::{Future, poll_fn};
21use core::marker::PhantomData;
22use core::slice;
23use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering, compiler_fence};
24use core::task::Poll;
25
26use embassy_hal_internal::Peri;
27use embassy_hal_internal::atomic_ring_buffer::RingBuffer;
28use pac::uarte::vals;
29// Re-export SVD variants to allow user to directly set values
30pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity};
31
32use crate::gpio::{AnyPin, Pin as GpioPin};
33use crate::interrupt::typelevel::Interrupt;
34use crate::uarte::{Config, Instance as UarteInstance, configure, configure_rx_pins, configure_tx_pins, drop_tx_rx};
35use crate::{EASY_DMA_SIZE, interrupt, pac};
36
pub(crate) struct State {
    // Ring buffer holding bytes queued for transmission; drained by the IRQ handler.
    tx_buf: RingBuffer,
    // Number of bytes in flight in the active TX DMA transfer (0 = TX idle).
    tx_count: AtomicUsize,

    // Ring buffer the RX DMA writes into; drained by `fill_buf`/`read`.
    rx_buf: RingBuffer,
    // Set once the IRQ handler has started the first RX DMA transfer.
    rx_started: AtomicBool,
}
44
/// UART error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    // No errors for now: RX overrun currently panics in the IRQ handler rather
    // than being reported here. `#[non_exhaustive]` leaves room for variants.
}
52
53impl State {
54 pub(crate) const fn new() -> Self {
55 Self {
56 tx_buf: RingBuffer::new(),
57 tx_count: AtomicUsize::new(0),
58
59 rx_buf: RingBuffer::new(),
60 rx_started: AtomicBool::new(false),
61 }
62 }
63}
64
/// Interrupt handler.
///
/// Bind this to the UARTE instance's interrupt (the constructors require a
/// matching `interrupt::typelevel::Binding`) so the driver's IRQ routine can
/// service the RX/TX DMA events.
pub struct InterruptHandler<U: UarteInstance> {
    // Zero-sized marker tying the handler to a concrete UARTE instance.
    _phantom: PhantomData<U>,
}
69
70impl<U: UarteInstance> interrupt::typelevel::Handler<U::Interrupt> for InterruptHandler<U> {
71 unsafe fn on_interrupt() {
72 info!("irq: start");
73 let r = U::regs();
74 let ss = U::state();
75 let s = U::buffered_state();
76
77 if let Some(mut rx) = unsafe { s.rx_buf.try_writer() } {
78 let buf_len = s.rx_buf.len();
79 let half_len = buf_len / 2;
80
81 if r.events_error().read() != 0 {
82 r.events_error().write_value(0);
83 let errs = r.errorsrc().read();
84 r.errorsrc().write_value(errs);
85
86 if errs.overrun() {
87 panic!("BufferedUarte UART overrun");
88 }
89 }
90
91 let first_run = !s.rx_started.swap(true, Ordering::Relaxed);
92 if r.events_dma().rx().end().read() != 0 || first_run {
93 //trace!(" irq_rx: endrx");
94 r.events_dma().rx().end().write_value(0);
95
96 if !first_run {
97 // Received some bytes, wake task.
98 let rxed = r.dma().rx().amount().read().amount() as usize;
99 rx.push_done(rxed);
100 ss.rx_waker.wake();
101 }
102
103 let (ptr, len) = rx.push_buf();
104 if len == 0 {
105 panic!("BufferedUarte buffer overrun");
106 }
107
108 let len = if len > half_len { half_len } else { len };
109
110 // Set up the DMA read
111 r.dma().rx().ptr().write_value(ptr as u32);
112 r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));
113
114 // manually start
115 r.tasks_dma().rx().start().write_value(1);
116 }
117 }
118
119 // =============================
120
121 if let Some(mut tx) = unsafe { s.tx_buf.try_reader() } {
122 // TX end
123 if r.events_dma().tx().end().read() != 0 {
124 r.events_dma().tx().end().write_value(0);
125
126 let n = s.tx_count.load(Ordering::Relaxed);
127 //trace!(" irq_tx: endtx {:?}", n);
128 tx.pop_done(n);
129 ss.tx_waker.wake();
130 s.tx_count.store(0, Ordering::Relaxed);
131 }
132
133 // If not TXing, start.
134 if s.tx_count.load(Ordering::Relaxed) == 0 {
135 let (ptr, len) = tx.pop_buf();
136 let len = len.min(EASY_DMA_SIZE);
137 if len != 0 {
138 //trace!(" irq_tx: starting {:?}", len);
139 s.tx_count.store(len, Ordering::Relaxed);
140
141 // Set up the DMA write
142 r.dma().tx().ptr().write_value(ptr as u32);
143 r.dma().tx().maxcnt().write(|w| w.set_maxcnt(len as _));
144
145 // Start UARTE Transmit transaction
146 r.tasks_dma().tx().start().write_value(1);
147 }
148 }
149 }
150
151 //trace!("irq: end");
152 }
153}
154
/// Buffered UARTE driver.
///
/// Owns both directions; use [`split`](Self::split) (or `split_by_ref`) to
/// read and write concurrently from independent tasks.
pub struct BufferedUarte<'d, U: UarteInstance> {
    // Write half; TX state and DMA setup.
    tx: BufferedUarteTx<'d, U>,
    // Read half; RX state and DMA setup.
    rx: BufferedUarteRx<'d, U>,
}
160
161impl<'d, U: UarteInstance> Unpin for BufferedUarte<'d, U> {}
162
impl<'d, U: UarteInstance> BufferedUarte<'d, U> {
    /// Create a new BufferedUarte without hardware flow control.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        uarte: Peri<'d, U>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), txd.into(), None, None, config, rx_buffer, tx_buffer)
    }

    /// Create a new BufferedUarte with hardware flow control (RTS/CTS)
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_rtscts(
        uarte: Peri<'d, U>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(
            uarte,
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
            rx_buffer,
            tx_buffer,
        )
    }

    // Shared constructor: configure the peripheral, set up both halves, then
    // enable the UARTE and arm the interrupt. The ordering here is
    // significant hardware init — do not reorder.
    #[allow(clippy::too_many_arguments)]
    fn new_inner(
        peri: Peri<'d, U>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        configure(U::regs(), config, cts.is_some());

        // The peripheral handle is shared between the two halves; the
        // refcount stored below tracks how many halves are alive.
        let tx = BufferedUarteTx::new_innerer(unsafe { peri.clone_unchecked() }, txd, cts, tx_buffer);
        let rx = BufferedUarteRx::new_innerer(peri, rxd, rts, rx_buffer);

        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend the interrupt so the handler runs once immediately and starts
        // the initial RX DMA transfer (see `State::rx_started`).
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };

        // Both halves alive; each `Drop` calls `drop_tx_rx`, which presumably
        // tears down the peripheral once this reaches zero — see `drop_tx_rx`.
        U::state().tx_rx_refcount.store(2, Ordering::Relaxed);

        Self { tx, rx }
    }

    /// Adjust the baud rate to the provided value.
    pub fn set_baudrate(&mut self, baudrate: Baudrate) {
        let r = U::regs();
        r.baudrate().write(|w| w.set_baudrate(baudrate));
    }

    /// Split the UART in reader and writer parts.
    ///
    /// This allows reading and writing concurrently from independent tasks.
    pub fn split(self) -> (BufferedUarteRx<'d, U>, BufferedUarteTx<'d, U>) {
        (self.rx, self.tx)
    }

    /// Split the UART in reader and writer parts, by reference.
    ///
    /// The returned halves borrow from `self`, so you can drop them and go back to using
    /// the "un-split" `self`. This allows temporarily splitting the UART.
    pub fn split_by_ref(&mut self) -> (&mut BufferedUarteRx<'d, U>, &mut BufferedUarteTx<'d, U>) {
        (&mut self.rx, &mut self.tx)
    }

    /// Pull some bytes from this source into the specified buffer, returning how many bytes were read.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        self.rx.read(buf).await
    }

    /// Return the contents of the internal buffer, filling it with more data from the inner reader if it is empty.
    pub async fn fill_buf(&mut self) -> Result<&[u8], Error> {
        self.rx.fill_buf().await
    }

    /// Tell this buffer that `amt` bytes have been consumed from the buffer, so they should no longer be returned in calls to `fill_buf`.
    pub fn consume(&mut self, amt: usize) {
        self.rx.consume(amt)
    }

    /// Write a buffer into this writer, returning how many bytes were written.
    pub async fn write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        self.tx.write(buf).await
    }

    /// Try writing a buffer without waiting, returning how many bytes were written.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        self.tx.try_write(buf)
    }

    /// Flush this output stream, ensuring that all intermediately buffered contents reach their destination.
    pub async fn flush(&mut self) -> Result<(), Error> {
        self.tx.flush().await
    }
}
279
/// Writer (TX) part of the buffered UARTE driver.
pub struct BufferedUarteTx<'d, U: UarteInstance> {
    // Keeps the UARTE peripheral alive for 'd; the actual TX state lives in
    // `U::buffered_state()`.
    _peri: Peri<'d, U>,
}
284
impl<'d, U: UarteInstance> BufferedUarteTx<'d, U> {
    /// Create a new BufferedUarteTx without hardware flow control.
    pub fn new(
        uarte: Peri<'d, U>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, txd.into(), None, config, tx_buffer)
    }

    /// Create a new BufferedUarte with hardware flow control (RTS/CTS)
    pub fn new_with_cts(
        uarte: Peri<'d, U>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, txd.into(), Some(cts.into()), config, tx_buffer)
    }

    // Standalone-TX constructor: configure, set up, then enable the UARTE and
    // arm the interrupt.
    fn new_inner(
        peri: Peri<'d, U>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        configure(U::regs(), config, cts.is_some());

        let this = Self::new_innerer(peri, txd, cts, tx_buffer);

        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };

        // TX-only usage: a single reference keeps the peripheral alive.
        U::state().tx_rx_refcount.store(1, Ordering::Relaxed);

        this
    }

    // Pin/buffer/interrupt setup shared with the combined `BufferedUarte`
    // constructor (which performs enable/pend itself).
    fn new_innerer(
        peri: Peri<'d, U>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();

        configure_tx_pins(r, txd, cts);

        // Initialize state
        let s = U::buffered_state();
        s.tx_count.store(0, Ordering::Relaxed);
        let len = tx_buffer.len();
        unsafe { s.tx_buf.init(tx_buffer.as_mut_ptr(), len) };

        // NOTE(review): this clears the TX DMA READY event, while the IRQ
        // handler consumes the END event — confirm whether END should be
        // cleared here instead/as well.
        r.events_dma().tx().ready().write_value(0);

        // Enable interrupts
        // TX DMA END is the event the IRQ handler uses to advance the ring
        // buffer and start the next transfer.
        r.intenset().write(|w| {
            w.set_dmatxend(true);
        });

        Self { _peri: peri }
    }

    /// Write a buffer into this writer, returning how many bytes were written.
    ///
    /// Copies as many bytes as fit into the TX ring buffer, then pends the
    /// interrupt so the handler can start a DMA transfer. Pends (waits) only
    /// while the ring buffer is completely full.
    pub fn write<'a>(&'a mut self, buf: &'a [u8]) -> impl Future<Output = Result<usize, Error>> + 'a {
        poll_fn(move |cx| {
            //trace!("poll_write: {:?}", buf.len());
            let ss = U::state();
            let s = U::buffered_state();
            let mut tx = unsafe { s.tx_buf.writer() };

            let tx_buf = tx.push_slice();
            if tx_buf.is_empty() {
                //trace!("poll_write: pending");
                // Ring buffer full: wait for the IRQ handler to drain it.
                ss.tx_waker.register(cx.waker());
                return Poll::Pending;
            }

            let n = min(tx_buf.len(), buf.len());
            tx_buf[..n].copy_from_slice(&buf[..n]);
            tx.push_done(n);

            //trace!("poll_write: queued {:?}", n);

            // Make the buffer writes visible before triggering the IRQ.
            compiler_fence(Ordering::SeqCst);
            U::Interrupt::pend();

            Poll::Ready(Ok(n))
        })
    }

    /// Try writing a buffer without waiting, returning how many bytes were written.
    ///
    /// Returns `Ok(0)` (rather than blocking) when the TX ring buffer is full.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        //trace!("poll_write: {:?}", buf.len());
        let s = U::buffered_state();
        let mut tx = unsafe { s.tx_buf.writer() };

        let tx_buf = tx.push_slice();
        if tx_buf.is_empty() {
            return Ok(0);
        }

        let n = min(tx_buf.len(), buf.len());
        tx_buf[..n].copy_from_slice(&buf[..n]);
        tx.push_done(n);

        //trace!("poll_write: queued {:?}", n);

        // Make the buffer writes visible before triggering the IRQ.
        compiler_fence(Ordering::SeqCst);
        U::Interrupt::pend();

        Ok(n)
    }

    /// Flush this output stream, ensuring that all intermediately buffered contents reach their destination.
    ///
    /// Completes once the TX ring buffer is empty; the IRQ handler pops bytes
    /// from it on each TX DMA END event and wakes this future.
    pub fn flush(&mut self) -> impl Future<Output = Result<(), Error>> + '_ {
        poll_fn(move |cx| {
            //trace!("poll_flush");
            let ss = U::state();
            let s = U::buffered_state();
            if !s.tx_buf.is_empty() {
                //trace!("poll_flush: pending");
                ss.tx_waker.register(cx.waker());
                return Poll::Pending;
            }

            Poll::Ready(Ok(()))
        })
    }
}
422
impl<'a, U: UarteInstance> Drop for BufferedUarteTx<'a, U> {
    fn drop(&mut self) {
        let r = U::regs();

        // NOTE(review): this masks TXDRDY/DMATXREADY/TXSTOPPED, but the
        // constructor enabled DMATXEND (`set_dmatxend`) — confirm whether the
        // TX DMA END interrupt should also be cleared here.
        r.intenclr().write(|w| {
            w.set_txdrdy(true);
            w.set_dmatxready(true);
            w.set_txstopped(true);
        });
        r.events_txstopped().write_value(0);
        // Stop the TX DMA and busy-wait until the hardware confirms the stop
        // (TXSTOPPED event) before releasing the buffer it may still read.
        r.tasks_dma().tx().stop().write_value(1);
        while r.events_txstopped().read() == 0 {}

        // The buffer memory is only borrowed for 'a; release it now.
        let s = U::buffered_state();
        unsafe { s.tx_buf.deinit() }

        // Drop this half's reference; `drop_tx_rx` handles final teardown.
        let s = U::state();
        drop_tx_rx(r, s);
    }
}
443
/// Reader (RX) part of the buffered UARTE driver.
pub struct BufferedUarteRx<'d, U: UarteInstance> {
    // Keeps the UARTE peripheral alive for 'd; the actual RX state lives in
    // `U::buffered_state()`.
    _peri: Peri<'d, U>,
}
448
impl<'d, U: UarteInstance> BufferedUarteRx<'d, U> {
    /// Create a new BufferedUarte without hardware flow control.
    // NOTE(review): `_irq` comes before `rxd` here, unlike every other
    // constructor in this file — confirm the intended parameter order.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        uarte: Peri<'d, U>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), None, config, rx_buffer)
    }

    /// Create a new BufferedUarte with hardware flow control (RTS/CTS)
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_rts(
        uarte: Peri<'d, U>,
        rxd: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), Some(rts.into()), config, rx_buffer)
    }

    // Standalone-RX constructor: configure, set up, then enable the UARTE and
    // arm the interrupt.
    #[allow(clippy::too_many_arguments)]
    fn new_inner(
        peri: Peri<'d, U>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        configure(U::regs(), config, rts.is_some());

        let this = Self::new_innerer(peri, rxd, rts, rx_buffer);

        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend so the IRQ handler runs once and starts the first RX DMA
        // transfer.
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };

        // RX-only usage: a single reference keeps the peripheral alive.
        U::state().tx_rx_refcount.store(1, Ordering::Relaxed);

        this
    }

    // Pin/buffer/interrupt setup shared with the combined `BufferedUarte`
    // constructor (which performs enable/pend itself).
    #[allow(clippy::too_many_arguments)]
    fn new_innerer(
        peri: Peri<'d, U>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();

        configure_rx_pins(r, rxd, rts);

        // Initialize state
        let s = U::buffered_state();
        // The IRQ handler issues DMA chunks of at most half the ring buffer,
        // so capping the ring at 2 * EASY_DMA_SIZE keeps every chunk within
        // the EasyDMA transfer limit.
        let rx_len = rx_buffer.len().min(EASY_DMA_SIZE * 2);
        let rx_ptr = rx_buffer.as_mut_ptr();
        unsafe { s.rx_buf.init(rx_ptr, rx_len) };

        // clear errors
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);

        r.events_error().write_value(0);
        r.events_dma().rx().end().write_value(0);

        // set timeout-to-stop short
        // Per the module docs, stopping the RX DMA on frame timeout is what
        // makes rx.dma.amount reliable on nrf54l parts.
        r.shorts().write(|w| {
            w.set_frametimeout_dma_rx_stop(true);
        });

        // set default timeout (units per the FRAMETIMEOUT register — see the
        // chip reference manual)
        r.frametimeout().write_value(pac::uarte::regs::Frametimeout(0x10));

        // Enable interrupts
        r.intenset().write(|w| {
            // NOTE(review): enabling the TX DMA END interrupt from the RX
            // constructor looks like a copy/paste from the TX path — confirm
            // it is intentional (harmless when the shared IRQ also serves TX).
            w.set_dmatxend(true);
            w.set_error(true);
            w.set_dmarxend(true);
        });

        Self { _peri: peri }
    }

    /// Pull some bytes from this source into the specified buffer, returning how many bytes were read.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        let data = self.fill_buf().await?;
        let n = data.len().min(buf.len());
        buf[..n].copy_from_slice(&data[..n]);
        self.consume(n);
        Ok(n)
    }

    /// Return the contents of the internal buffer, filling it with more data from the inner reader if it is empty.
    pub fn fill_buf(&mut self) -> impl Future<Output = Result<&'_ [u8], Error>> {
        poll_fn(move |cx| {
            // Order our reads of the ring buffer after the IRQ/DMA writes.
            compiler_fence(Ordering::SeqCst);
            //trace!("poll_read");

            let s = U::buffered_state();
            let ss = U::state();
            let mut rx = unsafe { s.rx_buf.reader() };

            let (ptr, n) = rx.pop_buf();
            if n == 0 {
                //trace!("  empty");
                ss.rx_waker.register(cx.waker());
                Poll::Pending
            } else {
                // SAFETY: (ptr, n) is a valid filled region of the ring
                // buffer, as reported by the reader.
                Poll::Ready(Ok(unsafe { slice::from_raw_parts(ptr, n) }))
            }
        })
    }

    /// Tell this buffer that `amt` bytes have been consumed from the buffer, so they should no longer be returned in calls to `fill_buf`.
    pub fn consume(&mut self, amt: usize) {
        if amt == 0 {
            return;
        }

        let s = U::buffered_state();
        let mut rx = unsafe { s.rx_buf.reader() };
        rx.pop_done(amt);
    }

    /// we are ready to read if there is data in the buffer
    fn read_ready() -> Result<bool, Error> {
        let state = U::buffered_state();
        Ok(!state.rx_buf.is_empty())
    }
}
585
impl<'a, U: UarteInstance> Drop for BufferedUarteRx<'a, U> {
    fn drop(&mut self) {
        let r = U::regs();

        // NOTE(review): only RXTO is masked here, while the constructor
        // enabled ERROR and DMARXEND — confirm those should not also be
        // cleared, and whether the RX DMA should be stopped before the buffer
        // is released (cf. the TX drop path, which stops and waits).
        r.intenclr().write(|w| {
            w.set_rxto(true);
        });
        r.events_rxto().write_value(0);

        // The buffer memory is only borrowed for 'a; release it now.
        let s = U::buffered_state();
        unsafe { s.rx_buf.deinit() }

        // Drop this half's reference; `drop_tx_rx` handles final teardown.
        let s = U::state();
        drop_tx_rx(r, s);
    }
}
602
603mod _embedded_io {
604 use super::*;
605
606 impl embedded_io_async::Error for Error {
607 fn kind(&self) -> embedded_io_async::ErrorKind {
608 match *self {}
609 }
610 }
611
612 impl<'d, U: UarteInstance> embedded_io_async::ErrorType for BufferedUarte<'d, U> {
613 type Error = Error;
614 }
615
616 impl<'d, U: UarteInstance> embedded_io_async::ErrorType for BufferedUarteRx<'d, U> {
617 type Error = Error;
618 }
619
620 impl<'d, U: UarteInstance> embedded_io_async::ErrorType for BufferedUarteTx<'d, U> {
621 type Error = Error;
622 }
623
624 impl<'d, U: UarteInstance> embedded_io_async::Read for BufferedUarte<'d, U> {
625 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
626 self.read(buf).await
627 }
628 }
629
630 impl<'d: 'd, U: UarteInstance> embedded_io_async::Read for BufferedUarteRx<'d, U> {
631 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
632 self.read(buf).await
633 }
634 }
635
636 impl<'d, U: UarteInstance> embedded_io_async::ReadReady for BufferedUarte<'d, U> {
637 fn read_ready(&mut self) -> Result<bool, Self::Error> {
638 BufferedUarteRx::<'d, U>::read_ready()
639 }
640 }
641
642 impl<'d, U: UarteInstance> embedded_io_async::ReadReady for BufferedUarteRx<'d, U> {
643 fn read_ready(&mut self) -> Result<bool, Self::Error> {
644 Self::read_ready()
645 }
646 }
647
648 impl<'d, U: UarteInstance> embedded_io_async::BufRead for BufferedUarte<'d, U> {
649 async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> {
650 self.fill_buf().await
651 }
652
653 fn consume(&mut self, amt: usize) {
654 self.consume(amt)
655 }
656 }
657
658 impl<'d: 'd, U: UarteInstance> embedded_io_async::BufRead for BufferedUarteRx<'d, U> {
659 async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> {
660 self.fill_buf().await
661 }
662
663 fn consume(&mut self, amt: usize) {
664 self.consume(amt)
665 }
666 }
667
668 impl<'d, U: UarteInstance> embedded_io_async::Write for BufferedUarte<'d, U> {
669 async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
670 self.write(buf).await
671 }
672
673 async fn flush(&mut self) -> Result<(), Self::Error> {
674 self.flush().await
675 }
676 }
677
678 impl<'d: 'd, U: UarteInstance> embedded_io_async::Write for BufferedUarteTx<'d, U> {
679 async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
680 self.write(buf).await
681 }
682
683 async fn flush(&mut self) -> Result<(), Self::Error> {
684 self.flush().await
685 }
686 }
687}