diff options
Diffstat (limited to 'embassy-nrf/src/buffered_uarte')
| -rw-r--r-- | embassy-nrf/src/buffered_uarte/mod.rs | 14 | ||||
| -rw-r--r-- | embassy-nrf/src/buffered_uarte/v1.rs | 951 | ||||
| -rw-r--r-- | embassy-nrf/src/buffered_uarte/v2.rs | 687 |
3 files changed, 1652 insertions, 0 deletions
diff --git a/embassy-nrf/src/buffered_uarte/mod.rs b/embassy-nrf/src/buffered_uarte/mod.rs new file mode 100644 index 000000000..75d84baac --- /dev/null +++ b/embassy-nrf/src/buffered_uarte/mod.rs | |||
| @@ -0,0 +1,14 @@ | |||
//! Async buffered UART driver.
//!
//! Note that discarding a future from a read or write operation may lead to losing
//! data. For example, when using `futures_util::future::select` and completion occurs
//! on the "other" future, you should capture the incomplete future and continue to use
//! it for the next read or write. This pattern is a consideration for all IO, and not
//! just serial communications.
//!
//! Please also see [crate::uarte] to understand when [BufferedUarte] should be used.

// Select the implementation at compile time: nRF54L parts get the newer
// register-layout implementation (v2); every other chip uses v1.
#[cfg_attr(not(feature = "_nrf54l"), path = "v1.rs")]
#[cfg_attr(feature = "_nrf54l", path = "v2.rs")]
mod _version;

// The two versions expose the same public API, so re-export wholesale.
pub use _version::*;
diff --git a/embassy-nrf/src/buffered_uarte/v1.rs b/embassy-nrf/src/buffered_uarte/v1.rs new file mode 100644 index 000000000..07de22717 --- /dev/null +++ b/embassy-nrf/src/buffered_uarte/v1.rs | |||
| @@ -0,0 +1,951 @@ | |||
| 1 | //! Async buffered UART driver. | ||
| 2 | //! | ||
| 3 | //! Note that discarding a future from a read or write operation may lead to losing | ||
| 4 | //! data. For example, when using `futures_util::future::select` and completion occurs | ||
| 5 | //! on the "other" future, you should capture the incomplete future and continue to use | ||
| 6 | //! it for the next read or write. This pattern is a consideration for all IO, and not | ||
| 7 | //! just serial communications. | ||
| 8 | //! | ||
| 9 | //! Please also see [crate::uarte] to understand when [BufferedUarte] should be used. | ||
| 10 | |||
| 11 | use core::cmp::min; | ||
| 12 | use core::future::{Future, poll_fn}; | ||
| 13 | use core::marker::PhantomData; | ||
| 14 | use core::slice; | ||
| 15 | use core::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering, compiler_fence}; | ||
| 16 | use core::task::Poll; | ||
| 17 | |||
| 18 | use embassy_hal_internal::Peri; | ||
| 19 | use embassy_hal_internal::atomic_ring_buffer::RingBuffer; | ||
| 20 | use pac::uarte::vals; | ||
| 21 | // Re-export SVD variants to allow user to directly set values | ||
| 22 | pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity}; | ||
| 23 | |||
| 24 | use crate::gpio::{AnyPin, Pin as GpioPin}; | ||
| 25 | use crate::interrupt::InterruptExt; | ||
| 26 | use crate::interrupt::typelevel::Interrupt; | ||
| 27 | use crate::ppi::{ | ||
| 28 | self, AnyConfigurableChannel, AnyGroup, Channel, ConfigurableChannel, Event, Group, Ppi, PpiGroup, Task, | ||
| 29 | }; | ||
| 30 | use crate::timer::{Instance as TimerInstance, Timer}; | ||
| 31 | use crate::uarte::{Config, Instance as UarteInstance, configure, configure_rx_pins, configure_tx_pins, drop_tx_rx}; | ||
| 32 | use crate::{EASY_DMA_SIZE, interrupt, pac}; | ||
| 33 | |||
/// Shared driver state, stored in a `static` per UARTE instance and accessed
/// from both the interrupt handler and the task-side futures.
pub(crate) struct State {
    // Ring buffer of bytes queued for transmission.
    tx_buf: RingBuffer,
    // Length of the TX DMA transfer currently in flight; 0 means TX is idle.
    tx_count: AtomicUsize,

    // Ring buffer the RX DMA writes received bytes into.
    rx_buf: RingBuffer,
    // Whether the first RX DMA transfer has been kicked off yet.
    rx_started: AtomicBool,
    // Wrapping counters of started/completed RX DMA transfers. The ISR compares
    // them to detect an ENDRX that fired before the PPI channel was armed.
    rx_started_count: AtomicU8,
    rx_ended_count: AtomicU8,
    // Number of the PPI channel wired as ENDRX -> STARTRX, so the ISR can
    // enable/disable it by raw channel index.
    rx_ppi_ch: AtomicU8,
    // Set by the ISR when the hardware reports an overrun (ERRORSRC.OVERRUN);
    // consumed (swapped to false) by `fill_buf`, which then reports the error.
    rx_overrun: AtomicBool,
}
| 45 | |||
/// UART error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// Buffer Overrun: the peripheral reported a receive overrun
    /// (`ERRORSRC.OVERRUN`), meaning incoming data was lost.
    Overrun,
}
| 54 | |||
impl State {
    /// Const constructor so the state can live in a `static` initializer.
    /// All counters start at zero and no transfer is marked as started.
    pub(crate) const fn new() -> Self {
        Self {
            tx_buf: RingBuffer::new(),
            tx_count: AtomicUsize::new(0),

            rx_buf: RingBuffer::new(),
            rx_started: AtomicBool::new(false),
            rx_started_count: AtomicU8::new(0),
            rx_ended_count: AtomicU8::new(0),
            rx_ppi_ch: AtomicU8::new(0),
            rx_overrun: AtomicBool::new(false),
        }
    }
}
| 70 | |||
/// Interrupt handler.
///
/// Bind this to the UARTE instance's interrupt; the driver constructors take
/// the binding as proof that it is installed.
pub struct InterruptHandler<U: UarteInstance> {
    _phantom: PhantomData<U>,
}
| 75 | |||
impl<U: UarteInstance> interrupt::typelevel::Handler<U::Interrupt> for InterruptHandler<U> {
    // Services both directions: drains RX DMA completions into the RX ring
    // buffer (restarting DMA into freed space), and feeds queued TX ring-buffer
    // bytes into TX DMA transfers. The order of register reads/writes below is
    // deliberate — do not reorder.
    unsafe fn on_interrupt() {
        //trace!("irq: start");
        let r = U::regs();
        let ss = U::state();
        let s = U::buffered_state();

        // `try_writer` fails only if the RX buffer was never initialized
        // (TX-only driver); in that case skip all RX handling.
        if let Some(mut rx) = unsafe { s.rx_buf.try_writer() } {
            let buf_len = s.rx_buf.len();
            // RX DMA works in half-buffer chunks (double buffering).
            let half_len = buf_len / 2;

            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                // Reading ERRORSRC and writing the value back clears the
                // latched error flags.
                let errs = r.errorsrc().read();
                r.errorsrc().write_value(errs);

                if errs.overrun() {
                    // Record the overrun for `fill_buf` to report, and wake
                    // the reader so it sees it promptly.
                    s.rx_overrun.store(true, Ordering::Release);
                    ss.rx_waker.wake();
                }
            }

            // Received some bytes, wake task.
            // RXDRDY is only armed while a reader is waiting; disarm it here
            // so it doesn't fire once per received byte.
            if r.inten().read().rxdrdy() && r.events_rxdrdy().read() != 0 {
                r.intenclr().write(|w| w.set_rxdrdy(true));
                r.events_rxdrdy().write_value(0);
                ss.rx_waker.wake();
            }

            if r.events_dma().rx().end().read() != 0 {
                //trace!("  irq_rx: endrx");
                r.events_dma().rx().end().write_value(0);

                // Count completed RX transfers (wrapping u8 counter).
                let val = s.rx_ended_count.load(Ordering::Relaxed);
                s.rx_ended_count.store(val.wrapping_add(1), Ordering::Relaxed);
            }

            // A transfer just started (hardware event), or nothing has ever
            // been started: try to queue the next half-buffer transfer.
            if r.events_dma().rx().ready().read() != 0 || !s.rx_started.load(Ordering::Relaxed) {
                //trace!("  irq_rx: rxstarted");
                let (ptr, len) = rx.push_buf();
                if len >= half_len {
                    r.events_dma().rx().ready().write_value(0);

                    //trace!("  irq_rx: starting second {:?}", half_len);

                    // Set up the DMA read
                    r.dma().rx().ptr().write_value(ptr as u32);
                    r.dma().rx().maxcnt().write(|w| w.set_maxcnt(half_len as _));

                    let chn = s.rx_ppi_ch.load(Ordering::Relaxed);

                    // Enable endrx -> startrx PPI channel.
                    // From this point on, if endrx happens, startrx is automatically fired.
                    ppi::regs().chenset().write(|w| w.0 = 1 << chn);

                    // It is possible that endrx happened BEFORE enabling the PPI. In this case
                    // the PPI channel doesn't trigger, and we'd hang. We have to detect this
                    // and manually start.

                    // check again in case endrx has happened between the last check and now.
                    if r.events_dma().rx().end().read() != 0 {
                        //trace!("  irq_rx: endrx");
                        r.events_dma().rx().end().write_value(0);

                        let val = s.rx_ended_count.load(Ordering::Relaxed);
                        s.rx_ended_count.store(val.wrapping_add(1), Ordering::Relaxed);
                    }

                    let rx_ended = s.rx_ended_count.load(Ordering::Relaxed);
                    let rx_started = s.rx_started_count.load(Ordering::Relaxed);

                    // If we started the same amount of transfers as ended, the last rxend has
                    // already occured.
                    let rxend_happened = rx_started == rx_ended;

                    // Check if the PPI channel is still enabled. The PPI channel disables itself
                    // when it fires, so if it's still enabled it hasn't fired.
                    let ppi_ch_enabled = ppi::regs().chen().read().ch(chn as _);

                    // if rxend happened, and the ppi channel hasn't fired yet, the rxend got missed.
                    // this condition also naturally matches if `!started`, needed to kickstart the DMA.
                    if rxend_happened && ppi_ch_enabled {
                        //trace!("manually starting.");

                        // disable the ppi ch, it's of no use anymore.
                        ppi::regs().chenclr().write(|w| w.set_ch(chn as _, true));

                        // manually start
                        r.tasks_dma().rx().start().write_value(1);
                    }

                    rx.push_done(half_len);

                    s.rx_started_count.store(rx_started.wrapping_add(1), Ordering::Relaxed);
                    s.rx_started.store(true, Ordering::Relaxed);
                } else {
                    // Less than half a buffer free: stop reacting to "ready"
                    // until `consume()` frees space and re-arms this interrupt.
                    //trace!("  irq_rx: rxstarted no buf");
                    r.intenclr().write(|w| w.set_dmarxready(true));
                }
            }
        }

        // =============================

        // TX side: `try_reader` fails only if the TX buffer was never
        // initialized (RX-only driver).
        if let Some(mut tx) = unsafe { s.tx_buf.try_reader() } {
            // TX end
            if r.events_dma().tx().end().read() != 0 {
                r.events_dma().tx().end().write_value(0);

                // The transfer of `tx_count` bytes finished; release them from
                // the ring buffer and wake any writer waiting for space.
                let n = s.tx_count.load(Ordering::Relaxed);
                //trace!("  irq_tx: endtx {:?}", n);
                tx.pop_done(n);
                ss.tx_waker.wake();
                s.tx_count.store(0, Ordering::Relaxed);
            }

            // If not TXing, start.
            if s.tx_count.load(Ordering::Relaxed) == 0 {
                let (ptr, len) = tx.pop_buf();
                // EasyDMA transfers are limited to EASY_DMA_SIZE bytes.
                let len = len.min(EASY_DMA_SIZE);
                if len != 0 {
                    //trace!("  irq_tx: starting {:?}", len);
                    s.tx_count.store(len, Ordering::Relaxed);

                    // Set up the DMA write
                    r.dma().tx().ptr().write_value(ptr as u32);
                    r.dma().tx().maxcnt().write(|w| w.set_maxcnt(len as _));

                    // Start UARTE Transmit transaction
                    r.tasks_dma().tx().start().write_value(1);
                }
            }
        }

        //trace!("irq: end");
    }
}
| 213 | |||
/// Buffered UARTE driver.
///
/// Owns both the writer ([`BufferedUarteTx`]) and reader ([`BufferedUarteRx`])
/// halves; use [`BufferedUarte::split`] to drive them from independent tasks.
pub struct BufferedUarte<'d> {
    tx: BufferedUarteTx<'d>,
    rx: BufferedUarteRx<'d>,
}
| 219 | |||
// The driver holds no self-referential data, so it is safe to move freely.
impl<'d> Unpin for BufferedUarte<'d> {}
| 221 | |||
impl<'d> BufferedUarte<'d> {
    /// Create a new BufferedUarte without hardware flow control.
    ///
    /// Takes a timer and two PPI channels which are used internally as a
    /// received-byte counter and an ENDRX->STARTRX restart shortcut.
    ///
    /// # Panics
    ///
    /// Panics if `rx_buffer.len()` is odd.
    #[allow(clippy::too_many_arguments)]
    pub fn new<U: UarteInstance, T: TimerInstance>(
        uarte: Peri<'d, U>,
        timer: Peri<'d, T>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel>,
        ppi_group: Peri<'d, impl Group>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(
            uarte,
            timer,
            ppi_ch1.into(),
            ppi_ch2.into(),
            ppi_group.into(),
            rxd.into(),
            txd.into(),
            None,
            None,
            config,
            rx_buffer,
            tx_buffer,
        )
    }

    /// Create a new BufferedUarte with hardware flow control (RTS/CTS)
    ///
    /// # Panics
    ///
    /// Panics if `rx_buffer.len()` is odd.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_rtscts<U: UarteInstance, T: TimerInstance>(
        uarte: Peri<'d, U>,
        timer: Peri<'d, T>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel>,
        ppi_group: Peri<'d, impl Group>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(
            uarte,
            timer,
            ppi_ch1.into(),
            ppi_ch2.into(),
            ppi_group.into(),
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
            rx_buffer,
            tx_buffer,
        )
    }

    // Shared constructor body: configures the peripheral, builds both halves,
    // then enables the peripheral and its interrupt. Order matters: the halves
    // must be fully initialized before the first (pended) interrupt runs.
    #[allow(clippy::too_many_arguments)]
    fn new_inner<U: UarteInstance, T: TimerInstance>(
        peri: Peri<'d, U>,
        timer: Peri<'d, T>,
        ppi_ch1: Peri<'d, AnyConfigurableChannel>,
        ppi_ch2: Peri<'d, AnyConfigurableChannel>,
        ppi_group: Peri<'d, AnyGroup>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();
        let irq = U::Interrupt::IRQ;
        let state = U::state();

        configure(r, config, cts.is_some());

        // SAFETY-ish note: the peripheral handle is duplicated so each half can
        // carry one; the refcount below ensures correct teardown.
        let tx = BufferedUarteTx::new_innerer(unsafe { peri.clone_unchecked() }, txd, cts, tx_buffer);
        let rx = BufferedUarteRx::new_innerer(peri, timer, ppi_ch1, ppi_ch2, ppi_group, rxd, rts, rx_buffer);

        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend once so the ISR kicks off the initial RX DMA transfer.
        irq.pend();
        unsafe { irq.enable() };

        // Two live halves; the last one dropped disables the peripheral.
        state.tx_rx_refcount.store(2, Ordering::Relaxed);

        Self { tx, rx }
    }

    /// Adjust the baud rate to the provided value.
    // The baud-rate register is shared by both directions, so delegating to
    // the TX half affects RX as well.
    pub fn set_baudrate(&mut self, baudrate: Baudrate) {
        self.tx.set_baudrate(baudrate);
    }

    /// Split the UART in reader and writer parts.
    ///
    /// This allows reading and writing concurrently from independent tasks.
    pub fn split(self) -> (BufferedUarteRx<'d>, BufferedUarteTx<'d>) {
        (self.rx, self.tx)
    }

    /// Split the UART in reader and writer parts, by reference.
    ///
    /// The returned halves borrow from `self`, so you can drop them and go back to using
    /// the "un-split" `self`. This allows temporarily splitting the UART.
    pub fn split_by_ref(&mut self) -> (&mut BufferedUarteRx<'d>, &mut BufferedUarteTx<'d>) {
        (&mut self.rx, &mut self.tx)
    }

    /// Pull some bytes from this source into the specified buffer, returning how many bytes were read.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        self.rx.read(buf).await
    }

    /// Return the contents of the internal buffer, filling it with more data from the inner reader if it is empty.
    pub async fn fill_buf(&mut self) -> Result<&[u8], Error> {
        self.rx.fill_buf().await
    }

    /// Tell this buffer that `amt` bytes have been consumed from the buffer, so they should no longer be returned in calls to `fill_buf`.
    pub fn consume(&mut self, amt: usize) {
        self.rx.consume(amt)
    }

    /// Write a buffer into this writer, returning how many bytes were written.
    pub async fn write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        self.tx.write(buf).await
    }

    /// Try writing a buffer without waiting, returning how many bytes were written.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        self.tx.try_write(buf)
    }

    /// Flush this output stream, ensuring that all intermediately buffered contents reach their destination.
    pub async fn flush(&mut self) -> Result<(), Error> {
        self.tx.flush().await
    }
}
| 378 | |||
/// Writer (TX) part of the buffered UARTE driver.
// NOTE(review): the original doc comment said "Reader part", which is wrong —
// this type only transmits.
pub struct BufferedUarteTx<'d> {
    // Register block of the UARTE instance.
    r: pac::uarte::Uarte,
    // Kept to pend the interrupt after queueing bytes.
    _irq: interrupt::Interrupt,
    state: &'static crate::uarte::State,
    buffered_state: &'static State,
    // Ties the borrow of the TX buffer and pins to this value's lifetime.
    _p: PhantomData<&'d ()>,
}
| 387 | |||
impl<'d> BufferedUarteTx<'d> {
    /// Create a new BufferedUarteTx without hardware flow control.
    pub fn new<U: UarteInstance>(
        uarte: Peri<'d, U>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, txd.into(), None, config, tx_buffer)
    }

    /// Create a new BufferedUarteTx with hardware flow control (CTS).
    // NOTE(review): the original doc claimed "Panics if `rx_buffer.len()` is
    // odd", copy-pasted from the RX constructors — this TX-only constructor
    // takes no RX buffer and has no such panic.
    pub fn new_with_cts<U: UarteInstance>(
        uarte: Peri<'d, U>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, txd.into(), Some(cts.into()), config, tx_buffer)
    }

    // Standalone-TX constructor: configures the peripheral, builds the half,
    // then enables the peripheral and interrupt. Refcount is 1 (TX only).
    fn new_inner<U: UarteInstance>(
        peri: Peri<'d, U>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();
        let irq = U::Interrupt::IRQ;
        let state = U::state();
        let _buffered_state = U::buffered_state();

        configure(r, config, cts.is_some());

        let this = Self::new_innerer(peri, txd, cts, tx_buffer);

        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        irq.pend();
        unsafe { irq.enable() };

        state.tx_rx_refcount.store(1, Ordering::Relaxed);

        this
    }

    // Shared setup used both standalone and by `BufferedUarte::new_inner`:
    // configures pins, initializes the TX ring buffer and enables the
    // TX-complete interrupt. Does NOT enable the peripheral — the caller does.
    fn new_innerer<U: UarteInstance>(
        _peri: Peri<'d, U>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();
        let irq = U::Interrupt::IRQ;
        let state = U::state();
        let buffered_state = U::buffered_state();

        configure_tx_pins(r, txd, cts);

        // Initialize state
        buffered_state.tx_count.store(0, Ordering::Relaxed);
        let len = tx_buffer.len();
        // SAFETY: the buffer outlives `'d`, which outlives this driver.
        unsafe { buffered_state.tx_buf.init(tx_buffer.as_mut_ptr(), len) };

        r.events_dma().tx().ready().write_value(0);

        // Enable interrupts
        r.intenset().write(|w| {
            w.set_dmatxend(true);
        });

        Self {
            r,
            _irq: irq,
            state,
            buffered_state,
            _p: PhantomData,
        }
    }

    /// Write a buffer into this writer, returning how many bytes were written.
    ///
    /// Waits until at least one byte fits in the TX ring buffer; may write
    /// fewer bytes than `buf.len()`.
    pub fn write<'a>(&'a mut self, buf: &'a [u8]) -> impl Future<Output = Result<usize, Error>> + 'a + use<'a, 'd> {
        poll_fn(move |cx| {
            //trace!("poll_write: {:?}", buf.len());
            let ss = self.state;
            let s = self.buffered_state;
            let mut tx = unsafe { s.tx_buf.writer() };

            let tx_buf = tx.push_slice();
            if tx_buf.is_empty() {
                // Ring buffer full: wait until the ISR frees space.
                //trace!("poll_write: pending");
                ss.tx_waker.register(cx.waker());
                return Poll::Pending;
            }

            let n = min(tx_buf.len(), buf.len());
            tx_buf[..n].copy_from_slice(&buf[..n]);
            tx.push_done(n);

            //trace!("poll_write: queued {:?}", n);

            // Ensure the bytes are visible before the ISR reads them, then
            // pend the interrupt to kick off the DMA transfer.
            compiler_fence(Ordering::SeqCst);
            self._irq.pend();

            Poll::Ready(Ok(n))
        })
    }

    /// Try writing a buffer without waiting, returning how many bytes were written.
    ///
    /// Returns `Ok(0)` if the TX ring buffer is full.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        //trace!("poll_write: {:?}", buf.len());
        let s = self.buffered_state;
        let mut tx = unsafe { s.tx_buf.writer() };

        let tx_buf = tx.push_slice();
        if tx_buf.is_empty() {
            return Ok(0);
        }

        let n = min(tx_buf.len(), buf.len());
        tx_buf[..n].copy_from_slice(&buf[..n]);
        tx.push_done(n);

        //trace!("poll_write: queued {:?}", n);

        // Same publish-then-pend dance as `write`.
        compiler_fence(Ordering::SeqCst);
        self._irq.pend();

        Ok(n)
    }

    /// Flush this output stream, ensuring that all intermediately buffered contents reach their destination.
    // Completes once the TX ring buffer is empty; the ISR wakes us each time a
    // DMA transfer finishes.
    pub fn flush(&mut self) -> impl Future<Output = Result<(), Error>> + '_ {
        let ss = self.state;
        let s = self.buffered_state;
        poll_fn(move |cx| {
            //trace!("poll_flush");
            if !s.tx_buf.is_empty() {
                //trace!("poll_flush: pending");
                ss.tx_waker.register(cx.waker());
                return Poll::Pending;
            }

            Poll::Ready(Ok(()))
        })
    }

    /// Adjust the baud rate to the provided value.
    // Note: the BAUDRATE register is shared with the RX direction.
    pub fn set_baudrate(&mut self, baudrate: Baudrate) {
        self.r.baudrate().write(|w| w.set_baudrate(baudrate));
    }
}
| 547 | |||
impl<'a> Drop for BufferedUarteTx<'a> {
    // Orderly TX shutdown: mask TX interrupts, stop the DMA transfer and
    // busy-wait for the hardware to confirm (TXSTOPPED), then release the
    // ring buffer and drop our refcount (the last half dropped disables the
    // peripheral via `drop_tx_rx`).
    fn drop(&mut self) {
        let r = self.r;

        r.intenclr().write(|w| {
            w.set_txdrdy(true);
            w.set_dmatxready(true);
            w.set_txstopped(true);
        });
        r.events_txstopped().write_value(0);
        r.tasks_dma().tx().stop().write_value(1);
        // Busy-wait: TXSTOPPED is expected to fire shortly after the STOP
        // task; the buffer must not be reused before the DMA is truly stopped.
        while r.events_txstopped().read() == 0 {}

        // Safe to tear down the ring buffer now that DMA has stopped.
        let s = self.buffered_state;
        unsafe { s.tx_buf.deinit() }

        let s = self.state;
        drop_tx_rx(r, s);
    }
}
| 568 | |||
/// Reader part of the buffered UARTE driver.
pub struct BufferedUarteRx<'d> {
    // Register block of the UARTE instance.
    r: pac::uarte::Uarte,
    state: &'static crate::uarte::State,
    buffered_state: &'static State,
    // Counter-mode timer counting RXDRDY events, i.e. bytes received.
    timer: Timer<'d>,
    // PPI: RXDRDY event -> timer COUNT task (byte counter).
    _ppi_ch1: Ppi<'d, AnyConfigurableChannel, 1, 1>,
    // PPI: RX END event -> RX START task + group-disable (auto-restart).
    _ppi_ch2: Ppi<'d, AnyConfigurableChannel, 1, 2>,
    _ppi_group: PpiGroup<'d, AnyGroup>,
    _p: PhantomData<&'d ()>,
}
| 580 | |||
| 581 | impl<'d> BufferedUarteRx<'d> { | ||
    /// Create a new BufferedUarteRx without hardware flow control.
    ///
    /// # Panics
    ///
    /// Panics if `rx_buffer.len()` is odd.
    #[allow(clippy::too_many_arguments)]
    pub fn new<U: UarteInstance, T: TimerInstance>(
        uarte: Peri<'d, U>,
        timer: Peri<'d, T>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel>,
        ppi_group: Peri<'d, impl Group>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(
            uarte,
            timer,
            ppi_ch1.into(),
            ppi_ch2.into(),
            ppi_group.into(),
            rxd.into(),
            None,
            config,
            rx_buffer,
        )
    }
| 611 | |||
    /// Create a new BufferedUarteRx with hardware flow control (RTS).
    ///
    /// # Panics
    ///
    /// Panics if `rx_buffer.len()` is odd.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_rts<U: UarteInstance, T: TimerInstance>(
        uarte: Peri<'d, U>,
        timer: Peri<'d, T>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel>,
        ppi_group: Peri<'d, impl Group>,
        rxd: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(
            uarte,
            timer,
            ppi_ch1.into(),
            ppi_ch2.into(),
            ppi_group.into(),
            rxd.into(),
            Some(rts.into()),
            config,
            rx_buffer,
        )
    }
| 642 | |||
    // Standalone-RX constructor: configures the peripheral, builds the half,
    // then enables the peripheral and interrupt. Refcount is 1 (RX only).
    #[allow(clippy::too_many_arguments)]
    fn new_inner<U: UarteInstance, T: TimerInstance>(
        peri: Peri<'d, U>,
        timer: Peri<'d, T>,
        ppi_ch1: Peri<'d, AnyConfigurableChannel>,
        ppi_ch2: Peri<'d, AnyConfigurableChannel>,
        ppi_group: Peri<'d, AnyGroup>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();
        let irq = U::Interrupt::IRQ;
        let state = U::state();
        let _buffered_state = U::buffered_state();

        configure(r, config, rts.is_some());

        let this = Self::new_innerer(peri, timer, ppi_ch1, ppi_ch2, ppi_group, rxd, rts, rx_buffer);

        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend once so the ISR starts the first RX DMA transfer.
        irq.pend();
        unsafe { irq.enable() };

        state.tx_rx_refcount.store(1, Ordering::Relaxed);

        this
    }
| 672 | |||
    // Shared setup used both standalone and by `BufferedUarte::new_inner`:
    // configures pins, the RX ring buffer, interrupts, the byte-counting timer
    // and the two PPI channels. Does NOT enable the peripheral — caller does.
    //
    // The RX buffer length must be even because DMA runs in half-buffer chunks.
    #[allow(clippy::too_many_arguments)]
    fn new_innerer<U: UarteInstance, T: TimerInstance>(
        _peri: Peri<'d, U>,
        timer: Peri<'d, T>,
        ppi_ch1: Peri<'d, AnyConfigurableChannel>,
        ppi_ch2: Peri<'d, AnyConfigurableChannel>,
        ppi_group: Peri<'d, AnyGroup>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        assert!(rx_buffer.len() % 2 == 0);

        let r = U::regs();
        let state = U::state();
        let buffered_state = U::buffered_state();

        configure_rx_pins(r, rxd, rts);

        // Initialize state
        buffered_state.rx_started_count.store(0, Ordering::Relaxed);
        buffered_state.rx_ended_count.store(0, Ordering::Relaxed);
        buffered_state.rx_started.store(false, Ordering::Relaxed);
        buffered_state.rx_overrun.store(false, Ordering::Relaxed);
        // Cap so each half-buffer DMA transfer fits in EASY_DMA_SIZE.
        let rx_len = rx_buffer.len().min(EASY_DMA_SIZE * 2);
        // SAFETY: the buffer outlives `'d`, which outlives this driver.
        unsafe { buffered_state.rx_buf.init(rx_buffer.as_mut_ptr(), rx_len) };

        // clear errors
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);

        r.events_dma().rx().ready().write_value(0);
        r.events_error().write_value(0);
        r.events_dma().rx().end().write_value(0);

        // Enable interrupts
        // NOTE(review): `dmatxend` is also enabled here in an RX-only init —
        // presumably harmless/needed for the combined driver; confirm intent.
        r.intenset().write(|w| {
            w.set_dmatxend(true);
            w.set_dmarxready(true);
            w.set_error(true);
            w.set_dmarxend(true);
        });

        // Configure byte counter.
        // The counter wraps at rx_len * 2 (see the wrap handling in fill_buf),
        // via a compare-clear short on CC[1].
        let timer = Timer::new_counter(timer);
        timer.cc(1).write(rx_len as u32 * 2);
        timer.cc(1).short_compare_clear();
        timer.clear();
        timer.start();

        // PPI ch1: every RXDRDY (byte received) event bumps the counter.
        let mut ppi_ch1 = Ppi::new_one_to_one(ppi_ch1, Event::from_reg(r.events_rxdrdy()), timer.task_count());
        ppi_ch1.enable();

        // Publish ch2's channel number so the ISR can arm/disarm it by index.
        buffered_state
            .rx_ppi_ch
            .store(ppi_ch2.number() as u8, Ordering::Relaxed);
        let mut ppi_group = PpiGroup::new(ppi_group);
        // PPI ch2: RX END -> RX START (auto-restart into the next half buffer)
        // plus disabling its own group so it only fires once per arming.
        let mut ppi_ch2 = Ppi::new_one_to_two(
            ppi_ch2,
            Event::from_reg(r.events_dma().rx().end()),
            Task::from_reg(r.tasks_dma().rx().start()),
            ppi_group.task_disable_all(),
        );
        // Starts disabled; the ISR enables it when it queues a transfer.
        ppi_ch2.disable();
        ppi_group.add_channel(&ppi_ch2);

        Self {
            r,
            state,
            buffered_state,
            timer,
            _ppi_ch1: ppi_ch1,
            _ppi_ch2: ppi_ch2,
            _ppi_group: ppi_group,
            _p: PhantomData,
        }
    }
| 750 | |||
| 751 | /// Pull some bytes from this source into the specified buffer, returning how many bytes were read. | ||
| 752 | pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> { | ||
| 753 | let data = self.fill_buf().await?; | ||
| 754 | let n = data.len().min(buf.len()); | ||
| 755 | buf[..n].copy_from_slice(&data[..n]); | ||
| 756 | self.consume(n); | ||
| 757 | Ok(n) | ||
| 758 | } | ||
| 759 | |||
    /// Return the contents of the internal buffer, filling it with more data from the inner reader if it is empty.
    ///
    /// Returns [`Error::Overrun`] if the hardware reported an RX overrun since
    /// the last call. The write position is derived from the hardware byte
    /// counter (timer CC capture) rather than the ring buffer's `end` index,
    /// so bytes become visible as soon as DMA lands them.
    pub fn fill_buf(&mut self) -> impl Future<Output = Result<&'_ [u8], Error>> {
        let r = self.r;
        let s = self.buffered_state;
        let ss = self.state;
        let timer = &self.timer;
        poll_fn(move |cx| {
            // Ensure DMA-written bytes are observed after the counter read.
            compiler_fence(Ordering::SeqCst);
            //trace!("poll_read");

            // Consume a pending overrun flag set by the ISR.
            if s.rx_overrun.swap(false, Ordering::Acquire) {
                return Poll::Ready(Err(Error::Overrun));
            }

            // Read the RXDRDY counter.
            timer.cc(0).capture();
            let mut end = timer.cc(0).read() as usize;
            //trace!("  rxdrdy count = {:?}", end);

            // We've set a compare channel that resets the counter to 0 when it reaches `len*2`.
            // However, it's unclear if that's instant, or there's a small window where you can
            // still read `len()*2`.
            // This could happen if in one clock cycle the counter is updated, and in the next the
            // clear takes effect. The docs are very sparse, they just say "Task delays: After TIMER
            // is started, the CLEAR, COUNT, and STOP tasks are guaranteed to take effect within one
            // clock cycle of the PCLK16M." :shrug:
            // So, we wrap the counter ourselves, just in case.
            if end > s.rx_buf.len() * 2 {
                end = 0
            }

            // This logic mirrors `atomic_ring_buffer::Reader::pop_buf()`
            let mut start = s.rx_buf.start.load(Ordering::Relaxed);
            let len = s.rx_buf.len();
            if start == end {
                // No data yet: arm the per-byte RXDRDY interrupt so the ISR
                // wakes us on the next received byte, then park.
                //trace!("  empty");
                ss.rx_waker.register(cx.waker());
                r.intenset().write(|w| w.set_rxdrdy(true));
                return Poll::Pending;
            }

            // Indices live in [0, 2*len); fold into [0, len) buffer offsets.
            if start >= len {
                start -= len
            }
            if end >= len {
                end -= len
            }

            // Contiguous readable run; if the data wraps, return only the part
            // up to the end of the buffer — the caller gets the rest next call.
            let n = if end > start { end - start } else { len - start };
            assert!(n != 0);
            //trace!("  uarte ringbuf: pop_buf {:?}..{:?}", start, start + n);

            let buf = s.rx_buf.buf.load(Ordering::Relaxed);
            // SAFETY: [start, start+n) is within the initialized ring buffer
            // and only contains bytes the DMA has already written.
            Poll::Ready(Ok(unsafe { slice::from_raw_parts(buf.add(start), n) }))
        })
    }
| 816 | |||
    /// Tell this buffer that `amt` bytes have been consumed from the buffer, so they should no longer be returned in calls to `fill_buf`.
    pub fn consume(&mut self, amt: usize) {
        // Nothing consumed: skip the ring-buffer update and interrupt poke.
        if amt == 0 {
            return;
        }

        let s = self.buffered_state;
        // SAFETY: the buffer was initialized in the constructor and this half
        // is the sole reader of `rx_buf`.
        let mut rx = unsafe { s.rx_buf.reader() };
        rx.pop_done(amt);
        // Re-enable the DMA-RX-ready interrupt so the handler notices the
        // newly freed ring space.
        self.r.intenset().write(|w| w.set_dmarxready(true));
    }
| 828 | |||
| 829 | /// we are ready to read if there is data in the buffer | ||
| 830 | fn read_ready(&self) -> Result<bool, Error> { | ||
| 831 | let state = self.buffered_state; | ||
| 832 | if state.rx_overrun.swap(false, Ordering::Acquire) { | ||
| 833 | return Err(Error::Overrun); | ||
| 834 | } | ||
| 835 | Ok(!state.rx_buf.is_empty()) | ||
| 836 | } | ||
| 837 | } | ||
| 838 | |||
impl<'a> Drop for BufferedUarteRx<'a> {
    fn drop(&mut self) {
        // Disable the PPI channels first so hardware stops (re)starting RX DMA.
        self._ppi_group.disable_all();

        let r = self.r;

        // Stop the RXDRDY-counting timer.
        self.timer.stop();

        // Mask all RX-related interrupts before tearing down.
        r.intenclr().write(|w| {
            w.set_rxdrdy(true);
            w.set_dmarxready(true);
            w.set_rxto(true);
        });
        r.events_rxto().write_value(0);
        r.tasks_dma().rx().stop().write_value(1);
        // Busy-wait for the RX-timeout event confirming the DMA transfer has
        // stopped, so the ring buffer below can be safely deinitialized.
        while r.events_rxto().read() == 0 {}

        let s = self.buffered_state;
        // SAFETY: DMA is stopped (confirmed above); no one writes the buffer.
        unsafe { s.rx_buf.deinit() }

        let s = self.state;
        // Shared teardown — presumably releases the peripheral once both
        // halves have been dropped; see `drop_tx_rx` in `crate::uarte`.
        drop_tx_rx(r, s);
    }
}
| 863 | |||
| 864 | mod _embedded_io { | ||
| 865 | use super::*; | ||
| 866 | |||
| 867 | impl embedded_io_async::Error for Error { | ||
| 868 | fn kind(&self) -> embedded_io_async::ErrorKind { | ||
| 869 | match *self { | ||
| 870 | Error::Overrun => embedded_io_async::ErrorKind::OutOfMemory, | ||
| 871 | } | ||
| 872 | } | ||
| 873 | } | ||
| 874 | |||
| 875 | impl<'d> embedded_io_async::ErrorType for BufferedUarte<'d> { | ||
| 876 | type Error = Error; | ||
| 877 | } | ||
| 878 | |||
| 879 | impl<'d> embedded_io_async::ErrorType for BufferedUarteRx<'d> { | ||
| 880 | type Error = Error; | ||
| 881 | } | ||
| 882 | |||
| 883 | impl<'d> embedded_io_async::ErrorType for BufferedUarteTx<'d> { | ||
| 884 | type Error = Error; | ||
| 885 | } | ||
| 886 | |||
| 887 | impl<'d> embedded_io_async::Read for BufferedUarte<'d> { | ||
| 888 | async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> { | ||
| 889 | self.read(buf).await | ||
| 890 | } | ||
| 891 | } | ||
| 892 | |||
| 893 | impl<'d> embedded_io_async::Read for BufferedUarteRx<'d> { | ||
| 894 | async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> { | ||
| 895 | self.read(buf).await | ||
| 896 | } | ||
| 897 | } | ||
| 898 | |||
| 899 | impl<'d> embedded_io_async::ReadReady for BufferedUarte<'d> { | ||
| 900 | fn read_ready(&mut self) -> Result<bool, Self::Error> { | ||
| 901 | self.rx.read_ready() | ||
| 902 | } | ||
| 903 | } | ||
| 904 | |||
| 905 | impl<'d> embedded_io_async::ReadReady for BufferedUarteRx<'d> { | ||
| 906 | fn read_ready(&mut self) -> Result<bool, Self::Error> { | ||
| 907 | let state = self.buffered_state; | ||
| 908 | Ok(!state.rx_buf.is_empty()) | ||
| 909 | } | ||
| 910 | } | ||
| 911 | |||
| 912 | impl<'d> embedded_io_async::BufRead for BufferedUarte<'d> { | ||
| 913 | async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> { | ||
| 914 | self.fill_buf().await | ||
| 915 | } | ||
| 916 | |||
| 917 | fn consume(&mut self, amt: usize) { | ||
| 918 | self.consume(amt) | ||
| 919 | } | ||
| 920 | } | ||
| 921 | |||
| 922 | impl<'d> embedded_io_async::BufRead for BufferedUarteRx<'d> { | ||
| 923 | async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> { | ||
| 924 | self.fill_buf().await | ||
| 925 | } | ||
| 926 | |||
| 927 | fn consume(&mut self, amt: usize) { | ||
| 928 | self.consume(amt) | ||
| 929 | } | ||
| 930 | } | ||
| 931 | |||
| 932 | impl<'d> embedded_io_async::Write for BufferedUarte<'d> { | ||
| 933 | async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> { | ||
| 934 | self.write(buf).await | ||
| 935 | } | ||
| 936 | |||
| 937 | async fn flush(&mut self) -> Result<(), Self::Error> { | ||
| 938 | self.flush().await | ||
| 939 | } | ||
| 940 | } | ||
| 941 | |||
| 942 | impl<'d> embedded_io_async::Write for BufferedUarteTx<'d> { | ||
| 943 | async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> { | ||
| 944 | self.write(buf).await | ||
| 945 | } | ||
| 946 | |||
| 947 | async fn flush(&mut self) -> Result<(), Self::Error> { | ||
| 948 | self.flush().await | ||
| 949 | } | ||
| 950 | } | ||
| 951 | } | ||
diff --git a/embassy-nrf/src/buffered_uarte/v2.rs b/embassy-nrf/src/buffered_uarte/v2.rs new file mode 100644 index 000000000..d0d2d97d1 --- /dev/null +++ b/embassy-nrf/src/buffered_uarte/v2.rs | |||
| @@ -0,0 +1,687 @@ | |||
| 1 | //! Async buffered UART driver. | ||
| 2 | //! | ||
| 3 | //! Note that discarding a future from a read or write operation may lead to losing | ||
| 4 | //! data. For example, when using `futures_util::future::select` and completion occurs | ||
| 5 | //! on the "other" future, you should capture the incomplete future and continue to use | ||
| 6 | //! it for the next read or write. This pattern is a consideration for all IO, and not | ||
| 7 | //! just serial communications. | ||
| 8 | //! | ||
| 9 | //! Please also see [crate::uarte] to understand when [BufferedUarte] should be used. | ||
| 10 | //! | ||
| 11 | //! The code is based on the generic buffered_uarte implementation but uses the nrf54l | ||
| 12 | //! frame timeout event to correctly determine the size of transferred data. | ||
| 13 | //! Counting of rxrdy events, used in the generic implementation, cannot be applied | ||
| 14 | //! to nrf54l chips, as they buffer up to 4 bytes in a single DMA transaction. | ||
| 15 | //! The only reliable way to find the number of bytes received is to stop the transfer, | ||
| 16 | //! wait for the DMA stopped event, and read the value in the rx.dma.amount register. | ||
| 17 | //! This also flushes all in-flight data to RAM. | ||
| 18 | |||
| 19 | use core::cmp::min; | ||
| 20 | use core::future::{Future, poll_fn}; | ||
| 21 | use core::marker::PhantomData; | ||
| 22 | use core::slice; | ||
| 23 | use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering, compiler_fence}; | ||
| 24 | use core::task::Poll; | ||
| 25 | |||
| 26 | use embassy_hal_internal::Peri; | ||
| 27 | use embassy_hal_internal::atomic_ring_buffer::RingBuffer; | ||
| 28 | use pac::uarte::vals; | ||
| 29 | // Re-export SVD variants to allow user to directly set values | ||
| 30 | pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity}; | ||
| 31 | |||
| 32 | use crate::gpio::{AnyPin, Pin as GpioPin}; | ||
| 33 | use crate::interrupt::typelevel::Interrupt; | ||
| 34 | use crate::uarte::{Config, Instance as UarteInstance, configure, configure_rx_pins, configure_tx_pins, drop_tx_rx}; | ||
| 35 | use crate::{EASY_DMA_SIZE, interrupt, pac}; | ||
| 36 | |||
// Driver state shared between the task-side halves and the interrupt handler.
pub(crate) struct State {
    // TX ring buffer: filled by `write`/`try_write`, drained by the IRQ handler.
    tx_buf: RingBuffer,
    // Byte count of the in-flight TX DMA transfer; 0 means TX is idle.
    tx_count: AtomicUsize,

    // RX ring buffer: filled by the IRQ handler from DMA, drained by `read`.
    rx_buf: RingBuffer,
    // Set once the first RX DMA transfer has been started by the IRQ handler.
    rx_started: AtomicBool,
}
| 44 | |||
/// UART error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    // No errors for now. The enum is `#[non_exhaustive]` so variants can be
    // added later without breaking downstream matches.
}
| 52 | |||
| 53 | impl State { | ||
| 54 | pub(crate) const fn new() -> Self { | ||
| 55 | Self { | ||
| 56 | tx_buf: RingBuffer::new(), | ||
| 57 | tx_count: AtomicUsize::new(0), | ||
| 58 | |||
| 59 | rx_buf: RingBuffer::new(), | ||
| 60 | rx_started: AtomicBool::new(false), | ||
| 61 | } | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
/// Interrupt handler.
///
/// Must be bound to the instance's interrupt (see the `Binding` parameter on
/// the driver constructors) so DMA completion and error events are serviced.
pub struct InterruptHandler<U: UarteInstance> {
    _phantom: PhantomData<U>,
}
| 69 | |||
| 70 | impl<U: UarteInstance> interrupt::typelevel::Handler<U::Interrupt> for InterruptHandler<U> { | ||
| 71 | unsafe fn on_interrupt() { | ||
| 72 | info!("irq: start"); | ||
| 73 | let r = U::regs(); | ||
| 74 | let ss = U::state(); | ||
| 75 | let s = U::buffered_state(); | ||
| 76 | |||
| 77 | if let Some(mut rx) = unsafe { s.rx_buf.try_writer() } { | ||
| 78 | let buf_len = s.rx_buf.len(); | ||
| 79 | let half_len = buf_len / 2; | ||
| 80 | |||
| 81 | if r.events_error().read() != 0 { | ||
| 82 | r.events_error().write_value(0); | ||
| 83 | let errs = r.errorsrc().read(); | ||
| 84 | r.errorsrc().write_value(errs); | ||
| 85 | |||
| 86 | if errs.overrun() { | ||
| 87 | panic!("BufferedUarte UART overrun"); | ||
| 88 | } | ||
| 89 | } | ||
| 90 | |||
| 91 | let first_run = !s.rx_started.swap(true, Ordering::Relaxed); | ||
| 92 | if r.events_dma().rx().end().read() != 0 || first_run { | ||
| 93 | //trace!(" irq_rx: endrx"); | ||
| 94 | r.events_dma().rx().end().write_value(0); | ||
| 95 | |||
| 96 | if !first_run { | ||
| 97 | // Received some bytes, wake task. | ||
| 98 | let rxed = r.dma().rx().amount().read().amount() as usize; | ||
| 99 | rx.push_done(rxed); | ||
| 100 | ss.rx_waker.wake(); | ||
| 101 | } | ||
| 102 | |||
| 103 | let (ptr, len) = rx.push_buf(); | ||
| 104 | if len == 0 { | ||
| 105 | panic!("BufferedUarte buffer overrun"); | ||
| 106 | } | ||
| 107 | |||
| 108 | let len = if len > half_len { half_len } else { len }; | ||
| 109 | |||
| 110 | // Set up the DMA read | ||
| 111 | r.dma().rx().ptr().write_value(ptr as u32); | ||
| 112 | r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _)); | ||
| 113 | |||
| 114 | // manually start | ||
| 115 | r.tasks_dma().rx().start().write_value(1); | ||
| 116 | } | ||
| 117 | } | ||
| 118 | |||
| 119 | // ============================= | ||
| 120 | |||
| 121 | if let Some(mut tx) = unsafe { s.tx_buf.try_reader() } { | ||
| 122 | // TX end | ||
| 123 | if r.events_dma().tx().end().read() != 0 { | ||
| 124 | r.events_dma().tx().end().write_value(0); | ||
| 125 | |||
| 126 | let n = s.tx_count.load(Ordering::Relaxed); | ||
| 127 | //trace!(" irq_tx: endtx {:?}", n); | ||
| 128 | tx.pop_done(n); | ||
| 129 | ss.tx_waker.wake(); | ||
| 130 | s.tx_count.store(0, Ordering::Relaxed); | ||
| 131 | } | ||
| 132 | |||
| 133 | // If not TXing, start. | ||
| 134 | if s.tx_count.load(Ordering::Relaxed) == 0 { | ||
| 135 | let (ptr, len) = tx.pop_buf(); | ||
| 136 | let len = len.min(EASY_DMA_SIZE); | ||
| 137 | if len != 0 { | ||
| 138 | //trace!(" irq_tx: starting {:?}", len); | ||
| 139 | s.tx_count.store(len, Ordering::Relaxed); | ||
| 140 | |||
| 141 | // Set up the DMA write | ||
| 142 | r.dma().tx().ptr().write_value(ptr as u32); | ||
| 143 | r.dma().tx().maxcnt().write(|w| w.set_maxcnt(len as _)); | ||
| 144 | |||
| 145 | // Start UARTE Transmit transaction | ||
| 146 | r.tasks_dma().tx().start().write_value(1); | ||
| 147 | } | ||
| 148 | } | ||
| 149 | } | ||
| 150 | |||
| 151 | //trace!("irq: end"); | ||
| 152 | } | ||
| 153 | } | ||
| 154 | |||
/// Buffered UARTE driver.
///
/// Owns both the writer and reader halves; use [`Self::split`] to use them
/// from independent tasks.
pub struct BufferedUarte<'d, U: UarteInstance> {
    tx: BufferedUarteTx<'d, U>,
    rx: BufferedUarteRx<'d, U>,
}
| 160 | |||
| 161 | impl<'d, U: UarteInstance> Unpin for BufferedUarte<'d, U> {} | ||
| 162 | |||
impl<'d, U: UarteInstance> BufferedUarte<'d, U> {
    /// Create a new BufferedUarte without hardware flow control.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        uarte: Peri<'d, U>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), txd.into(), None, None, config, rx_buffer, tx_buffer)
    }

    /// Create a new BufferedUarte with hardware flow control (RTS/CTS)
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_rtscts(
        uarte: Peri<'d, U>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(
            uarte,
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
            rx_buffer,
            tx_buffer,
        )
    }

    // Shared constructor: configures the peripheral, sets up both halves,
    // then enables the UARTE and its interrupt.
    #[allow(clippy::too_many_arguments)]
    fn new_inner(
        peri: Peri<'d, U>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        configure(U::regs(), config, cts.is_some());

        // Each half gets its own handle to the peripheral; the refcount below
        // tracks both for teardown accounting in `drop_tx_rx`.
        let tx = BufferedUarteTx::new_innerer(unsafe { peri.clone_unchecked() }, txd, cts, tx_buffer);
        let rx = BufferedUarteRx::new_innerer(peri, rxd, rts, rx_buffer);

        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend once so the handler starts the initial RX DMA transfer (and any
        // queued TX) as soon as the interrupt is enabled.
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };

        // Both halves alive; each Drop presumably decrements via `drop_tx_rx`.
        U::state().tx_rx_refcount.store(2, Ordering::Relaxed);

        Self { tx, rx }
    }

    /// Adjust the baud rate to the provided value.
    pub fn set_baudrate(&mut self, baudrate: Baudrate) {
        let r = U::regs();
        r.baudrate().write(|w| w.set_baudrate(baudrate));
    }

    /// Split the UART in reader and writer parts.
    ///
    /// This allows reading and writing concurrently from independent tasks.
    pub fn split(self) -> (BufferedUarteRx<'d, U>, BufferedUarteTx<'d, U>) {
        (self.rx, self.tx)
    }

    /// Split the UART in reader and writer parts, by reference.
    ///
    /// The returned halves borrow from `self`, so you can drop them and go back to using
    /// the "un-split" `self`. This allows temporarily splitting the UART.
    pub fn split_by_ref(&mut self) -> (&mut BufferedUarteRx<'d, U>, &mut BufferedUarteTx<'d, U>) {
        (&mut self.rx, &mut self.tx)
    }

    /// Pull some bytes from this source into the specified buffer, returning how many bytes were read.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        self.rx.read(buf).await
    }

    /// Return the contents of the internal buffer, filling it with more data from the inner reader if it is empty.
    pub async fn fill_buf(&mut self) -> Result<&[u8], Error> {
        self.rx.fill_buf().await
    }

    /// Tell this buffer that `amt` bytes have been consumed from the buffer, so they should no longer be returned in calls to `fill_buf`.
    pub fn consume(&mut self, amt: usize) {
        self.rx.consume(amt)
    }

    /// Write a buffer into this writer, returning how many bytes were written.
    pub async fn write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        self.tx.write(buf).await
    }

    /// Try writing a buffer without waiting, returning how many bytes were written.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        self.tx.try_write(buf)
    }

    /// Flush this output stream, ensuring that all intermediately buffered contents reach their destination.
    pub async fn flush(&mut self) -> Result<(), Error> {
        self.tx.flush().await
    }
}
| 279 | |||
/// Writer part of the buffered UARTE driver.
pub struct BufferedUarteTx<'d, U: UarteInstance> {
    _peri: Peri<'d, U>,
}
| 284 | |||
impl<'d, U: UarteInstance> BufferedUarteTx<'d, U> {
    /// Create a new BufferedUarteTx without hardware flow control.
    pub fn new(
        uarte: Peri<'d, U>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, txd.into(), None, config, tx_buffer)
    }

    /// Create a new BufferedUarte with hardware flow control (RTS/CTS)
    pub fn new_with_cts(
        uarte: Peri<'d, U>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, txd.into(), Some(cts.into()), config, tx_buffer)
    }

    // Standalone-TX constructor: configures, sets up, then enables the
    // peripheral and interrupt.
    fn new_inner(
        peri: Peri<'d, U>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        configure(U::regs(), config, cts.is_some());

        let this = Self::new_innerer(peri, txd, cts, tx_buffer);

        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend once so the handler starts draining the TX buffer immediately.
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };

        // Only the TX half exists in this configuration.
        U::state().tx_rx_refcount.store(1, Ordering::Relaxed);

        this
    }

    // Pin/buffer setup shared with `BufferedUarte::new_inner`. Does NOT
    // enable the peripheral or interrupt; the caller does that.
    fn new_innerer(
        peri: Peri<'d, U>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();

        configure_tx_pins(r, txd, cts);

        // Initialize state
        let s = U::buffered_state();
        s.tx_count.store(0, Ordering::Relaxed);
        let len = tx_buffer.len();
        // SAFETY: `tx_buffer` outlives the driver ('d) and is exclusively
        // owned by this ring buffer until `deinit` in Drop.
        unsafe { s.tx_buf.init(tx_buffer.as_mut_ptr(), len) };

        // NOTE(review): the event cleared here is `tx().ready()` while the
        // interrupt enabled below is `dmatxend` — confirm this pairing is
        // intended.
        r.events_dma().tx().ready().write_value(0);

        // Enable interrupts
        r.intenset().write(|w| {
            w.set_dmatxend(true);
        });

        Self { _peri: peri }
    }

    /// Write a buffer into this writer, returning how many bytes were written.
    pub fn write<'a>(&'a mut self, buf: &'a [u8]) -> impl Future<Output = Result<usize, Error>> + 'a {
        poll_fn(move |cx| {
            //trace!("poll_write: {:?}", buf.len());
            let ss = U::state();
            let s = U::buffered_state();
            let mut tx = unsafe { s.tx_buf.writer() };

            let tx_buf = tx.push_slice();
            if tx_buf.is_empty() {
                // Ring buffer full: wait until the IRQ handler drains some.
                //trace!("poll_write: pending");
                ss.tx_waker.register(cx.waker());
                return Poll::Pending;
            }

            let n = min(tx_buf.len(), buf.len());
            tx_buf[..n].copy_from_slice(&buf[..n]);
            tx.push_done(n);

            //trace!("poll_write: queued {:?}", n);

            // Make the buffered bytes visible before pending the interrupt,
            // which kicks off the DMA transfer.
            compiler_fence(Ordering::SeqCst);
            U::Interrupt::pend();

            Poll::Ready(Ok(n))
        })
    }

    /// Try writing a buffer without waiting, returning how many bytes were written.
    ///
    /// Returns `Ok(0)` when the internal buffer is full.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        //trace!("poll_write: {:?}", buf.len());
        let s = U::buffered_state();
        let mut tx = unsafe { s.tx_buf.writer() };

        let tx_buf = tx.push_slice();
        if tx_buf.is_empty() {
            return Ok(0);
        }

        let n = min(tx_buf.len(), buf.len());
        tx_buf[..n].copy_from_slice(&buf[..n]);
        tx.push_done(n);

        //trace!("poll_write: queued {:?}", n);

        // Make the buffered bytes visible before pending the interrupt,
        // which kicks off the DMA transfer.
        compiler_fence(Ordering::SeqCst);
        U::Interrupt::pend();

        Ok(n)
    }

    /// Flush this output stream, ensuring that all intermediately buffered contents reach their destination.
    pub fn flush(&mut self) -> impl Future<Output = Result<(), Error>> + '_ {
        poll_fn(move |cx| {
            //trace!("poll_flush");
            let ss = U::state();
            let s = U::buffered_state();
            // Done when the IRQ handler has fully drained the TX ring buffer.
            if !s.tx_buf.is_empty() {
                //trace!("poll_flush: pending");
                ss.tx_waker.register(cx.waker());
                return Poll::Pending;
            }

            Poll::Ready(Ok(()))
        })
    }
}
| 422 | |||
impl<'a, U: UarteInstance> Drop for BufferedUarteTx<'a, U> {
    fn drop(&mut self) {
        let r = U::regs();

        // Mask TX interrupts before stopping the transfer.
        // NOTE(review): `dmatxend` (enabled in `new_innerer`) is not cleared
        // here — confirm whether it should be added to this mask.
        r.intenclr().write(|w| {
            w.set_txdrdy(true);
            w.set_dmatxready(true);
            w.set_txstopped(true);
        });
        r.events_txstopped().write_value(0);
        r.tasks_dma().tx().stop().write_value(1);
        // Busy-wait until the hardware confirms TX DMA has stopped, so the
        // buffer below can be safely deinitialized.
        while r.events_txstopped().read() == 0 {}

        let s = U::buffered_state();
        // SAFETY: DMA is stopped (confirmed above); no one reads the buffer.
        unsafe { s.tx_buf.deinit() }

        let s = U::state();
        // Shared teardown — presumably releases the peripheral once all
        // halves have been dropped; see `drop_tx_rx` in `crate::uarte`.
        drop_tx_rx(r, s);
    }
}
| 443 | |||
/// Reader part of the buffered UARTE driver.
pub struct BufferedUarteRx<'d, U: UarteInstance> {
    _peri: Peri<'d, U>,
}
| 448 | |||
impl<'d, U: UarteInstance> BufferedUarteRx<'d, U> {
    /// Create a new BufferedUarte without hardware flow control.
    ///
    /// NOTE(review): the `_irq` parameter comes before `rxd` here, unlike
    /// `new_with_rts` where it comes after the pins — confirm whether this
    /// ordering inconsistency is intentional.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        uarte: Peri<'d, U>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), None, config, rx_buffer)
    }

    /// Create a new BufferedUarte with hardware flow control (RTS/CTS)
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_rts(
        uarte: Peri<'d, U>,
        rxd: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), Some(rts.into()), config, rx_buffer)
    }

    // Standalone-RX constructor: configures, sets up, then enables the
    // peripheral and interrupt.
    #[allow(clippy::too_many_arguments)]
    fn new_inner(
        peri: Peri<'d, U>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        configure(U::regs(), config, rts.is_some());

        let this = Self::new_innerer(peri, rxd, rts, rx_buffer);

        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend once so the handler kicks off the initial RX DMA transfer.
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };

        // Only the RX half exists in this configuration.
        U::state().tx_rx_refcount.store(1, Ordering::Relaxed);

        this
    }

    // Pin/buffer setup shared with `BufferedUarte::new_inner`. Does NOT
    // enable the peripheral or interrupt; the caller does that.
    #[allow(clippy::too_many_arguments)]
    fn new_innerer(
        peri: Peri<'d, U>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();

        configure_rx_pins(r, rxd, rts);

        // Initialize state
        let s = U::buffered_state();
        // Each DMA transfer uses at most half the ring (see the interrupt
        // handler), so more than EASY_DMA_SIZE * 2 of buffer is not usable.
        let rx_len = rx_buffer.len().min(EASY_DMA_SIZE * 2);
        let rx_ptr = rx_buffer.as_mut_ptr();
        // SAFETY: `rx_buffer` outlives the driver ('d) and is exclusively
        // owned by this ring buffer until `deinit` in Drop.
        unsafe { s.rx_buf.init(rx_ptr, rx_len) };

        // clear errors
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);

        r.events_error().write_value(0);
        r.events_dma().rx().end().write_value(0);

        // set timeout-to-stop short: a frame timeout stops the RX DMA, which
        // flushes in-flight bytes to RAM and raises the DMA end event.
        r.shorts().write(|w| {
            w.set_frametimeout_dma_rx_stop(true);
        });

        // set default timeout
        r.frametimeout().write_value(pac::uarte::regs::Frametimeout(0x10));

        // Enable interrupts
        // NOTE(review): `dmatxend` is also enabled here, in the RX half's
        // init — confirm whether that is intentional (the shared handler
        // presumably tolerates it, as `tx_buf.try_reader()` guards TX work).
        r.intenset().write(|w| {
            w.set_dmatxend(true);
            w.set_error(true);
            w.set_dmarxend(true);
        });

        Self { _peri: peri }
    }

    /// Pull some bytes from this source into the specified buffer, returning how many bytes were read.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        let data = self.fill_buf().await?;
        let n = data.len().min(buf.len());
        buf[..n].copy_from_slice(&data[..n]);
        self.consume(n);
        Ok(n)
    }

    /// Return the contents of the internal buffer, filling it with more data from the inner reader if it is empty.
    pub fn fill_buf(&mut self) -> impl Future<Output = Result<&'_ [u8], Error>> {
        poll_fn(move |cx| {
            compiler_fence(Ordering::SeqCst);
            //trace!("poll_read");

            let s = U::buffered_state();
            let ss = U::state();
            // SAFETY: the buffer was initialized in the constructor and this
            // half is the sole reader of `rx_buf`.
            let mut rx = unsafe { s.rx_buf.reader() };

            let (ptr, n) = rx.pop_buf();
            if n == 0 {
                // No data yet: wait for the IRQ handler to push some and wake us.
                //trace!("  empty");
                ss.rx_waker.register(cx.waker());
                Poll::Pending
            } else {
                // SAFETY: `ptr..ptr + n` is the contiguous readable region the
                // ring buffer just handed out.
                Poll::Ready(Ok(unsafe { slice::from_raw_parts(ptr, n) }))
            }
        })
    }

    /// Tell this buffer that `amt` bytes have been consumed from the buffer, so they should no longer be returned in calls to `fill_buf`.
    pub fn consume(&mut self, amt: usize) {
        // Nothing consumed: nothing to release.
        if amt == 0 {
            return;
        }

        let s = U::buffered_state();
        let mut rx = unsafe { s.rx_buf.reader() };
        rx.pop_done(amt);
    }

    /// we are ready to read if there is data in the buffer
    fn read_ready() -> Result<bool, Error> {
        let state = U::buffered_state();
        Ok(!state.rx_buf.is_empty())
    }
}
| 585 | |||
impl<'a, U: UarteInstance> Drop for BufferedUarteRx<'a, U> {
    fn drop(&mut self) {
        let r = U::regs();

        // Mask and clear the RX-timeout interrupt/event.
        r.intenclr().write(|w| {
            w.set_rxto(true);
        });
        r.events_rxto().write_value(0);

        // NOTE(review): unlike the TX half's Drop, this does not issue an RX
        // DMA stop task or wait for a stopped event before deinitializing the
        // buffer — confirm the frametimeout short guarantees the transfer is
        // already stopped here.
        let s = U::buffered_state();
        unsafe { s.rx_buf.deinit() }

        let s = U::state();
        // Shared teardown — presumably releases the peripheral once all
        // halves have been dropped; see `drop_tx_rx` in `crate::uarte`.
        drop_tx_rx(r, s);
    }
}
| 602 | |||
| 603 | mod _embedded_io { | ||
| 604 | use super::*; | ||
| 605 | |||
| 606 | impl embedded_io_async::Error for Error { | ||
| 607 | fn kind(&self) -> embedded_io_async::ErrorKind { | ||
| 608 | match *self {} | ||
| 609 | } | ||
| 610 | } | ||
| 611 | |||
| 612 | impl<'d, U: UarteInstance> embedded_io_async::ErrorType for BufferedUarte<'d, U> { | ||
| 613 | type Error = Error; | ||
| 614 | } | ||
| 615 | |||
| 616 | impl<'d, U: UarteInstance> embedded_io_async::ErrorType for BufferedUarteRx<'d, U> { | ||
| 617 | type Error = Error; | ||
| 618 | } | ||
| 619 | |||
| 620 | impl<'d, U: UarteInstance> embedded_io_async::ErrorType for BufferedUarteTx<'d, U> { | ||
| 621 | type Error = Error; | ||
| 622 | } | ||
| 623 | |||
| 624 | impl<'d, U: UarteInstance> embedded_io_async::Read for BufferedUarte<'d, U> { | ||
| 625 | async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> { | ||
| 626 | self.read(buf).await | ||
| 627 | } | ||
| 628 | } | ||
| 629 | |||
| 630 | impl<'d: 'd, U: UarteInstance> embedded_io_async::Read for BufferedUarteRx<'d, U> { | ||
| 631 | async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> { | ||
| 632 | self.read(buf).await | ||
| 633 | } | ||
| 634 | } | ||
| 635 | |||
| 636 | impl<'d, U: UarteInstance> embedded_io_async::ReadReady for BufferedUarte<'d, U> { | ||
| 637 | fn read_ready(&mut self) -> Result<bool, Self::Error> { | ||
| 638 | BufferedUarteRx::<'d, U>::read_ready() | ||
| 639 | } | ||
| 640 | } | ||
| 641 | |||
| 642 | impl<'d, U: UarteInstance> embedded_io_async::ReadReady for BufferedUarteRx<'d, U> { | ||
| 643 | fn read_ready(&mut self) -> Result<bool, Self::Error> { | ||
| 644 | Self::read_ready() | ||
| 645 | } | ||
| 646 | } | ||
| 647 | |||
| 648 | impl<'d, U: UarteInstance> embedded_io_async::BufRead for BufferedUarte<'d, U> { | ||
| 649 | async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> { | ||
| 650 | self.fill_buf().await | ||
| 651 | } | ||
| 652 | |||
| 653 | fn consume(&mut self, amt: usize) { | ||
| 654 | self.consume(amt) | ||
| 655 | } | ||
| 656 | } | ||
| 657 | |||
| 658 | impl<'d: 'd, U: UarteInstance> embedded_io_async::BufRead for BufferedUarteRx<'d, U> { | ||
| 659 | async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> { | ||
| 660 | self.fill_buf().await | ||
| 661 | } | ||
| 662 | |||
| 663 | fn consume(&mut self, amt: usize) { | ||
| 664 | self.consume(amt) | ||
| 665 | } | ||
| 666 | } | ||
| 667 | |||
| 668 | impl<'d, U: UarteInstance> embedded_io_async::Write for BufferedUarte<'d, U> { | ||
| 669 | async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> { | ||
| 670 | self.write(buf).await | ||
| 671 | } | ||
| 672 | |||
| 673 | async fn flush(&mut self) -> Result<(), Self::Error> { | ||
| 674 | self.flush().await | ||
| 675 | } | ||
| 676 | } | ||
| 677 | |||
| 678 | impl<'d: 'd, U: UarteInstance> embedded_io_async::Write for BufferedUarteTx<'d, U> { | ||
| 679 | async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> { | ||
| 680 | self.write(buf).await | ||
| 681 | } | ||
| 682 | |||
| 683 | async fn flush(&mut self) -> Result<(), Self::Error> { | ||
| 684 | self.flush().await | ||
| 685 | } | ||
| 686 | } | ||
| 687 | } | ||
