1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
|
use core::future::poll_fn;
use core::mem;
use core::sync::atomic::{compiler_fence, Ordering};
use core::task::Poll;
use embassy_embedded_hal::SetConfig;
use embedded_io_async::ReadReady;
use futures_util::future::{select, Either};
use super::{rdr, reconfigure, set_baudrate, sr, Config, ConfigError, Error, Info, State, UartRx};
use crate::dma::ReadableRingBuffer;
use crate::gpio::{AnyPin, SealedPin as _};
use crate::mode::Async;
use crate::time::Hertz;
use crate::usart::Regs;
use crate::Peri;
/// Rx-only Ring-buffered UART Driver
///
/// Created with [UartRx::into_ring_buffered]
///
/// ### Notes on 'waiting for bytes'
///
/// The `read(buf)` (but not `read()`) and `read_exact(buf)` functions
/// may need to wait for bytes to arrive, if the ring buffer does not
/// contain enough bytes to fill the buffer passed by the caller of
/// the function, or is empty.
///
/// Waiting for bytes operates in one of three modes, depending on
/// the behavior of the sender, the size of the buffer passed
/// to the function, and the configuration:
///
/// - If the sender sends intermittently, the 'idle line'
/// condition will be detected when the sender stops, and any
/// bytes in the ring buffer will be returned. If there are no
/// bytes in the buffer, the check will be repeated each time the
/// 'idle line' condition is detected, so if the sender sends just
/// a single byte, it will be returned once the 'idle line'
/// condition is detected.
///
/// - If the sender sends continuously, the call will wait until
/// the DMA controller indicates that it has written to either the
/// middle byte or last byte of the ring buffer ('half transfer'
/// or 'transfer complete', respectively). This does not indicate
/// the buffer is half-full or full, though, because the DMA
/// controller does not detect those conditions; it sends an
/// interrupt when those specific buffer addresses have been
/// written.
///
/// - If `eager_reads` is enabled in `config`, the UART interrupt
/// is enabled on all data reception and the call will only wait
/// for at least one byte to be available before returning.
///
/// In the first two cases this will result in variable latency due to the
/// buffering effect. For example, if the baudrate is 2400 bps, and
/// the configuration is 8 data bits, no parity bit, and one stop bit,
/// then a byte will be received every ~4.16ms. If the ring buffer is
/// 32 bytes, then a 'wait for bytes' delay may have to wait for 16
/// bytes in the worst case, resulting in a delay (latency) of
/// ~62.46ms for the first byte in the ring buffer. If the sender
/// sends only 6 bytes and then stops, but the buffer was empty when
/// the read function was called, then those bytes may not be returned
/// until ~24.96ms after the first byte was received (time for 5
/// additional bytes plus the 'idle frame' which triggers the 'idle
/// line' condition).
///
/// Applications subject to this latency must be careful if they
/// also apply timeouts during reception, as it may appear (to
/// them) that the sender has stopped sending when it did not. In
/// the example above, a 50ms timeout (12 bytes at 2400bps) might
/// seem to be reasonable to detect that the sender has stopped
/// sending, but would be falsely triggered in the worst-case
/// buffer delay scenario.
///
/// Note: Enabling `eager_reads` with `RingBufferedUartRx` will enable
/// an UART RXNE interrupt, which will cause an interrupt to occur on
/// every received data byte. The data is still copied using DMA, but
/// there is nevertheless additional processing overhead for each byte.
pub struct RingBufferedUartRx<'d> {
    // USART register block and shared metadata for this instance.
    info: &'static Info,
    // Shared driver state; holds the rx waker and the `eager_reads` threshold.
    state: &'static State,
    // Kernel clock feeding the USART; used for (re)configuring the baudrate.
    kernel_clock: Hertz,
    // RX pin, kept so it can be set as disconnected on drop (None if unused).
    rx: Option<Peri<'d, AnyPin>>,
    // RTS pin, kept so it can be set as disconnected on drop (None if unused).
    rts: Option<Peri<'d, AnyPin>>,
    // DMA ring buffer that continuously receives bytes in the background.
    ring_buf: ReadableRingBuffer<'d, u8>,
}
impl<'d> SetConfig for RingBufferedUartRx<'d> {
type Config = Config;
type ConfigError = ConfigError;
fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
self.set_config(config)
}
}
impl<'d> UartRx<'d, Async> {
    /// Turn the `UartRx` into a buffered uart which can continuously receive in the background
    /// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the
    /// DMA controller, and must be large enough to prevent overflows.
    pub fn into_ring_buffered(mut self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d> {
        // DMA transfer counts are limited to 16 bits, hence the 0xFFFF cap.
        assert!(!dma_buf.is_empty() && dma_buf.len() <= 0xFFFF);

        let opts = Default::default();

        // Safety: we forget the struct before this function returns.
        let rx_dma = self.rx_dma.as_mut().unwrap();
        let request = rx_dma.request;
        let rx_dma = unsafe { rx_dma.channel.clone_unchecked() };

        let info = self.info;
        let state = self.state;
        let kernel_clock = self.kernel_clock;

        // SAFETY: the DMA channel was cloned unchecked above; `self` is
        // forgotten below so the original copy is never dropped, preventing a
        // double release of the channel.
        let ring_buf = unsafe { ReadableRingBuffer::new(rx_dma, request, rdr(info.regs), dma_buf, opts) };

        // SAFETY: same reasoning — `mem::forget(self)` below guarantees the
        // original pin handles are never dropped alongside these clones.
        let rx = unsafe { self.rx.as_ref().map(|x| x.clone_unchecked()) };
        let rts = unsafe { self.rts.as_ref().map(|x| x.clone_unchecked()) };

        // Don't disable the clock
        mem::forget(self);

        RingBufferedUartRx {
            info,
            state,
            kernel_clock,
            rx,
            rts,
            ring_buf,
        }
    }
}
impl<'d> RingBufferedUartRx<'d> {
    /// Reconfigure the driver
    pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> {
        // Publish the eager-read threshold (0 disables eager reads) so the
        // read path knows whether RXNE-driven wakeups are wanted.
        self.state
            .eager_reads
            .store(config.eager_reads.unwrap_or(0), Ordering::Relaxed);
        reconfigure(self.info, self.kernel_clock, config)
    }

    /// Configure and start the DMA backed UART receiver
    ///
    /// Note: This is also done automatically by the read functions if
    /// required.
    pub fn start_uart(&mut self) {
        // Clear the buffer so that it is ready to receive data
        compiler_fence(Ordering::SeqCst);
        self.ring_buf.start();

        let r = self.info.regs;
        // clear all interrupts and DMA Rx Request
        r.cr1().modify(|w| {
            // use RXNE only when returning reads early
            w.set_rxneie(self.state.eager_reads.load(Ordering::Relaxed) > 0);
            // enable parity interrupt if not ParityNone
            w.set_peie(w.pce());
            // enable idle line interrupt
            w.set_idleie(true);
        });
        r.cr3().modify(|w| {
            // enable Error Interrupt: (Frame error, Noise error, Overrun error)
            w.set_eie(true);
            // enable DMA Rx Request
            w.set_dmar(true);
        });
    }

    /// Stop DMA backed UART receiver
    fn stop_uart(&mut self) {
        self.ring_buf.request_pause();

        let r = self.info.regs;
        // clear all interrupts and DMA Rx Request
        r.cr1().modify(|w| {
            // disable RXNE interrupt
            w.set_rxneie(false);
            // disable parity interrupt
            w.set_peie(false);
            // disable idle line interrupt
            w.set_idleie(false);
        });
        r.cr3().modify(|w| {
            // disable Error Interrupt: (Frame error, Noise error, Overrun error)
            w.set_eie(false);
            // disable DMA Rx Request
            w.set_dmar(false);
        });
        compiler_fence(Ordering::SeqCst);
    }

    /// (Re-)start DMA and Uart if it is not running (has not been started yet or has failed), and
    /// check for errors in status register. Error flags are checked/cleared first.
    fn start_dma_or_check_errors(&mut self) -> Result<(), Error> {
        let r = self.info.regs;
        check_idle_and_errors(r)?;
        // DMAR being clear means background reception is not running (never
        // started, or stopped by `stop_uart` after an error) — (re)start it.
        if !r.cr3().read().dmar() {
            self.start_uart();
        }
        Ok(())
    }

    /// Read bytes that are available in the ring buffer, or wait for
    /// bytes to become available and return them.
    ///
    /// Background reception is started if necessary (if `start_uart()` had
    /// not previously been called, or if an error was detected which
    /// caused background reception to be stopped).
    ///
    /// Background reception is terminated when an error is returned.
    /// It must be started again by calling `start_uart()` or by
    /// calling a read function again.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        self.start_dma_or_check_errors()?;

        // In half-duplex mode, we need to disable the Transmitter and enable the Receiver
        // since they can't operate simultaneously on the shared line
        let r = self.info.regs;
        if r.cr3().read().hdsel() && r.cr1().read().te() {
            r.cr1().modify(|reg| {
                reg.set_re(true);
                reg.set_te(false);
            });
        }

        loop {
            match self.ring_buf.read(buf) {
                // Nothing buffered yet: fall through and wait for data below.
                Ok((0, _)) => {}
                Ok((len, _)) => {
                    return Ok(len);
                }
                Err(_) => {
                    // Ring buffer error (e.g. DMA overran the reader): stop
                    // reception; the caller or the next read restarts it.
                    self.stop_uart();
                    return Err(Error::Overrun);
                }
            }

            match self.wait_for_data_or_idle().await {
                Ok(_) => {}
                Err(err) => {
                    self.stop_uart();
                    return Err(err);
                }
            }
        }
    }

    /// Wait for uart idle or dma half-full or full
    async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
        compiler_fence(Ordering::SeqCst);

        loop {
            // Future which completes when idle line is detected
            let s = self.state;
            let mut uart_init = false;
            let uart = poll_fn(|cx| {
                s.rx_waker.register(cx.waker());
                compiler_fence(Ordering::SeqCst);

                // We may have been woken by IDLE or, if eager_reads is set, by RXNE.
                // However, DMA will clear RXNE, so we can't check directly, and because
                // the other future borrows `ring_buf`, we can't check `len()` here either.
                // Instead, return from this future and we'll check the length afterwards.
                let eager = s.eager_reads.load(Ordering::Relaxed) > 0;
                let idle = check_idle_and_errors(self.info.regs)?;
                if idle || (eager && uart_init) {
                    // Idle line is detected, or eager reads is set and some data is available.
                    Poll::Ready(Ok(idle))
                } else {
                    // First poll only registers interest; any subsequent wake
                    // is then treated as "a word arrived" when eager is set.
                    uart_init = true;
                    Poll::Pending
                }
            });

            let mut dma_init = false;
            // Future which completes when the DMA controller indicates it
            // has written to the ring buffer's middle byte, or last byte
            let dma = poll_fn(|cx| {
                self.ring_buf.set_waker(cx.waker());

                // First poll registers the waker and stays pending; being
                // polled again means the DMA interrupt woke us.
                let status = match dma_init {
                    false => Poll::Pending,
                    true => Poll::Ready(()),
                };

                dma_init = true;
                status
            });

            match select(uart, dma).await {
                // UART woke with line idle
                Either::Left((Ok(true), _)) => {
                    return Ok(());
                }
                // UART woke without idle or error: word received
                Either::Left((Ok(false), _)) => {
                    // Return only once the buffer holds at least the configured
                    // eager-read threshold; otherwise keep waiting.
                    let eager = self.state.eager_reads.load(Ordering::Relaxed);
                    if eager > 0 && self.ring_buf.len().unwrap_or(0) >= eager {
                        return Ok(());
                    } else {
                        continue;
                    }
                }
                // UART woke with error
                Either::Left((Err(e), _)) => {
                    return Err(e);
                }
                // DMA woke
                Either::Right(((), _)) => return Ok(()),
            }
        }
    }

    /// Set baudrate
    pub fn set_baudrate(&self, baudrate: u32) -> Result<(), ConfigError> {
        set_baudrate(self.info, self.kernel_clock, baudrate)
    }
}
impl Drop for RingBufferedUartRx<'_> {
    /// Stop background reception, disconnect the pins, and release the
    /// shared tx/rx driver state.
    fn drop(&mut self) {
        // Disable DMA and the RX-related interrupts before touching pins.
        self.stop_uart();
        // `if let` instead of `Option::map` for side effects
        // (clippy::option_map_unit_fn); behavior is unchanged.
        if let Some(rx) = self.rx.as_ref() {
            rx.set_as_disconnected();
        }
        if let Some(rts) = self.rts.as_ref() {
            rts.set_as_disconnected();
        }
        // Let the shared HAL code tear down the tx/rx state.
        super::drop_tx_rx(self.info, self.state);
    }
}
/// Check and clear idle and error interrupts, return true if idle, Err(e) on error
///
/// All flags are read and cleared in a single step, respectively. When more than one flag is set
/// at the same time, all flags will be cleared but only one flag will be reported. So the other
/// flag(s) will gone missing unnoticed. The error flags are checked first, the idle flag last.
///
/// For usart_v1 and usart_v2, all status flags must be handled together anyway because all flags
/// are cleared by a single read to the RDR register.
fn check_idle_and_errors(r: Regs) -> Result<bool, Error> {
    // Critical section is required so that the flags aren't set after read and before clear
    let sr = critical_section::with(|_| {
        // SAFETY: read only and we only use Rx related flags
        let sr = sr(r).read();
        // On v3/v4 hardware, flags are cleared explicitly via the ICR register.
        #[cfg(any(usart_v3, usart_v4))]
        r.icr().write(|w| {
            w.set_idle(true);
            w.set_pe(true);
            w.set_fe(true);
            w.set_ne(true);
            w.set_ore(true);
        });
        #[cfg(not(any(usart_v3, usart_v4)))]
        unsafe {
            // This read also clears the error and idle interrupt flags on v1 (TODO and v2?)
            rdr(r).read_volatile()
        };
        sr
    });
    // Report errors in priority order; only the first set flag is surfaced
    // (see the function docs above for the consequences).
    if sr.pe() {
        Err(Error::Parity)
    } else if sr.fe() {
        Err(Error::Framing)
    } else if sr.ne() {
        Err(Error::Noise)
    } else if sr.ore() {
        Err(Error::Overrun)
    } else {
        // Re-arm the idle-line interrupt on the success path.
        // NOTE(review): presumably it can be masked elsewhere (e.g. by the
        // interrupt handler, not visible in this file) — confirm.
        r.cr1().modify(|w| w.set_idleie(true));
        Ok(sr.idle())
    }
}
// Error type used by the `embedded-io-async` trait implementations below.
impl embedded_io_async::ErrorType for RingBufferedUartRx<'_> {
    type Error = Error;
}
impl embedded_io_async::Read for RingBufferedUartRx<'_> {
async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
self.read(buf).await
}
}
impl embedded_hal_nb::serial::Read for RingBufferedUartRx<'_> {
    /// Non-blocking single-byte read: returns `WouldBlock` when the ring
    /// buffer is empty, the byte otherwise, and stops reception on a ring
    /// buffer error.
    fn read(&mut self) -> nb::Result<u8, Self::Error> {
        self.start_dma_or_check_errors()?;

        let mut byte = [0u8; 1];
        match self.ring_buf.read(&mut byte) {
            Err(_) => {
                // Ring buffer error: halt reception and report an overrun.
                self.stop_uart();
                Err(nb::Error::Other(Error::Overrun))
            }
            Ok((0, _)) => Err(nb::Error::WouldBlock),
            Ok((n, _)) => {
                // A one-byte destination can only ever yield one byte.
                assert!(n == 1);
                Ok(byte[0])
            }
        }
    }
}
// Error type used by the `embedded-hal-nb` serial trait implementation above.
impl embedded_hal_nb::serial::ErrorType for RingBufferedUartRx<'_> {
    type Error = Error;
}
impl ReadReady for RingBufferedUartRx<'_> {
fn read_ready(&mut self) -> Result<bool, Self::Error> {
let len = self.ring_buf.len().map_err(|e| match e {
crate::dma::ringbuffer::Error::Overrun => Self::Error::Overrun,
crate::dma::ringbuffer::Error::DmaUnsynced => {
error!(
"Ringbuffer error: DmaUNsynced, driver implementation is
probably bugged please open an issue"
);
// we report this as overrun since its recoverable in the same way
Self::Error::Overrun
}
})?;
Ok(len > 0)
}
}
|