aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDario Nieuwenhuis <[email protected]>2022-11-25 23:04:28 +0100
committerGitHub <[email protected]>2022-11-25 23:04:28 +0100
commit805b885de62f7028ffcec86b00014bf72749a871 (patch)
tree446239a627583d0d20513d8feba2024f72de62ec
parentfa374523591266f7f5abdd0f02f994174553df71 (diff)
parent7b838d03369f94e09d652982f994c5013e81457e (diff)
Merge pull request #1044 from embassy-rs/buffereduart-atomic
rp/uart: use lockfree ringbuffer.
-rw-r--r--embassy-hal-common/src/atomic_ring_buffer.rs331
-rw-r--r--embassy-hal-common/src/lib.rs1
-rw-r--r--embassy-rp/Cargo.toml2
-rw-r--r--embassy-rp/src/uart/buffered.rs656
-rw-r--r--embassy-rp/src/uart/mod.rs91
-rw-r--r--tests/rp/src/bin/uart_buffered.rs13
6 files changed, 733 insertions, 361 deletions
diff --git a/embassy-hal-common/src/atomic_ring_buffer.rs b/embassy-hal-common/src/atomic_ring_buffer.rs
new file mode 100644
index 000000000..c5e444306
--- /dev/null
+++ b/embassy-hal-common/src/atomic_ring_buffer.rs
@@ -0,0 +1,331 @@
1use core::slice;
2use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
3
/// Atomic reusable ringbuffer.
///
/// Designed to live in a `static`: every method takes `&self`, never
/// `&mut self`.
///
/// It is "reusable": a freshly created ringbuffer has no backing storage.
/// Storage is attached with `init` and detached with `deinit`, any number
/// of times over the ringbuffer's life. This is quite non-idiomatic, but
/// it makes storing the ringbuffer in a `static` much more practical.
///
/// One concurrent writer plus one concurrent reader are supported, even
/// when they run at different execution priorities (like main and irq).
pub struct RingBuffer {
    buf: AtomicPtr<u8>,
    len: AtomicUsize,
    start: AtomicUsize,
    end: AtomicUsize,
}

pub struct Reader<'a>(&'a RingBuffer);
pub struct Writer<'a>(&'a RingBuffer);

impl RingBuffer {
    /// Create a new, empty ringbuffer with no backing storage.
    pub const fn new() -> Self {
        RingBuffer {
            buf: AtomicPtr::new(core::ptr::null_mut()),
            len: AtomicUsize::new(0),
            start: AtomicUsize::new(0),
            end: AtomicUsize::new(0),
        }
    }

    /// Attach backing storage to the ring buffer.
    ///
    /// Note: with a backing buffer of `len` bytes, at most `len - 1` bytes
    /// can be held at once — one slot is kept free so that "full" and
    /// "empty" remain distinguishable.
    ///
    /// # Safety
    /// - The memory range `buf .. buf+len` must stay valid until `deinit` is called.
    /// - Must not be called concurrently with any other method.
    pub unsafe fn init(&self, buf: *mut u8, len: usize) {
        // `Relaxed` suffices here: the caller guarantees no concurrent access.
        self.buf.store(buf, Ordering::Relaxed);
        self.len.store(len, Ordering::Relaxed);
        self.start.store(0, Ordering::Relaxed);
        self.end.store(0, Ordering::Relaxed);
    }

    /// Detach the backing storage.
    ///
    /// Afterwards the ringbuffer is empty again, exactly as if it had just
    /// been created with `new()`.
    ///
    /// # Safety
    /// - Must not be called concurrently with any other method.
    pub unsafe fn deinit(&self) {
        // `Relaxed` suffices here: the caller guarantees no concurrent access.
        self.len.store(0, Ordering::Relaxed);
        self.start.store(0, Ordering::Relaxed);
        self.end.store(0, Ordering::Relaxed);
    }

    /// Create a reader.
    ///
    /// # Safety
    ///
    /// Only one reader can exist at a time.
    pub unsafe fn reader(&self) -> Reader<'_> {
        Reader(self)
    }

    /// Create a writer.
    ///
    /// # Safety
    ///
    /// Only one writer can exist at a time.
    pub unsafe fn writer(&self) -> Writer<'_> {
        Writer(self)
    }

    /// Returns true when no further bytes can be pushed.
    ///
    /// NOTE(review): calling this on an uninitialized ringbuffer panics,
    /// because `wrap` asserts `n <= len` and `len` is 0.
    pub fn is_full(&self) -> bool {
        let start = self.start.load(Ordering::Relaxed);
        let end = self.end.load(Ordering::Relaxed);
        self.wrap(end + 1) == start
    }

    /// Returns true when no bytes are available to pop.
    pub fn is_empty(&self) -> bool {
        self.start.load(Ordering::Relaxed) == self.end.load(Ordering::Relaxed)
    }

    // Wrap an index into `0..len`. Only valid for indices at most one full
    // lap ahead (`n <= len`), which the push/pop arithmetic guarantees.
    fn wrap(&self, n: usize) -> usize {
        let len = self.len.load(Ordering::Relaxed);
        assert!(n <= len);
        if n < len {
            n
        } else {
            0
        }
    }
}
109
110impl<'a> Writer<'a> {
111 /// Push data into the buffer in-place.
112 ///
113 /// The closure `f` is called with a free part of the buffer, it must write
114 /// some data to it and return the amount of bytes written.
115 pub fn push(&mut self, f: impl FnOnce(&mut [u8]) -> usize) -> usize {
116 let (p, n) = self.push_buf();
117 let buf = unsafe { slice::from_raw_parts_mut(p, n) };
118 let n = f(buf);
119 self.push_done(n);
120 n
121 }
122
123 /// Push one data byte.
124 ///
125 /// Returns true if pushed succesfully.
126 pub fn push_one(&mut self, val: u8) -> bool {
127 let n = self.push(|f| match f {
128 [] => 0,
129 [x, ..] => {
130 *x = val;
131 1
132 }
133 });
134 n != 0
135 }
136
137 /// Get a buffer where data can be pushed to.
138 ///
139 /// Write data to the start of the buffer, then call `push_done` with
140 /// however many bytes you've pushed.
141 ///
142 /// The buffer is suitable to DMA to.
143 ///
144 /// If the ringbuf is full, size=0 will be returned.
145 ///
146 /// The buffer stays valid as long as no other `Writer` method is called
147 /// and `init`/`deinit` aren't called on the ringbuf.
148 pub fn push_buf(&mut self) -> (*mut u8, usize) {
149 // Ordering: popping writes `start` last, so we read `start` first.
150 // Read it with Acquire ordering, so that the next accesses can't be reordered up past it.
151 let start = self.0.start.load(Ordering::Acquire);
152 let buf = self.0.buf.load(Ordering::Relaxed);
153 let len = self.0.len.load(Ordering::Relaxed);
154 let end = self.0.end.load(Ordering::Relaxed);
155
156 let n = if start <= end {
157 len - end - (start == 0) as usize
158 } else {
159 start - end - 1
160 };
161
162 trace!(" ringbuf: push_buf {:?}..{:?}", end, end + n);
163 (unsafe { buf.add(end) }, n)
164 }
165
166 pub fn push_done(&mut self, n: usize) {
167 trace!(" ringbuf: push {:?}", n);
168 let end = self.0.end.load(Ordering::Relaxed);
169
170 // Ordering: write `end` last, with Release ordering.
171 // The ordering ensures no preceding memory accesses (such as writing
172 // the actual data in the buffer) can be reordered down past it, which
173 // will guarantee the reader sees them after reading from `end`.
174 self.0.end.store(self.0.wrap(end + n), Ordering::Release);
175 }
176}
177
178impl<'a> Reader<'a> {
179 /// Pop data from the buffer in-place.
180 ///
181 /// The closure `f` is called with the next data, it must process
182 /// some data from it and return the amount of bytes processed.
183 pub fn pop(&mut self, f: impl FnOnce(&[u8]) -> usize) -> usize {
184 let (p, n) = self.pop_buf();
185 let buf = unsafe { slice::from_raw_parts(p, n) };
186 let n = f(buf);
187 self.pop_done(n);
188 n
189 }
190
191 /// Pop one data byte.
192 ///
193 /// Returns true if popped succesfully.
194 pub fn pop_one(&mut self) -> Option<u8> {
195 let mut res = None;
196 self.pop(|f| match f {
197 &[] => 0,
198 &[x, ..] => {
199 res = Some(x);
200 1
201 }
202 });
203 res
204 }
205
206 /// Get a buffer where data can be popped from.
207 ///
208 /// Read data from the start of the buffer, then call `pop_done` with
209 /// however many bytes you've processed.
210 ///
211 /// The buffer is suitable to DMA from.
212 ///
213 /// If the ringbuf is empty, size=0 will be returned.
214 ///
215 /// The buffer stays valid as long as no other `Reader` method is called
216 /// and `init`/`deinit` aren't called on the ringbuf.
217 pub fn pop_buf(&mut self) -> (*mut u8, usize) {
218 // Ordering: pushing writes `end` last, so we read `end` first.
219 // Read it with Acquire ordering, so that the next accesses can't be reordered up past it.
220 // This is needed to guarantee we "see" the data written by the writer.
221 let end = self.0.end.load(Ordering::Acquire);
222 let buf = self.0.buf.load(Ordering::Relaxed);
223 let len = self.0.len.load(Ordering::Relaxed);
224 let start = self.0.start.load(Ordering::Relaxed);
225
226 let n = if end < start { len - start } else { end - start };
227
228 trace!(" ringbuf: pop_buf {:?}..{:?}", start, start + n);
229 (unsafe { buf.add(start) }, n)
230 }
231
232 pub fn pop_done(&mut self, n: usize) {
233 trace!(" ringbuf: pop {:?}", n);
234
235 let start = self.0.start.load(Ordering::Relaxed);
236
237 // Ordering: write `start` last, with Release ordering.
238 // The ordering ensures no preceding memory accesses (such as reading
239 // the actual data) can be reordered down past it. This is necessary
240 // because writing to `start` is effectively freeing the read part of the
241 // buffer, which "gives permission" to the writer to write to it again.
242 // Therefore, all buffer accesses must be completed before this.
243 self.0.start.store(self.0.wrap(start + n), Ordering::Release);
244 }
245}
246
#[cfg(test)]
mod tests {
    use super::*;

    // Walks the ring buffer through a full push/pop cycle on a 4-byte
    // backing buffer. Usable capacity is 3: one slot stays free so that
    // "full" and "empty" are distinguishable.
    #[test]
    fn push_pop() {
        let mut b = [0; 4];
        let rb = RingBuffer::new();
        unsafe {
            rb.init(b.as_mut_ptr(), 4);

            assert_eq!(rb.is_empty(), true);
            assert_eq!(rb.is_full(), false);

            rb.writer().push(|buf| {
                // If capacity is 4, we can fill it up to 3.
                assert_eq!(3, buf.len());
                buf[0] = 1;
                buf[1] = 2;
                buf[2] = 3;
                3
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), true);

            rb.writer().push(|buf| {
                // If it's full, we can push 0 bytes.
                assert_eq!(0, buf.len());
                0
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), true);

            // Consume only 1 of the 3 available bytes.
            rb.reader().pop(|buf| {
                assert_eq!(3, buf.len());
                assert_eq!(1, buf[0]);
                1
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), false);

            // A pop that consumes 0 bytes must leave the buffer unchanged.
            rb.reader().pop(|buf| {
                assert_eq!(2, buf.len());
                0
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), false);

            // Drain the remaining two bytes.
            rb.reader().pop(|buf| {
                assert_eq!(2, buf.len());
                assert_eq!(2, buf[0]);
                assert_eq!(3, buf[1]);
                2
            });

            assert_eq!(rb.is_empty(), true);
            assert_eq!(rb.is_full(), false);

            // Popping from an empty buffer yields a 0-length region.
            rb.reader().pop(|buf| {
                assert_eq!(0, buf.len());
                0
            });

            // start/end have wrapped: the free space is now split, so
            // push_buf first returns only the tail slot before `len`...
            rb.writer().push(|buf| {
                assert_eq!(1, buf.len());
                buf[0] = 10;
                1
            });

            // ...and the next push returns the region at the front.
            rb.writer().push(|buf| {
                assert_eq!(2, buf.len());
                buf[0] = 11;
                buf[1] = 12;
                2
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), true);
        }
    }
}
diff --git a/embassy-hal-common/src/lib.rs b/embassy-hal-common/src/lib.rs
index 5d2649d02..b2a35cd35 100644
--- a/embassy-hal-common/src/lib.rs
+++ b/embassy-hal-common/src/lib.rs
@@ -4,6 +4,7 @@
4// This mod MUST go first, so that the others see its macros. 4// This mod MUST go first, so that the others see its macros.
5pub(crate) mod fmt; 5pub(crate) mod fmt;
6 6
7pub mod atomic_ring_buffer;
7pub mod drop; 8pub mod drop;
8mod macros; 9mod macros;
9mod peripheral; 10mod peripheral;
diff --git a/embassy-rp/Cargo.toml b/embassy-rp/Cargo.toml
index 770d8e25a..daa60f9c5 100644
--- a/embassy-rp/Cargo.toml
+++ b/embassy-rp/Cargo.toml
@@ -13,7 +13,7 @@ flavors = [
13] 13]
14 14
15[features] 15[features]
16defmt = ["dep:defmt", "embassy-usb-driver?/defmt"] 16defmt = ["dep:defmt", "embassy-usb-driver?/defmt", "embassy-hal-common/defmt"]
17 17
18# Reexport the PAC for the currently enabled chip at `embassy_rp::pac`. 18# Reexport the PAC for the currently enabled chip at `embassy_rp::pac`.
19# This is unstable because semver-minor (non-breaking) releases of embassy-rp may major-bump (breaking) the PAC version. 19# This is unstable because semver-minor (non-breaking) releases of embassy-rp may major-bump (breaking) the PAC version.
diff --git a/embassy-rp/src/uart/buffered.rs b/embassy-rp/src/uart/buffered.rs
index fa466c8a1..32029f81e 100644
--- a/embassy-rp/src/uart/buffered.rs
+++ b/embassy-rp/src/uart/buffered.rs
@@ -1,337 +1,421 @@
1use core::future::poll_fn; 1use core::future::{poll_fn, Future};
2use core::task::{Poll, Waker}; 2use core::slice;
3use core::task::Poll;
3 4
4use atomic_polyfill::{compiler_fence, Ordering}; 5use embassy_cortex_m::interrupt::{Interrupt, InterruptExt};
5use embassy_cortex_m::peripheral::{PeripheralMutex, PeripheralState, StateStorage}; 6use embassy_hal_common::atomic_ring_buffer::RingBuffer;
6use embassy_hal_common::ring_buffer::RingBuffer; 7use embassy_sync::waitqueue::AtomicWaker;
7use embassy_sync::waitqueue::WakerRegistration;
8 8
9use super::*; 9use super::*;
10 10
11pub struct State<'d, T: Instance>(StateStorage<FullStateInner<'d, T>>); 11pub struct State {
12impl<'d, T: Instance> State<'d, T> { 12 tx_waker: AtomicWaker,
13 pub const fn new() -> Self { 13 tx_buf: RingBuffer,
14 Self(StateStorage::new()) 14 rx_waker: AtomicWaker,
15 } 15 rx_buf: RingBuffer,
16}
17
18pub struct RxState<'d, T: Instance>(StateStorage<RxStateInner<'d, T>>);
19impl<'d, T: Instance> RxState<'d, T> {
20 pub const fn new() -> Self {
21 Self(StateStorage::new())
22 }
23} 16}
24 17
25pub struct TxState<'d, T: Instance>(StateStorage<TxStateInner<'d, T>>); 18impl State {
26impl<'d, T: Instance> TxState<'d, T> {
27 pub const fn new() -> Self { 19 pub const fn new() -> Self {
28 Self(StateStorage::new()) 20 Self {
21 rx_buf: RingBuffer::new(),
22 tx_buf: RingBuffer::new(),
23 rx_waker: AtomicWaker::new(),
24 tx_waker: AtomicWaker::new(),
25 }
29 } 26 }
30} 27}
31 28
32struct RxStateInner<'d, T: Instance> {
33 phantom: PhantomData<&'d mut T>,
34
35 waker: WakerRegistration,
36 buf: RingBuffer<'d>,
37}
38
39struct TxStateInner<'d, T: Instance> {
40 phantom: PhantomData<&'d mut T>,
41
42 waker: WakerRegistration,
43 buf: RingBuffer<'d>,
44}
45
46struct FullStateInner<'d, T: Instance> {
47 rx: RxStateInner<'d, T>,
48 tx: TxStateInner<'d, T>,
49}
50
51unsafe impl<'d, T: Instance> Send for RxStateInner<'d, T> {}
52unsafe impl<'d, T: Instance> Sync for RxStateInner<'d, T> {}
53
54unsafe impl<'d, T: Instance> Send for TxStateInner<'d, T> {}
55unsafe impl<'d, T: Instance> Sync for TxStateInner<'d, T> {}
56
57unsafe impl<'d, T: Instance> Send for FullStateInner<'d, T> {}
58unsafe impl<'d, T: Instance> Sync for FullStateInner<'d, T> {}
59
60pub struct BufferedUart<'d, T: Instance> { 29pub struct BufferedUart<'d, T: Instance> {
61 inner: PeripheralMutex<'d, FullStateInner<'d, T>>, 30 phantom: PhantomData<&'d mut T>,
62} 31}
63 32
64pub struct BufferedUartRx<'d, T: Instance> { 33pub struct BufferedUartRx<'d, T: Instance> {
65 inner: PeripheralMutex<'d, RxStateInner<'d, T>>, 34 phantom: PhantomData<&'d mut T>,
66} 35}
67 36
68pub struct BufferedUartTx<'d, T: Instance> { 37pub struct BufferedUartTx<'d, T: Instance> {
69 inner: PeripheralMutex<'d, TxStateInner<'d, T>>, 38 phantom: PhantomData<&'d mut T>,
70} 39}
71 40
72impl<'d, T: Instance> Unpin for BufferedUart<'d, T> {}
73impl<'d, T: Instance> Unpin for BufferedUartRx<'d, T> {}
74impl<'d, T: Instance> Unpin for BufferedUartTx<'d, T> {}
75
76impl<'d, T: Instance> BufferedUart<'d, T> { 41impl<'d, T: Instance> BufferedUart<'d, T> {
77 pub fn new<M: Mode>( 42 pub fn new(
78 state: &'d mut State<'d, T>, 43 _uart: impl Peripheral<P = T> + 'd,
79 _uart: Uart<'d, T, M>, 44 irq: impl Peripheral<P = T::Interrupt> + 'd,
45 tx: impl Peripheral<P = impl TxPin<T>> + 'd,
46 rx: impl Peripheral<P = impl RxPin<T>> + 'd,
47 tx_buffer: &'d mut [u8],
48 rx_buffer: &'d mut [u8],
49 config: Config,
50 ) -> Self {
51 into_ref!(tx, rx);
52 Self::new_inner(
53 irq,
54 tx.map_into(),
55 rx.map_into(),
56 None,
57 None,
58 tx_buffer,
59 rx_buffer,
60 config,
61 )
62 }
63
64 pub fn new_with_rtscts(
65 _uart: impl Peripheral<P = T> + 'd,
80 irq: impl Peripheral<P = T::Interrupt> + 'd, 66 irq: impl Peripheral<P = T::Interrupt> + 'd,
67 tx: impl Peripheral<P = impl TxPin<T>> + 'd,
68 rx: impl Peripheral<P = impl RxPin<T>> + 'd,
69 rts: impl Peripheral<P = impl RtsPin<T>> + 'd,
70 cts: impl Peripheral<P = impl CtsPin<T>> + 'd,
81 tx_buffer: &'d mut [u8], 71 tx_buffer: &'d mut [u8],
82 rx_buffer: &'d mut [u8], 72 rx_buffer: &'d mut [u8],
83 ) -> BufferedUart<'d, T> { 73 config: Config,
74 ) -> Self {
75 into_ref!(tx, rx, cts, rts);
76 Self::new_inner(
77 irq,
78 tx.map_into(),
79 rx.map_into(),
80 Some(rts.map_into()),
81 Some(cts.map_into()),
82 tx_buffer,
83 rx_buffer,
84 config,
85 )
86 }
87
88 fn new_inner(
89 irq: impl Peripheral<P = T::Interrupt> + 'd,
90 mut tx: PeripheralRef<'d, AnyPin>,
91 mut rx: PeripheralRef<'d, AnyPin>,
92 mut rts: Option<PeripheralRef<'d, AnyPin>>,
93 mut cts: Option<PeripheralRef<'d, AnyPin>>,
94 tx_buffer: &'d mut [u8],
95 rx_buffer: &'d mut [u8],
96 config: Config,
97 ) -> Self {
84 into_ref!(irq); 98 into_ref!(irq);
99 super::Uart::<'d, T, Async>::init(
100 Some(tx.reborrow()),
101 Some(rx.reborrow()),
102 rts.as_mut().map(|x| x.reborrow()),
103 cts.as_mut().map(|x| x.reborrow()),
104 config,
105 );
106
107 let state = T::state();
108 let regs = T::regs();
109
110 let len = tx_buffer.len();
111 unsafe { state.tx_buf.init(tx_buffer.as_mut_ptr(), len) };
112 let len = rx_buffer.len();
113 unsafe { state.rx_buf.init(rx_buffer.as_mut_ptr(), len) };
85 114
86 let r = T::regs();
87 unsafe { 115 unsafe {
88 r.uartimsc().modify(|w| { 116 regs.uartimsc().modify(|w| {
89 w.set_rxim(true); 117 w.set_rxim(true);
90 w.set_rtim(true); 118 w.set_rtim(true);
91 w.set_txim(true); 119 w.set_txim(true);
92 }); 120 });
93 } 121 }
94 122
95 Self { 123 irq.set_handler(on_interrupt::<T>);
96 inner: PeripheralMutex::new(irq, &mut state.0, move || FullStateInner { 124 irq.unpend();
97 tx: TxStateInner { 125 irq.enable();
98 phantom: PhantomData, 126
99 waker: WakerRegistration::new(), 127 Self { phantom: PhantomData }
100 buf: RingBuffer::new(tx_buffer),
101 },
102 rx: RxStateInner {
103 phantom: PhantomData,
104 waker: WakerRegistration::new(),
105 buf: RingBuffer::new(rx_buffer),
106 },
107 }),
108 }
109 } 128 }
110} 129}
111 130
112impl<'d, T: Instance> BufferedUartRx<'d, T> { 131impl<'d, T: Instance> BufferedUartRx<'d, T> {
113 pub fn new<M: Mode>( 132 pub fn new(
114 state: &'d mut RxState<'d, T>, 133 _uart: impl Peripheral<P = T> + 'd,
115 _uart: UartRx<'d, T, M>,
116 irq: impl Peripheral<P = T::Interrupt> + 'd, 134 irq: impl Peripheral<P = T::Interrupt> + 'd,
135 rx: impl Peripheral<P = impl RxPin<T>> + 'd,
117 rx_buffer: &'d mut [u8], 136 rx_buffer: &'d mut [u8],
118 ) -> BufferedUartRx<'d, T> { 137 config: Config,
138 ) -> Self {
139 into_ref!(rx);
140 Self::new_inner(irq, rx.map_into(), None, rx_buffer, config)
141 }
142
143 pub fn new_with_rts(
144 _uart: impl Peripheral<P = T> + 'd,
145 irq: impl Peripheral<P = T::Interrupt> + 'd,
146 rx: impl Peripheral<P = impl RxPin<T>> + 'd,
147 rts: impl Peripheral<P = impl RtsPin<T>> + 'd,
148 rx_buffer: &'d mut [u8],
149 config: Config,
150 ) -> Self {
151 into_ref!(rx, rts);
152 Self::new_inner(irq, rx.map_into(), Some(rts.map_into()), rx_buffer, config)
153 }
154
155 fn new_inner(
156 irq: impl Peripheral<P = T::Interrupt> + 'd,
157 mut rx: PeripheralRef<'d, AnyPin>,
158 mut rts: Option<PeripheralRef<'d, AnyPin>>,
159 rx_buffer: &'d mut [u8],
160 config: Config,
161 ) -> Self {
119 into_ref!(irq); 162 into_ref!(irq);
163 super::Uart::<'d, T, Async>::init(
164 None,
165 Some(rx.reborrow()),
166 rts.as_mut().map(|x| x.reborrow()),
167 None,
168 config,
169 );
170
171 let state = T::state();
172 let regs = T::regs();
173
174 let len = rx_buffer.len();
175 unsafe { state.rx_buf.init(rx_buffer.as_mut_ptr(), len) };
120 176
121 let r = T::regs();
122 unsafe { 177 unsafe {
123 r.uartimsc().modify(|w| { 178 regs.uartimsc().modify(|w| {
124 w.set_rxim(true); 179 w.set_rxim(true);
125 w.set_rtim(true); 180 w.set_rtim(true);
126 }); 181 });
127 } 182 }
128 183
129 Self { 184 irq.set_handler(on_interrupt::<T>);
130 inner: PeripheralMutex::new(irq, &mut state.0, move || RxStateInner { 185 irq.unpend();
131 phantom: PhantomData, 186 irq.enable();
132 187
133 buf: RingBuffer::new(rx_buffer), 188 Self { phantom: PhantomData }
134 waker: WakerRegistration::new(), 189 }
135 }), 190
136 } 191 fn read<'a>(buf: &'a mut [u8]) -> impl Future<Output = Result<usize, Error>> + 'a {
192 poll_fn(move |cx| {
193 let state = T::state();
194 let mut rx_reader = unsafe { state.rx_buf.reader() };
195 let n = rx_reader.pop(|data| {
196 let n = data.len().min(buf.len());
197 buf[..n].copy_from_slice(&data[..n]);
198 n
199 });
200 if n == 0 {
201 state.rx_waker.register(cx.waker());
202 return Poll::Pending;
203 }
204
205 Poll::Ready(Ok(n))
206 })
207 }
208
209 fn fill_buf<'a>() -> impl Future<Output = Result<&'a [u8], Error>> {
210 poll_fn(move |cx| {
211 let state = T::state();
212 let mut rx_reader = unsafe { state.rx_buf.reader() };
213 let (p, n) = rx_reader.pop_buf();
214 if n == 0 {
215 state.rx_waker.register(cx.waker());
216 return Poll::Pending;
217 }
218
219 let buf = unsafe { slice::from_raw_parts(p, n) };
220 Poll::Ready(Ok(buf))
221 })
222 }
223
224 fn consume(amt: usize) {
225 let state = T::state();
226 let mut rx_reader = unsafe { state.rx_buf.reader() };
227 rx_reader.pop_done(amt)
137 } 228 }
138} 229}
139 230
140impl<'d, T: Instance> BufferedUartTx<'d, T> { 231impl<'d, T: Instance> BufferedUartTx<'d, T> {
141 pub fn new<M: Mode>( 232 pub fn new(
142 state: &'d mut TxState<'d, T>, 233 _uart: impl Peripheral<P = T> + 'd,
143 _uart: UartTx<'d, T, M>,
144 irq: impl Peripheral<P = T::Interrupt> + 'd, 234 irq: impl Peripheral<P = T::Interrupt> + 'd,
235 tx: impl Peripheral<P = impl TxPin<T>> + 'd,
145 tx_buffer: &'d mut [u8], 236 tx_buffer: &'d mut [u8],
146 ) -> BufferedUartTx<'d, T> { 237 config: Config,
238 ) -> Self {
239 into_ref!(tx);
240 Self::new_inner(irq, tx.map_into(), None, tx_buffer, config)
241 }
242
243 pub fn new_with_cts(
244 _uart: impl Peripheral<P = T> + 'd,
245 irq: impl Peripheral<P = T::Interrupt> + 'd,
246 tx: impl Peripheral<P = impl TxPin<T>> + 'd,
247 cts: impl Peripheral<P = impl CtsPin<T>> + 'd,
248 tx_buffer: &'d mut [u8],
249 config: Config,
250 ) -> Self {
251 into_ref!(tx, cts);
252 Self::new_inner(irq, tx.map_into(), Some(cts.map_into()), tx_buffer, config)
253 }
254
255 fn new_inner(
256 irq: impl Peripheral<P = T::Interrupt> + 'd,
257 mut tx: PeripheralRef<'d, AnyPin>,
258 mut cts: Option<PeripheralRef<'d, AnyPin>>,
259 tx_buffer: &'d mut [u8],
260 config: Config,
261 ) -> Self {
147 into_ref!(irq); 262 into_ref!(irq);
263 super::Uart::<'d, T, Async>::init(
264 Some(tx.reborrow()),
265 None,
266 None,
267 cts.as_mut().map(|x| x.reborrow()),
268 config,
269 );
270
271 let state = T::state();
272 let regs = T::regs();
273
274 let len = tx_buffer.len();
275 unsafe { state.tx_buf.init(tx_buffer.as_mut_ptr(), len) };
148 276
149 let r = T::regs();
150 unsafe { 277 unsafe {
151 r.uartimsc().modify(|w| { 278 regs.uartimsc().modify(|w| {
152 w.set_txim(true); 279 w.set_txim(true);
153 }); 280 });
154 } 281 }
155 282
156 Self { 283 irq.set_handler(on_interrupt::<T>);
157 inner: PeripheralMutex::new(irq, &mut state.0, move || TxStateInner { 284 irq.unpend();
158 phantom: PhantomData, 285 irq.enable();
159 286
160 buf: RingBuffer::new(tx_buffer), 287 Self { phantom: PhantomData }
161 waker: WakerRegistration::new(),
162 }),
163 }
164 } 288 }
165}
166 289
167impl<'d, T: Instance> PeripheralState for FullStateInner<'d, T> 290 fn write<'a>(buf: &'a [u8]) -> impl Future<Output = Result<usize, Error>> + 'a {
168where 291 poll_fn(move |cx| {
169 Self: 'd, 292 let state = T::state();
170{ 293 let mut tx_writer = unsafe { state.tx_buf.writer() };
171 type Interrupt = T::Interrupt; 294 let n = tx_writer.push(|data| {
172 fn on_interrupt(&mut self) { 295 let n = data.len().min(buf.len());
173 self.rx.on_interrupt(); 296 data[..n].copy_from_slice(&buf[..n]);
174 self.tx.on_interrupt(); 297 n
298 });
299 if n == 0 {
300 state.tx_waker.register(cx.waker());
301 return Poll::Pending;
302 } else {
303 unsafe { T::Interrupt::steal() }.pend();
304 }
305
306 Poll::Ready(Ok(n))
307 })
175 } 308 }
176}
177 309
178impl<'d, T: Instance> RxStateInner<'d, T> 310 fn flush() -> impl Future<Output = Result<(), Error>> {
179where 311 poll_fn(move |cx| {
180 Self: 'd, 312 let state = T::state();
181{ 313 if !state.tx_buf.is_empty() {
182 fn read(&mut self, buf: &mut [u8], waker: &Waker) -> (Poll<Result<usize, Error>>, bool) { 314 state.tx_waker.register(cx.waker());
183 // We have data ready in buffer? Return it. 315 return Poll::Pending;
184 let mut do_pend = false;
185 let data = self.buf.pop_buf();
186 if !data.is_empty() {
187 let len = data.len().min(buf.len());
188 buf[..len].copy_from_slice(&data[..len]);
189
190 if self.buf.is_full() {
191 do_pend = true;
192 } 316 }
193 self.buf.pop(len);
194 317
195 return (Poll::Ready(Ok(len)), do_pend); 318 Poll::Ready(Ok(()))
196 } 319 })
197
198 self.waker.register(waker);
199 (Poll::Pending, do_pend)
200 } 320 }
321}
201 322
202 fn fill_buf<'a>(&mut self, waker: &Waker) -> Poll<Result<&'a [u8], Error>> { 323impl<'d, T: Instance> Drop for BufferedUart<'d, T> {
203 // We have data ready in buffer? Return it. 324 fn drop(&mut self) {
204 let buf = self.buf.pop_buf(); 325 unsafe {
205 if !buf.is_empty() { 326 T::Interrupt::steal().disable();
206 let buf: &[u8] = buf; 327 let state = T::state();
207 // Safety: buffer lives as long as uart 328 state.tx_buf.deinit();
208 let buf: &[u8] = unsafe { core::mem::transmute(buf) }; 329 state.rx_buf.deinit();
209 return Poll::Ready(Ok(buf));
210 } 330 }
211
212 self.waker.register(waker);
213 Poll::Pending
214 } 331 }
332}
215 333
216 fn consume(&mut self, amt: usize) -> bool { 334impl<'d, T: Instance> Drop for BufferedUartRx<'d, T> {
217 let full = self.buf.is_full(); 335 fn drop(&mut self) {
218 self.buf.pop(amt); 336 unsafe {
219 full 337 T::Interrupt::steal().disable();
338 let state = T::state();
339 state.tx_buf.deinit();
340 state.rx_buf.deinit();
341 }
220 } 342 }
221} 343}
222 344
223impl<'d, T: Instance> PeripheralState for RxStateInner<'d, T> 345impl<'d, T: Instance> Drop for BufferedUartTx<'d, T> {
224where 346 fn drop(&mut self) {
225 Self: 'd,
226{
227 type Interrupt = T::Interrupt;
228 fn on_interrupt(&mut self) {
229 let r = T::regs();
230 unsafe { 347 unsafe {
231 let ris = r.uartris().read(); 348 T::Interrupt::steal().disable();
232 // Clear interrupt flags 349 let state = T::state();
233 r.uarticr().modify(|w| { 350 state.tx_buf.deinit();
234 w.set_rxic(true); 351 state.rx_buf.deinit();
235 w.set_rtic(true);
236 });
237
238 if ris.peris() {
239 warn!("Parity error");
240 r.uarticr().modify(|w| {
241 w.set_peic(true);
242 });
243 }
244 if ris.feris() {
245 warn!("Framing error");
246 r.uarticr().modify(|w| {
247 w.set_feic(true);
248 });
249 }
250 if ris.beris() {
251 warn!("Break error");
252 r.uarticr().modify(|w| {
253 w.set_beic(true);
254 });
255 }
256 if ris.oeris() {
257 warn!("Overrun error");
258 r.uarticr().modify(|w| {
259 w.set_oeic(true);
260 });
261 }
262
263 if !r.uartfr().read().rxfe() {
264 let buf = self.buf.push_buf();
265 if !buf.is_empty() {
266 buf[0] = r.uartdr().read().data();
267 self.buf.push(1);
268 } else {
269 warn!("RX buffer full, discard received byte");
270 }
271
272 if self.buf.is_full() {
273 self.waker.wake();
274 }
275 }
276
277 if ris.rtris() {
278 self.waker.wake();
279 };
280 } 352 }
281 } 353 }
282} 354}
283 355
284impl<'d, T: Instance> TxStateInner<'d, T> 356pub(crate) unsafe fn on_interrupt<T: Instance>(_: *mut ()) {
285where 357 trace!("on_interrupt");
286 Self: 'd,
287{
288 fn write(&mut self, buf: &[u8], waker: &Waker) -> (Poll<Result<usize, Error>>, bool) {
289 let empty = self.buf.is_empty();
290 let tx_buf = self.buf.push_buf();
291 if tx_buf.is_empty() {
292 self.waker.register(waker);
293 return (Poll::Pending, empty);
294 }
295 358
296 let n = core::cmp::min(tx_buf.len(), buf.len()); 359 let r = T::regs();
297 tx_buf[..n].copy_from_slice(&buf[..n]); 360 let s = T::state();
298 self.buf.push(n);
299 361
300 (Poll::Ready(Ok(n)), empty) 362 unsafe {
301 } 363 // RX
302 364
303 fn flush(&mut self, waker: &Waker) -> Poll<Result<(), Error>> { 365 let ris = r.uartris().read();
304 if !self.buf.is_empty() { 366 // Clear interrupt flags
305 self.waker.register(waker); 367 r.uarticr().write(|w| {
306 return Poll::Pending; 368 w.set_rxic(true);
307 } 369 w.set_rtic(true);
370 });
308 371
309 Poll::Ready(Ok(())) 372 if ris.peris() {
310 } 373 warn!("Parity error");
311} 374 r.uarticr().write(|w| {
375 w.set_peic(true);
376 });
377 }
378 if ris.feris() {
379 warn!("Framing error");
380 r.uarticr().write(|w| {
381 w.set_feic(true);
382 });
383 }
384 if ris.beris() {
385 warn!("Break error");
386 r.uarticr().write(|w| {
387 w.set_beic(true);
388 });
389 }
390 if ris.oeris() {
391 warn!("Overrun error");
392 r.uarticr().write(|w| {
393 w.set_oeic(true);
394 });
395 }
312 396
313impl<'d, T: Instance> PeripheralState for TxStateInner<'d, T> 397 let mut rx_writer = s.rx_buf.writer();
314where 398 if !r.uartfr().read().rxfe() {
315 Self: 'd, 399 let val = r.uartdr().read().data();
316{ 400 if !rx_writer.push_one(val) {
317 type Interrupt = T::Interrupt; 401 warn!("RX buffer full, discard received byte");
318 fn on_interrupt(&mut self) {
319 let r = T::regs();
320 unsafe {
321 let buf = self.buf.pop_buf();
322 if !buf.is_empty() {
323 r.uartimsc().modify(|w| {
324 w.set_txim(true);
325 });
326 r.uartdr().write(|w| w.set_data(buf[0].into()));
327 self.buf.pop(1);
328 self.waker.wake();
329 } else {
330 // Disable interrupt until we have something to transmit again
331 r.uartimsc().modify(|w| {
332 w.set_txim(false);
333 });
334 } 402 }
403 s.rx_waker.wake();
404 }
405
406 // TX
407 let mut tx_reader = s.tx_buf.reader();
408 if let Some(val) = tx_reader.pop_one() {
409 r.uartimsc().modify(|w| {
410 w.set_txim(true);
411 });
412 r.uartdr().write(|w| w.set_data(val));
413 s.tx_waker.wake();
414 } else {
415 // Disable interrupt until we have something to transmit again
416 r.uartimsc().modify(|w| {
417 w.set_txim(false);
418 });
335 } 419 }
336 } 420 }
337} 421}
@@ -356,108 +440,52 @@ impl<'d, T: Instance> embedded_io::Io for BufferedUartTx<'d, T> {
356 440
357impl<'d, T: Instance + 'd> embedded_io::asynch::Read for BufferedUart<'d, T> { 441impl<'d, T: Instance + 'd> embedded_io::asynch::Read for BufferedUart<'d, T> {
358 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> { 442 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
359 poll_fn(move |cx| { 443 BufferedUartRx::<'d, T>::read(buf).await
360 let (res, do_pend) = self.inner.with(|state| {
361 compiler_fence(Ordering::SeqCst);
362 state.rx.read(buf, cx.waker())
363 });
364
365 if do_pend {
366 self.inner.pend();
367 }
368
369 res
370 })
371 .await
372 } 444 }
373} 445}
374 446
375impl<'d, T: Instance + 'd> embedded_io::asynch::Read for BufferedUartRx<'d, T> { 447impl<'d, T: Instance + 'd> embedded_io::asynch::Read for BufferedUartRx<'d, T> {
376 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> { 448 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
377 poll_fn(move |cx| { 449 Self::read(buf).await
378 let (res, do_pend) = self.inner.with(|state| {
379 compiler_fence(Ordering::SeqCst);
380 state.read(buf, cx.waker())
381 });
382
383 if do_pend {
384 self.inner.pend();
385 }
386
387 res
388 })
389 .await
390 } 450 }
391} 451}
392 452
393impl<'d, T: Instance + 'd> embedded_io::asynch::BufRead for BufferedUart<'d, T> { 453impl<'d, T: Instance + 'd> embedded_io::asynch::BufRead for BufferedUart<'d, T> {
394 async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> { 454 async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> {
395 poll_fn(move |cx| { 455 BufferedUartRx::<'d, T>::fill_buf().await
396 self.inner.with(|state| {
397 compiler_fence(Ordering::SeqCst);
398 state.rx.fill_buf(cx.waker())
399 })
400 })
401 .await
402 } 456 }
403 457
404 fn consume(&mut self, amt: usize) { 458 fn consume(&mut self, amt: usize) {
405 let signal = self.inner.with(|state| state.rx.consume(amt)); 459 BufferedUartRx::<'d, T>::consume(amt)
406 if signal {
407 self.inner.pend();
408 }
409 } 460 }
410} 461}
411 462
412impl<'d, T: Instance + 'd> embedded_io::asynch::BufRead for BufferedUartRx<'d, T> { 463impl<'d, T: Instance + 'd> embedded_io::asynch::BufRead for BufferedUartRx<'d, T> {
413 async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> { 464 async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> {
414 poll_fn(move |cx| { 465 Self::fill_buf().await
415 self.inner.with(|state| {
416 compiler_fence(Ordering::SeqCst);
417 state.fill_buf(cx.waker())
418 })
419 })
420 .await
421 } 466 }
422 467
423 fn consume(&mut self, amt: usize) { 468 fn consume(&mut self, amt: usize) {
424 let signal = self.inner.with(|state| state.consume(amt)); 469 Self::consume(amt)
425 if signal {
426 self.inner.pend();
427 }
428 } 470 }
429} 471}
430 472
431impl<'d, T: Instance + 'd> embedded_io::asynch::Write for BufferedUart<'d, T> { 473impl<'d, T: Instance + 'd> embedded_io::asynch::Write for BufferedUart<'d, T> {
432 async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> { 474 async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
433 poll_fn(move |cx| { 475 BufferedUartTx::<'d, T>::write(buf).await
434 let (poll, empty) = self.inner.with(|state| state.tx.write(buf, cx.waker()));
435 if empty {
436 self.inner.pend();
437 }
438 poll
439 })
440 .await
441 } 476 }
442 477
443 async fn flush(&mut self) -> Result<(), Self::Error> { 478 async fn flush(&mut self) -> Result<(), Self::Error> {
444 poll_fn(move |cx| self.inner.with(|state| state.tx.flush(cx.waker()))).await 479 BufferedUartTx::<'d, T>::flush().await
445 } 480 }
446} 481}
447 482
448impl<'d, T: Instance + 'd> embedded_io::asynch::Write for BufferedUartTx<'d, T> { 483impl<'d, T: Instance + 'd> embedded_io::asynch::Write for BufferedUartTx<'d, T> {
449 async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> { 484 async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
450 poll_fn(move |cx| { 485 Self::write(buf).await
451 let (poll, empty) = self.inner.with(|state| state.write(buf, cx.waker()));
452 if empty {
453 self.inner.pend();
454 }
455 poll
456 })
457 .await
458 } 486 }
459 487
460 async fn flush(&mut self) -> Result<(), Self::Error> { 488 async fn flush(&mut self) -> Result<(), Self::Error> {
461 poll_fn(move |cx| self.inner.with(|state| state.flush(cx.waker()))).await 489 Self::flush().await
462 } 490 }
463} 491}
diff --git a/embassy-rp/src/uart/mod.rs b/embassy-rp/src/uart/mod.rs
index 56c25e189..7e7bcaf30 100644
--- a/embassy-rp/src/uart/mod.rs
+++ b/embassy-rp/src/uart/mod.rs
@@ -7,6 +7,11 @@ use crate::gpio::sealed::Pin;
7use crate::gpio::AnyPin; 7use crate::gpio::AnyPin;
8use crate::{pac, peripherals, Peripheral}; 8use crate::{pac, peripherals, Peripheral};
9 9
10#[cfg(feature = "nightly")]
11mod buffered;
12#[cfg(feature = "nightly")]
13pub use buffered::{BufferedUart, BufferedUartRx, BufferedUartTx};
14
10#[derive(Clone, Copy, PartialEq, Eq, Debug)] 15#[derive(Clone, Copy, PartialEq, Eq, Debug)]
11pub enum DataBits { 16pub enum DataBits {
12 DataBits5, 17 DataBits5,
@@ -196,7 +201,7 @@ impl<'d, T: Instance> Uart<'d, T, Blocking> {
196 config: Config, 201 config: Config,
197 ) -> Self { 202 ) -> Self {
198 into_ref!(tx, rx); 203 into_ref!(tx, rx);
199 Self::new_inner(uart, rx.map_into(), tx.map_into(), None, None, None, None, config) 204 Self::new_inner(uart, tx.map_into(), rx.map_into(), None, None, None, None, config)
200 } 205 }
201 206
202 /// Create a new UART with hardware flow control (RTS/CTS) 207 /// Create a new UART with hardware flow control (RTS/CTS)
@@ -211,8 +216,8 @@ impl<'d, T: Instance> Uart<'d, T, Blocking> {
211 into_ref!(tx, rx, cts, rts); 216 into_ref!(tx, rx, cts, rts);
212 Self::new_inner( 217 Self::new_inner(
213 uart, 218 uart,
214 rx.map_into(),
215 tx.map_into(), 219 tx.map_into(),
220 rx.map_into(),
216 Some(rts.map_into()), 221 Some(rts.map_into()),
217 Some(cts.map_into()), 222 Some(cts.map_into()),
218 None, 223 None,
@@ -235,8 +240,8 @@ impl<'d, T: Instance> Uart<'d, T, Async> {
235 into_ref!(tx, rx, tx_dma, rx_dma); 240 into_ref!(tx, rx, tx_dma, rx_dma);
236 Self::new_inner( 241 Self::new_inner(
237 uart, 242 uart,
238 rx.map_into(),
239 tx.map_into(), 243 tx.map_into(),
244 rx.map_into(),
240 None, 245 None,
241 None, 246 None,
242 Some(tx_dma.map_into()), 247 Some(tx_dma.map_into()),
@@ -259,8 +264,8 @@ impl<'d, T: Instance> Uart<'d, T, Async> {
259 into_ref!(tx, rx, cts, rts, tx_dma, rx_dma); 264 into_ref!(tx, rx, cts, rts, tx_dma, rx_dma);
260 Self::new_inner( 265 Self::new_inner(
261 uart, 266 uart,
262 rx.map_into(),
263 tx.map_into(), 267 tx.map_into(),
268 rx.map_into(),
264 Some(rts.map_into()), 269 Some(rts.map_into()),
265 Some(cts.map_into()), 270 Some(cts.map_into()),
266 Some(tx_dma.map_into()), 271 Some(tx_dma.map_into()),
@@ -273,41 +278,52 @@ impl<'d, T: Instance> Uart<'d, T, Async> {
273impl<'d, T: Instance, M: Mode> Uart<'d, T, M> { 278impl<'d, T: Instance, M: Mode> Uart<'d, T, M> {
274 fn new_inner( 279 fn new_inner(
275 _uart: impl Peripheral<P = T> + 'd, 280 _uart: impl Peripheral<P = T> + 'd,
276 tx: PeripheralRef<'d, AnyPin>, 281 mut tx: PeripheralRef<'d, AnyPin>,
277 rx: PeripheralRef<'d, AnyPin>, 282 mut rx: PeripheralRef<'d, AnyPin>,
278 rts: Option<PeripheralRef<'d, AnyPin>>, 283 mut rts: Option<PeripheralRef<'d, AnyPin>>,
279 cts: Option<PeripheralRef<'d, AnyPin>>, 284 mut cts: Option<PeripheralRef<'d, AnyPin>>,
280 tx_dma: Option<PeripheralRef<'d, AnyChannel>>, 285 tx_dma: Option<PeripheralRef<'d, AnyChannel>>,
281 rx_dma: Option<PeripheralRef<'d, AnyChannel>>, 286 rx_dma: Option<PeripheralRef<'d, AnyChannel>>,
282 config: Config, 287 config: Config,
283 ) -> Self { 288 ) -> Self {
284 into_ref!(_uart); 289 Self::init(
285 290 Some(tx.reborrow()),
286 unsafe { 291 Some(rx.reborrow()),
287 let r = T::regs(); 292 rts.as_mut().map(|x| x.reborrow()),
288 293 cts.as_mut().map(|x| x.reborrow()),
289 tx.io().ctrl().write(|w| w.set_funcsel(2)); 294 config,
290 rx.io().ctrl().write(|w| w.set_funcsel(2)); 295 );
291
292 tx.pad_ctrl().write(|w| {
293 w.set_ie(true);
294 });
295 296
296 rx.pad_ctrl().write(|w| { 297 Self {
297 w.set_ie(true); 298 tx: UartTx::new(tx_dma),
298 }); 299 rx: UartRx::new(rx_dma),
300 }
301 }
299 302
303 fn init(
304 tx: Option<PeripheralRef<'_, AnyPin>>,
305 rx: Option<PeripheralRef<'_, AnyPin>>,
306 rts: Option<PeripheralRef<'_, AnyPin>>,
307 cts: Option<PeripheralRef<'_, AnyPin>>,
308 config: Config,
309 ) {
310 let r = T::regs();
311 unsafe {
312 if let Some(pin) = &tx {
313 pin.io().ctrl().write(|w| w.set_funcsel(2));
314 pin.pad_ctrl().write(|w| w.set_ie(true));
315 }
316 if let Some(pin) = &rx {
317 pin.io().ctrl().write(|w| w.set_funcsel(2));
318 pin.pad_ctrl().write(|w| w.set_ie(true));
319 }
300 if let Some(pin) = &cts { 320 if let Some(pin) = &cts {
301 pin.io().ctrl().write(|w| w.set_funcsel(2)); 321 pin.io().ctrl().write(|w| w.set_funcsel(2));
302 pin.pad_ctrl().write(|w| { 322 pin.pad_ctrl().write(|w| w.set_ie(true));
303 w.set_ie(true);
304 });
305 } 323 }
306 if let Some(pin) = &rts { 324 if let Some(pin) = &rts {
307 pin.io().ctrl().write(|w| w.set_funcsel(2)); 325 pin.io().ctrl().write(|w| w.set_funcsel(2));
308 pin.pad_ctrl().write(|w| { 326 pin.pad_ctrl().write(|w| w.set_ie(true));
309 w.set_ie(true);
310 });
311 } 327 }
312 328
313 let clk_base = crate::clocks::clk_peri_freq(); 329 let clk_base = crate::clocks::clk_peri_freq();
@@ -359,11 +375,6 @@ impl<'d, T: Instance, M: Mode> Uart<'d, T, M> {
359 w.set_rtsen(rts.is_some()); 375 w.set_rtsen(rts.is_some());
360 }); 376 });
361 } 377 }
362
363 Self {
364 tx: UartTx::new(tx_dma),
365 rx: UartRx::new(rx_dma),
366 }
367 } 378 }
368} 379}
369 380
@@ -611,11 +622,6 @@ mod eha {
611 } 622 }
612} 623}
613 624
614#[cfg(feature = "nightly")]
615mod buffered;
616#[cfg(feature = "nightly")]
617pub use buffered::*;
618
619mod sealed { 625mod sealed {
620 use super::*; 626 use super::*;
621 627
@@ -628,6 +634,9 @@ mod sealed {
628 type Interrupt: crate::interrupt::Interrupt; 634 type Interrupt: crate::interrupt::Interrupt;
629 635
630 fn regs() -> pac::uart::Uart; 636 fn regs() -> pac::uart::Uart;
637
638 #[cfg(feature = "nightly")]
639 fn state() -> &'static buffered::State;
631 } 640 }
632 pub trait TxPin<T: Instance> {} 641 pub trait TxPin<T: Instance> {}
633 pub trait RxPin<T: Instance> {} 642 pub trait RxPin<T: Instance> {}
@@ -663,6 +672,12 @@ macro_rules! impl_instance {
663 fn regs() -> pac::uart::Uart { 672 fn regs() -> pac::uart::Uart {
664 pac::$inst 673 pac::$inst
665 } 674 }
675
676 #[cfg(feature = "nightly")]
677 fn state() -> &'static buffered::State {
678 static STATE: buffered::State = buffered::State::new();
679 &STATE
680 }
666 } 681 }
667 impl Instance for peripherals::$inst {} 682 impl Instance for peripherals::$inst {}
668 }; 683 };
diff --git a/tests/rp/src/bin/uart_buffered.rs b/tests/rp/src/bin/uart_buffered.rs
index 9cc20bb98..bea9283e7 100644
--- a/tests/rp/src/bin/uart_buffered.rs
+++ b/tests/rp/src/bin/uart_buffered.rs
@@ -5,7 +5,7 @@
5use defmt::{assert_eq, *}; 5use defmt::{assert_eq, *};
6use embassy_executor::Spawner; 6use embassy_executor::Spawner;
7use embassy_rp::interrupt; 7use embassy_rp::interrupt;
8use embassy_rp::uart::{BufferedUart, Config, State, Uart}; 8use embassy_rp::uart::{BufferedUart, Config};
9use embedded_io::asynch::{Read, Write}; 9use embedded_io::asynch::{Read, Write};
10use {defmt_rtt as _, panic_probe as _}; 10use {defmt_rtt as _, panic_probe as _};
11 11
@@ -17,25 +17,22 @@ async fn main(_spawner: Spawner) {
17 let (tx, rx, uart) = (p.PIN_0, p.PIN_1, p.UART0); 17 let (tx, rx, uart) = (p.PIN_0, p.PIN_1, p.UART0);
18 18
19 let config = Config::default(); 19 let config = Config::default();
20 let uart = Uart::new_blocking(uart, tx, rx, config);
21
22 let irq = interrupt::take!(UART0_IRQ); 20 let irq = interrupt::take!(UART0_IRQ);
23 let tx_buf = &mut [0u8; 16]; 21 let tx_buf = &mut [0u8; 16];
24 let rx_buf = &mut [0u8; 16]; 22 let rx_buf = &mut [0u8; 16];
25 let mut state = State::new(); 23 let mut uart = BufferedUart::new(uart, irq, tx, rx, tx_buf, rx_buf, config);
26 let mut uart = BufferedUart::new(&mut state, uart, irq, tx_buf, rx_buf);
27 24
28 // Make sure we send more bytes than fits in the FIFO, to test the actual 25 // Make sure we send more bytes than fits in the FIFO, to test the actual
29 // bufferedUart. 26 // bufferedUart.
30 27
31 let data = [ 28 let data = [
32 1_u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29 1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
33 30, 31, 32, 30 30, 31,
34 ]; 31 ];
35 uart.write_all(&data).await.unwrap(); 32 uart.write_all(&data).await.unwrap();
36 info!("Done writing"); 33 info!("Done writing");
37 34
38 let mut buf = [0; 32]; 35 let mut buf = [0; 31];
39 uart.read_exact(&mut buf).await.unwrap(); 36 uart.read_exact(&mut buf).await.unwrap();
40 assert_eq!(buf, data); 37 assert_eq!(buf, data);
41 38