aboutsummaryrefslogtreecommitdiff
path: root/embassy-hal-common/src/atomic_ring_buffer.rs
diff options
context:
space:
mode:
authorDario Nieuwenhuis <[email protected]>2022-11-07 00:27:21 +0100
committerDario Nieuwenhuis <[email protected]>2022-11-25 22:30:47 +0100
commit7b838d03369f94e09d652982f994c5013e81457e (patch)
tree446239a627583d0d20513d8feba2024f72de62ec /embassy-hal-common/src/atomic_ring_buffer.rs
parentfa374523591266f7f5abdd0f02f994174553df71 (diff)
rp/uart: use lockfree ringbuffer.
This gets rid of another PeripheralMutex usage.
Diffstat (limited to 'embassy-hal-common/src/atomic_ring_buffer.rs')
-rw-r--r--embassy-hal-common/src/atomic_ring_buffer.rs331
1 file changed, 331 insertions, 0 deletions
diff --git a/embassy-hal-common/src/atomic_ring_buffer.rs b/embassy-hal-common/src/atomic_ring_buffer.rs
new file mode 100644
index 000000000..c5e444306
--- /dev/null
+++ b/embassy-hal-common/src/atomic_ring_buffer.rs
@@ -0,0 +1,331 @@
1use core::slice;
2use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
3
/// Atomic reusable ringbuffer
///
/// This ringbuffer implementation is designed to be stored in a `static`,
/// therefore all methods take `&self` and not `&mut self`.
///
/// It is "reusable": when created it has no backing buffer, you can give it
/// one with `init` and take it back with `deinit`, and init it again in the
/// future if needed. This is very non-idiomatic, but helps a lot when storing
/// it in a `static`.
///
/// One concurrent writer and one concurrent reader are supported, even at
/// different execution priorities (like main and irq).
pub struct RingBuffer {
    // Pointer to the backing storage; null until `init` is called.
    buf: AtomicPtr<u8>,
    // Size of the backing storage in bytes. Usable capacity is `len - 1`:
    // one slot is always kept free so `start == end` unambiguously means
    // "empty" (see `is_full`/`is_empty`).
    len: AtomicUsize,
    // Index of the next byte to pop. Advanced only by the reader
    // (`pop_done`); reset by `init`/`deinit` under exclusive access.
    start: AtomicUsize,
    // Index of the next free byte to push into. Advanced only by the
    // writer (`push_done`); reset by `init`/`deinit` under exclusive access.
    end: AtomicUsize,
}
22
/// Reading half of a [`RingBuffer`]. Obtain via [`RingBuffer::reader`];
/// at most one may exist at a time (see `reader`'s safety contract).
pub struct Reader<'a>(&'a RingBuffer);
/// Writing half of a [`RingBuffer`]. Obtain via [`RingBuffer::writer`];
/// at most one may exist at a time (see `writer`'s safety contract).
pub struct Writer<'a>(&'a RingBuffer);
25
26impl RingBuffer {
27 /// Create a new empty ringbuffer.
28 pub const fn new() -> Self {
29 Self {
30 buf: AtomicPtr::new(core::ptr::null_mut()),
31 len: AtomicUsize::new(0),
32 start: AtomicUsize::new(0),
33 end: AtomicUsize::new(0),
34 }
35 }
36
37 /// Initialize the ring buffer with a buffer.
38 ///
39 /// # Safety
40 /// - The buffer (`buf .. buf+len`) must be valid memory until `deinit` is called.
41 /// - Must not be called concurrently with any other methods.
42 pub unsafe fn init(&self, buf: *mut u8, len: usize) {
43 // Ordering: it's OK to use `Relaxed` because this is not called
44 // concurrently with other methods.
45 self.buf.store(buf, Ordering::Relaxed);
46 self.len.store(len, Ordering::Relaxed);
47 self.start.store(0, Ordering::Relaxed);
48 self.end.store(0, Ordering::Relaxed);
49 }
50
51 /// Deinitialize the ringbuffer.
52 ///
53 /// After calling this, the ringbuffer becomes empty, as if it was
54 /// just created with `new()`.
55 ///
56 /// # Safety
57 /// - Must not be called concurrently with any other methods.
58 pub unsafe fn deinit(&self) {
59 // Ordering: it's OK to use `Relaxed` because this is not called
60 // concurrently with other methods.
61 self.len.store(0, Ordering::Relaxed);
62 self.start.store(0, Ordering::Relaxed);
63 self.end.store(0, Ordering::Relaxed);
64 }
65
66 /// Create a reader.
67 ///
68 /// # Safety
69 ///
70 /// Only one reader can exist at a time.
71 pub unsafe fn reader(&self) -> Reader<'_> {
72 Reader(self)
73 }
74
75 /// Create a writer.
76 ///
77 /// # Safety
78 ///
79 /// Only one writer can exist at a time.
80 pub unsafe fn writer(&self) -> Writer<'_> {
81 Writer(self)
82 }
83
84 pub fn is_full(&self) -> bool {
85 let start = self.start.load(Ordering::Relaxed);
86 let end = self.end.load(Ordering::Relaxed);
87
88 self.wrap(end + 1) == start
89 }
90
91 pub fn is_empty(&self) -> bool {
92 let start = self.start.load(Ordering::Relaxed);
93 let end = self.end.load(Ordering::Relaxed);
94
95 start == end
96 }
97
98 fn wrap(&self, n: usize) -> usize {
99 let len = self.len.load(Ordering::Relaxed);
100
101 assert!(n <= len);
102 if n == len {
103 0
104 } else {
105 n
106 }
107 }
108}
109
impl<'a> Writer<'a> {
    /// Push data into the buffer in-place.
    ///
    /// The closure `f` is called with a free part of the buffer, it must write
    /// some data to it and return the amount of bytes written.
    pub fn push(&mut self, f: impl FnOnce(&mut [u8]) -> usize) -> usize {
        let (p, n) = self.push_buf();
        // SAFETY: `push_buf` returns a region inside the backing buffer that
        // the reader will not touch until `push_done` publishes it, and only
        // one Writer exists (per `RingBuffer::writer`'s contract).
        let buf = unsafe { slice::from_raw_parts_mut(p, n) };
        let n = f(buf);
        self.push_done(n);
        n
    }

    /// Push one data byte.
    ///
    /// Returns true if pushed successfully.
    pub fn push_one(&mut self, val: u8) -> bool {
        let n = self.push(|f| match f {
            // Buffer full: nothing written.
            [] => 0,
            [x, ..] => {
                *x = val;
                1
            }
        });
        n != 0
    }

    /// Get a buffer where data can be pushed to.
    ///
    /// Write data to the start of the buffer, then call `push_done` with
    /// however many bytes you've pushed.
    ///
    /// The buffer is suitable to DMA to.
    ///
    /// If the ringbuf is full, size=0 will be returned.
    ///
    /// The buffer stays valid as long as no other `Writer` method is called
    /// and `init`/`deinit` aren't called on the ringbuf.
    pub fn push_buf(&mut self) -> (*mut u8, usize) {
        // Ordering: popping writes `start` last, so we read `start` first.
        // Read it with Acquire ordering, so that the next accesses can't be reordered up past it.
        let start = self.0.start.load(Ordering::Acquire);
        let buf = self.0.buf.load(Ordering::Relaxed);
        let len = self.0.len.load(Ordering::Relaxed);
        let end = self.0.end.load(Ordering::Relaxed);

        let n = if start <= end {
            // Contiguous free space from `end` to the end of storage. When
            // `start == 0` one byte is held back: filling up to `len` would
            // wrap `end` to 0 == `start`, which would read as "empty".
            len - end - (start == 0) as usize
        } else {
            // Free space between `end` and `start`, minus the one slot
            // always kept free to distinguish "full" from "empty".
            start - end - 1
        };

        trace!(" ringbuf: push_buf {:?}..{:?}", end, end + n);
        (unsafe { buf.add(end) }, n)
    }

    /// Mark `n` bytes of the region returned by `push_buf` as written,
    /// making them visible to the reader.
    pub fn push_done(&mut self, n: usize) {
        trace!(" ringbuf: push {:?}", n);
        let end = self.0.end.load(Ordering::Relaxed);

        // Ordering: write `end` last, with Release ordering.
        // The ordering ensures no preceding memory accesses (such as writing
        // the actual data in the buffer) can be reordered down past it, which
        // will guarantee the reader sees them after reading from `end`.
        self.0.end.store(self.0.wrap(end + n), Ordering::Release);
    }
}
177
impl<'a> Reader<'a> {
    /// Pop data from the buffer in-place.
    ///
    /// The closure `f` is called with the next data, it must process
    /// some data from it and return the amount of bytes processed.
    pub fn pop(&mut self, f: impl FnOnce(&[u8]) -> usize) -> usize {
        let (p, n) = self.pop_buf();
        // SAFETY: `pop_buf` returns a region inside the backing buffer that
        // the writer will not reuse until `pop_done` releases it, and only
        // one Reader exists (per `RingBuffer::reader`'s contract).
        let buf = unsafe { slice::from_raw_parts(p, n) };
        let n = f(buf);
        self.pop_done(n);
        n
    }

    /// Pop one data byte.
    ///
    /// Returns the popped byte, or `None` if the buffer was empty.
    pub fn pop_one(&mut self) -> Option<u8> {
        let mut res = None;
        self.pop(|f| match f {
            // Buffer empty: nothing consumed.
            &[] => 0,
            &[x, ..] => {
                res = Some(x);
                1
            }
        });
        res
    }

    /// Get a buffer where data can be popped from.
    ///
    /// Read data from the start of the buffer, then call `pop_done` with
    /// however many bytes you've processed.
    ///
    /// The buffer is suitable to DMA from.
    ///
    /// If the ringbuf is empty, size=0 will be returned.
    ///
    /// The buffer stays valid as long as no other `Reader` method is called
    /// and `init`/`deinit` aren't called on the ringbuf.
    pub fn pop_buf(&mut self) -> (*mut u8, usize) {
        // Ordering: pushing writes `end` last, so we read `end` first.
        // Read it with Acquire ordering, so that the next accesses can't be reordered up past it.
        // This is needed to guarantee we "see" the data written by the writer.
        let end = self.0.end.load(Ordering::Acquire);
        let buf = self.0.buf.load(Ordering::Relaxed);
        let len = self.0.len.load(Ordering::Relaxed);
        let start = self.0.start.load(Ordering::Relaxed);

        // Contiguous available data: if the data wraps around (`end < start`),
        // only the tail up to the end of storage is returned here; the
        // wrapped-around head becomes available on a subsequent call.
        let n = if end < start { len - start } else { end - start };

        trace!(" ringbuf: pop_buf {:?}..{:?}", start, start + n);
        (unsafe { buf.add(start) }, n)
    }

    /// Mark `n` bytes of the region returned by `pop_buf` as consumed,
    /// freeing the space for the writer.
    pub fn pop_done(&mut self, n: usize) {
        trace!(" ringbuf: pop {:?}", n);

        let start = self.0.start.load(Ordering::Relaxed);

        // Ordering: write `start` last, with Release ordering.
        // The ordering ensures no preceding memory accesses (such as reading
        // the actual data) can be reordered down past it. This is necessary
        // because writing to `start` is effectively freeing the read part of the
        // buffer, which "gives permission" to the writer to write to it again.
        // Therefore, all buffer accesses must be completed before this.
        self.0.start.store(self.0.wrap(start + n), Ordering::Release);
    }
}
246
#[cfg(test)]
mod tests {
    use super::*;

    /// Walk a 4-byte ring through its states: fill, full-push no-op,
    /// partial drain, zero-length pop, wrap-around refill.
    #[test]
    fn push_pop() {
        let mut storage = [0u8; 4];
        let rb = RingBuffer::new();
        unsafe {
            rb.init(storage.as_mut_ptr(), 4);

            assert!(rb.is_empty());
            assert!(!rb.is_full());

            // One slot stays free, so a len-4 ring holds at most 3 bytes.
            rb.writer().push(|buf| {
                assert_eq!(buf.len(), 3);
                buf[..3].copy_from_slice(&[1, 2, 3]);
                3
            });

            assert!(!rb.is_empty());
            assert!(rb.is_full());

            // A full ring hands out a zero-length write window.
            rb.writer().push(|buf| {
                assert_eq!(buf.len(), 0);
                0
            });

            assert!(!rb.is_empty());
            assert!(rb.is_full());

            // Consume just the first byte.
            rb.reader().pop(|buf| {
                assert_eq!(buf.len(), 3);
                assert_eq!(buf[0], 1);
                1
            });

            assert!(!rb.is_empty());
            assert!(!rb.is_full());

            // Consuming 0 bytes leaves the state untouched.
            rb.reader().pop(|buf| {
                assert_eq!(buf.len(), 2);
                0
            });

            assert!(!rb.is_empty());
            assert!(!rb.is_full());

            // Drain the remaining two bytes.
            rb.reader().pop(|buf| {
                assert_eq!(buf.len(), 2);
                assert_eq!(buf[0], 2);
                assert_eq!(buf[1], 3);
                2
            });

            assert!(rb.is_empty());
            assert!(!rb.is_full());

            // An empty ring hands out a zero-length read window.
            rb.reader().pop(|buf| {
                assert_eq!(buf.len(), 0);
                0
            });

            // Near the physical end, only the contiguous tail is offered...
            rb.writer().push(|buf| {
                assert_eq!(buf.len(), 1);
                buf[0] = 10;
                1
            });

            // ...and the wrapped-around head on the next call.
            rb.writer().push(|buf| {
                assert_eq!(buf.len(), 2);
                buf[0] = 11;
                buf[1] = 12;
                2
            });

            assert!(!rb.is_empty());
            assert!(rb.is_full());
        }
    }
}