diff options
| author | Dario Nieuwenhuis <[email protected]> | 2023-07-28 13:23:22 +0200 |
|---|---|---|
| committer | GitHub <[email protected]> | 2023-07-28 13:23:22 +0200 |
| commit | 036e6ae30c9e772ef8ef20439f121e108b9106f0 (patch) | |
| tree | 7c654de04304274a11d44733b51c5012aeec9e3c /embassy-hal-internal/src/atomic_ring_buffer.rs | |
| parent | 0ced8400d00da30abe76438ef26224c7ed649708 (diff) | |
Rename embassy-hal-common to embassy-hal-internal, document it's for internal use only. (#1700)
Diffstat (limited to 'embassy-hal-internal/src/atomic_ring_buffer.rs')
| -rw-r--r-- | embassy-hal-internal/src/atomic_ring_buffer.rs | 556 |
1 files changed, 556 insertions, 0 deletions
diff --git a/embassy-hal-internal/src/atomic_ring_buffer.rs b/embassy-hal-internal/src/atomic_ring_buffer.rs new file mode 100644 index 000000000..ea84925c4 --- /dev/null +++ b/embassy-hal-internal/src/atomic_ring_buffer.rs | |||
| @@ -0,0 +1,556 @@ | |||
| 1 | use core::slice; | ||
| 2 | use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; | ||
| 3 | |||
/// Atomic reusable ringbuffer
///
/// This ringbuffer implementation is designed to be stored in a `static`,
/// therefore all methods take `&self` and not `&mut self`.
///
/// It is "reusable": when created it has no backing buffer, you can give it
/// one with `init` and take it back with `deinit`, and init it again in the
/// future if needed. This is very non-idiomatic, but helps a lot when storing
/// it in a `static`.
///
/// One concurrent writer and one concurrent reader are supported, even at
/// different execution priorities (like main and irq).
pub struct RingBuffer {
    /// Pointer to the backing storage. Null until `init` is called.
    pub buf: AtomicPtr<u8>,
    /// Capacity of the backing storage in bytes. 0 until `init` is called.
    pub len: AtomicUsize,

    // start and end wrap at len*2, not at len.
    // This allows distinguishing "full" and "empty".
    // full is when start+len == end (modulo len*2)
    // empty is when start == end
    //
    // This avoids having to consider the ringbuffer "full" at len-1 instead of len.
    // The usual solution is adding a "full" flag, but that can't be made atomic.
    //
    // `start` is the read index (advanced by the Reader), `end` is the write
    // index (advanced by the Writer).
    pub start: AtomicUsize,
    pub end: AtomicUsize,
}
| 30 | |||
/// Reading half of a [`RingBuffer`]. Obtained via [`RingBuffer::reader`];
/// at most one may exist at a time (see that method's safety contract).
pub struct Reader<'a>(&'a RingBuffer);
/// Writing half of a [`RingBuffer`]. Obtained via [`RingBuffer::writer`];
/// at most one may exist at a time (see that method's safety contract).
pub struct Writer<'a>(&'a RingBuffer);
| 33 | |||
| 34 | impl RingBuffer { | ||
| 35 | /// Create a new empty ringbuffer. | ||
| 36 | pub const fn new() -> Self { | ||
| 37 | Self { | ||
| 38 | buf: AtomicPtr::new(core::ptr::null_mut()), | ||
| 39 | len: AtomicUsize::new(0), | ||
| 40 | start: AtomicUsize::new(0), | ||
| 41 | end: AtomicUsize::new(0), | ||
| 42 | } | ||
| 43 | } | ||
| 44 | |||
| 45 | /// Initialize the ring buffer with a buffer. | ||
| 46 | /// | ||
| 47 | /// # Safety | ||
| 48 | /// - The buffer (`buf .. buf+len`) must be valid memory until `deinit` is called. | ||
| 49 | /// - Must not be called concurrently with any other methods. | ||
| 50 | pub unsafe fn init(&self, buf: *mut u8, len: usize) { | ||
| 51 | // Ordering: it's OK to use `Relaxed` because this is not called | ||
| 52 | // concurrently with other methods. | ||
| 53 | self.buf.store(buf, Ordering::Relaxed); | ||
| 54 | self.len.store(len, Ordering::Relaxed); | ||
| 55 | self.start.store(0, Ordering::Relaxed); | ||
| 56 | self.end.store(0, Ordering::Relaxed); | ||
| 57 | } | ||
| 58 | |||
| 59 | /// Deinitialize the ringbuffer. | ||
| 60 | /// | ||
| 61 | /// After calling this, the ringbuffer becomes empty, as if it was | ||
| 62 | /// just created with `new()`. | ||
| 63 | /// | ||
| 64 | /// # Safety | ||
| 65 | /// - Must not be called concurrently with any other methods. | ||
| 66 | pub unsafe fn deinit(&self) { | ||
| 67 | // Ordering: it's OK to use `Relaxed` because this is not called | ||
| 68 | // concurrently with other methods. | ||
| 69 | self.len.store(0, Ordering::Relaxed); | ||
| 70 | self.start.store(0, Ordering::Relaxed); | ||
| 71 | self.end.store(0, Ordering::Relaxed); | ||
| 72 | } | ||
| 73 | |||
| 74 | /// Create a reader. | ||
| 75 | /// | ||
| 76 | /// # Safety | ||
| 77 | /// | ||
| 78 | /// Only one reader can exist at a time. | ||
| 79 | pub unsafe fn reader(&self) -> Reader<'_> { | ||
| 80 | Reader(self) | ||
| 81 | } | ||
| 82 | |||
| 83 | /// Create a writer. | ||
| 84 | /// | ||
| 85 | /// # Safety | ||
| 86 | /// | ||
| 87 | /// Only one writer can exist at a time. | ||
| 88 | pub unsafe fn writer(&self) -> Writer<'_> { | ||
| 89 | Writer(self) | ||
| 90 | } | ||
| 91 | |||
| 92 | pub fn len(&self) -> usize { | ||
| 93 | self.len.load(Ordering::Relaxed) | ||
| 94 | } | ||
| 95 | |||
| 96 | pub fn is_full(&self) -> bool { | ||
| 97 | let len = self.len.load(Ordering::Relaxed); | ||
| 98 | let start = self.start.load(Ordering::Relaxed); | ||
| 99 | let end = self.end.load(Ordering::Relaxed); | ||
| 100 | |||
| 101 | self.wrap(start + len) == end | ||
| 102 | } | ||
| 103 | |||
| 104 | pub fn is_empty(&self) -> bool { | ||
| 105 | let start = self.start.load(Ordering::Relaxed); | ||
| 106 | let end = self.end.load(Ordering::Relaxed); | ||
| 107 | |||
| 108 | start == end | ||
| 109 | } | ||
| 110 | |||
| 111 | fn wrap(&self, mut n: usize) -> usize { | ||
| 112 | let len = self.len.load(Ordering::Relaxed); | ||
| 113 | |||
| 114 | if n >= len * 2 { | ||
| 115 | n -= len * 2 | ||
| 116 | } | ||
| 117 | n | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
impl<'a> Writer<'a> {
    /// Push data into the buffer in-place.
    ///
    /// The closure `f` is called with a free part of the buffer, it must write
    /// some data to it and return the amount of bytes written.
    pub fn push(&mut self, f: impl FnOnce(&mut [u8]) -> usize) -> usize {
        let (p, n) = self.push_buf();
        // SAFETY: `push_buf` returns a pointer/length pair inside the backing
        // buffer covering only the free region, which the reader does not
        // access until `push_done` publishes it.
        let buf = unsafe { slice::from_raw_parts_mut(p, n) };
        let n = f(buf);
        self.push_done(n);
        n
    }

    /// Push one data byte.
    ///
    /// Returns true if pushed successfully.
    pub fn push_one(&mut self, val: u8) -> bool {
        let n = self.push(|f| match f {
            // Buffer full: nothing written.
            [] => 0,
            [x, ..] => {
                *x = val;
                1
            }
        });
        n != 0
    }

    /// Get a buffer where data can be pushed to.
    ///
    /// Equivalent to [`Self::push_buf`] but returns a slice.
    pub fn push_slice(&mut self) -> &mut [u8] {
        let (data, len) = self.push_buf();
        // SAFETY: same contract as in `push` — the region is free and private
        // to the writer until `push_done` is called.
        unsafe { slice::from_raw_parts_mut(data, len) }
    }

    /// Get up to two buffers where data can be pushed to.
    ///
    /// Equivalent to [`Self::push_bufs`] but returns slices.
    pub fn push_slices(&mut self) -> [&mut [u8]; 2] {
        let [(d0, l0), (d1, l1)] = self.push_bufs();
        // SAFETY: `push_bufs` returns two disjoint free regions of the
        // backing buffer, so the two mutable slices never alias.
        unsafe { [slice::from_raw_parts_mut(d0, l0), slice::from_raw_parts_mut(d1, l1)] }
    }

    /// Get a buffer where data can be pushed to.
    ///
    /// Write data to the start of the buffer, then call `push_done` with
    /// however many bytes you've pushed.
    ///
    /// The buffer is suitable to DMA to.
    ///
    /// If the ringbuf is full, size=0 will be returned.
    ///
    /// The buffer stays valid as long as no other `Writer` method is called
    /// and `init`/`deinit` aren't called on the ringbuf.
    pub fn push_buf(&mut self) -> (*mut u8, usize) {
        // Ordering: popping writes `start` last, so we read `start` first.
        // Read it with Acquire ordering, so that the next accesses can't be reordered up past it.
        let mut start = self.0.start.load(Ordering::Acquire);
        let buf = self.0.buf.load(Ordering::Relaxed);
        let len = self.0.len.load(Ordering::Relaxed);
        let mut end = self.0.end.load(Ordering::Relaxed);

        // Capture emptiness before reducing the indices: start/end live in
        // 0..len*2 exactly so that "empty" and "full" are distinguishable.
        let empty = start == end;

        // Reduce both indices into 0..len (actual buffer offsets).
        if start >= len {
            start -= len
        }
        if end >= len {
            end -= len
        }

        if start == end && !empty {
            // full
            return (buf, 0);
        }
        // Free contiguous region: up to the read index if it's ahead of us,
        // otherwise up to the physical end of the buffer.
        let n = if start > end { start - end } else { len - end };

        trace!(" ringbuf: push_buf {:?}..{:?}", end, end + n);
        (unsafe { buf.add(end) }, n)
    }

    /// Get up to two buffers where data can be pushed to.
    ///
    /// Write data starting at the beginning of the first buffer, then call
    /// `push_done` with however many bytes you've pushed.
    ///
    /// The buffers are suitable to DMA to.
    ///
    /// If the ringbuf is full, both buffers will be zero length.
    /// If there is only one free area available, the second buffer will be zero length.
    ///
    /// The buffer stays valid as long as no other `Writer` method is called
    /// and `init`/`deinit` aren't called on the ringbuf.
    pub fn push_bufs(&mut self) -> [(*mut u8, usize); 2] {
        // Ordering: as per push_buf()
        let mut start = self.0.start.load(Ordering::Acquire);
        let buf = self.0.buf.load(Ordering::Relaxed);
        let len = self.0.len.load(Ordering::Relaxed);
        let mut end = self.0.end.load(Ordering::Relaxed);

        let empty = start == end;

        if start >= len {
            start -= len
        }
        if end >= len {
            end -= len
        }

        if start == end && !empty {
            // full
            return [(buf, 0), (buf, 0)];
        }
        // First region: from `end` either to `start` or to the physical end.
        // Second region: the wrapped-around part at the buffer's beginning,
        // present only when the free space wraps (start <= end).
        let n0 = if start > end { start - end } else { len - end };
        let n1 = if start <= end { start } else { 0 };

        trace!(" ringbuf: push_bufs [{:?}..{:?}, {:?}..{:?}]", end, end + n0, 0, n1);
        [(unsafe { buf.add(end) }, n0), (buf, n1)]
    }

    /// Mark `n` bytes (written at the start of the buffer returned by
    /// `push_buf`/`push_bufs`) as pushed, making them visible to the reader.
    pub fn push_done(&mut self, n: usize) {
        trace!(" ringbuf: push {:?}", n);
        let end = self.0.end.load(Ordering::Relaxed);

        // Ordering: write `end` last, with Release ordering.
        // The ordering ensures no preceding memory accesses (such as writing
        // the actual data in the buffer) can be reordered down past it, which
        // will guarantee the reader sees them after reading from `end`.
        self.0.end.store(self.0.wrap(end + n), Ordering::Release);
    }
}
| 252 | |||
impl<'a> Reader<'a> {
    /// Pop data from the buffer in-place.
    ///
    /// The closure `f` is called with the next data, it must process
    /// some data from it and return the amount of bytes processed.
    pub fn pop(&mut self, f: impl FnOnce(&[u8]) -> usize) -> usize {
        let (p, n) = self.pop_buf();
        // SAFETY: `pop_buf` returns a pointer/length pair inside the backing
        // buffer covering only data already published by the writer.
        let buf = unsafe { slice::from_raw_parts(p, n) };
        let n = f(buf);
        self.pop_done(n);
        n
    }

    /// Pop one data byte.
    ///
    /// Returns `Some(byte)` if popped successfully, `None` if the
    /// ringbuffer was empty.
    pub fn pop_one(&mut self) -> Option<u8> {
        let mut res = None;
        self.pop(|f| match f {
            // Buffer empty: nothing consumed.
            &[] => 0,
            &[x, ..] => {
                res = Some(x);
                1
            }
        });
        res
    }

    /// Get a buffer where data can be popped from.
    ///
    /// Equivalent to [`Self::pop_buf`] but returns a slice.
    pub fn pop_slice(&mut self) -> &mut [u8] {
        let (data, len) = self.pop_buf();
        // SAFETY: the region contains published data that the writer will not
        // overwrite until `pop_done` frees it; the slice is handed out
        // mutably so callers may consume/scrub it in place.
        unsafe { slice::from_raw_parts_mut(data, len) }
    }

    /// Get a buffer where data can be popped from.
    ///
    /// Read data from the start of the buffer, then call `pop_done` with
    /// however many bytes you've processed.
    ///
    /// The buffer is suitable to DMA from.
    ///
    /// If the ringbuf is empty, size=0 will be returned.
    ///
    /// The buffer stays valid as long as no other `Reader` method is called
    /// and `init`/`deinit` aren't called on the ringbuf.
    pub fn pop_buf(&mut self) -> (*mut u8, usize) {
        // Ordering: pushing writes `end` last, so we read `end` first.
        // Read it with Acquire ordering, so that the next accesses can't be reordered up past it.
        // This is needed to guarantee we "see" the data written by the writer.
        let mut end = self.0.end.load(Ordering::Acquire);
        let buf = self.0.buf.load(Ordering::Relaxed);
        let len = self.0.len.load(Ordering::Relaxed);
        let mut start = self.0.start.load(Ordering::Relaxed);

        if start == end {
            // Empty. (Checked before reducing the indices, since start/end
            // wrap at len*2 precisely to make this test unambiguous.)
            return (buf, 0);
        }

        // Reduce both indices into 0..len (actual buffer offsets).
        if start >= len {
            start -= len
        }
        if end >= len {
            end -= len
        }

        // Readable contiguous region: up to the write index if it's ahead,
        // otherwise up to the physical end of the buffer.
        let n = if end > start { end - start } else { len - start };

        trace!(" ringbuf: pop_buf {:?}..{:?}", start, start + n);
        (unsafe { buf.add(start) }, n)
    }

    /// Mark `n` bytes (from the start of the buffer returned by `pop_buf`)
    /// as consumed, freeing the space for the writer.
    pub fn pop_done(&mut self, n: usize) {
        trace!(" ringbuf: pop {:?}", n);

        let start = self.0.start.load(Ordering::Relaxed);

        // Ordering: write `start` last, with Release ordering.
        // The ordering ensures no preceding memory accesses (such as reading
        // the actual data) can be reordered down past it. This is necessary
        // because writing to `start` is effectively freeing the read part of the
        // buffer, which "gives permission" to the writer to write to it again.
        // Therefore, all buffer accesses must be completed before this.
        self.0.start.store(self.0.wrap(start + n), Ordering::Release);
    }
}
| 340 | |||
#[cfg(test)]
mod tests {
    use super::*;

    /// Exercises the basic push/pop cycle: filling to capacity, zero-length
    /// pushes/pops at the full/empty boundaries, and partial consumption.
    #[test]
    fn push_pop() {
        let mut b = [0; 4];
        let rb = RingBuffer::new();
        unsafe {
            rb.init(b.as_mut_ptr(), 4);

            assert_eq!(rb.is_empty(), true);
            assert_eq!(rb.is_full(), false);

            rb.writer().push(|buf| {
                assert_eq!(4, buf.len());
                buf[0] = 1;
                buf[1] = 2;
                buf[2] = 3;
                buf[3] = 4;
                4
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), true);

            rb.writer().push(|buf| {
                // If it's full, we can push 0 bytes.
                assert_eq!(0, buf.len());
                0
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), true);

            rb.reader().pop(|buf| {
                assert_eq!(4, buf.len());
                assert_eq!(1, buf[0]);
                1
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), false);

            // Consuming 0 bytes leaves the contents untouched.
            rb.reader().pop(|buf| {
                assert_eq!(3, buf.len());
                0
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), false);

            rb.reader().pop(|buf| {
                assert_eq!(3, buf.len());
                assert_eq!(2, buf[0]);
                assert_eq!(3, buf[1]);
                2
            });
            rb.reader().pop(|buf| {
                assert_eq!(1, buf.len());
                assert_eq!(4, buf[0]);
                1
            });

            assert_eq!(rb.is_empty(), true);
            assert_eq!(rb.is_full(), false);

            // Popping from an empty ringbuf yields a zero-length buffer.
            rb.reader().pop(|buf| {
                assert_eq!(0, buf.len());
                0
            });

            rb.writer().push(|buf| {
                assert_eq!(4, buf.len());
                buf[0] = 10;
                1
            });

            rb.writer().push(|buf| {
                assert_eq!(3, buf.len());
                buf[0] = 11;
                buf[1] = 12;
                2
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), false);

            rb.writer().push(|buf| {
                assert_eq!(1, buf.len());
                buf[0] = 13;
                1
            });

            assert_eq!(rb.is_empty(), false);
            assert_eq!(rb.is_full(), true);
        }
    }

    /// An uninitialized (len=0) ringbuffer reports both empty and full, and
    /// hands out zero-length buffers to both the writer and the reader.
    ///
    /// NOTE(review): with no `init`, `buf` is the null pointer, and the
    /// push/pop paths call `slice::from_raw_parts(_mut)` with it. The docs
    /// for those functions require a non-null pointer even for length 0 —
    /// worth confirming/fixing (e.g. via `NonNull::dangling`).
    #[test]
    fn zero_len() {
        let rb = RingBuffer::new();
        unsafe {
            assert_eq!(rb.is_empty(), true);
            assert_eq!(rb.is_full(), true);

            rb.writer().push(|buf| {
                assert_eq!(0, buf.len());
                0
            });

            rb.reader().pop(|buf| {
                assert_eq!(0, buf.len());
                0
            });
        }
    }

    /// Exercises `push_slices`, in particular the case where the free space
    /// wraps around and is returned as two separate slices.
    #[test]
    fn push_slices() {
        let mut b = [0; 4];
        let rb = RingBuffer::new();
        unsafe {
            rb.init(b.as_mut_ptr(), 4);

            /* push 3 -> [1 2 3 x] */
            let mut w = rb.writer();
            let ps = w.push_slices();
            assert_eq!(4, ps[0].len());
            assert_eq!(0, ps[1].len());
            ps[0][0] = 1;
            ps[0][1] = 2;
            ps[0][2] = 3;
            w.push_done(3);
            drop(w);

            /* pop 2 -> [x x 3 x] */
            rb.reader().pop(|buf| {
                assert_eq!(3, buf.len());
                assert_eq!(1, buf[0]);
                assert_eq!(2, buf[1]);
                assert_eq!(3, buf[2]);
                2
            });

            /* push 3 -> [5 6 3 4] */
            // Free space now wraps: one byte at the end, two at the start.
            let mut w = rb.writer();
            let ps = w.push_slices();
            assert_eq!(1, ps[0].len());
            assert_eq!(2, ps[1].len());
            ps[0][0] = 4;
            ps[1][0] = 5;
            ps[1][1] = 6;
            w.push_done(3);
            drop(w);

            /* buf is now full */
            let mut w = rb.writer();
            let ps = w.push_slices();
            assert_eq!(0, ps[0].len());
            assert_eq!(0, ps[1].len());

            /* pop 2 -> [5 6 x x] */
            rb.reader().pop(|buf| {
                assert_eq!(2, buf.len());
                assert_eq!(3, buf[0]);
                assert_eq!(4, buf[1]);
                2
            });

            /* should now have one push slice again */
            let mut w = rb.writer();
            let ps = w.push_slices();
            assert_eq!(2, ps[0].len());
            assert_eq!(0, ps[1].len());
            drop(w);

            /* pop 2 -> [x x x x] */
            rb.reader().pop(|buf| {
                assert_eq!(2, buf.len());
                assert_eq!(5, buf[0]);
                assert_eq!(6, buf[1]);
                2
            });

            /* should now have two push slices */
            let mut w = rb.writer();
            let ps = w.push_slices();
            assert_eq!(2, ps[0].len());
            assert_eq!(2, ps[1].len());
            drop(w);

            /* make sure we exercise all wrap around cases properly */
            // Pushing/popping 1 byte 10 times in a 4-byte buffer walks the
            // indices through every wrap position of the len*2 index space.
            for _ in 0..10 {
                /* should be empty, push 1 */
                let mut w = rb.writer();
                let ps = w.push_slices();
                assert_eq!(4, ps[0].len() + ps[1].len());
                w.push_done(1);
                drop(w);

                /* should have 1 element */
                let mut w = rb.writer();
                let ps = w.push_slices();
                assert_eq!(3, ps[0].len() + ps[1].len());
                drop(w);

                /* pop 1 */
                rb.reader().pop(|buf| {
                    assert_eq!(1, buf.len());
                    1
                });
            }
        }
    }
}
