diff options
| author | Dario Nieuwenhuis <[email protected]> | 2023-11-14 22:32:48 +0100 |
|---|---|---|
| committer | Dario Nieuwenhuis <[email protected]> | 2023-11-15 18:43:27 +0100 |
| commit | bef9b7a8539c3dddb1cf6ab46db161f1ca56b1a1 (patch) | |
| tree | 6d15736eec0029c13093bee120bd2189aa9537ac /embassy-executor/src/raw/run_queue.rs | |
| parent | 50a983fd9b8f10fa5153757593e9f8cfccc902ac (diff) | |
executor: remove atomic-polyfill.
Diffstat (limited to 'embassy-executor/src/raw/run_queue.rs')
| -rw-r--r-- | embassy-executor/src/raw/run_queue.rs | 88 |
1 file changed, 0 insertions, 88 deletions
diff --git a/embassy-executor/src/raw/run_queue.rs b/embassy-executor/src/raw/run_queue.rs deleted file mode 100644 index f1ec19ac1..000000000 --- a/embassy-executor/src/raw/run_queue.rs +++ /dev/null | |||
| @@ -1,88 +0,0 @@ | |||
| 1 | use core::ptr; | ||
| 2 | use core::ptr::NonNull; | ||
| 3 | |||
| 4 | use atomic_polyfill::{AtomicPtr, Ordering}; | ||
| 5 | |||
| 6 | use super::{TaskHeader, TaskRef}; | ||
| 7 | use crate::raw::util::SyncUnsafeCell; | ||
| 8 | |||
/// Per-task intrusive link for `RunQueue`.
///
/// Each task header embeds one of these (`run_queue_item`); it stores the
/// pointer to the next task in the queue's singly-linked batch.
pub(crate) struct RunQueueItem {
    // `None` while the task is not enqueued. Only mutated by the queue
    // protocol: written during `enqueue`, read while draining a batch, so
    // there are no concurrent accesses (see the SAFETY comments at the
    // access sites in `RunQueue`).
    next: SyncUnsafeCell<Option<TaskRef>>,
}
| 12 | |||
| 13 | impl RunQueueItem { | ||
| 14 | pub const fn new() -> Self { | ||
| 15 | Self { | ||
| 16 | next: SyncUnsafeCell::new(None), | ||
| 17 | } | ||
| 18 | } | ||
| 19 | } | ||
| 20 | |||
/// Atomic task queue using a very, very simple lock-free linked-list queue:
///
/// To enqueue a task, task.next is set to the old head, and head is atomically set to task.
///
/// Dequeuing is done in batches: the queue is emptied by atomically replacing head with
/// null. Then the batch is iterated following the next pointers until null is reached.
///
/// Note that batches will be iterated in the reverse order as they were enqueued. This is OK
/// for our purposes: it can't create fairness problems since the next batch won't run until the
/// current batch is completely processed, so even if a task enqueues itself instantly (for example
/// by waking its own waker) it can't prevent other tasks from running.
pub(crate) struct RunQueue {
    // Head of the intrusive singly-linked list; a null pointer means the
    // queue is empty.
    head: AtomicPtr<TaskHeader>,
}
| 35 | |||
impl RunQueue {
    /// Creates an empty queue (null head).
    pub const fn new() -> Self {
        Self {
            head: AtomicPtr::new(ptr::null_mut()),
        }
    }

    /// Enqueues an item. Returns true if the queue was empty.
    ///
    /// # Safety
    ///
    /// `task` must NOT be already enqueued in any queue.
    #[inline(always)]
    pub(crate) unsafe fn enqueue(&self, task: TaskRef) -> bool {
        let mut was_empty = false;

        self.head
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |prev| {
                // NOTE: under contention `fetch_update` may run this closure
                // more than once; `was_empty` and `next` are simply
                // re-written each attempt, so only the winning attempt's
                // values stick.
                was_empty = prev.is_null();
                unsafe {
                    // safety: the pointer is either null or valid
                    let prev = NonNull::new(prev).map(|ptr| TaskRef::from_ptr(ptr.as_ptr()));
                    // safety: there are no concurrent accesses to `next`
                    task.header().run_queue_item.next.set(prev);
                }
                Some(task.as_ptr() as *mut _)
            })
            // `fetch_update` only returns Err when the closure returns None;
            // ours always returns Some, so this can never fail.
            .ok();

        was_empty
    }

    /// Empty the queue, then call `on_task` for each task that was in the queue.
    /// NOTE: It is OK for `on_task` to enqueue more tasks. In this case they're left in the queue
    /// and will be processed by the *next* call to `dequeue_all`, *not* the current one.
    pub(crate) fn dequeue_all(&self, on_task: impl Fn(TaskRef)) {
        // Atomically empty the queue by swapping in a null head.
        let ptr = self.head.swap(ptr::null_mut(), Ordering::AcqRel);

        // safety: the pointer is either null or valid
        let mut next = unsafe { NonNull::new(ptr).map(|ptr| TaskRef::from_ptr(ptr.as_ptr())) };

        // Iterate the linked list of tasks that were previously in the queue.
        while let Some(task) = next {
            // If the task re-enqueues itself, the `next` pointer will get overwritten.
            // Therefore, first read the next pointer, and only then process the task.
            // safety: there are no concurrent accesses to `next`
            next = unsafe { task.header().run_queue_item.next.get() };

            on_task(task);
        }
    }
}
