about summary refs log tree commit diff
path: root/embassy-executor/src
diff options
context:
space:
mode:
authorJames Munns <[email protected]>2025-06-03 16:34:12 +0200
committerDario Nieuwenhuis <[email protected]>2025-09-11 14:45:06 +0200
commit7af8f35a50c631802615044e12cc9c74614f78bb (patch)
treec565eaa560b0955d979325a1bc9cd339c4e6f3ec /embassy-executor/src
parent67ed473973dae6d60aed8b88b8b854b224660e8d (diff)
There can be only one (run queue)
Diffstat (limited to 'embassy-executor/src')
-rw-r--r--embassy-executor/src/raw/deadline.rs3
-rw-r--r--embassy-executor/src/raw/mod.rs2
-rw-r--r--embassy-executor/src/raw/run_queue.rs (renamed from embassy-executor/src/raw/run_queue_atomics.rs)28
-rw-r--r--embassy-executor/src/raw/run_queue_critical_section.rs74
4 files changed, 25 insertions, 82 deletions
diff --git a/embassy-executor/src/raw/deadline.rs b/embassy-executor/src/raw/deadline.rs
index 006c7caf1..0fb22a7ce 100644
--- a/embassy-executor/src/raw/deadline.rs
+++ b/embassy-executor/src/raw/deadline.rs
@@ -1,9 +1,6 @@
1use core::future::{poll_fn, Future}; 1use core::future::{poll_fn, Future};
2use core::task::Poll; 2use core::task::Poll;
3 3
4#[cfg(not(target_has_atomic = "ptr"))]
5compile_error!("The `edf-scheduler` feature is currently only supported on targets with atomics.");
6
7/// A type for interacting with the deadline of the current task 4/// A type for interacting with the deadline of the current task
8/// 5///
9/// Requires the `edf-scheduler` feature 6/// Requires the `edf-scheduler` feature
diff --git a/embassy-executor/src/raw/mod.rs b/embassy-executor/src/raw/mod.rs
index 96e7fda74..cc43690cb 100644
--- a/embassy-executor/src/raw/mod.rs
+++ b/embassy-executor/src/raw/mod.rs
@@ -7,8 +7,6 @@
7//! Using this module requires respecting subtle safety contracts. If you can, prefer using the safe 7//! Using this module requires respecting subtle safety contracts. If you can, prefer using the safe
8//! [executor wrappers](crate::Executor) and the [`embassy_executor::task`](embassy_executor_macros::task) macro, which are fully safe. 8//! [executor wrappers](crate::Executor) and the [`embassy_executor::task`](embassy_executor_macros::task) macro, which are fully safe.
9 9
10#[cfg_attr(target_has_atomic = "ptr", path = "run_queue_atomics.rs")]
11#[cfg_attr(not(target_has_atomic = "ptr"), path = "run_queue_critical_section.rs")]
12mod run_queue; 10mod run_queue;
13 11
14#[cfg_attr(all(cortex_m, target_has_atomic = "32"), path = "state_atomics_arm.rs")] 12#[cfg_attr(all(cortex_m, target_has_atomic = "32"), path = "state_atomics_arm.rs")]
diff --git a/embassy-executor/src/raw/run_queue_atomics.rs b/embassy-executor/src/raw/run_queue.rs
index 65a9b7859..f630041e0 100644
--- a/embassy-executor/src/raw/run_queue_atomics.rs
+++ b/embassy-executor/src/raw/run_queue.rs
@@ -1,9 +1,15 @@
1use core::ptr::{addr_of_mut, NonNull}; 1use core::ptr::{addr_of_mut, NonNull};
2 2
3use cordyceps::sorted_list::Links; 3use cordyceps::sorted_list::Links;
4use cordyceps::Linked;
4#[cfg(feature = "edf-scheduler")] 5#[cfg(feature = "edf-scheduler")]
5use cordyceps::SortedList; 6use cordyceps::SortedList;
6use cordyceps::{Linked, TransferStack}; 7
8#[cfg(target_has_atomic = "ptr")]
9type TransferStack<T> = cordyceps::TransferStack<T>;
10
11#[cfg(not(target_has_atomic = "ptr"))]
12type TransferStack<T> = cordyceps::TransferStack<mutex::raw_impls::cs::CriticalSectionRawMutex, T>;
7 13
8use super::{TaskHeader, TaskRef}; 14use super::{TaskHeader, TaskRef};
9 15
@@ -77,7 +83,7 @@ impl RunQueue {
77 pub(crate) fn dequeue_all(&self, on_task: impl Fn(TaskRef)) { 83 pub(crate) fn dequeue_all(&self, on_task: impl Fn(TaskRef)) {
78 let taken = self.stack.take_all(); 84 let taken = self.stack.take_all();
79 for taskref in taken { 85 for taskref in taken {
80 taskref.header().state.run_dequeue(); 86 run_dequeue(&taskref);
81 on_task(taskref); 87 on_task(taskref);
82 } 88 }
83 } 89 }
@@ -122,8 +128,24 @@ impl RunQueue {
122 }; 128 };
123 129
124 // We got one task, mark it as dequeued, and process the task. 130 // We got one task, mark it as dequeued, and process the task.
125 taskref.header().state.run_dequeue(); 131 run_dequeue(&taskref);
126 on_task(taskref); 132 on_task(taskref);
127 } 133 }
128 } 134 }
129} 135}
136
137/// atomic state does not require a cs...
138#[cfg(target_has_atomic = "ptr")]
139#[inline(always)]
140fn run_dequeue(taskref: &TaskRef) {
141 taskref.header().state.run_dequeue();
142}
143
144/// ...while non-atomic state does
145#[cfg(not(target_has_atomic = "ptr"))]
146#[inline(always)]
147fn run_dequeue(taskref: &TaskRef) {
148 critical_section::with(|cs| {
149 taskref.header().state.run_dequeue(cs);
150 })
151}
diff --git a/embassy-executor/src/raw/run_queue_critical_section.rs b/embassy-executor/src/raw/run_queue_critical_section.rs
deleted file mode 100644
index 86c4085ed..000000000
--- a/embassy-executor/src/raw/run_queue_critical_section.rs
+++ /dev/null
@@ -1,74 +0,0 @@
1use core::cell::Cell;
2
3use critical_section::{CriticalSection, Mutex};
4
5use super::TaskRef;
6
7pub(crate) struct RunQueueItem {
8 next: Mutex<Cell<Option<TaskRef>>>,
9}
10
11impl RunQueueItem {
12 pub const fn new() -> Self {
13 Self {
14 next: Mutex::new(Cell::new(None)),
15 }
16 }
17}
18
19/// Atomic task queue using a very, very simple lock-free linked-list queue:
20///
21/// To enqueue a task, task.next is set to the old head, and head is atomically set to task.
22///
23/// Dequeuing is done in batches: the queue is emptied by atomically replacing head with
24/// null. Then the batch is iterated following the next pointers until null is reached.
25///
26/// Note that batches will be iterated in the reverse of the order in which they were enqueued. This is OK
27/// for our purposes: it can't create fairness problems since the next batch won't run until the
28/// current batch is completely processed, so even if a task enqueues itself instantly (for example
29/// by waking its own waker) it can't prevent other tasks from running.
30pub(crate) struct RunQueue {
31 head: Mutex<Cell<Option<TaskRef>>>,
32}
33
34impl RunQueue {
35 pub const fn new() -> Self {
36 Self {
37 head: Mutex::new(Cell::new(None)),
38 }
39 }
40
41 /// Enqueues an item. Returns true if the queue was empty.
42 ///
43 /// # Safety
44 ///
45 /// `item` must NOT be already enqueued in any queue.
46 #[inline(always)]
47 pub(crate) unsafe fn enqueue(&self, task: TaskRef, cs: CriticalSection<'_>) -> bool {
48 let prev = self.head.borrow(cs).replace(Some(task));
49 task.header().run_queue_item.next.borrow(cs).set(prev);
50
51 prev.is_none()
52 }
53
54 /// Empty the queue, then call `on_task` for each task that was in the queue.
55 /// NOTE: It is OK for `on_task` to enqueue more tasks. In this case they're left in the queue
56 /// and will be processed by the *next* call to `dequeue_all`, *not* the current one.
57 pub(crate) fn dequeue_all(&self, on_task: impl Fn(TaskRef)) {
58 // Atomically empty the queue.
59 let mut next = critical_section::with(|cs| self.head.borrow(cs).take());
60
61 // Iterate the linked list of tasks that were previously in the queue.
62 while let Some(task) = next {
63 // If the task re-enqueues itself, the `next` pointer will get overwritten.
64 // Therefore, first read the next pointer, and only then process the task.
65
66 critical_section::with(|cs| {
67 next = task.header().run_queue_item.next.borrow(cs).get();
68 task.header().state.run_dequeue(cs);
69 });
70
71 on_task(task);
72 }
73 }
74}