aboutsummaryrefslogtreecommitdiff
path: root/embassy-executor/src/executor/raw
diff options
context:
space:
mode:
authorDario Nieuwenhuis <[email protected]>2022-07-29 21:58:35 +0200
committerDario Nieuwenhuis <[email protected]>2022-07-29 23:40:36 +0200
commita0f1b0ee01d461607660d2d56b5b1bdc57e0d3fb (patch)
treee60fc8f8db8ec07e55d655c1a830b07f4db0b7d2 /embassy-executor/src/executor/raw
parent8745d646f0976791b7098456aa61adb983fb1c18 (diff)
Split embassy crate into embassy-executor, embassy-util.
Diffstat (limited to 'embassy-executor/src/executor/raw')
-rw-r--r--embassy-executor/src/executor/raw/mod.rs433
-rw-r--r--embassy-executor/src/executor/raw/run_queue.rs74
-rw-r--r--embassy-executor/src/executor/raw/timer_queue.rs85
-rw-r--r--embassy-executor/src/executor/raw/util.rs33
-rw-r--r--embassy-executor/src/executor/raw/waker.rs53
5 files changed, 678 insertions, 0 deletions
diff --git a/embassy-executor/src/executor/raw/mod.rs b/embassy-executor/src/executor/raw/mod.rs
new file mode 100644
index 000000000..87317bc02
--- /dev/null
+++ b/embassy-executor/src/executor/raw/mod.rs
@@ -0,0 +1,433 @@
1//! Raw executor.
2//!
3//! This module exposes "raw" Executor and Task structs for more low level control.
4//!
5//! ## WARNING: here be dragons!
6//!
7//! Using this module requires respecting subtle safety contracts. If you can, prefer using the safe
8//! executor wrappers in [`executor`](crate::executor) and the [`embassy_executor::task`](embassy_macros::task) macro, which are fully safe.
9
10mod run_queue;
11#[cfg(feature = "time")]
12mod timer_queue;
13pub(crate) mod util;
14mod waker;
15
16use core::cell::Cell;
17use core::future::Future;
18use core::pin::Pin;
19use core::ptr::NonNull;
20use core::task::{Context, Poll};
21use core::{mem, ptr};
22
23use atomic_polyfill::{AtomicU32, Ordering};
24use critical_section::CriticalSection;
25
26use self::run_queue::{RunQueue, RunQueueItem};
27use self::util::UninitCell;
28pub use self::waker::task_from_waker;
29use super::SpawnToken;
30#[cfg(feature = "time")]
31use crate::time::driver::{self, AlarmHandle};
32#[cfg(feature = "time")]
33use crate::time::Instant;
34
// Bit flags stored in `TaskHeader::state`. They are independent bits combined
// with bitwise OR; state == 0 means the task storage is free (not spawned).

/// Task is spawned (has a future)
pub(crate) const STATE_SPAWNED: u32 = 1 << 0;
/// Task is in the executor run queue
pub(crate) const STATE_RUN_QUEUED: u32 = 1 << 1;
/// Task is in the executor timer queue
#[cfg(feature = "time")]
pub(crate) const STATE_TIMER_QUEUED: u32 = 1 << 2;
42
/// Raw task header for use in task pointers.
///
/// This is an opaque struct, used for raw pointers to tasks, for use
/// with functions like [`wake_task`] and [`task_from_waker`].
pub struct TaskHeader {
    // Combination of the STATE_* bit flags above. 0 = not spawned.
    pub(crate) state: AtomicU32,
    // Intrusive linked-list node used while the task sits in the run queue.
    pub(crate) run_queue_item: RunQueueItem,
    pub(crate) executor: Cell<*const Executor>, // Valid if state != 0
    pub(crate) poll_fn: UninitCell<unsafe fn(NonNull<TaskHeader>)>, // Valid if STATE_SPAWNED

    // Earliest requested wakeup time; Instant::MAX means "no timer pending".
    #[cfg(feature = "time")]
    pub(crate) expires_at: Cell<Instant>,
    // Intrusive linked-list node used while the task sits in the timer queue.
    #[cfg(feature = "time")]
    pub(crate) timer_queue_item: timer_queue::TimerQueueItem,
}
58
impl TaskHeader {
    /// Create a header in the not-spawned state (state == 0, no executor).
    ///
    /// `const` so it can be used to initialize `static` task storage.
    pub(crate) const fn new() -> Self {
        Self {
            state: AtomicU32::new(0),
            run_queue_item: RunQueueItem::new(),
            executor: Cell::new(ptr::null()),
            poll_fn: UninitCell::uninit(),

            #[cfg(feature = "time")]
            expires_at: Cell::new(Instant::from_ticks(0)),
            #[cfg(feature = "time")]
            timer_queue_item: timer_queue::TimerQueueItem::new(),
        }
    }

    /// Wake path: mark this task as run-queued and push it onto its executor's
    /// run queue, unless it is already queued or not spawned.
    ///
    /// # Safety
    /// The task's `executor` pointer must be valid if the task is spawned
    /// (it is set in `Executor::spawn` before the task is first enqueued).
    pub(crate) unsafe fn enqueue(&self) {
        critical_section::with(|cs| {
            // NOTE(review): Relaxed load/store appear to rely on the enclosing
            // critical section for mutual exclusion — confirm against the
            // run-queue's concurrency model.
            let state = self.state.load(Ordering::Relaxed);

            // If already scheduled, or if not started,
            if (state & STATE_RUN_QUEUED != 0) || (state & STATE_SPAWNED == 0) {
                return;
            }

            // Mark it as scheduled
            self.state.store(state | STATE_RUN_QUEUED, Ordering::Relaxed);

            // We have just marked the task as scheduled, so enqueue it.
            let executor = &*self.executor.get();
            executor.enqueue(cs, self as *const TaskHeader as *mut TaskHeader);
        })
    }
}
92
/// Raw storage in which a task can be spawned.
///
/// This struct holds the necessary memory to spawn one task whose future is `F`.
/// At a given time, the `TaskStorage` may be in spawned or not-spawned state. You
/// may spawn it with [`TaskStorage::spawn()`], which will fail if it is already spawned.
///
/// A `TaskStorage` must live forever, it may not be deallocated even after the task has finished
/// running. Hence the relevant methods require `&'static self`. It may be reused, however.
///
/// Internally, the [embassy_executor::task](embassy_macros::task) macro allocates an array of `TaskStorage`s
/// in a `static`. The most common reason to use the raw `Task` is to have control of where
/// the memory for the task is allocated: on the stack, or on the heap with e.g. `Box::leak`, etc.

// repr(C) is needed to guarantee that the Task is located at offset 0
// This makes it safe to cast between TaskHeader and TaskStorage pointers.
#[repr(C)]
pub struct TaskStorage<F: Future + 'static> {
    // Must stay the first field so `&raw as *const TaskHeader` round-trips
    // back to the full TaskStorage (see `poll` below).
    raw: TaskHeader,
    future: UninitCell<F>, // Valid if STATE_SPAWNED
}
113
impl<F: Future + 'static> TaskStorage<F> {
    // `const` item (not a `const fn` call site) so it can seed array
    // initializers like `[TaskStorage::NEW; N]` in `TaskPool`.
    const NEW: Self = Self::new();

    /// Create a new TaskStorage, in not-spawned state.
    pub const fn new() -> Self {
        Self {
            raw: TaskHeader::new(),
            future: UninitCell::uninit(),
        }
    }

    /// Try to spawn the task.
    ///
    /// The `future` closure constructs the future. It's only called if spawning is
    /// actually possible. It is a closure instead of a simple `future: F` param to ensure
    /// the future is constructed in-place, avoiding a temporary copy in the stack thanks to
    /// NRVO optimizations.
    ///
    /// This function will fail if the task is already spawned and has not finished running.
    /// In this case, the error is delayed: a "poisoned" SpawnToken is returned, which will
    /// cause [`Spawner::spawn()`](super::Spawner::spawn) to return the error.
    ///
    /// Once the task has finished running, you may spawn it again. It is allowed to spawn it
    /// on a different executor.
    pub fn spawn(&'static self, future: impl FnOnce() -> F) -> SpawnToken<impl Sized> {
        if self.spawn_mark_used() {
            return unsafe { SpawnToken::<F>::new(self.spawn_initialize(future)) };
        }

        SpawnToken::<F>::new_failed()
    }

    /// Try to atomically claim this storage: succeeds only when state is 0
    /// (free), transitioning it to SPAWNED|RUN_QUEUED in one step. The winner
    /// gains exclusive rights to initialize `poll_fn` and `future`.
    fn spawn_mark_used(&'static self) -> bool {
        let state = STATE_SPAWNED | STATE_RUN_QUEUED;
        self.raw
            .state
            .compare_exchange(0, state, Ordering::AcqRel, Ordering::Acquire)
            .is_ok()
    }

    /// Initialize the claimed storage and return a pointer to its header.
    ///
    /// # Safety
    /// Must only be called after winning `spawn_mark_used` for this storage.
    unsafe fn spawn_initialize(&'static self, future: impl FnOnce() -> F) -> NonNull<TaskHeader> {
        // Initialize the task
        self.raw.poll_fn.write(Self::poll);
        self.future.write(future());
        NonNull::new_unchecked(&self.raw as *const TaskHeader as *mut TaskHeader)
    }

    /// The type-erased poll function stored in `TaskHeader::poll_fn`.
    ///
    /// # Safety
    /// `p` must point at the `raw` header of a spawned `TaskStorage<F>`
    /// (guaranteed by repr(C) putting the header at offset 0).
    unsafe fn poll(p: NonNull<TaskHeader>) {
        let this = &*(p.as_ptr() as *const TaskStorage<F>);

        let future = Pin::new_unchecked(this.future.as_mut());
        let waker = waker::from_task(p);
        let mut cx = Context::from_waker(&waker);
        match future.poll(&mut cx) {
            Poll::Ready(_) => {
                // Task finished: drop the future and release the storage by
                // clearing SPAWNED. The storage may now be spawned again.
                this.future.drop_in_place();
                this.raw.state.fetch_and(!STATE_SPAWNED, Ordering::AcqRel);
            }
            Poll::Pending => {}
        }

        // the compiler is emitting a virtual call for waker drop, but we know
        // it's a noop for our waker.
        mem::forget(waker);
    }
}
180
// SAFETY(review): `TaskStorage` is shared between the spawner and the executor,
// but `future`/`poll_fn` are only accessed after winning the 0 -> SPAWNED
// compare-exchange in `spawn_mark_used`, or by the executor owning the spawned
// task. Note `F` itself need not be `Sync`; soundness relies on that state
// protocol — confirm against the executor invariants.
unsafe impl<F: Future + 'static> Sync for TaskStorage<F> {}
182
/// Raw storage that can hold up to N tasks of the same type.
///
/// This is essentially a `[TaskStorage<F>; N]`.
pub struct TaskPool<F: Future + 'static, const N: usize> {
    // N fixed-size slots; spawning scans for the first free one.
    pool: [TaskStorage<F>; N],
}
189
impl<F: Future + 'static, const N: usize> TaskPool<F, N> {
    /// Create a new TaskPool, with all tasks in non-spawned state.
    pub const fn new() -> Self {
        Self {
            pool: [TaskStorage::NEW; N],
        }
    }

    /// Try to spawn a task in the pool.
    ///
    /// See [`TaskStorage::spawn()`] for details.
    ///
    /// This will loop over the pool and spawn the task in the first storage that
    /// is currently free. If none is free, a "poisoned" SpawnToken is returned,
    /// which will cause [`Spawner::spawn()`](super::Spawner::spawn) to return the error.
    pub fn spawn(&'static self, future: impl FnOnce() -> F) -> SpawnToken<impl Sized> {
        // `spawn_mark_used` atomically claims a slot, so concurrent spawners
        // can never both win the same storage.
        for task in &self.pool {
            if task.spawn_mark_used() {
                return unsafe { SpawnToken::<F>::new(task.spawn_initialize(future)) };
            }
        }

        SpawnToken::<F>::new_failed()
    }

    /// Like spawn(), but allows the task to be send-spawned if the args are Send even if
    /// the future is !Send.
    ///
    /// Not covered by semver guarantees. DO NOT call this directly. Intended to be used
    /// by the Embassy macros ONLY.
    ///
    /// SAFETY: `future` must be a closure of the form `move || my_async_fn(args)`, where `my_async_fn`
    /// is an `async fn`, NOT a hand-written `Future`.
    #[doc(hidden)]
    pub unsafe fn _spawn_async_fn<FutFn>(&'static self, future: FutFn) -> SpawnToken<impl Sized>
    where
        FutFn: FnOnce() -> F,
    {
        // When send-spawning a task, we construct the future in this thread, and effectively
        // "send" it to the executor thread by enqueuing it in its queue. Therefore, in theory,
        // send-spawning should require the future `F` to be `Send`.
        //
        // The problem is this is more restrictive than needed. Once the future is executing,
        // it is never sent to another thread. It is only sent when spawning. It should be
        // enough for the task's arguments to be Send. (and in practice it's super easy to
        // accidentally make your futures !Send, for example by holding an `Rc` or a `&RefCell` across an `.await`.)
        //
        // We can do it by sending the task args and constructing the future in the executor thread
        // on first poll. However, this cannot be done in-place, so it'll waste stack space for a copy
        // of the args.
        //
        // Luckily, an `async fn` future contains just the args when freshly constructed. So, if the
        // args are Send, it's OK to send a !Send future, as long as we do it before first polling it.
        //
        // (Note: this is how the generators are implemented today, it's not officially guaranteed yet,
        // but it's possible it'll be guaranteed in the future. See zulip thread:
        // https://rust-lang.zulipchat.com/#narrow/stream/187312-wg-async/topic/.22only.20before.20poll.22.20Send.20futures )
        //
        // The `FutFn` captures all the args, so if it's Send, the task can be send-spawned.
        // This is why we return `SpawnToken<FutFn>` below.
        //
        // This ONLY holds for `async fn` futures. The other `spawn` methods can be called directly
        // by the user, with arbitrary hand-implemented futures. This is why these return `SpawnToken<F>`.

        for task in &self.pool {
            if task.spawn_mark_used() {
                return SpawnToken::<FutFn>::new(task.spawn_initialize(future));
            }
        }

        SpawnToken::<FutFn>::new_failed()
    }
}
263
/// Raw executor.
///
/// This is the core of the Embassy executor. It is low-level, requiring manual
/// handling of wakeups and task polling. If you can, prefer using one of the
/// higher level executors in [`crate::executor`].
///
/// The raw executor leaves it up to you to handle wakeups and scheduling:
///
/// - To get the executor to do work, call `poll()`. This will poll all queued tasks (all tasks
///   that "want to run").
/// - You must supply a `signal_fn`. The executor will call it to notify you it has work
///   to do. You must arrange for `poll()` to be called as soon as possible.
///
/// `signal_fn` can be called from *any* context: any thread, any interrupt priority
/// level, etc. It may be called synchronously from any `Executor` method call as well.
/// You must deal with this correctly.
///
/// In particular, you must NOT call `poll` directly from `signal_fn`, as this violates
/// the requirement for `poll` to not be called reentrantly.
pub struct Executor {
    // Lock-free queue of tasks waiting to be polled.
    run_queue: RunQueue,
    // User-supplied "work available" callback and its opaque context pointer.
    signal_fn: fn(*mut ()),
    signal_ctx: *mut (),

    // Tasks waiting on a timer; drained into the run queue by `poll`.
    #[cfg(feature = "time")]
    pub(crate) timer_queue: timer_queue::TimerQueue,
    // Hardware alarm used to get signaled at the next timer expiration.
    #[cfg(feature = "time")]
    alarm: AlarmHandle,
}
293
impl Executor {
    /// Create a new executor.
    ///
    /// When the executor has work to do, it will call `signal_fn` with
    /// `signal_ctx` as argument.
    ///
    /// See [`Executor`] docs for details on `signal_fn`.
    pub fn new(signal_fn: fn(*mut ()), signal_ctx: *mut ()) -> Self {
        // NOTE(review): `unwrap!` panics if the time driver has no free alarms,
        // so executor creation fails loudly rather than silently losing timers.
        #[cfg(feature = "time")]
        let alarm = unsafe { unwrap!(driver::allocate_alarm()) };
        // Route alarm firings into the same signal mechanism as wakeups, so the
        // user's `poll()` call services both.
        #[cfg(feature = "time")]
        driver::set_alarm_callback(alarm, signal_fn, signal_ctx);

        Self {
            run_queue: RunQueue::new(),
            signal_fn,
            signal_ctx,

            #[cfg(feature = "time")]
            timer_queue: timer_queue::TimerQueue::new(),
            #[cfg(feature = "time")]
            alarm,
        }
    }

    /// Enqueue a task in the task queue
    ///
    /// # Safety
    /// - `task` must be a valid pointer to a spawned task.
    /// - `task` must be set up to run in this executor.
    /// - `task` must NOT be already enqueued (in this executor or another one).
    #[inline(always)]
    unsafe fn enqueue(&self, cs: CriticalSection, task: *mut TaskHeader) {
        // Only signal when the queue transitions empty -> non-empty; a
        // non-empty queue means a signal is already pending.
        if self.run_queue.enqueue(cs, task) {
            (self.signal_fn)(self.signal_ctx)
        }
    }

    /// Spawn a task in this executor.
    ///
    /// # Safety
    ///
    /// `task` must be a valid pointer to an initialized but not-already-spawned task.
    ///
    /// It is OK to use `unsafe` to call this from a thread that's not the executor thread.
    /// In this case, the task's Future must be Send. This is because this is effectively
    /// sending the task to the executor thread.
    pub(super) unsafe fn spawn(&'static self, task: NonNull<TaskHeader>) {
        let task = task.as_ref();
        // Record ownership before enqueuing, so wakeups know where to go.
        task.executor.set(self);

        critical_section::with(|cs| {
            self.enqueue(cs, task as *const _ as _);
        })
    }

    /// Poll all queued tasks in this executor.
    ///
    /// This loops over all tasks that are queued to be polled (i.e. they're
    /// freshly spawned or they've been woken). Other tasks are not polled.
    ///
    /// You must call `poll` after receiving a call to `signal_fn`. It is OK
    /// to call `poll` even when not requested by `signal_fn`, but it wastes
    /// energy.
    ///
    /// # Safety
    ///
    /// You must NOT call `poll` reentrantly on the same executor.
    ///
    /// In particular, note that `poll` may call `signal_fn` synchronously. Therefore, you
    /// must NOT directly call `poll()` from your `signal_fn`. Instead, `signal_fn` has to
    /// somehow schedule for `poll()` to be called later, at a time you know for sure there's
    /// no `poll()` already running.
    pub unsafe fn poll(&'static self) {
        // Move every task whose deadline has passed into the run queue.
        #[cfg(feature = "time")]
        self.timer_queue.dequeue_expired(Instant::now(), |p| {
            p.as_ref().enqueue();
        });

        self.run_queue.dequeue_all(|p| {
            let task = p.as_ref();

            // Reset the deadline; the task's poll re-registers it if needed.
            #[cfg(feature = "time")]
            task.expires_at.set(Instant::MAX);

            let state = task.state.fetch_and(!STATE_RUN_QUEUED, Ordering::AcqRel);
            if state & STATE_SPAWNED == 0 {
                // If task is not running, ignore it. This can happen in the following scenario:
                //   - Task gets dequeued, poll starts
                //   - While task is being polled, it gets woken. It gets placed in the queue.
                //   - Task poll finishes, returning done=true
                //   - RUNNING bit is cleared, but the task is already in the queue.
                return;
            }

            // Run the task
            task.poll_fn.read()(p as _);

            // Enqueue or update into timer_queue
            #[cfg(feature = "time")]
            self.timer_queue.update(p);
        });

        #[cfg(feature = "time")]
        {
            // If this is already in the past, set_alarm will immediately trigger the alarm.
            // This will cause `signal_fn` to be called, which will cause `poll()` to be called again,
            // so we immediately do another poll loop iteration.
            let next_expiration = self.timer_queue.next_expiration();
            driver::set_alarm(self.alarm, next_expiration.as_ticks());
        }
    }

    /// Get a spawner that spawns tasks in this executor.
    ///
    /// It is OK to call this method multiple times to obtain multiple
    /// `Spawner`s. You may also copy `Spawner`s.
    pub fn spawner(&'static self) -> super::Spawner {
        super::Spawner::new(self)
    }
}
415
/// Wake a task by raw pointer.
///
/// You can obtain task pointers from `Waker`s using [`task_from_waker`].
///
/// # Safety
///
/// `task` must be a valid task pointer obtained from [`task_from_waker`].
pub unsafe fn wake_task(task: NonNull<TaskHeader>) {
    // Delegates to the header's wake path; no-op if the task is already
    // queued or not spawned (see TaskHeader::enqueue).
    task.as_ref().enqueue();
}
426
/// Record that the task behind `waker` wants to be woken at `at`.
///
/// Only ever moves the deadline *earlier*: if a sooner expiration was already
/// registered on the task, it is kept. The executor's timer queue reads
/// `expires_at` after the task is polled (see `Executor::poll`).
#[cfg(feature = "time")]
pub(crate) unsafe fn register_timer(at: Instant, waker: &core::task::Waker) {
    let header_ptr = waker::task_from_waker(waker);
    let header = header_ptr.as_ref();
    let current_deadline = header.expires_at.get();
    header.expires_at.set(current_deadline.min(at));
}
diff --git a/embassy-executor/src/executor/raw/run_queue.rs b/embassy-executor/src/executor/raw/run_queue.rs
new file mode 100644
index 000000000..31615da7e
--- /dev/null
+++ b/embassy-executor/src/executor/raw/run_queue.rs
@@ -0,0 +1,74 @@
1use core::ptr;
2use core::ptr::NonNull;
3
4use atomic_polyfill::{AtomicPtr, Ordering};
5use critical_section::CriticalSection;
6
7use super::TaskHeader;
8
// Intrusive linked-list node embedded in every TaskHeader; holds the "next"
// pointer while the task is in the run queue.
pub(crate) struct RunQueueItem {
    next: AtomicPtr<TaskHeader>,
}

impl RunQueueItem {
    /// Node with no successor (null `next`), i.e. not linked into any queue.
    pub const fn new() -> Self {
        Self {
            next: AtomicPtr::new(ptr::null_mut()),
        }
    }
}
20
/// Atomic task queue using a very, very simple lock-free linked-list queue:
///
/// To enqueue a task, task.next is set to the old head, and head is atomically set to task.
///
/// Dequeuing is done in batches: the queue is emptied by atomically replacing head with
/// null. Then the batch is iterated following the next pointers until null is reached.
///
/// Note that batches will be iterated in the reverse order as they were enqueued. This is OK
/// for our purposes: it can't create fairness problems since the next batch won't run until the
/// current batch is completely processed, so even if a task enqueues itself instantly (for example
/// by waking its own waker) it can't prevent other tasks from running.
pub(crate) struct RunQueue {
    // Top of the Treiber-style stack; null when the queue is empty.
    head: AtomicPtr<TaskHeader>,
}
35
impl RunQueue {
    /// Create an empty queue.
    pub const fn new() -> Self {
        Self {
            head: AtomicPtr::new(ptr::null_mut()),
        }
    }

    /// Enqueues an item. Returns true if the queue was empty.
    ///
    /// # Safety
    ///
    /// `item` must NOT be already enqueued in any queue.
    #[inline(always)]
    pub(crate) unsafe fn enqueue(&self, _cs: CriticalSection, task: *mut TaskHeader) -> bool {
        // NOTE(review): Relaxed orderings appear to rely on the caller-held
        // critical section (`_cs`) for exclusion — confirm this matches the
        // platform's critical-section guarantees.
        let prev = self.head.load(Ordering::Relaxed);
        (*task).run_queue_item.next.store(prev, Ordering::Relaxed);
        self.head.store(task, Ordering::Relaxed);
        prev.is_null()
    }

    /// Empty the queue, then call `on_task` for each task that was in the queue.
    /// NOTE: It is OK for `on_task` to enqueue more tasks. In this case they're left in the queue
    /// and will be processed by the *next* call to `dequeue_all`, *not* the current one.
    pub(crate) fn dequeue_all(&self, on_task: impl Fn(NonNull<TaskHeader>)) {
        // Atomically empty the queue.
        let mut ptr = self.head.swap(ptr::null_mut(), Ordering::AcqRel);

        // Iterate the linked list of tasks that were previously in the queue.
        while let Some(task) = NonNull::new(ptr) {
            // If the task re-enqueues itself, the `next` pointer will get overwritten.
            // Therefore, first read the next pointer, and only then process the task.
            let next = unsafe { task.as_ref() }.run_queue_item.next.load(Ordering::Relaxed);

            on_task(task);

            ptr = next
        }
    }
}
diff --git a/embassy-executor/src/executor/raw/timer_queue.rs b/embassy-executor/src/executor/raw/timer_queue.rs
new file mode 100644
index 000000000..62fcfc531
--- /dev/null
+++ b/embassy-executor/src/executor/raw/timer_queue.rs
@@ -0,0 +1,85 @@
1use core::cell::Cell;
2use core::cmp::min;
3use core::ptr;
4use core::ptr::NonNull;
5
6use atomic_polyfill::Ordering;
7
8use super::{TaskHeader, STATE_TIMER_QUEUED};
9use crate::time::Instant;
10
// Intrusive linked-list node embedded in every TaskHeader; holds the "next"
// pointer while the task is in the timer queue. Plain Cell (not atomic):
// the timer queue is only touched from the executor's poll context.
pub(crate) struct TimerQueueItem {
    next: Cell<*mut TaskHeader>,
}

impl TimerQueueItem {
    /// Node with no successor (null `next`), i.e. not linked into any queue.
    pub const fn new() -> Self {
        Self {
            next: Cell::new(ptr::null_mut()),
        }
    }
}
22
// Unsorted intrusive singly-linked list of tasks with pending deadlines.
// Operations scan the whole list; expirations are found by comparing each
// task's `expires_at` (see `retain` below).
pub(crate) struct TimerQueue {
    head: Cell<*mut TaskHeader>,
}
26
impl TimerQueue {
    /// Create an empty queue.
    pub const fn new() -> Self {
        Self {
            head: Cell::new(ptr::null_mut()),
        }
    }

    /// Link the task into the queue if it has a pending deadline
    /// (`expires_at != MAX`) and isn't already linked.
    ///
    /// # Safety
    /// `p` must be a valid task header; must be called from the executor's
    /// poll context (the list itself is not synchronized).
    pub(crate) unsafe fn update(&self, p: NonNull<TaskHeader>) {
        let task = p.as_ref();
        if task.expires_at.get() != Instant::MAX {
            // fetch_or both sets the flag and tells us whether it was
            // already set, so a task is never linked twice.
            let old_state = task.state.fetch_or(STATE_TIMER_QUEUED, Ordering::AcqRel);
            let is_new = old_state & STATE_TIMER_QUEUED == 0;

            if is_new {
                // Push onto the front of the list.
                task.timer_queue_item.next.set(self.head.get());
                self.head.set(p.as_ptr());
            }
        }
    }

    /// Earliest deadline of any queued task, or `Instant::MAX` if none.
    ///
    /// Side effect: also prunes tasks whose deadline was reset to MAX.
    pub(crate) unsafe fn next_expiration(&self) -> Instant {
        let mut res = Instant::MAX;
        self.retain(|p| {
            let task = p.as_ref();
            let expires = task.expires_at.get();
            res = min(res, expires);
            // Keep only tasks that still have a real deadline.
            expires != Instant::MAX
        });
        res
    }

    /// Remove every task whose deadline is at or before `now`, invoking
    /// `on_task` for each removed task (used to move it to the run queue).
    pub(crate) unsafe fn dequeue_expired(&self, now: Instant, on_task: impl Fn(NonNull<TaskHeader>)) {
        self.retain(|p| {
            let task = p.as_ref();
            if task.expires_at.get() <= now {
                on_task(p);
                false
            } else {
                true
            }
        });
    }

    /// Walk the list, keeping tasks for which `f` returns true and unlinking
    /// the rest (also clearing their STATE_TIMER_QUEUED bit).
    ///
    /// `prev` points at the cell holding the current node's address (head or
    /// the previous node's `next`), allowing O(1) unlinking.
    pub(crate) unsafe fn retain(&self, mut f: impl FnMut(NonNull<TaskHeader>) -> bool) {
        let mut prev = &self.head;
        while !prev.get().is_null() {
            let p = NonNull::new_unchecked(prev.get());
            let task = &*p.as_ptr();
            if f(p) {
                // Skip to next
                prev = &task.timer_queue_item.next;
            } else {
                // Remove it
                prev.set(task.timer_queue_item.next.get());
                task.state.fetch_and(!STATE_TIMER_QUEUED, Ordering::AcqRel);
            }
        }
    }
}
diff --git a/embassy-executor/src/executor/raw/util.rs b/embassy-executor/src/executor/raw/util.rs
new file mode 100644
index 000000000..ed5822188
--- /dev/null
+++ b/embassy-executor/src/executor/raw/util.rs
@@ -0,0 +1,33 @@
1use core::cell::UnsafeCell;
2use core::mem::MaybeUninit;
3use core::ptr;
4
/// A cell holding a possibly-uninitialized `T` with interior mutability.
///
/// The cell itself performs no tracking of whether a value is present; the
/// caller must uphold that invariant, which is why every accessor is `unsafe`.
/// (In this crate, the tracking is done by the task state bits.)
pub(crate) struct UninitCell<T>(MaybeUninit<UnsafeCell<T>>);

impl<T> UninitCell<T> {
    /// Create an empty cell. No value is present until `write` is called.
    pub const fn uninit() -> Self {
        Self(MaybeUninit::uninit())
    }

    /// Raw pointer to the payload slot.
    ///
    /// # Safety
    /// The pointer may designate uninitialized memory; the caller must not
    /// read through it unless a value has been written.
    pub unsafe fn as_mut_ptr(&self) -> *mut T {
        let inner: *const UnsafeCell<T> = self.0.as_ptr();
        (*inner).get()
    }

    /// Mutable reference to the stored value.
    ///
    /// # Safety
    /// A value must currently be present, and no other reference to it may
    /// be live (the `&self` receiver does not enforce exclusivity).
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn as_mut(&self) -> &mut T {
        &mut *self.as_mut_ptr()
    }

    /// Store `val`, making the cell initialized.
    ///
    /// # Safety
    /// Any previously stored value is overwritten without being dropped.
    pub unsafe fn write(&self, val: T) {
        ptr::write(self.as_mut_ptr(), val)
    }

    /// Drop the stored value in place, returning the cell to the
    /// uninitialized state.
    ///
    /// # Safety
    /// A value must currently be present.
    pub unsafe fn drop_in_place(&self) {
        ptr::drop_in_place(self.as_mut_ptr())
    }
}

impl<T: Copy> UninitCell<T> {
    /// Copy the stored value out (only for `Copy` types, so no double-drop
    /// hazard is introduced).
    ///
    /// # Safety
    /// A value must currently be present.
    pub unsafe fn read(&self) -> T {
        ptr::read(self.as_mut_ptr())
    }
}
diff --git a/embassy-executor/src/executor/raw/waker.rs b/embassy-executor/src/executor/raw/waker.rs
new file mode 100644
index 000000000..f6ae332fa
--- /dev/null
+++ b/embassy-executor/src/executor/raw/waker.rs
@@ -0,0 +1,53 @@
1use core::mem;
2use core::ptr::NonNull;
3use core::task::{RawWaker, RawWakerVTable, Waker};
4
5use super::TaskHeader;
6
// Waker vtable for Embassy tasks. The Waker's data pointer is the raw
// `*const TaskHeader`. Note `wake` is used for both the by-value and
// by-reference slots: there is no ownership to consume, so they coincide.
const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake, drop);

// Cloning is a plain pointer copy — no reference counting is involved,
// since task storage is 'static.
unsafe fn clone(p: *const ()) -> RawWaker {
    RawWaker::new(p, &VTABLE)
}

// Waking delegates to the task header's enqueue path.
unsafe fn wake(p: *const ()) {
    (*(p as *mut TaskHeader)).enqueue()
}

// Nothing is owned by the waker, so dropping it is a no-op.
unsafe fn drop(_: *const ()) {
    // nop
}
20
/// Construct a `Waker` whose data pointer is the given task header.
///
/// # Safety
/// `p` must point to a valid, 'static task header; the returned waker may
/// outlive the current scope and be invoked from any context.
pub(crate) unsafe fn from_task(p: NonNull<TaskHeader>) -> Waker {
    Waker::from_raw(RawWaker::new(p.as_ptr() as _, &VTABLE))
}
24
/// Get a task pointer from a waker.
///
/// This can be used as an optimization in wait queues to store task pointers
/// (1 word) instead of full Wakers (2 words). This saves a bit of RAM and helps
/// avoid dynamic dispatch.
///
/// You can use the returned task pointer to wake the task with [`wake_task`](super::wake_task).
///
/// # Panics
///
/// Panics if the waker is not created by the Embassy executor.
pub fn task_from_waker(waker: &Waker) -> NonNull<TaskHeader> {
    // safety: OK because WakerHack has the same layout as Waker.
    // This is not really guaranteed because the structs are `repr(Rust)`, it is
    // indeed the case in the current implementation.
    // TODO use waker_getters when stable. https://github.com/rust-lang/rust/issues/96992
    let hack: &WakerHack = unsafe { mem::transmute(waker) };
    // Identify our wakers by vtable address: only wakers built from VTABLE
    // (via `from_task`) carry a TaskHeader in their data pointer.
    if hack.vtable != &VTABLE {
        panic!("Found waker not created by the Embassy executor. `embassy_executor::time::Timer` only works with the Embassy executor.")
    }

    // safety: we never create a waker with a null data pointer.
    unsafe { NonNull::new_unchecked(hack.data as *mut TaskHeader) }
}

// Field-for-field mirror of core::task::Waker's current (unstable) layout,
// used to read its private data/vtable pointers via transmute above.
struct WakerHack {
    data: *const (),
    vtable: &'static RawWakerVTable,
}