aboutsummaryrefslogtreecommitdiff
path: root/embassy-executor/src/raw/mod.rs
diff options
context:
space:
mode:
Diffstat (limited to 'embassy-executor/src/raw/mod.rs')
-rw-r--r--embassy-executor/src/raw/mod.rs428
1 files changed, 428 insertions, 0 deletions
diff --git a/embassy-executor/src/raw/mod.rs b/embassy-executor/src/raw/mod.rs
new file mode 100644
index 000000000..afe67decb
--- /dev/null
+++ b/embassy-executor/src/raw/mod.rs
@@ -0,0 +1,428 @@
1//! Raw executor.
2//!
3//! This module exposes "raw" Executor and Task structs for more low level control.
4//!
5//! ## WARNING: here be dragons!
6//!
7//! Using this module requires respecting subtle safety contracts. If you can, prefer using the safe
8//! executor wrappers in [`executor`](crate::executor) and the [`embassy_executor::task`](embassy_macros::task) macro, which are fully safe.
9
10mod run_queue;
11#[cfg(feature = "integrated-timers")]
12mod timer_queue;
13pub(crate) mod util;
14mod waker;
15
16use core::cell::Cell;
17use core::future::Future;
18use core::pin::Pin;
19use core::ptr::NonNull;
20use core::task::{Context, Poll};
21use core::{mem, ptr};
22
23use atomic_polyfill::{AtomicU32, Ordering};
24use critical_section::CriticalSection;
25#[cfg(feature = "integrated-timers")]
26use embassy_time::driver::{self, AlarmHandle};
27#[cfg(feature = "integrated-timers")]
28use embassy_time::Instant;
29
30use self::run_queue::{RunQueue, RunQueueItem};
31use self::util::UninitCell;
32pub use self::waker::task_from_waker;
33use super::SpawnToken;
34
// Bit flags stored in `TaskHeader::state`. A state of 0 means the task
// storage is free (no future present, may be claimed by a spawn).

/// Task is spawned (has a future)
pub(crate) const STATE_SPAWNED: u32 = 1 << 0;
/// Task is in the executor run queue
pub(crate) const STATE_RUN_QUEUED: u32 = 1 << 1;
/// Task is in the executor timer queue
#[cfg(feature = "integrated-timers")]
pub(crate) const STATE_TIMER_QUEUED: u32 = 1 << 2;
42
/// Raw task header for use in task pointers.
///
/// This is an opaque struct, used for raw pointers to tasks, for use
/// with functions like [`wake_task`] and [`task_from_waker`].
pub struct TaskHeader {
    // Bitfield of STATE_* flags. 0 means "storage free / not spawned".
    pub(crate) state: AtomicU32,
    // Intrusive node for the executor's run queue.
    pub(crate) run_queue_item: RunQueueItem,
    pub(crate) executor: Cell<*const Executor>, // Valid if state != 0
    pub(crate) poll_fn: UninitCell<unsafe fn(NonNull<TaskHeader>)>, // Valid if STATE_SPAWNED

    // Tick at which the integrated timer queue should wake this task.
    #[cfg(feature = "integrated-timers")]
    pub(crate) expires_at: Cell<Instant>,
    // Intrusive node for the executor's timer queue.
    #[cfg(feature = "integrated-timers")]
    pub(crate) timer_queue_item: timer_queue::TimerQueueItem,
}
58
59impl TaskHeader {
60 pub(crate) const fn new() -> Self {
61 Self {
62 state: AtomicU32::new(0),
63 run_queue_item: RunQueueItem::new(),
64 executor: Cell::new(ptr::null()),
65 poll_fn: UninitCell::uninit(),
66
67 #[cfg(feature = "integrated-timers")]
68 expires_at: Cell::new(Instant::from_ticks(0)),
69 #[cfg(feature = "integrated-timers")]
70 timer_queue_item: timer_queue::TimerQueueItem::new(),
71 }
72 }
73}
74
/// Raw storage in which a task can be spawned.
///
/// This struct holds the necessary memory to spawn one task whose future is `F`.
/// At a given time, the `TaskStorage` may be in spawned or not-spawned state. You
/// may spawn it with [`TaskStorage::spawn()`], which will fail if it is already spawned.
///
/// A `TaskStorage` must live forever, it may not be deallocated even after the task has finished
/// running. Hence the relevant methods require `&'static self`. It may be reused, however.
///
/// Internally, the [embassy_executor::task](embassy_macros::task) macro allocates an array of `TaskStorage`s
/// in a `static`. The most common reason to use the raw `Task` is to have control of where
/// the memory for the task is allocated: on the stack, or on the heap with e.g. `Box::leak`, etc.

// repr(C) is needed to guarantee that the Task is located at offset 0
// This makes it safe to cast between TaskHeader and TaskStorage pointers.
#[repr(C)]
pub struct TaskStorage<F: Future + 'static> {
    // Header first (offset 0, see repr(C) note above).
    raw: TaskHeader,
    future: UninitCell<F>, // Valid if STATE_SPAWNED
}
95
impl<F: Future + 'static> TaskStorage<F> {
    // Const "template" value, used by TaskPool to build `[TaskStorage<F>; N]`
    // arrays in a const context.
    const NEW: Self = Self::new();

    /// Create a new TaskStorage, in not-spawned state.
    pub const fn new() -> Self {
        Self {
            raw: TaskHeader::new(),
            future: UninitCell::uninit(),
        }
    }

    /// Try to spawn the task.
    ///
    /// The `future` closure constructs the future. It's only called if spawning is
    /// actually possible. It is a closure instead of a simple `future: F` param to ensure
    /// the future is constructed in-place, avoiding a temporary copy in the stack thanks to
    /// NRVO optimizations.
    ///
    /// This function will fail if the task is already spawned and has not finished running.
    /// In this case, the error is delayed: a "poisoned" SpawnToken is returned, which will
    /// cause [`Spawner::spawn()`](super::Spawner::spawn) to return the error.
    ///
    /// Once the task has finished running, you may spawn it again. It is allowed to spawn it
    /// on a different executor.
    pub fn spawn(&'static self, future: impl FnOnce() -> F) -> SpawnToken<impl Sized> {
        if self.spawn_mark_used() {
            // SAFETY: the CAS in spawn_mark_used succeeded, so this thread has
            // exclusive ownership of the (previously free) storage.
            return unsafe { SpawnToken::<F>::new(self.spawn_initialize(future)) };
        }

        SpawnToken::<F>::new_failed()
    }

    // Attempt to claim this storage by CAS-ing `state` from 0 (free) to
    // SPAWNED|RUN_QUEUED in one step. Returns true on success, giving the
    // caller exclusive rights to initialize `poll_fn` and `future`.
    fn spawn_mark_used(&'static self) -> bool {
        let state = STATE_SPAWNED | STATE_RUN_QUEUED;
        self.raw
            .state
            .compare_exchange(0, state, Ordering::AcqRel, Ordering::Acquire)
            .is_ok()
    }

    // Fill in the claimed storage and return a type-erased pointer to its header.
    //
    // SAFETY (caller): must hold the claim obtained from spawn_mark_used();
    // otherwise this races with a concurrent spawn or a running poll.
    unsafe fn spawn_initialize(&'static self, future: impl FnOnce() -> F) -> NonNull<TaskHeader> {
        // Initialize the task
        self.raw.poll_fn.write(Self::poll);
        self.future.write(future());
        // The cast is sound because TaskStorage is repr(C) with `raw` at offset 0.
        NonNull::new_unchecked(self as *const TaskStorage<F> as *const TaskHeader as *mut TaskHeader)
    }

    // Type-erased poll entry point stored in `TaskHeader::poll_fn`.
    //
    // SAFETY (caller): `p` must point at a spawned TaskStorage<F> of exactly
    // this `F`, and must not be polled reentrantly.
    unsafe fn poll(p: NonNull<TaskHeader>) {
        // Recover the full storage from the header pointer (repr(C), offset 0).
        let this = &*(p.as_ptr() as *const TaskStorage<F>);

        let future = Pin::new_unchecked(this.future.as_mut());
        let waker = waker::from_task(p);
        let mut cx = Context::from_waker(&waker);
        match future.poll(&mut cx) {
            Poll::Ready(_) => {
                // Task finished: destroy the future and release the storage
                // by clearing SPAWNED so it can be spawned again.
                this.future.drop_in_place();
                this.raw.state.fetch_and(!STATE_SPAWNED, Ordering::AcqRel);
            }
            Poll::Pending => {}
        }

        // the compiler is emitting a virtual call for waker drop, but we know
        // it's a noop for our waker.
        mem::forget(waker);
    }
}
162
163unsafe impl<F: Future + 'static> Sync for TaskStorage<F> {}
164
/// Raw storage that can hold up to N tasks of the same type.
///
/// This is essentially a `[TaskStorage<F>; N]`.
pub struct TaskPool<F: Future + 'static, const N: usize> {
    // Fixed-size pool; spawn() scans it for a free slot.
    pool: [TaskStorage<F>; N],
}
171
impl<F: Future + 'static, const N: usize> TaskPool<F, N> {
    /// Create a new TaskPool, with all tasks in non-spawned state.
    pub const fn new() -> Self {
        Self {
            // TaskStorage::NEW is a const item, allowing array repetition here.
            pool: [TaskStorage::NEW; N],
        }
    }

    /// Try to spawn a task in the pool.
    ///
    /// See [`TaskStorage::spawn()`] for details.
    ///
    /// This will loop over the pool and spawn the task in the first storage that
    /// is currently free. If none is free, a "poisoned" SpawnToken is returned,
    /// which will cause [`Spawner::spawn()`](super::Spawner::spawn) to return the error.
    pub fn spawn(&'static self, future: impl FnOnce() -> F) -> SpawnToken<impl Sized> {
        for task in &self.pool {
            if task.spawn_mark_used() {
                // SAFETY: spawn_mark_used() claimed this slot exclusively.
                return unsafe { SpawnToken::<F>::new(task.spawn_initialize(future)) };
            }
        }

        SpawnToken::<F>::new_failed()
    }

    /// Like spawn(), but allows the task to be send-spawned if the args are Send even if
    /// the future is !Send.
    ///
    /// Not covered by semver guarantees. DO NOT call this directly. Intended to be used
    /// by the Embassy macros ONLY.
    ///
    /// SAFETY: `future` must be a closure of the form `move || my_async_fn(args)`, where `my_async_fn`
    /// is an `async fn`, NOT a hand-written `Future`.
    #[doc(hidden)]
    pub unsafe fn _spawn_async_fn<FutFn>(&'static self, future: FutFn) -> SpawnToken<impl Sized>
    where
        FutFn: FnOnce() -> F,
    {
        // When send-spawning a task, we construct the future in this thread, and effectively
        // "send" it to the executor thread by enqueuing it in its queue. Therefore, in theory,
        // send-spawning should require the future `F` to be `Send`.
        //
        // The problem is this is more restrictive than needed. Once the future is executing,
        // it is never sent to another thread. It is only sent when spawning. It should be
        // enough for the task's arguments to be Send. (and in practice it's super easy to
        // accidentally make your futures !Send, for example by holding an `Rc` or a `&RefCell` across an `.await`.)
        //
        // We can do it by sending the task args and constructing the future in the executor thread
        // on first poll. However, this cannot be done in-place, so it'll waste stack space for a copy
        // of the args.
        //
        // Luckily, an `async fn` future contains just the args when freshly constructed. So, if the
        // args are Send, it's OK to send a !Send future, as long as we do it before first polling it.
        //
        // (Note: this is how the generators are implemented today, it's not officially guaranteed yet,
        // but it's possible it'll be guaranteed in the future. See zulip thread:
        // https://rust-lang.zulipchat.com/#narrow/stream/187312-wg-async/topic/.22only.20before.20poll.22.20Send.20futures )
        //
        // The `FutFn` captures all the args, so if it's Send, the task can be send-spawned.
        // This is why we return `SpawnToken<FutFn>` below.
        //
        // This ONLY holds for `async fn` futures. The other `spawn` methods can be called directly
        // by the user, with arbitrary hand-implemented futures. This is why these return `SpawnToken<F>`.

        for task in &self.pool {
            if task.spawn_mark_used() {
                return SpawnToken::<FutFn>::new(task.spawn_initialize(future));
            }
        }

        SpawnToken::<FutFn>::new_failed()
    }
}
245
/// Raw executor.
///
/// This is the core of the Embassy executor. It is low-level, requiring manual
/// handling of wakeups and task polling. If you can, prefer using one of the
/// higher level executors in [`crate::executor`].
///
/// The raw executor leaves it up to you to handle wakeups and scheduling:
///
/// - To get the executor to do work, call `poll()`. This will poll all queued tasks (all tasks
///   that "want to run").
/// - You must supply a `signal_fn`. The executor will call it to notify you it has work
///   to do. You must arrange for `poll()` to be called as soon as possible.
///
/// `signal_fn` can be called from *any* context: any thread, any interrupt priority
/// level, etc. It may be called synchronously from any `Executor` method call as well.
/// You must deal with this correctly.
///
/// In particular, you must NOT call `poll` directly from `signal_fn`, as this violates
/// the requirement for `poll` to not be called reentrantly.
pub struct Executor {
    // Tasks that want to run (freshly spawned or woken).
    run_queue: RunQueue,
    // Callback + context invoked whenever new work is enqueued.
    signal_fn: fn(*mut ()),
    signal_ctx: *mut (),

    // Tasks waiting on a timer deadline (integrated-timers feature only).
    #[cfg(feature = "integrated-timers")]
    pub(crate) timer_queue: timer_queue::TimerQueue,
    // Time-driver alarm used to fire signal_fn at the next deadline.
    #[cfg(feature = "integrated-timers")]
    alarm: AlarmHandle,
}
275
impl Executor {
    /// Create a new executor.
    ///
    /// When the executor has work to do, it will call `signal_fn` with
    /// `signal_ctx` as argument.
    ///
    /// See [`Executor`] docs for details on `signal_fn`.
    pub fn new(signal_fn: fn(*mut ()), signal_ctx: *mut ()) -> Self {
        // NOTE(review): `unwrap!` here presumably panics if the time driver has
        // no alarms left — confirm against the driver contract.
        #[cfg(feature = "integrated-timers")]
        let alarm = unsafe { unwrap!(driver::allocate_alarm()) };
        // Route alarm expirations through the same signal_fn as normal wakeups.
        #[cfg(feature = "integrated-timers")]
        driver::set_alarm_callback(alarm, signal_fn, signal_ctx);

        Self {
            run_queue: RunQueue::new(),
            signal_fn,
            signal_ctx,

            #[cfg(feature = "integrated-timers")]
            timer_queue: timer_queue::TimerQueue::new(),
            #[cfg(feature = "integrated-timers")]
            alarm,
        }
    }

    /// Enqueue a task in the task queue
    ///
    /// # Safety
    /// - `task` must be a valid pointer to a spawned task.
    /// - `task` must be set up to run in this executor.
    /// - `task` must NOT be already enqueued (in this executor or another one).
    #[inline(always)]
    unsafe fn enqueue(&self, cs: CriticalSection, task: NonNull<TaskHeader>) {
        // Only signal when the queue transitions from empty to non-empty
        // (enqueue() returning true), so signal_fn fires once per batch.
        if self.run_queue.enqueue(cs, task) {
            (self.signal_fn)(self.signal_ctx)
        }
    }

    /// Spawn a task in this executor.
    ///
    /// # Safety
    ///
    /// `task` must be a valid pointer to an initialized but not-already-spawned task.
    ///
    /// It is OK to use `unsafe` to call this from a thread that's not the executor thread.
    /// In this case, the task's Future must be Send. This is because this is effectively
    /// sending the task to the executor thread.
    pub(super) unsafe fn spawn(&'static self, task: NonNull<TaskHeader>) {
        // Bind the task to this executor so wake_task() can find us.
        task.as_ref().executor.set(self);

        critical_section::with(|cs| {
            self.enqueue(cs, task);
        })
    }

    /// Poll all queued tasks in this executor.
    ///
    /// This loops over all tasks that are queued to be polled (i.e. they're
    /// freshly spawned or they've been woken). Other tasks are not polled.
    ///
    /// You must call `poll` after receiving a call to `signal_fn`. It is OK
    /// to call `poll` even when not requested by `signal_fn`, but it wastes
    /// energy.
    ///
    /// # Safety
    ///
    /// You must NOT call `poll` reentrantly on the same executor.
    ///
    /// In particular, note that `poll` may call `signal_fn` synchronously. Therefore, you
    /// must NOT directly call `poll()` from your `signal_fn`. Instead, `signal_fn` has to
    /// somehow schedule for `poll()` to be called later, at a time you know for sure there's
    /// no `poll()` already running.
    pub unsafe fn poll(&'static self) {
        // First move every task whose deadline has passed into the run queue.
        #[cfg(feature = "integrated-timers")]
        self.timer_queue.dequeue_expired(Instant::now(), |task| wake_task(task));

        self.run_queue.dequeue_all(|p| {
            let task = p.as_ref();

            // Mark "no deadline" before polling; the task re-arms it via
            // _embassy_time_schedule_wake if it awaits a timer again.
            #[cfg(feature = "integrated-timers")]
            task.expires_at.set(Instant::MAX);

            let state = task.state.fetch_and(!STATE_RUN_QUEUED, Ordering::AcqRel);
            if state & STATE_SPAWNED == 0 {
                // If task is not running, ignore it. This can happen in the following scenario:
                //   - Task gets dequeued, poll starts
                //   - While task is being polled, it gets woken. It gets placed in the queue.
                //   - Task poll finishes, returning done=true
                //   - RUNNING bit is cleared, but the task is already in the queue.
                return;
            }

            // Run the task
            task.poll_fn.read()(p as _);

            // Enqueue or update into timer_queue
            #[cfg(feature = "integrated-timers")]
            self.timer_queue.update(p);
        });

        #[cfg(feature = "integrated-timers")]
        {
            // If this is already in the past, set_alarm will immediately trigger the alarm.
            // This will cause `signal_fn` to be called, which will cause `poll()` to be called again,
            // so we immediately do another poll loop iteration.
            let next_expiration = self.timer_queue.next_expiration();
            driver::set_alarm(self.alarm, next_expiration.as_ticks());
        }
    }

    /// Get a spawner that spawns tasks in this executor.
    ///
    /// It is OK to call this method multiple times to obtain multiple
    /// `Spawner`s. You may also copy `Spawner`s.
    pub fn spawner(&'static self) -> super::Spawner {
        super::Spawner::new(self)
    }
}
394
/// Wake a task by raw pointer.
///
/// You can obtain task pointers from `Waker`s using [`task_from_waker`].
///
/// # Safety
///
/// `task` must be a valid task pointer obtained from [`task_from_waker`].
pub unsafe fn wake_task(task: NonNull<TaskHeader>) {
    critical_section::with(|cs| {
        let header = task.as_ref();
        // NOTE(review): Relaxed load/store appear to rely on the surrounding
        // critical section to serialize concurrent wakers — confirm this holds
        // for every platform critical_section is implemented on.
        let state = header.state.load(Ordering::Relaxed);

        // If already scheduled, or if not started,
        if (state & STATE_RUN_QUEUED != 0) || (state & STATE_SPAWNED == 0) {
            return;
        }

        // Mark it as scheduled
        header.state.store(state | STATE_RUN_QUEUED, Ordering::Relaxed);

        // We have just marked the task as scheduled, so enqueue it.
        // `executor` is valid here because STATE_SPAWNED was set above.
        let executor = &*header.executor.get();
        executor.enqueue(cs, task);
    })
}
420
// Hook called by embassy-time (via #[no_mangle] linkage) when a timer future
// wants to be woken at `at`. It only records the earliest requested deadline
// in the task header; the task is actually (re-)inserted into the executor's
// timer queue later, by `timer_queue.update` in `Executor::poll`.
#[cfg(feature = "integrated-timers")]
#[no_mangle]
unsafe fn _embassy_time_schedule_wake(at: Instant, waker: &core::task::Waker) {
    let task = waker::task_from_waker(waker);
    let task = task.as_ref();
    // Keep the minimum: multiple timers on one task wake at the earliest one.
    let expires_at = task.expires_at.get();
    task.expires_at.set(expires_at.min(at));
}