diff options
| author | Grant Miller <[email protected]> | 2023-01-29 12:55:06 -0600 |
|---|---|---|
| committer | Grant Miller <[email protected]> | 2023-01-29 15:52:13 -0600 |
| commit | 48e1aab762e902ee0a132602d3c2f9ec0551cd6b (patch) | |
| tree | 1b504ef8ffcd62dd5071e90158efd28172238648 /embassy-executor/src | |
| parent | 7e251a25509a02f9388a8352522e1a279ad857b1 (diff) | |
executor: Replace `NonNull<TaskHeader>` with `TaskRef`
Diffstat (limited to 'embassy-executor/src')
| -rw-r--r-- | embassy-executor/src/raw/mod.rs | 63 | ||||
| -rw-r--r-- | embassy-executor/src/raw/run_queue.rs | 13 | ||||
| -rw-r--r-- | embassy-executor/src/raw/timer_queue.rs | 35 | ||||
| -rw-r--r-- | embassy-executor/src/raw/waker.rs | 13 | ||||
| -rw-r--r-- | embassy-executor/src/spawner.rs | 9 |
5 files changed, 76 insertions, 57 deletions
diff --git a/embassy-executor/src/raw/mod.rs b/embassy-executor/src/raw/mod.rs index 181dabe8e..10a154a9f 100644 --- a/embassy-executor/src/raw/mod.rs +++ b/embassy-executor/src/raw/mod.rs | |||
| @@ -43,14 +43,11 @@ pub(crate) const STATE_RUN_QUEUED: u32 = 1 << 1; | |||
| 43 | pub(crate) const STATE_TIMER_QUEUED: u32 = 1 << 2; | 43 | pub(crate) const STATE_TIMER_QUEUED: u32 = 1 << 2; |
| 44 | 44 | ||
| 45 | /// Raw task header for use in task pointers. | 45 | /// Raw task header for use in task pointers. |
| 46 | /// | 46 | pub(crate) struct TaskHeader { |
| 47 | /// This is an opaque struct, used for raw pointers to tasks, for use | ||
48 | /// with functions like [`wake_task`] and [`task_from_waker`]. | ||
| 49 | pub struct TaskHeader { | ||
| 50 | pub(crate) state: AtomicU32, | 47 | pub(crate) state: AtomicU32, |
| 51 | pub(crate) run_queue_item: RunQueueItem, | 48 | pub(crate) run_queue_item: RunQueueItem, |
| 52 | pub(crate) executor: Cell<*const Executor>, // Valid if state != 0 | 49 | pub(crate) executor: Cell<*const Executor>, // Valid if state != 0 |
| 53 | pub(crate) poll_fn: UninitCell<unsafe fn(NonNull<TaskHeader>)>, // Valid if STATE_SPAWNED | 50 | pub(crate) poll_fn: UninitCell<unsafe fn(TaskRef)>, // Valid if STATE_SPAWNED |
| 54 | 51 | ||
| 55 | #[cfg(feature = "integrated-timers")] | 52 | #[cfg(feature = "integrated-timers")] |
| 56 | pub(crate) expires_at: Cell<Instant>, | 53 | pub(crate) expires_at: Cell<Instant>, |
| @@ -59,7 +56,7 @@ pub struct TaskHeader { | |||
| 59 | } | 56 | } |
| 60 | 57 | ||
| 61 | impl TaskHeader { | 58 | impl TaskHeader { |
| 62 | pub(crate) const fn new() -> Self { | 59 | const fn new() -> Self { |
| 63 | Self { | 60 | Self { |
| 64 | state: AtomicU32::new(0), | 61 | state: AtomicU32::new(0), |
| 65 | run_queue_item: RunQueueItem::new(), | 62 | run_queue_item: RunQueueItem::new(), |
| @@ -74,6 +71,36 @@ impl TaskHeader { | |||
| 74 | } | 71 | } |
| 75 | } | 72 | } |
| 76 | 73 | ||
| 74 | /// This is essentially a `&'static TaskStorage<F>` where the type of the future has been erased. | ||
| 75 | #[derive(Clone, Copy)] | ||
| 76 | pub struct TaskRef { | ||
| 77 | ptr: NonNull<TaskHeader>, | ||
| 78 | } | ||
| 79 | |||
| 80 | impl TaskRef { | ||
| 81 | fn new<F: Future + 'static>(task: &'static TaskStorage<F>) -> Self { | ||
| 82 | Self { | ||
| 83 | ptr: NonNull::from(task).cast(), | ||
| 84 | } | ||
| 85 | } | ||
| 86 | |||
87 | /// Safety: The pointer must have been obtained with `TaskRef::as_ptr` | ||
| 88 | pub(crate) unsafe fn from_ptr(ptr: *const TaskHeader) -> Self { | ||
| 89 | Self { | ||
| 90 | ptr: NonNull::new_unchecked(ptr as *mut TaskHeader), | ||
| 91 | } | ||
| 92 | } | ||
| 93 | |||
| 94 | pub(crate) fn header(self) -> &'static TaskHeader { | ||
| 95 | unsafe { self.ptr.as_ref() } | ||
| 96 | } | ||
| 97 | |||
| 98 | /// The returned pointer is valid for the entire TaskStorage. | ||
| 99 | pub(crate) fn as_ptr(self) -> *const TaskHeader { | ||
| 100 | self.ptr.as_ptr() | ||
| 101 | } | ||
| 102 | } | ||
| 103 | |||
| 77 | /// Raw storage in which a task can be spawned. | 104 | /// Raw storage in which a task can be spawned. |
| 78 | /// | 105 | /// |
| 79 | /// This struct holds the necessary memory to spawn one task whose future is `F`. | 106 | /// This struct holds the necessary memory to spawn one task whose future is `F`. |
| @@ -135,14 +162,14 @@ impl<F: Future + 'static> TaskStorage<F> { | |||
| 135 | .is_ok() | 162 | .is_ok() |
| 136 | } | 163 | } |
| 137 | 164 | ||
| 138 | unsafe fn spawn_initialize(&'static self, future: impl FnOnce() -> F) -> NonNull<TaskHeader> { | 165 | unsafe fn spawn_initialize(&'static self, future: impl FnOnce() -> F) -> TaskRef { |
| 139 | // Initialize the task | 166 | // Initialize the task |
| 140 | self.raw.poll_fn.write(Self::poll); | 167 | self.raw.poll_fn.write(Self::poll); |
| 141 | self.future.write(future()); | 168 | self.future.write(future()); |
| 142 | NonNull::new_unchecked(self as *const TaskStorage<F> as *const TaskHeader as *mut TaskHeader) | 169 | TaskRef::new(self) |
| 143 | } | 170 | } |
| 144 | 171 | ||
| 145 | unsafe fn poll(p: NonNull<TaskHeader>) { | 172 | unsafe fn poll(p: TaskRef) { |
| 146 | let this = &*(p.as_ptr() as *const TaskStorage<F>); | 173 | let this = &*(p.as_ptr() as *const TaskStorage<F>); |
| 147 | 174 | ||
| 148 | let future = Pin::new_unchecked(this.future.as_mut()); | 175 | let future = Pin::new_unchecked(this.future.as_mut()); |
| @@ -307,7 +334,7 @@ impl Executor { | |||
| 307 | /// - `task` must be set up to run in this executor. | 334 | /// - `task` must be set up to run in this executor. |
| 308 | /// - `task` must NOT be already enqueued (in this executor or another one). | 335 | /// - `task` must NOT be already enqueued (in this executor or another one). |
| 309 | #[inline(always)] | 336 | #[inline(always)] |
| 310 | unsafe fn enqueue(&self, cs: CriticalSection, task: NonNull<TaskHeader>) { | 337 | unsafe fn enqueue(&self, cs: CriticalSection, task: TaskRef) { |
| 311 | #[cfg(feature = "rtos-trace")] | 338 | #[cfg(feature = "rtos-trace")] |
| 312 | trace::task_ready_begin(task.as_ptr() as u32); | 339 | trace::task_ready_begin(task.as_ptr() as u32); |
| 313 | 340 | ||
| @@ -325,8 +352,8 @@ impl Executor { | |||
| 325 | /// It is OK to use `unsafe` to call this from a thread that's not the executor thread. | 352 | /// It is OK to use `unsafe` to call this from a thread that's not the executor thread. |
| 326 | /// In this case, the task's Future must be Send. This is because this is effectively | 353 | /// In this case, the task's Future must be Send. This is because this is effectively |
| 327 | /// sending the task to the executor thread. | 354 | /// sending the task to the executor thread. |
| 328 | pub(super) unsafe fn spawn(&'static self, task: NonNull<TaskHeader>) { | 355 | pub(super) unsafe fn spawn(&'static self, task: TaskRef) { |
| 329 | task.as_ref().executor.set(self); | 356 | task.header().executor.set(self); |
| 330 | 357 | ||
| 331 | #[cfg(feature = "rtos-trace")] | 358 | #[cfg(feature = "rtos-trace")] |
| 332 | trace::task_new(task.as_ptr() as u32); | 359 | trace::task_new(task.as_ptr() as u32); |
| @@ -359,7 +386,7 @@ impl Executor { | |||
| 359 | self.timer_queue.dequeue_expired(Instant::now(), |task| wake_task(task)); | 386 | self.timer_queue.dequeue_expired(Instant::now(), |task| wake_task(task)); |
| 360 | 387 | ||
| 361 | self.run_queue.dequeue_all(|p| { | 388 | self.run_queue.dequeue_all(|p| { |
| 362 | let task = p.as_ref(); | 389 | let task = p.header(); |
| 363 | 390 | ||
| 364 | #[cfg(feature = "integrated-timers")] | 391 | #[cfg(feature = "integrated-timers")] |
| 365 | task.expires_at.set(Instant::MAX); | 392 | task.expires_at.set(Instant::MAX); |
| @@ -378,7 +405,7 @@ impl Executor { | |||
| 378 | trace::task_exec_begin(p.as_ptr() as u32); | 405 | trace::task_exec_begin(p.as_ptr() as u32); |
| 379 | 406 | ||
| 380 | // Run the task | 407 | // Run the task |
| 381 | task.poll_fn.read()(p as _); | 408 | task.poll_fn.read()(p); |
| 382 | 409 | ||
| 383 | #[cfg(feature = "rtos-trace")] | 410 | #[cfg(feature = "rtos-trace")] |
| 384 | trace::task_exec_end(); | 411 | trace::task_exec_end(); |
| @@ -424,9 +451,9 @@ impl Executor { | |||
| 424 | /// # Safety | 451 | /// # Safety |
| 425 | /// | 452 | /// |
| 426 | /// `task` must be a valid task pointer obtained from [`task_from_waker`]. | 453 | /// `task` must be a valid task pointer obtained from [`task_from_waker`]. |
| 427 | pub unsafe fn wake_task(task: NonNull<TaskHeader>) { | 454 | pub unsafe fn wake_task(task: TaskRef) { |
| 428 | critical_section::with(|cs| { | 455 | critical_section::with(|cs| { |
| 429 | let header = task.as_ref(); | 456 | let header = task.header(); |
| 430 | let state = header.state.load(Ordering::Relaxed); | 457 | let state = header.state.load(Ordering::Relaxed); |
| 431 | 458 | ||
| 432 | // If already scheduled, or if not started, | 459 | // If already scheduled, or if not started, |
| @@ -450,7 +477,7 @@ struct TimerQueue; | |||
| 450 | impl embassy_time::queue::TimerQueue for TimerQueue { | 477 | impl embassy_time::queue::TimerQueue for TimerQueue { |
| 451 | fn schedule_wake(&'static self, at: Instant, waker: &core::task::Waker) { | 478 | fn schedule_wake(&'static self, at: Instant, waker: &core::task::Waker) { |
| 452 | let task = waker::task_from_waker(waker); | 479 | let task = waker::task_from_waker(waker); |
| 453 | let task = unsafe { task.as_ref() }; | 480 | let task = task.header(); |
| 454 | let expires_at = task.expires_at.get(); | 481 | let expires_at = task.expires_at.get(); |
| 455 | task.expires_at.set(expires_at.min(at)); | 482 | task.expires_at.set(expires_at.min(at)); |
| 456 | } | 483 | } |
diff --git a/embassy-executor/src/raw/run_queue.rs b/embassy-executor/src/raw/run_queue.rs index ed8c82a5c..362157535 100644 --- a/embassy-executor/src/raw/run_queue.rs +++ b/embassy-executor/src/raw/run_queue.rs | |||
| @@ -4,7 +4,7 @@ use core::ptr::NonNull; | |||
| 4 | use atomic_polyfill::{AtomicPtr, Ordering}; | 4 | use atomic_polyfill::{AtomicPtr, Ordering}; |
| 5 | use critical_section::CriticalSection; | 5 | use critical_section::CriticalSection; |
| 6 | 6 | ||
| 7 | use super::TaskHeader; | 7 | use super::{TaskHeader, TaskRef}; |
| 8 | 8 | ||
| 9 | pub(crate) struct RunQueueItem { | 9 | pub(crate) struct RunQueueItem { |
| 10 | next: AtomicPtr<TaskHeader>, | 10 | next: AtomicPtr<TaskHeader>, |
| @@ -46,25 +46,26 @@ impl RunQueue { | |||
| 46 | /// | 46 | /// |
| 47 | /// `item` must NOT be already enqueued in any queue. | 47 | /// `item` must NOT be already enqueued in any queue. |
| 48 | #[inline(always)] | 48 | #[inline(always)] |
| 49 | pub(crate) unsafe fn enqueue(&self, _cs: CriticalSection, task: NonNull<TaskHeader>) -> bool { | 49 | pub(crate) unsafe fn enqueue(&self, _cs: CriticalSection, task: TaskRef) -> bool { |
| 50 | let prev = self.head.load(Ordering::Relaxed); | 50 | let prev = self.head.load(Ordering::Relaxed); |
| 51 | task.as_ref().run_queue_item.next.store(prev, Ordering::Relaxed); | 51 | task.header().run_queue_item.next.store(prev, Ordering::Relaxed); |
| 52 | self.head.store(task.as_ptr(), Ordering::Relaxed); | 52 | self.head.store(task.as_ptr() as _, Ordering::Relaxed); |
| 53 | prev.is_null() | 53 | prev.is_null() |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | /// Empty the queue, then call `on_task` for each task that was in the queue. | 56 | /// Empty the queue, then call `on_task` for each task that was in the queue. |
| 57 | /// NOTE: It is OK for `on_task` to enqueue more tasks. In this case they're left in the queue | 57 | /// NOTE: It is OK for `on_task` to enqueue more tasks. In this case they're left in the queue |
| 58 | /// and will be processed by the *next* call to `dequeue_all`, *not* the current one. | 58 | /// and will be processed by the *next* call to `dequeue_all`, *not* the current one. |
| 59 | pub(crate) fn dequeue_all(&self, on_task: impl Fn(NonNull<TaskHeader>)) { | 59 | pub(crate) fn dequeue_all(&self, on_task: impl Fn(TaskRef)) { |
| 60 | // Atomically empty the queue. | 60 | // Atomically empty the queue. |
| 61 | let mut ptr = self.head.swap(ptr::null_mut(), Ordering::AcqRel); | 61 | let mut ptr = self.head.swap(ptr::null_mut(), Ordering::AcqRel); |
| 62 | 62 | ||
| 63 | // Iterate the linked list of tasks that were previously in the queue. | 63 | // Iterate the linked list of tasks that were previously in the queue. |
| 64 | while let Some(task) = NonNull::new(ptr) { | 64 | while let Some(task) = NonNull::new(ptr) { |
| 65 | let task = unsafe { TaskRef::from_ptr(task.as_ptr()) }; | ||
| 65 | // If the task re-enqueues itself, the `next` pointer will get overwritten. | 66 | // If the task re-enqueues itself, the `next` pointer will get overwritten. |
| 66 | // Therefore, first read the next pointer, and only then process the task. | 67 | // Therefore, first read the next pointer, and only then process the task. |
| 67 | let next = unsafe { task.as_ref() }.run_queue_item.next.load(Ordering::Relaxed); | 68 | let next = task.header().run_queue_item.next.load(Ordering::Relaxed); |
| 68 | 69 | ||
| 69 | on_task(task); | 70 | on_task(task); |
| 70 | 71 | ||
diff --git a/embassy-executor/src/raw/timer_queue.rs b/embassy-executor/src/raw/timer_queue.rs index 24c31892a..57d6d3cda 100644 --- a/embassy-executor/src/raw/timer_queue.rs +++ b/embassy-executor/src/raw/timer_queue.rs | |||
| @@ -1,45 +1,39 @@ | |||
| 1 | use core::cell::Cell; | 1 | use core::cell::Cell; |
| 2 | use core::cmp::min; | 2 | use core::cmp::min; |
| 3 | use core::ptr; | ||
| 4 | use core::ptr::NonNull; | ||
| 5 | 3 | ||
| 6 | use atomic_polyfill::Ordering; | 4 | use atomic_polyfill::Ordering; |
| 7 | use embassy_time::Instant; | 5 | use embassy_time::Instant; |
| 8 | 6 | ||
| 9 | use super::{TaskHeader, STATE_TIMER_QUEUED}; | 7 | use super::{TaskRef, STATE_TIMER_QUEUED}; |
| 10 | 8 | ||
| 11 | pub(crate) struct TimerQueueItem { | 9 | pub(crate) struct TimerQueueItem { |
| 12 | next: Cell<*mut TaskHeader>, | 10 | next: Cell<Option<TaskRef>>, |
| 13 | } | 11 | } |
| 14 | 12 | ||
| 15 | impl TimerQueueItem { | 13 | impl TimerQueueItem { |
| 16 | pub const fn new() -> Self { | 14 | pub const fn new() -> Self { |
| 17 | Self { | 15 | Self { next: Cell::new(None) } |
| 18 | next: Cell::new(ptr::null_mut()), | ||
| 19 | } | ||
| 20 | } | 16 | } |
| 21 | } | 17 | } |
| 22 | 18 | ||
| 23 | pub(crate) struct TimerQueue { | 19 | pub(crate) struct TimerQueue { |
| 24 | head: Cell<*mut TaskHeader>, | 20 | head: Cell<Option<TaskRef>>, |
| 25 | } | 21 | } |
| 26 | 22 | ||
| 27 | impl TimerQueue { | 23 | impl TimerQueue { |
| 28 | pub const fn new() -> Self { | 24 | pub const fn new() -> Self { |
| 29 | Self { | 25 | Self { head: Cell::new(None) } |
| 30 | head: Cell::new(ptr::null_mut()), | ||
| 31 | } | ||
| 32 | } | 26 | } |
| 33 | 27 | ||
| 34 | pub(crate) unsafe fn update(&self, p: NonNull<TaskHeader>) { | 28 | pub(crate) unsafe fn update(&self, p: TaskRef) { |
| 35 | let task = p.as_ref(); | 29 | let task = p.header(); |
| 36 | if task.expires_at.get() != Instant::MAX { | 30 | if task.expires_at.get() != Instant::MAX { |
| 37 | let old_state = task.state.fetch_or(STATE_TIMER_QUEUED, Ordering::AcqRel); | 31 | let old_state = task.state.fetch_or(STATE_TIMER_QUEUED, Ordering::AcqRel); |
| 38 | let is_new = old_state & STATE_TIMER_QUEUED == 0; | 32 | let is_new = old_state & STATE_TIMER_QUEUED == 0; |
| 39 | 33 | ||
| 40 | if is_new { | 34 | if is_new { |
| 41 | task.timer_queue_item.next.set(self.head.get()); | 35 | task.timer_queue_item.next.set(self.head.get()); |
| 42 | self.head.set(p.as_ptr()); | 36 | self.head.set(Some(p)); |
| 43 | } | 37 | } |
| 44 | } | 38 | } |
| 45 | } | 39 | } |
| @@ -47,7 +41,7 @@ impl TimerQueue { | |||
| 47 | pub(crate) unsafe fn next_expiration(&self) -> Instant { | 41 | pub(crate) unsafe fn next_expiration(&self) -> Instant { |
| 48 | let mut res = Instant::MAX; | 42 | let mut res = Instant::MAX; |
| 49 | self.retain(|p| { | 43 | self.retain(|p| { |
| 50 | let task = p.as_ref(); | 44 | let task = p.header(); |
| 51 | let expires = task.expires_at.get(); | 45 | let expires = task.expires_at.get(); |
| 52 | res = min(res, expires); | 46 | res = min(res, expires); |
| 53 | expires != Instant::MAX | 47 | expires != Instant::MAX |
| @@ -55,9 +49,9 @@ impl TimerQueue { | |||
| 55 | res | 49 | res |
| 56 | } | 50 | } |
| 57 | 51 | ||
| 58 | pub(crate) unsafe fn dequeue_expired(&self, now: Instant, on_task: impl Fn(NonNull<TaskHeader>)) { | 52 | pub(crate) unsafe fn dequeue_expired(&self, now: Instant, on_task: impl Fn(TaskRef)) { |
| 59 | self.retain(|p| { | 53 | self.retain(|p| { |
| 60 | let task = p.as_ref(); | 54 | let task = p.header(); |
| 61 | if task.expires_at.get() <= now { | 55 | if task.expires_at.get() <= now { |
| 62 | on_task(p); | 56 | on_task(p); |
| 63 | false | 57 | false |
| @@ -67,11 +61,10 @@ impl TimerQueue { | |||
| 67 | }); | 61 | }); |
| 68 | } | 62 | } |
| 69 | 63 | ||
| 70 | pub(crate) unsafe fn retain(&self, mut f: impl FnMut(NonNull<TaskHeader>) -> bool) { | 64 | pub(crate) unsafe fn retain(&self, mut f: impl FnMut(TaskRef) -> bool) { |
| 71 | let mut prev = &self.head; | 65 | let mut prev = &self.head; |
| 72 | while !prev.get().is_null() { | 66 | while let Some(p) = prev.get() { |
| 73 | let p = NonNull::new_unchecked(prev.get()); | 67 | let task = p.header(); |
| 74 | let task = &*p.as_ptr(); | ||
| 75 | if f(p) { | 68 | if f(p) { |
| 76 | // Skip to next | 69 | // Skip to next |
| 77 | prev = &task.timer_queue_item.next; | 70 | prev = &task.timer_queue_item.next; |
diff --git a/embassy-executor/src/raw/waker.rs b/embassy-executor/src/raw/waker.rs index 5765259f2..400b37fa9 100644 --- a/embassy-executor/src/raw/waker.rs +++ b/embassy-executor/src/raw/waker.rs | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | use core::mem; | 1 | use core::mem; |
| 2 | use core::ptr::NonNull; | ||
| 3 | use core::task::{RawWaker, RawWakerVTable, Waker}; | 2 | use core::task::{RawWaker, RawWakerVTable, Waker}; |
| 4 | 3 | ||
| 5 | use super::{wake_task, TaskHeader}; | 4 | use super::{wake_task, TaskHeader, TaskRef}; |
| 6 | 5 | ||
| 7 | const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake, drop); | 6 | const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake, drop); |
| 8 | 7 | ||
| @@ -11,14 +10,14 @@ unsafe fn clone(p: *const ()) -> RawWaker { | |||
| 11 | } | 10 | } |
| 12 | 11 | ||
| 13 | unsafe fn wake(p: *const ()) { | 12 | unsafe fn wake(p: *const ()) { |
| 14 | wake_task(NonNull::new_unchecked(p as *mut TaskHeader)) | 13 | wake_task(TaskRef::from_ptr(p as *const TaskHeader)) |
| 15 | } | 14 | } |
| 16 | 15 | ||
| 17 | unsafe fn drop(_: *const ()) { | 16 | unsafe fn drop(_: *const ()) { |
| 18 | // nop | 17 | // nop |
| 19 | } | 18 | } |
| 20 | 19 | ||
| 21 | pub(crate) unsafe fn from_task(p: NonNull<TaskHeader>) -> Waker { | 20 | pub(crate) unsafe fn from_task(p: TaskRef) -> Waker { |
| 22 | Waker::from_raw(RawWaker::new(p.as_ptr() as _, &VTABLE)) | 21 | Waker::from_raw(RawWaker::new(p.as_ptr() as _, &VTABLE)) |
| 23 | } | 22 | } |
| 24 | 23 | ||
| @@ -33,7 +32,7 @@ pub(crate) unsafe fn from_task(p: NonNull<TaskHeader>) -> Waker { | |||
| 33 | /// # Panics | 32 | /// # Panics |
| 34 | /// | 33 | /// |
| 35 | /// Panics if the waker is not created by the Embassy executor. | 34 | /// Panics if the waker is not created by the Embassy executor. |
| 36 | pub fn task_from_waker(waker: &Waker) -> NonNull<TaskHeader> { | 35 | pub fn task_from_waker(waker: &Waker) -> TaskRef { |
| 37 | // safety: OK because WakerHack has the same layout as Waker. | 36 | // safety: OK because WakerHack has the same layout as Waker. |
| 38 | // This is not really guaranteed because the structs are `repr(Rust)`, it is | 37 | // This is not really guaranteed because the structs are `repr(Rust)`, it is |
| 39 | // indeed the case in the current implementation. | 38 | // indeed the case in the current implementation. |
| @@ -43,8 +42,8 @@ pub fn task_from_waker(waker: &Waker) -> NonNull<TaskHeader> { | |||
| 43 | panic!("Found waker not created by the Embassy executor. `embassy_time::Timer` only works with the Embassy executor.") | 42 | panic!("Found waker not created by the Embassy executor. `embassy_time::Timer` only works with the Embassy executor.") |
| 44 | } | 43 | } |
| 45 | 44 | ||
| 46 | // safety: we never create a waker with a null data pointer. | 45 | // safety: our wakers are always created with `TaskRef::as_ptr` |
| 47 | unsafe { NonNull::new_unchecked(hack.data as *mut TaskHeader) } | 46 | unsafe { TaskRef::from_ptr(hack.data as *const TaskHeader) } |
| 48 | } | 47 | } |
| 49 | 48 | ||
| 50 | struct WakerHack { | 49 | struct WakerHack { |
diff --git a/embassy-executor/src/spawner.rs b/embassy-executor/src/spawner.rs index 400d973ff..650ea06cb 100644 --- a/embassy-executor/src/spawner.rs +++ b/embassy-executor/src/spawner.rs | |||
| @@ -1,7 +1,6 @@ | |||
| 1 | use core::future::poll_fn; | 1 | use core::future::poll_fn; |
| 2 | use core::marker::PhantomData; | 2 | use core::marker::PhantomData; |
| 3 | use core::mem; | 3 | use core::mem; |
| 4 | use core::ptr::NonNull; | ||
| 5 | use core::task::Poll; | 4 | use core::task::Poll; |
| 6 | 5 | ||
| 7 | use super::raw; | 6 | use super::raw; |
| @@ -22,12 +21,12 @@ use super::raw; | |||
| 22 | /// Once you've invoked a task function and obtained a SpawnToken, you *must* spawn it. | 21 | /// Once you've invoked a task function and obtained a SpawnToken, you *must* spawn it. |
| 23 | #[must_use = "Calling a task function does nothing on its own. You must spawn the returned SpawnToken, typically with Spawner::spawn()"] | 22 | #[must_use = "Calling a task function does nothing on its own. You must spawn the returned SpawnToken, typically with Spawner::spawn()"] |
| 24 | pub struct SpawnToken<S> { | 23 | pub struct SpawnToken<S> { |
| 25 | raw_task: Option<NonNull<raw::TaskHeader>>, | 24 | raw_task: Option<raw::TaskRef>, |
| 26 | phantom: PhantomData<*mut S>, | 25 | phantom: PhantomData<*mut S>, |
| 27 | } | 26 | } |
| 28 | 27 | ||
| 29 | impl<S> SpawnToken<S> { | 28 | impl<S> SpawnToken<S> { |
| 30 | pub(crate) unsafe fn new(raw_task: NonNull<raw::TaskHeader>) -> Self { | 29 | pub(crate) unsafe fn new(raw_task: raw::TaskRef) -> Self { |
| 31 | Self { | 30 | Self { |
| 32 | raw_task: Some(raw_task), | 31 | raw_task: Some(raw_task), |
| 33 | phantom: PhantomData, | 32 | phantom: PhantomData, |
| @@ -92,7 +91,7 @@ impl Spawner { | |||
| 92 | pub async fn for_current_executor() -> Self { | 91 | pub async fn for_current_executor() -> Self { |
| 93 | poll_fn(|cx| unsafe { | 92 | poll_fn(|cx| unsafe { |
| 94 | let task = raw::task_from_waker(cx.waker()); | 93 | let task = raw::task_from_waker(cx.waker()); |
| 95 | let executor = (*task.as_ptr()).executor.get(); | 94 | let executor = task.header().executor.get(); |
| 96 | Poll::Ready(Self::new(&*executor)) | 95 | Poll::Ready(Self::new(&*executor)) |
| 97 | }) | 96 | }) |
| 98 | .await | 97 | .await |
| @@ -168,7 +167,7 @@ impl SendSpawner { | |||
| 168 | pub async fn for_current_executor() -> Self { | 167 | pub async fn for_current_executor() -> Self { |
| 169 | poll_fn(|cx| unsafe { | 168 | poll_fn(|cx| unsafe { |
| 170 | let task = raw::task_from_waker(cx.waker()); | 169 | let task = raw::task_from_waker(cx.waker()); |
| 171 | let executor = (*task.as_ptr()).executor.get(); | 170 | let executor = task.header().executor.get(); |
| 172 | Poll::Ready(Self::new(&*executor)) | 171 | Poll::Ready(Self::new(&*executor)) |
| 173 | }) | 172 | }) |
| 174 | .await | 173 | .await |
