aboutsummaryrefslogtreecommitdiff
path: root/embassy-executor/src/raw/run_queue_critical_section.rs
blob: 86c4085ed310dbb34b3b2c0ff295f2d6333fcdf1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
use core::cell::Cell;

use critical_section::{CriticalSection, Mutex};

use super::TaskRef;

/// Intrusive run-queue link embedded in each task's header
/// (accessed via `task.header().run_queue_item` in `RunQueue::enqueue`).
pub(crate) struct RunQueueItem {
    // Next task in the current batch; `None` terminates the list.
    // Guarded by a critical section rather than hardware atomics in this variant.
    next: Mutex<Cell<Option<TaskRef>>>,
}

impl RunQueueItem {
    pub const fn new() -> Self {
        Self {
            next: Mutex::new(Cell::new(None)),
        }
    }
}

/// Atomic task queue using a very, very simple lock-free linked-list queue:
///
/// To enqueue a task, task.next is set to the old head, and head is atomically set to task.
///
/// Dequeuing is done in batches: the queue is emptied by atomically replacing head with
/// null. Then the batch is iterated following the next pointers until null is reached.
///
/// Note that batches will be iterated in the reverse order as they were enqueued. This is OK
/// for our purposes: it can't create fairness problems since the next batch won't run until the
/// current batch is completely processed, so even if a task enqueues itself instantly (for example
/// by waking its own waker) it can't prevent other tasks from running.
///
/// NOTE(review): in this `critical-section`-based implementation, "atomically" / "lock-free"
/// in the description above is realized by holding a critical section around each
/// head/next access rather than with hardware atomic instructions.
pub(crate) struct RunQueue {
    // Most recently enqueued task; `None` when the queue is empty.
    head: Mutex<Cell<Option<TaskRef>>>,
}

impl RunQueue {
    pub const fn new() -> Self {
        Self {
            head: Mutex::new(Cell::new(None)),
        }
    }

    /// Enqueues an item. Returns true if the queue was empty.
    ///
    /// # Safety
    ///
    /// `item` must NOT be already enqueued in any queue.
    #[inline(always)]
    pub(crate) unsafe fn enqueue(&self, task: TaskRef, cs: CriticalSection<'_>) -> bool {
        let prev = self.head.borrow(cs).replace(Some(task));
        task.header().run_queue_item.next.borrow(cs).set(prev);

        prev.is_none()
    }

    /// Empty the queue, then call `on_task` for each task that was in the queue.
    /// NOTE: It is OK for `on_task` to enqueue more tasks. In this case they're left in the queue
    /// and will be processed by the *next* call to `dequeue_all`, *not* the current one.
    pub(crate) fn dequeue_all(&self, on_task: impl Fn(TaskRef)) {
        // Atomically empty the queue.
        let mut next = critical_section::with(|cs| self.head.borrow(cs).take());

        // Iterate the linked list of tasks that were previously in the queue.
        while let Some(task) = next {
            // If the task re-enqueues itself, the `next` pointer will get overwritten.
            // Therefore, first read the next pointer, and only then process the task.

            critical_section::with(|cs| {
                next = task.header().run_queue_item.next.borrow(cs).get();
                task.header().state.run_dequeue(cs);
            });

            on_task(task);
        }
    }
}