aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--embassy-stm32-wpan/Cargo.toml2
-rw-r--r--embassy-stm32-wpan/src/wba/ll_sys_if.rs87
-rw-r--r--embassy-stm32-wpan/src/wba/mac_sys_if.rs64
-rw-r--r--embassy-stm32-wpan/src/wba/mod.rs1
-rw-r--r--embassy-stm32-wpan/src/wba/util_seq.rs243
5 files changed, 362 insertions, 35 deletions
diff --git a/embassy-stm32-wpan/Cargo.toml b/embassy-stm32-wpan/Cargo.toml
index 7e562f5cc..103dedead 100644
--- a/embassy-stm32-wpan/Cargo.toml
+++ b/embassy-stm32-wpan/Cargo.toml
@@ -59,7 +59,7 @@ wb55_mac = ["dep:bitflags", "dep:embassy-net-driver", "dep:smoltcp", "smoltcp/me
59 59
60wba = [ "dep:stm32-bindings" ] 60wba = [ "dep:stm32-bindings" ]
61wba_ble = [ "stm32-bindings/wba_wpan_mac" , "stm32-bindings/wba_wpan" ] 61wba_ble = [ "stm32-bindings/wba_wpan_mac" , "stm32-bindings/wba_wpan" ]
62wba_mac = [ "stm32-bindings/wba_wpan_ble" , "stm32-bindings/lib_wba5_linklayer15_4", "stm32-bindings/lib_wba_mac_lib" , "stm32-bindings/wba_wpan" ] 62wba_mac = [ "stm32-bindings/wba_wpan_mac", "stm32-bindings/wba_wpan_ble" , "stm32-bindings/lib_wba5_linklayer15_4", "stm32-bindings/lib_wba_mac_lib" , "stm32-bindings/wba_wpan" ]
63 63
64extended = [] 64extended = []
65 65
diff --git a/embassy-stm32-wpan/src/wba/ll_sys_if.rs b/embassy-stm32-wpan/src/wba/ll_sys_if.rs
index 992c2a6f1..7218b69c4 100644
--- a/embassy-stm32-wpan/src/wba/ll_sys_if.rs
+++ b/embassy-stm32-wpan/src/wba/ll_sys_if.rs
@@ -1,6 +1,4 @@
1#[allow(dead_code)] 1#![cfg(feature = "wba")]
2fn test_fn() {}
3
4// /* USER CODE BEGIN Header */ 2// /* USER CODE BEGIN Header */
5// /** 3// /**
6// ****************************************************************************** 4// ******************************************************************************
@@ -333,3 +331,86 @@ fn test_fn() {}
333// } 331// }
334// #endif 332// #endif
335// 333//
334use super::bindings::{link_layer, mac};
335use super::util_seq;
336
/// Placeholder value used by the original ST middleware when registering tasks.
const UTIL_SEQ_RFU: u32 = 0;
/// Bit mask identifying the Link Layer task within the sequencer.
const TASK_LINK_LAYER_MASK: u32 = 1 << mac::CFG_TASK_ID_T_CFG_TASK_LINK_LAYER;
/// Sequencer priority assigned to the Link Layer background task.
const TASK_PRIO_LINK_LAYER: u32 = mac::CFG_SEQ_PRIO_ID_T_CFG_SEQ_PRIO_0 as u32;
340
/// Link Layer background process initialization.
///
/// Registers the Link Layer background handler with the sequencer so that
/// `ll_sys_schedule_bg_process` can later trigger it.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn ll_sys_bg_process_init() {
    util_seq::UTIL_SEQ_RegTask(TASK_LINK_LAYER_MASK, UTIL_SEQ_RFU, Some(link_layer::ll_sys_bg_process));
}
350
/// Schedule the next iteration of the Link Layer background process.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn ll_sys_schedule_bg_process() {
    util_seq::UTIL_SEQ_SetTask(TASK_LINK_LAYER_MASK, TASK_PRIO_LINK_LAYER);
}
360
/// Schedule the next iteration of the Link Layer background process from an ISR.
///
/// NOTE(review): identical body to `ll_sys_schedule_bg_process`; this assumes
/// `UTIL_SEQ_SetTask` is safe to call from interrupt context — confirm.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn ll_sys_schedule_bg_process_isr() {
    util_seq::UTIL_SEQ_SetTask(TASK_LINK_LAYER_MASK, TASK_PRIO_LINK_LAYER);
}
370
/// Link Layer configuration phase before application startup.
///
/// Pushes the low-ISR / ISR-scheduling options into the Link Layer, selects
/// the sleep-clock source, then installs the TX power table.
/// NOTE(review): the `ll_intf_*` status codes are deliberately discarded —
/// consider at least debug-asserting on them.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn ll_sys_config_params() {
    let allow_low_isr = mac::USE_RADIO_LOW_ISR as u8;
    let run_from_isr = mac::NEXT_EVENT_SCHEDULING_FROM_ISR as u8;
    let _ = link_layer::ll_intf_cmn_config_ll_ctx_params(allow_low_isr, run_from_isr);

    ll_sys_sleep_clock_source_selection();
    let _ = link_layer::ll_intf_cmn_select_tx_power_table(mac::CFG_RF_TX_POWER_TABLE_ID as u8);
}
385
/// Reset Link Layer timing parameters to their default configuration.
///
/// Re-selects the sleep-clock source and re-applies the BLE sleep-clock
/// accuracy; the interface call's status is discarded.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn ll_sys_reset() {
    ll_sys_sleep_clock_source_selection();

    let sleep_accuracy = ll_sys_BLE_sleep_clock_accuracy_selection();
    let _ = link_layer::ll_intf_le_set_sleep_clock_accuracy(sleep_accuracy);
}
398
/// Select the sleep-clock source used by the Link Layer.
/// Defaults to the crystal oscillator when no explicit configuration is available.
///
/// The out parameter presumably receives the selected source's frequency
/// (verify against the C API); this caller does not use it, so the value
/// is dropped.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn ll_sys_sleep_clock_source_selection() {
    let mut frequency: u16 = 0;
    let _ = link_layer::ll_intf_cmn_le_select_slp_clk_src(
        link_layer::_SLPTMR_SRC_TYPE_E_CRYSTAL_OSCILLATOR_SLPTMR as u8,
        &mut frequency as *mut u16,
    );
}
409
/// Determine the BLE sleep-clock accuracy used by the stack.
///
/// Returns zero while board-specific calibration data is unavailable.
// The mixed-case name mirrors the exported C symbol (fixed by `no_mangle`),
// so silence the snake-case lint instead of renaming it — matching the
// `#![allow(non_snake_case)]` convention used in mac_sys_if.rs.
#[allow(non_snake_case)]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn ll_sys_BLE_sleep_clock_accuracy_selection() -> u8 {
    // TODO: derive the board-specific sleep clock accuracy once calibration data is available.
    0
}
diff --git a/embassy-stm32-wpan/src/wba/mac_sys_if.rs b/embassy-stm32-wpan/src/wba/mac_sys_if.rs
index b0dab238e..273399a19 100644
--- a/embassy-stm32-wpan/src/wba/mac_sys_if.rs
+++ b/embassy-stm32-wpan/src/wba/mac_sys_if.rs
@@ -1,4 +1,6 @@
1use crate::bindings::mac::mac_baremetal_run; 1#![cfg(feature = "wba")]
2#![allow(non_snake_case)]
3
2// 4//
3// /* USER CODE BEGIN Header */ 5// /* USER CODE BEGIN Header */
4// /** 6// /**
@@ -113,16 +115,28 @@ use crate::bindings::mac::mac_baremetal_run;
113// } 115// }
114// 116//
115 117
116/** 118use super::util_seq;
117 * @brief Mac Layer Initialisation 119use crate::bindings::mac;
118 * @param None 120
119 * @retval None 121/// Placeholder value used by the original ST middleware when registering tasks.
120 */ 122const UTIL_SEQ_RFU: u32 = 0;
123
124/// Bit mask identifying the MAC layer task within the sequencer.
125const TASK_MAC_LAYER_MASK: u32 = 1 << mac::CFG_TASK_ID_T_CFG_TASK_MAC_LAYER;
126
127/// Sequencer priority assigned to the MAC layer task.
128const TASK_PRIO_MAC_LAYER: u32 = mac::CFG_SEQ_PRIO_ID_T_CFG_SEQ_PRIO_0 as u32;
129
130/// Event flag consumed by the MAC task while waiting on notifications.
131const EVENT_MAC_LAYER_MASK: u32 = 1 << 0;
132
133/// Registers the MAC bare-metal runner with the lightweight sequencer.
134///
135/// Mirrors the behaviour of the reference implementation:
136/// `UTIL_SEQ_RegTask(TASK_MAC_LAYER, UTIL_SEQ_RFU, mac_baremetal_run);`
121#[unsafe(no_mangle)] 137#[unsafe(no_mangle)]
122pub extern "C" fn MacSys_Init() { 138pub unsafe extern "C" fn MacSys_Init() {
123 unsafe { 139 util_seq::UTIL_SEQ_RegTask(TASK_MAC_LAYER_MASK, UTIL_SEQ_RFU, Some(mac::mac_baremetal_run));
124 mac_baremetal_run();
125 }
126} 140}
127 141
128/** 142/**
@@ -131,10 +145,8 @@ pub extern "C" fn MacSys_Init() {
131 * @retval None 145 * @retval None
132 */ 146 */
133#[unsafe(no_mangle)] 147#[unsafe(no_mangle)]
134pub extern "C" fn MacSys_Resume() { 148pub unsafe extern "C" fn MacSys_Resume() {
135 unsafe { 149 util_seq::UTIL_SEQ_ResumeTask(TASK_MAC_LAYER_MASK);
136 mac_baremetal_run();
137 }
138} 150}
139 151
140/** 152/**
@@ -143,10 +155,8 @@ pub extern "C" fn MacSys_Resume() {
143 * @retval None 155 * @retval None
144 */ 156 */
145#[unsafe(no_mangle)] 157#[unsafe(no_mangle)]
146pub extern "C" fn MacSys_SemaphoreSet() { 158pub unsafe extern "C" fn MacSys_SemaphoreSet() {
147 unsafe { 159 util_seq::UTIL_SEQ_SetTask(TASK_MAC_LAYER_MASK, TASK_PRIO_MAC_LAYER);
148 mac_baremetal_run();
149 }
150} 160}
151 161
152/** 162/**
@@ -155,11 +165,7 @@ pub extern "C" fn MacSys_SemaphoreSet() {
155 * @retval None 165 * @retval None
156 */ 166 */
157#[unsafe(no_mangle)] 167#[unsafe(no_mangle)]
158pub extern "C" fn MacSys_SemaphoreWait() { 168pub unsafe extern "C" fn MacSys_SemaphoreWait() {}
159 unsafe {
160 mac_baremetal_run();
161 }
162}
163 169
164/** 170/**
165 * @brief MAC Layer set Event. 171 * @brief MAC Layer set Event.
@@ -167,10 +173,8 @@ pub extern "C" fn MacSys_SemaphoreWait() {
167 * @retval None 173 * @retval None
168 */ 174 */
169#[unsafe(no_mangle)] 175#[unsafe(no_mangle)]
170pub extern "C" fn MacSys_EventSet() { 176pub unsafe extern "C" fn MacSys_EventSet() {
171 unsafe { 177 util_seq::UTIL_SEQ_SetEvt(EVENT_MAC_LAYER_MASK);
172 mac_baremetal_run();
173 }
174} 178}
175 179
176/** 180/**
@@ -179,8 +183,6 @@ pub extern "C" fn MacSys_EventSet() {
179 * @retval None 183 * @retval None
180 */ 184 */
181#[unsafe(no_mangle)] 185#[unsafe(no_mangle)]
182pub extern "C" fn MacSys_EventWait() { 186pub unsafe extern "C" fn MacSys_EventWait() {
183 unsafe { 187 util_seq::UTIL_SEQ_WaitEvt(EVENT_MAC_LAYER_MASK);
184 mac_baremetal_run();
185 }
186} 188}
diff --git a/embassy-stm32-wpan/src/wba/mod.rs b/embassy-stm32-wpan/src/wba/mod.rs
index c93b8d020..3161b578e 100644
--- a/embassy-stm32-wpan/src/wba/mod.rs
+++ b/embassy-stm32-wpan/src/wba/mod.rs
@@ -3,3 +3,4 @@ pub mod linklayer_plat;
3pub mod ll_sys; 3pub mod ll_sys;
4pub mod ll_sys_if; 4pub mod ll_sys_if;
5pub mod mac_sys_if; 5pub mod mac_sys_if;
6pub mod util_seq;
diff --git a/embassy-stm32-wpan/src/wba/util_seq.rs b/embassy-stm32-wpan/src/wba/util_seq.rs
new file mode 100644
index 000000000..b596df908
--- /dev/null
+++ b/embassy-stm32-wpan/src/wba/util_seq.rs
@@ -0,0 +1,243 @@
1#![cfg(feature = "wba")]
2
3use core::cell::UnsafeCell;
4use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
5
6use critical_section::with as critical;
7
/// Signature of a sequencer task callback, matching the C `void (*)(void)`.
type TaskFn = unsafe extern "C" fn();

/// One slot per bit of the 32-bit task mask.
const MAX_TASKS: usize = 32;
/// Priority assigned to freshly (un)registered slots; lower values win.
const DEFAULT_PRIORITY: u8 = u8::MAX;

/// Fixed-size table of registered task callbacks and their priorities.
///
/// Interior mutability via `UnsafeCell`; all access goes through the
/// `unsafe` accessors below, whose callers must serialize table access
/// (in this module they are always invoked inside a critical section).
struct TaskTable {
    // Registered callback per slot, `None` when the slot is free.
    funcs: UnsafeCell<[Option<TaskFn>; MAX_TASKS]>,
    // Current priority per slot (lower value = higher priority).
    priorities: UnsafeCell<[u8; MAX_TASKS]>,
}

impl TaskTable {
    /// Create an empty table (no callbacks, default priorities).
    const fn new() -> Self {
        Self {
            funcs: UnsafeCell::new([None; MAX_TASKS]),
            priorities: UnsafeCell::new([DEFAULT_PRIORITY; MAX_TASKS]),
        }
    }

    /// Install (or clear, with `None`) the callback for slot `idx`.
    ///
    /// # Safety
    /// The caller must serialize all table access (e.g. hold the critical
    /// section); `idx >= MAX_TASKS` panics on the array index.
    unsafe fn set_task(&self, idx: usize, func: Option<TaskFn>, priority: u8) {
        (*self.funcs.get())[idx] = func;
        (*self.priorities.get())[idx] = priority;
    }

    /// Overwrite the priority of slot `idx`.
    ///
    /// # Safety
    /// Same contract as [`TaskTable::set_task`].
    unsafe fn update_priority(&self, idx: usize, priority: u8) {
        (*self.priorities.get())[idx] = priority;
    }

    /// Read the callback registered for slot `idx`, if any.
    ///
    /// # Safety
    /// Same contract as [`TaskTable::set_task`].
    unsafe fn task(&self, idx: usize) -> Option<TaskFn> {
        (*self.funcs.get())[idx]
    }

    /// Read the priority of slot `idx`.
    ///
    /// # Safety
    /// Same contract as [`TaskTable::set_task`].
    unsafe fn priority(&self, idx: usize) -> u8 {
        (*self.priorities.get())[idx]
    }
}

// SAFETY: the table is only mutated through the unsafe accessors, and every
// call site in this module wraps them in a critical section — NOTE(review):
// confirm no access path ever bypasses that discipline.
unsafe impl Sync for TaskTable {}
45
/// Signal any context parked in `wait_event` that new work may be available.
#[inline(always)]
fn wake_event() {
    #[cfg(target_arch = "arm")]
    {
        // SEV sets the ARM event register so a parked WFE returns.
        cortex_m::asm::sev();
    }

    #[cfg(not(target_arch = "arm"))]
    {
        // No-op on architectures without SEV support.
    }
}

/// Park the CPU until an event (SEV or interrupt) arrives.
///
/// Spurious wakeups are fine: callers re-check their condition in a loop.
#[inline(always)]
fn wait_event() {
    #[cfg(target_arch = "arm")]
    {
        cortex_m::asm::wfe();
    }

    #[cfg(not(target_arch = "arm"))]
    {
        core::hint::spin_loop();
    }
}
71
/// Registered task callbacks and priorities; accessed only inside critical sections.
static TASKS: TaskTable = TaskTable::new();
/// One bit per task slot; a set bit means the task is scheduled to run.
static PENDING_TASKS: AtomicU32 = AtomicU32::new(0);
/// Event bitmask set by `UTIL_SEQ_SetEvt` and consumed by `UTIL_SEQ_WaitEvt`.
static EVENTS: AtomicU32 = AtomicU32::new(0);
/// True while some context is inside the scheduler drain loop.
static SCHEDULING: AtomicBool = AtomicBool::new(false);
76
77fn mask_to_index(mask: u32) -> Option<usize> {
78 if mask == 0 {
79 return None;
80 }
81 let idx = mask.trailing_zeros() as usize;
82 if idx < MAX_TASKS { Some(idx) } else { None }
83}
84
/// Run scheduled tasks until the pending bitmask is empty.
///
/// Only one context drains at a time: the `SCHEDULING` flag acts as a
/// non-blocking lock, and a caller that loses the race simply returns
/// (the winner re-checks `PENDING_TASKS` before leaving, so nothing is
/// stranded).
fn drain_pending_tasks() {
    loop {
        // Try to become the single active scheduler; bail out if another
        // context already holds the flag.
        if SCHEDULING
            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
            .is_err()
        {
            return;
        }

        loop {
            // Task selection reads the shared task table, so it runs inside
            // a critical section; the task itself runs outside of it.
            let next = critical(|_| select_next_task());
            match next {
                Some((idx, task)) => unsafe {
                    // SAFETY: `task` was registered via UTIL_SEQ_RegTask as an
                    // `unsafe extern "C" fn()` — assumes registered callbacks
                    // uphold their own contracts; TODO confirm.
                    task();
                    // Force a fresh read of the pending bitmask after each task completion.
                    let _ = idx;
                },
                None => break,
            }
        }

        SCHEDULING.store(false, Ordering::Release);

        // A task may have been scheduled between the last selection and the
        // flag release; loop again so it is not lost.
        if PENDING_TASKS.load(Ordering::Acquire) == 0 {
            break;
        }
    }
}
113
/// Poll and execute any tasks that have been scheduled via the UTIL sequencer API.
///
/// Safe to call from the main loop; returns once no runnable task remains,
/// or immediately when another context is already draining.
pub fn poll_pending_tasks() {
    drain_pending_tasks();
}
118
119fn select_next_task() -> Option<(usize, TaskFn)> {
120 let pending = PENDING_TASKS.load(Ordering::Acquire);
121 if pending == 0 {
122 return None;
123 }
124
125 let mut remaining = pending;
126 let mut best_idx: Option<usize> = None;
127 let mut best_priority = DEFAULT_PRIORITY;
128 let mut best_fn: Option<TaskFn> = None;
129
130 while remaining != 0 {
131 let idx = remaining.trailing_zeros() as usize;
132 remaining &= remaining - 1;
133
134 if idx >= MAX_TASKS {
135 continue;
136 }
137
138 unsafe {
139 if let Some(func) = TASKS.task(idx) {
140 let prio = TASKS.priority(idx);
141 if prio <= best_priority {
142 if prio < best_priority || best_idx.map_or(true, |current| idx < current) {
143 best_priority = prio;
144 best_idx = Some(idx);
145 best_fn = Some(func);
146 }
147 }
148 } else {
149 PENDING_TASKS.fetch_and(!(1u32 << idx), Ordering::AcqRel);
150 }
151 }
152 }
153
154 if let (Some(idx), Some(func)) = (best_idx, best_fn) {
155 PENDING_TASKS.fetch_and(!(1u32 << idx), Ordering::AcqRel);
156 Some((idx, func))
157 } else {
158 None
159 }
160}
161
162#[unsafe(no_mangle)]
163pub extern "C" fn UTIL_SEQ_RegTask(task_mask: u32, _flags: u32, task: Option<TaskFn>) {
164 if let Some(idx) = mask_to_index(task_mask) {
165 critical(|_| unsafe {
166 TASKS.set_task(idx, task, DEFAULT_PRIORITY);
167 });
168 }
169}
170
171#[unsafe(no_mangle)]
172pub extern "C" fn UTIL_SEQ_UnregTask(task_mask: u32) {
173 if let Some(idx) = mask_to_index(task_mask) {
174 critical(|_| unsafe {
175 TASKS.set_task(idx, None, DEFAULT_PRIORITY);
176 });
177 PENDING_TASKS.fetch_and(!(task_mask), Ordering::AcqRel);
178 }
179}
180
/// Schedules the task identified by `task_mask` with the given priority.
///
/// The registration check and priority update apply to the lowest set bit
/// of the mask; all bits of the mask are then published as pending. The
/// priority is truncated to 8 bits; an unregistered slot is ignored.
#[unsafe(no_mangle)]
pub extern "C" fn UTIL_SEQ_SetTask(task_mask: u32, priority: u32) {
    let prio = (priority & 0xFF) as u8;

    if let Some(idx) = mask_to_index(task_mask) {
        // Check registration and update the priority atomically with
        // respect to other table users.
        let registered = critical(|_| unsafe {
            if TASKS.task(idx).is_some() {
                TASKS.update_priority(idx, prio);
                true
            } else {
                false
            }
        });

        if registered {
            // Publish the pending bit only after the table update so the
            // scheduler never sees a stale priority.
            PENDING_TASKS.fetch_or(task_mask, Ordering::Release);
            wake_event();
        }
    }
}
201
/// Marks the task(s) in `task_mask` as pending and wakes the scheduler.
///
/// NOTE(review): ST's reference sequencer treats Pause/Resume as a separate
/// enable mask, distinct from the pending bits; here Resume re-schedules
/// the task directly — confirm callers expect that behaviour.
#[unsafe(no_mangle)]
pub extern "C" fn UTIL_SEQ_ResumeTask(task_mask: u32) {
    PENDING_TASKS.fetch_or(task_mask, Ordering::Release);
    wake_event();
}
207
/// Clears the pending bit(s) in `task_mask` so the task(s) will not run.
///
/// NOTE(review): a subsequent `UTIL_SEQ_SetTask` re-arms the task — this is
/// weaker than ST's enable-mask pause semantics; confirm that is intended.
#[unsafe(no_mangle)]
pub extern "C" fn UTIL_SEQ_PauseTask(task_mask: u32) {
    PENDING_TASKS.fetch_and(!task_mask, Ordering::AcqRel);
}
212
/// Sets the bits of `event_mask` in the shared event word and wakes any
/// waiter parked in `UTIL_SEQ_WaitEvt`.
#[unsafe(no_mangle)]
pub extern "C" fn UTIL_SEQ_SetEvt(event_mask: u32) {
    EVENTS.fetch_or(event_mask, Ordering::Release);
    wake_event();
}
218
/// Clears the bits of `event_mask` in the shared event word; wakes no one.
#[unsafe(no_mangle)]
pub extern "C" fn UTIL_SEQ_ClrEvt(event_mask: u32) {
    EVENTS.fetch_and(!event_mask, Ordering::AcqRel);
}
223
224#[unsafe(no_mangle)]
225pub extern "C" fn UTIL_SEQ_IsEvtSet(event_mask: u32) -> u32 {
226 let state = EVENTS.load(Ordering::Acquire);
227 if (state & event_mask) == event_mask { 1 } else { 0 }
228}
229
/// Blocks until every bit of `event_mask` is set, running scheduled tasks
/// while waiting.
///
/// The matched bits are consumed (cleared) before returning. Draining the
/// pending tasks on every pass lets the task expected to raise the event
/// make progress on a single-threaded target.
#[unsafe(no_mangle)]
pub extern "C" fn UTIL_SEQ_WaitEvt(event_mask: u32) {
    loop {
        poll_pending_tasks();

        let current = EVENTS.load(Ordering::Acquire);
        if (current & event_mask) == event_mask {
            // Consume the event so the next wait starts fresh.
            EVENTS.fetch_and(!event_mask, Ordering::AcqRel);
            break;
        }

        wait_event();
    }
}