aboutsummaryrefslogtreecommitdiff
path: root/embassy-stm32/src
diff options
context:
space:
mode:
authorRasmus Melchior Jacobsen <[email protected]>2023-03-29 15:45:18 +0200
committerRasmus Melchior Jacobsen <[email protected]>2023-03-29 15:45:18 +0200
commitef1890e9110c8ef3553e6a2d0979dfb52520b025 (patch)
treeb7d855ca1c73f61c280a6efc02a82d0470bf9d36 /embassy-stm32/src
parent68c260edeb8822411ac03d569c1c0d91be2b98a5 (diff)
Remove flash operations from FlashRegion trait and move to common module
Diffstat (limited to 'embassy-stm32/src')
-rw-r--r--embassy-stm32/src/flash/common.rs177
-rw-r--r--embassy-stm32/src/flash/mod.rs200
2 files changed, 186 insertions, 191 deletions
diff --git a/embassy-stm32/src/flash/common.rs b/embassy-stm32/src/flash/common.rs
new file mode 100644
index 000000000..f92236bb0
--- /dev/null
+++ b/embassy-stm32/src/flash/common.rs
@@ -0,0 +1,177 @@
1use embassy_hal_common::drop::OnDrop;
2use embassy_hal_common::{into_ref, PeripheralRef};
3use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
4use embassy_sync::mutex::{Mutex, MutexGuard};
5use embedded_storage::nor_flash::{ErrorType, NorFlash, ReadNorFlash};
6
7use super::{family, Error, FlashRegion};
8pub use crate::_generated::flash_regions::*;
9pub use crate::pac::{FLASH_BASE, FLASH_SIZE, WRITE_SIZE};
10use crate::Peripheral;
11
/// Blocking internal-flash driver.
///
/// Takes exclusive ownership of the FLASH peripheral, so its read / write /
/// erase methods need no inter-region locking (see `into_regions` for the
/// per-region alternative, which does lock).
pub struct Flash<'d> {
    // Owned (or borrowed for 'd) handle to the one FLASH peripheral instance.
    inner: PeripheralRef<'d, crate::peripherals::FLASH>,
}
15
16impl<'d> Flash<'d> {
17 pub fn new(p: impl Peripheral<P = crate::peripherals::FLASH> + 'd) -> Self {
18 into_ref!(p);
19 Self { inner: p }
20 }
21
22 pub fn into_regions(self) -> FlashRegions<'d> {
23 let mut flash = self;
24 let p = unsafe { flash.inner.clone_unchecked() };
25 FlashRegions::new(p)
26 }
27
28 pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
29 Self::blocking_read_inner(FLASH_BASE as u32 + offset, bytes)
30 }
31
32 fn blocking_read_inner(start_address: u32, bytes: &mut [u8]) -> Result<(), Error> {
33 assert!(start_address >= FLASH_BASE as u32);
34 if start_address as usize + bytes.len() > FLASH_BASE + FLASH_SIZE {
35 return Err(Error::Size);
36 }
37
38 let flash_data = unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) };
39 bytes.copy_from_slice(flash_data);
40 Ok(())
41 }
42
43 pub fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> {
44 let start_address = FLASH_BASE as u32 + offset;
45
46 // No need to take lock here as we only have one mut flash reference.
47
48 unsafe { Flash::blocking_write_inner(start_address, buf) }
49 }
50
51 unsafe fn blocking_write_inner(start_address: u32, buf: &[u8]) -> Result<(), Error> {
52 assert!(start_address >= FLASH_BASE as u32);
53 if start_address as usize + buf.len() > FLASH_BASE + FLASH_SIZE {
54 return Err(Error::Size);
55 }
56 if (start_address as usize - FLASH_BASE) % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 {
57 return Err(Error::Unaligned);
58 }
59
60 trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);
61
62 family::clear_all_err();
63 family::unlock();
64 family::begin_write();
65
66 let _ = OnDrop::new(|| {
67 family::end_write();
68 family::lock();
69 });
70
71 let mut address = start_address;
72 for chunk in buf.chunks(WRITE_SIZE) {
73 unsafe { family::blocking_write(address, chunk.try_into().unwrap())? };
74 address += WRITE_SIZE as u32;
75 }
76 Ok(())
77 }
78
79 pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
80 let start_address = FLASH_BASE as u32 + from;
81 let end_address = FLASH_BASE as u32 + to;
82
83 unsafe { Flash::blocking_erase_inner(start_address, end_address) }
84 }
85
86 unsafe fn blocking_erase_inner(start_address: u32, end_address: u32) -> Result<(), Error> {
87 // Test if the address range is aligned at sector base addresses
88 let mut address = start_address;
89 while address < end_address {
90 let sector = family::get_sector(address);
91 if sector.start != address {
92 return Err(Error::Unaligned);
93 }
94 address += sector.size;
95 }
96 if address != end_address {
97 return Err(Error::Unaligned);
98 }
99
100 trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);
101
102 family::clear_all_err();
103 family::unlock();
104
105 let _ = OnDrop::new(|| {
106 family::lock();
107 });
108
109 let mut address = start_address;
110 while address < end_address {
111 let sector = family::get_sector(address);
112 family::blocking_erase_sector(&sector)?;
113 address += sector.size;
114 }
115 Ok(())
116 }
117}
118
impl Drop for Flash<'_> {
    fn drop(&mut self) {
        // Re-lock the flash controller when the driver is released, so the
        // peripheral is never left in the unlocked (writable) state.
        unsafe { family::lock() };
    }
}
124
125static REGION_LOCK: Mutex<CriticalSectionRawMutex, ()> = Mutex::new(());
126
127fn take_lock_spin() -> MutexGuard<'static, CriticalSectionRawMutex, ()> {
128 loop {
129 if let Ok(guard) = REGION_LOCK.try_lock() {
130 return guard;
131 }
132 }
133}
134
// For every generated flash-region type, implement the embedded-storage
// NorFlash traits by delegating to the shared blocking implementations on
// `Flash`, offset by the region's base address. Write/erase take the global
// region lock since regions share one flash controller; reads do not need it.
foreach_flash_region! {
    ($name:ident) => {
        impl ErrorType for crate::_generated::flash_regions::$name {
            type Error = Error;
        }

        impl ReadNorFlash for crate::_generated::flash_regions::$name {
            // Reads have byte granularity regardless of the region's write size.
            const READ_SIZE: usize = 1;

            fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
                Flash::blocking_read_inner(Self::SETTINGS.base as u32 + offset, bytes)
            }

            fn capacity(&self) -> usize {
                <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.size
            }
        }

        impl NorFlash for crate::_generated::flash_regions::$name {
            const WRITE_SIZE: usize = <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.write_size;
            const ERASE_SIZE: usize = <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.erase_size;

            fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
                let start_address = Self::SETTINGS.base as u32 + from;
                let end_address = Self::SETTINGS.base as u32 + to;

                // Protect against simultaneous write/erase to multiple regions.
                let _guard = take_lock_spin();

                unsafe { Flash::blocking_erase_inner(start_address, end_address) }
            }

            fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
                let start_address = Self::SETTINGS.base as u32 + offset;

                // Protect against simultaneous write/erase to multiple regions.
                let _guard = take_lock_spin();

                unsafe { Flash::blocking_write_inner(start_address, bytes) }
            }
        }
    };
}
diff --git a/embassy-stm32/src/flash/mod.rs b/embassy-stm32/src/flash/mod.rs
index 1186a182d..ec7c66947 100644
--- a/embassy-stm32/src/flash/mod.rs
+++ b/embassy-stm32/src/flash/mod.rs
@@ -1,13 +1,4 @@
1use embassy_hal_common::drop::OnDrop; 1use embedded_storage::nor_flash::{NorFlashError, NorFlashErrorKind};
2use embassy_hal_common::{into_ref, PeripheralRef};
3use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
4use embassy_sync::mutex::{Mutex, MutexGuard};
5use embedded_storage::nor_flash::{ErrorType, NorFlash, NorFlashError, NorFlashErrorKind, ReadNorFlash};
6
7pub use crate::_generated::flash_regions::*;
8pub use crate::pac::{FLASH_BASE, FLASH_SIZE, WRITE_SIZE};
9use crate::peripherals::FLASH;
10use crate::Peripheral;
11 2
12#[cfg_attr(any(flash_l0, flash_l1, flash_l4, flash_wl, flash_wb), path = "l.rs")] 3#[cfg_attr(any(flash_l0, flash_l1, flash_l4, flash_wl, flash_wb), path = "l.rs")]
13#[cfg_attr(flash_f3, path = "f3.rs")] 4#[cfg_attr(flash_f3, path = "f3.rs")]
@@ -48,8 +39,14 @@ mod family {
48 } 39 }
49} 40}
50 41
51pub struct Flash<'d> { 42#[cfg(flash)]
52 inner: PeripheralRef<'d, FLASH>, 43mod common;
44
45#[cfg(flash)]
46pub use common::*;
47
48pub trait FlashRegion {
49 const SETTINGS: FlashRegionSettings;
53} 50}
54 51
55pub struct FlashRegionSettings { 52pub struct FlashRegionSettings {
@@ -67,158 +64,12 @@ pub struct FlashSector {
67 pub size: u32, 64 pub size: u32,
68} 65}
69 66
70pub trait FlashRegion {
71 const SETTINGS: FlashRegionSettings;
72
73 fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
74 Flash::blocking_read_inner(Self::SETTINGS.base as u32 + offset, bytes)
75 }
76
77 fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> {
78 let start_address = Self::SETTINGS.base as u32 + offset;
79
80 // Protect agains simultaneous write/erase to multiple regions.
81 let _guard = take_lock_spin();
82
83 unsafe { Flash::blocking_write_inner(start_address, buf) }
84 }
85
86 fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
87 let start_address = Self::SETTINGS.base as u32 + from;
88 let end_address = Self::SETTINGS.base as u32 + to;
89
90 // Protect agains simultaneous write/erase to multiple regions.
91 let _guard = take_lock_spin();
92
93 unsafe { Flash::blocking_erase_inner(start_address, end_address) }
94 }
95}
96
97static REGION_LOCK: Mutex<CriticalSectionRawMutex, ()> = Mutex::new(());
98
99impl<'d> Flash<'d> {
100 pub fn new(p: impl Peripheral<P = FLASH> + 'd) -> Self {
101 into_ref!(p);
102 Self { inner: p }
103 }
104
105 pub fn into_regions(self) -> FlashRegions<'d> {
106 let mut flash = self;
107 let p = unsafe { flash.inner.clone_unchecked() };
108 FlashRegions::new(p)
109 }
110
111 pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
112 Self::blocking_read_inner(FLASH_BASE as u32 + offset, bytes)
113 }
114
115 fn blocking_read_inner(start_address: u32, bytes: &mut [u8]) -> Result<(), Error> {
116 assert!(start_address >= FLASH_BASE as u32);
117 if start_address as usize + bytes.len() > FLASH_BASE + FLASH_SIZE {
118 return Err(Error::Size);
119 }
120
121 let flash_data = unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) };
122 bytes.copy_from_slice(flash_data);
123 Ok(())
124 }
125
126 pub fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> {
127 let start_address = FLASH_BASE as u32 + offset;
128
129 // No need to take lock here as we only have one mut flash reference.
130
131 unsafe { Flash::blocking_write_inner(start_address, buf) }
132 }
133
134 unsafe fn blocking_write_inner(start_address: u32, buf: &[u8]) -> Result<(), Error> {
135 assert!(start_address >= FLASH_BASE as u32);
136 if start_address as usize + buf.len() > FLASH_BASE + FLASH_SIZE {
137 return Err(Error::Size);
138 }
139 if (start_address as usize - FLASH_BASE) % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 {
140 return Err(Error::Unaligned);
141 }
142
143 trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);
144
145 family::clear_all_err();
146 family::unlock();
147 family::begin_write();
148
149 let _ = OnDrop::new(|| {
150 family::end_write();
151 family::lock();
152 });
153
154 let mut address = start_address;
155 for chunk in buf.chunks(WRITE_SIZE) {
156 unsafe { family::blocking_write(address, chunk.try_into().unwrap())? };
157 address += WRITE_SIZE as u32;
158 }
159 Ok(())
160 }
161
162 pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
163 let start_address = FLASH_BASE as u32 + from;
164 let end_address = FLASH_BASE as u32 + to;
165
166 unsafe { Flash::blocking_erase_inner(start_address, end_address) }
167 }
168
169 unsafe fn blocking_erase_inner(start_address: u32, end_address: u32) -> Result<(), Error> {
170 // Test if the address range is aligned at sector base addresses
171 let mut address = start_address;
172 while address < end_address {
173 let sector = family::get_sector(address);
174 if sector.start != address {
175 return Err(Error::Unaligned);
176 }
177 address += sector.size;
178 }
179 if address != end_address {
180 return Err(Error::Unaligned);
181 }
182
183 trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);
184
185 family::clear_all_err();
186 family::unlock();
187
188 let _ = OnDrop::new(|| {
189 family::lock();
190 });
191
192 let mut address = start_address;
193 while address < end_address {
194 let sector = family::get_sector(address);
195 family::blocking_erase_sector(&sector)?;
196 address += sector.size;
197 }
198 Ok(())
199 }
200}
201
202impl Drop for Flash<'_> {
203 fn drop(&mut self) {
204 unsafe { family::lock() };
205 }
206}
207
208impl Drop for FlashRegions<'_> { 67impl Drop for FlashRegions<'_> {
209 fn drop(&mut self) { 68 fn drop(&mut self) {
210 unsafe { family::lock() }; 69 unsafe { family::lock() };
211 } 70 }
212} 71}
213 72
214fn take_lock_spin() -> MutexGuard<'static, CriticalSectionRawMutex, ()> {
215 loop {
216 if let Ok(guard) = REGION_LOCK.try_lock() {
217 return guard;
218 }
219 }
220}
221
222#[derive(Debug, Copy, Clone, PartialEq, Eq)] 73#[derive(Debug, Copy, Clone, PartialEq, Eq)]
223#[cfg_attr(feature = "defmt", derive(defmt::Format))] 74#[cfg_attr(feature = "defmt", derive(defmt::Format))]
224pub enum Error { 75pub enum Error {
@@ -240,36 +91,3 @@ impl NorFlashError for Error {
240 } 91 }
241 } 92 }
242} 93}
243
244foreach_flash_region! {
245 ($name:ident) => {
246 impl ErrorType for crate::_generated::flash_regions::$name {
247 type Error = Error;
248 }
249
250 impl ReadNorFlash for crate::_generated::flash_regions::$name {
251 const READ_SIZE: usize = <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.write_size;
252
253 fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
254 self.blocking_read(offset, bytes)
255 }
256
257 fn capacity(&self) -> usize {
258 <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.size
259 }
260 }
261
262 impl NorFlash for crate::_generated::flash_regions::$name {
263 const WRITE_SIZE: usize = <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.write_size;
264 const ERASE_SIZE: usize = <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.erase_size;
265
266 fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
267 self.blocking_erase(from, to)
268 }
269
270 fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
271 self.blocking_write(offset, bytes)
272 }
273 }
274 };
275}