diff options
| author | Rasmus Melchior Jacobsen <[email protected]> | 2023-05-24 17:24:28 +0200 |
|---|---|---|
| committer | Rasmus Melchior Jacobsen <[email protected]> | 2023-05-25 13:04:48 +0200 |
| commit | dfd56031713aa04af682aa1b2b113a72831728f1 (patch) | |
| tree | ab31ce6efc740161471cc6b4b0f766cd83619e9e /embassy-stm32/src/flash/common.rs | |
| parent | 966f0abf48cc143fc33e17a5fc9e138cf82ab05f (diff) | |
Let FlashLayout and FlashRegion depend on a Blocking/Async mode generic
Diffstat (limited to 'embassy-stm32/src/flash/common.rs')
| -rw-r--r-- | embassy-stm32/src/flash/common.rs | 218 |
1 file changed, 88 insertions, 130 deletions
diff --git a/embassy-stm32/src/flash/common.rs b/embassy-stm32/src/flash/common.rs index e1fe7e9da..8b38745cf 100644 --- a/embassy-stm32/src/flash/common.rs +++ b/embassy-stm32/src/flash/common.rs | |||
| @@ -1,14 +1,12 @@ | |||
| 1 | use atomic_polyfill::{fence, Ordering}; | 1 | use atomic_polyfill::{fence, Ordering}; |
| 2 | use embassy_cortex_m::interrupt::InterruptExt; | 2 | use embassy_cortex_m::interrupt::InterruptExt; |
| 3 | use embassy_futures::block_on; | ||
| 4 | use embassy_hal_common::drop::OnDrop; | 3 | use embassy_hal_common::drop::OnDrop; |
| 5 | use embassy_hal_common::{into_ref, PeripheralRef}; | 4 | use embassy_hal_common::{into_ref, PeripheralRef}; |
| 6 | use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex; | ||
| 7 | use embassy_sync::mutex::Mutex; | ||
| 8 | use stm32_metapac::FLASH_BASE; | 5 | use stm32_metapac::FLASH_BASE; |
| 9 | 6 | ||
| 10 | use super::{ | 7 | use super::{ |
| 11 | family, Error, FlashBank, FlashLayout, FlashRegion, FlashSector, FLASH_SIZE, MAX_ERASE_SIZE, READ_SIZE, WRITE_SIZE, | 8 | family, Blocking, Error, FlashBank, FlashLayout, FlashRegion, FlashSector, FLASH_SIZE, MAX_ERASE_SIZE, READ_SIZE, |
| 9 | WRITE_SIZE, | ||
| 12 | }; | 10 | }; |
| 13 | use crate::peripherals::FLASH; | 11 | use crate::peripherals::FLASH; |
| 14 | use crate::Peripheral; | 12 | use crate::Peripheral; |
| @@ -17,8 +15,6 @@ pub struct Flash<'d> { | |||
| 17 | pub(crate) inner: PeripheralRef<'d, FLASH>, | 15 | pub(crate) inner: PeripheralRef<'d, FLASH>, |
| 18 | } | 16 | } |
| 19 | 17 | ||
| 20 | pub(crate) static REGION_ACCESS: Mutex<CriticalSectionRawMutex, ()> = Mutex::new(()); | ||
| 21 | |||
| 22 | impl<'d> Flash<'d> { | 18 | impl<'d> Flash<'d> { |
| 23 | pub fn new(p: impl Peripheral<P = FLASH> + 'd, irq: impl Peripheral<P = crate::interrupt::FLASH> + 'd) -> Self { | 19 | pub fn new(p: impl Peripheral<P = FLASH> + 'd, irq: impl Peripheral<P = crate::interrupt::FLASH> + 'd) -> Self { |
| 24 | into_ref!(p, irq); | 20 | into_ref!(p, irq); |
| @@ -30,7 +26,7 @@ impl<'d> Flash<'d> { | |||
| 30 | Self { inner: p } | 26 | Self { inner: p } |
| 31 | } | 27 | } |
| 32 | 28 | ||
| 33 | pub fn into_regions(self) -> FlashLayout<'d> { | 29 | pub fn into_blocking_regions(self) -> FlashLayout<'d, Blocking> { |
| 34 | family::set_default_layout(); | 30 | family::set_default_layout(); |
| 35 | FlashLayout::new(self.inner) | 31 | FlashLayout::new(self.inner) |
| 36 | } | 32 | } |
| @@ -40,11 +36,19 @@ impl<'d> Flash<'d> { | |||
| 40 | } | 36 | } |
| 41 | 37 | ||
| 42 | pub fn write_blocking(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { | 38 | pub fn write_blocking(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { |
| 43 | unsafe { write_chunked_blocking(FLASH_BASE as u32, FLASH_SIZE as u32, offset, bytes) } | 39 | unsafe { |
| 40 | write_blocking( | ||
| 41 | FLASH_BASE as u32, | ||
| 42 | FLASH_SIZE as u32, | ||
| 43 | offset, | ||
| 44 | bytes, | ||
| 45 | write_chunk_unlocked, | ||
| 46 | ) | ||
| 47 | } | ||
| 44 | } | 48 | } |
| 45 | 49 | ||
| 46 | pub fn erase_blocking(&mut self, from: u32, to: u32) -> Result<(), Error> { | 50 | pub fn erase_blocking(&mut self, from: u32, to: u32) -> Result<(), Error> { |
| 47 | unsafe { erase_sectored_blocking(FLASH_BASE as u32, from, to) } | 51 | unsafe { erase_blocking(FLASH_BASE as u32, from, to, erase_sector_unlocked) } |
| 48 | } | 52 | } |
| 49 | } | 53 | } |
| 50 | 54 | ||
| @@ -59,7 +63,13 @@ pub(super) fn read_blocking(base: u32, size: u32, offset: u32, bytes: &mut [u8]) | |||
| 59 | Ok(()) | 63 | Ok(()) |
| 60 | } | 64 | } |
| 61 | 65 | ||
| 62 | pub(super) unsafe fn write_chunked_blocking(base: u32, size: u32, offset: u32, bytes: &[u8]) -> Result<(), Error> { | 66 | pub(super) unsafe fn write_blocking( |
| 67 | base: u32, | ||
| 68 | size: u32, | ||
| 69 | offset: u32, | ||
| 70 | bytes: &[u8], | ||
| 71 | write_chunk: unsafe fn(u32, &[u8]) -> Result<(), Error>, | ||
| 72 | ) -> Result<(), Error> { | ||
| 63 | if offset + bytes.len() as u32 > size { | 73 | if offset + bytes.len() as u32 > size { |
| 64 | return Err(Error::Size); | 74 | return Err(Error::Size); |
| 65 | } | 75 | } |
| @@ -71,26 +81,39 @@ pub(super) unsafe fn write_chunked_blocking(base: u32, size: u32, offset: u32, b | |||
| 71 | trace!("Writing {} bytes at 0x{:x}", bytes.len(), address); | 81 | trace!("Writing {} bytes at 0x{:x}", bytes.len(), address); |
| 72 | 82 | ||
| 73 | for chunk in bytes.chunks(WRITE_SIZE) { | 83 | for chunk in bytes.chunks(WRITE_SIZE) { |
| 74 | family::clear_all_err(); | 84 | write_chunk(address, chunk)?; |
| 75 | fence(Ordering::SeqCst); | ||
| 76 | family::unlock(); | ||
| 77 | fence(Ordering::SeqCst); | ||
| 78 | family::enable_blocking_write(); | ||
| 79 | fence(Ordering::SeqCst); | ||
| 80 | |||
| 81 | let _on_drop = OnDrop::new(|| { | ||
| 82 | family::disable_blocking_write(); | ||
| 83 | fence(Ordering::SeqCst); | ||
| 84 | family::lock(); | ||
| 85 | }); | ||
| 86 | |||
| 87 | family::write_blocking(address, chunk.try_into().unwrap())?; | ||
| 88 | address += WRITE_SIZE as u32; | 85 | address += WRITE_SIZE as u32; |
| 89 | } | 86 | } |
| 90 | Ok(()) | 87 | Ok(()) |
| 91 | } | 88 | } |
| 92 | 89 | ||
| 93 | pub(super) unsafe fn erase_sectored_blocking(base: u32, from: u32, to: u32) -> Result<(), Error> { | 90 | pub(super) unsafe fn write_chunk_unlocked(address: u32, chunk: &[u8]) -> Result<(), Error> { |
| 91 | family::clear_all_err(); | ||
| 92 | fence(Ordering::SeqCst); | ||
| 93 | family::unlock(); | ||
| 94 | fence(Ordering::SeqCst); | ||
| 95 | family::enable_blocking_write(); | ||
| 96 | fence(Ordering::SeqCst); | ||
| 97 | |||
| 98 | let _on_drop = OnDrop::new(|| { | ||
| 99 | family::disable_blocking_write(); | ||
| 100 | fence(Ordering::SeqCst); | ||
| 101 | family::lock(); | ||
| 102 | }); | ||
| 103 | |||
| 104 | family::write_blocking(address, chunk.try_into().unwrap()) | ||
| 105 | } | ||
| 106 | |||
| 107 | pub(super) unsafe fn write_chunk_with_critical_section(address: u32, chunk: &[u8]) -> Result<(), Error> { | ||
| 108 | critical_section::with(|_| write_chunk_unlocked(address, chunk)) | ||
| 109 | } | ||
| 110 | |||
| 111 | pub(super) unsafe fn erase_blocking( | ||
| 112 | base: u32, | ||
| 113 | from: u32, | ||
| 114 | to: u32, | ||
| 115 | erase_sector: unsafe fn(&FlashSector) -> Result<(), Error>, | ||
| 116 | ) -> Result<(), Error> { | ||
| 94 | let start_address = base + from; | 117 | let start_address = base + from; |
| 95 | let end_address = base + to; | 118 | let end_address = base + to; |
| 96 | let regions = family::get_flash_regions(); | 119 | let regions = family::get_flash_regions(); |
| @@ -103,21 +126,28 @@ pub(super) unsafe fn erase_sectored_blocking(base: u32, from: u32, to: u32) -> R | |||
| 103 | while address < end_address { | 126 | while address < end_address { |
| 104 | let sector = get_sector(address, regions); | 127 | let sector = get_sector(address, regions); |
| 105 | trace!("Erasing sector: {:?}", sector); | 128 | trace!("Erasing sector: {:?}", sector); |
| 106 | 129 | erase_sector(§or)?; | |
| 107 | family::clear_all_err(); | ||
| 108 | fence(Ordering::SeqCst); | ||
| 109 | family::unlock(); | ||
| 110 | fence(Ordering::SeqCst); | ||
| 111 | |||
| 112 | let _on_drop = OnDrop::new(|| family::lock()); | ||
| 113 | |||
| 114 | family::erase_sector_blocking(§or)?; | ||
| 115 | address += sector.size; | 130 | address += sector.size; |
| 116 | } | 131 | } |
| 117 | Ok(()) | 132 | Ok(()) |
| 118 | } | 133 | } |
| 119 | 134 | ||
| 120 | pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector { | 135 | pub(super) unsafe fn erase_sector_unlocked(sector: &FlashSector) -> Result<(), Error> { |
| 136 | family::clear_all_err(); | ||
| 137 | fence(Ordering::SeqCst); | ||
| 138 | family::unlock(); | ||
| 139 | fence(Ordering::SeqCst); | ||
| 140 | |||
| 141 | let _on_drop = OnDrop::new(|| family::lock()); | ||
| 142 | |||
| 143 | family::erase_sector_blocking(§or) | ||
| 144 | } | ||
| 145 | |||
| 146 | pub(super) unsafe fn erase_sector_with_critical_section(sector: &FlashSector) -> Result<(), Error> { | ||
| 147 | critical_section::with(|_| erase_sector_unlocked(sector)) | ||
| 148 | } | ||
| 149 | |||
| 150 | pub(super) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector { | ||
| 121 | let mut current_bank = FlashBank::Bank1; | 151 | let mut current_bank = FlashBank::Bank1; |
| 122 | let mut bank_offset = 0; | 152 | let mut bank_offset = 0; |
| 123 | for region in regions { | 153 | for region in regions { |
| @@ -142,7 +172,7 @@ pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector | |||
| 142 | panic!("Flash sector not found"); | 172 | panic!("Flash sector not found"); |
| 143 | } | 173 | } |
| 144 | 174 | ||
| 145 | pub(crate) fn ensure_sector_aligned( | 175 | pub(super) fn ensure_sector_aligned( |
| 146 | start_address: u32, | 176 | start_address: u32, |
| 147 | end_address: u32, | 177 | end_address: u32, |
| 148 | regions: &[&FlashRegion], | 178 | regions: &[&FlashRegion], |
| @@ -190,121 +220,49 @@ impl embedded_storage::nor_flash::NorFlash for Flash<'_> { | |||
| 190 | } | 220 | } |
| 191 | } | 221 | } |
| 192 | 222 | ||
| 193 | #[cfg(feature = "nightly")] | ||
| 194 | impl embedded_storage_async::nor_flash::ReadNorFlash for Flash<'_> { | ||
| 195 | const READ_SIZE: usize = READ_SIZE; | ||
| 196 | |||
| 197 | async fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> { | ||
| 198 | self.read(offset, bytes) | ||
| 199 | } | ||
| 200 | |||
| 201 | fn capacity(&self) -> usize { | ||
| 202 | FLASH_SIZE | ||
| 203 | } | ||
| 204 | } | ||
| 205 | |||
| 206 | pub struct BlockingFlashRegion<'d, const WRITE_SIZE: u32, const ERASE_SIZE: u32>( | ||
| 207 | &'static FlashRegion, | ||
| 208 | PeripheralRef<'d, FLASH>, | ||
| 209 | ); | ||
| 210 | |||
| 211 | impl<const WRITE_SIZE: u32, const ERASE_SIZE: u32> BlockingFlashRegion<'_, WRITE_SIZE, ERASE_SIZE> { | ||
| 212 | pub fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { | ||
| 213 | read_blocking(self.0.base, self.0.size, offset, bytes) | ||
| 214 | } | ||
| 215 | |||
| 216 | pub fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { | ||
| 217 | let _guard = block_on(REGION_ACCESS.lock()); | ||
| 218 | unsafe { write_chunked_blocking(self.0.base, self.0.size, offset, bytes) } | ||
| 219 | } | ||
| 220 | |||
| 221 | pub fn erase(&mut self, from: u32, to: u32) -> Result<(), Error> { | ||
| 222 | let _guard = block_on(REGION_ACCESS.lock()); | ||
| 223 | unsafe { erase_sectored_blocking(self.0.base, from, to) } | ||
| 224 | } | ||
| 225 | } | ||
| 226 | |||
| 227 | impl<const WRITE_SIZE: u32, const ERASE_SIZE: u32> embedded_storage::nor_flash::ErrorType | ||
| 228 | for BlockingFlashRegion<'_, WRITE_SIZE, ERASE_SIZE> | ||
| 229 | { | ||
| 230 | type Error = Error; | ||
| 231 | } | ||
| 232 | |||
| 233 | impl<const WRITE_SIZE: u32, const ERASE_SIZE: u32> embedded_storage::nor_flash::ReadNorFlash | ||
| 234 | for BlockingFlashRegion<'_, WRITE_SIZE, ERASE_SIZE> | ||
| 235 | { | ||
| 236 | const READ_SIZE: usize = READ_SIZE; | ||
| 237 | |||
| 238 | fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> { | ||
| 239 | self.read(offset, bytes) | ||
| 240 | } | ||
| 241 | |||
| 242 | fn capacity(&self) -> usize { | ||
| 243 | self.0.size as usize | ||
| 244 | } | ||
| 245 | } | ||
| 246 | |||
| 247 | impl<const WRITE_SIZE: u32, const ERASE_SIZE: u32> embedded_storage::nor_flash::NorFlash | ||
| 248 | for BlockingFlashRegion<'_, WRITE_SIZE, ERASE_SIZE> | ||
| 249 | { | ||
| 250 | const WRITE_SIZE: usize = WRITE_SIZE as usize; | ||
| 251 | const ERASE_SIZE: usize = ERASE_SIZE as usize; | ||
| 252 | |||
| 253 | fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> { | ||
| 254 | self.write(offset, bytes) | ||
| 255 | } | ||
| 256 | |||
| 257 | fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> { | ||
| 258 | self.erase(from, to) | ||
| 259 | } | ||
| 260 | } | ||
| 261 | |||
| 262 | foreach_flash_region! { | 223 | foreach_flash_region! { |
| 263 | ($type_name:ident, $write_size:literal, $erase_size:literal) => { | 224 | ($type_name:ident, $write_size:literal, $erase_size:literal) => { |
| 264 | paste::paste! { | 225 | impl<'d> crate::_generated::flash_regions::$type_name<'d, Blocking> { |
| 265 | pub type [<Blocking $type_name>]<'d> = BlockingFlashRegion<'d, $write_size, $erase_size>; | 226 | pub fn read_blocking(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { |
| 266 | } | ||
| 267 | |||
| 268 | impl<'d> crate::_generated::flash_regions::$type_name<'d> { | ||
| 269 | /// Make this flash region work in a blocking context. | ||
| 270 | /// | ||
| 271 | /// SAFETY | ||
| 272 | /// | ||
| 273 | /// This function is unsafe as incorect usage of parallel blocking operations | ||
| 274 | /// on multiple regions may cause a deadlock because each region requires mutual access to the flash. | ||
| 275 | pub unsafe fn into_blocking(self) -> BlockingFlashRegion<'d, $write_size, $erase_size> { | ||
| 276 | BlockingFlashRegion(self.0, self.1) | ||
| 277 | } | ||
| 278 | |||
| 279 | pub fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { | ||
| 280 | read_blocking(self.0.base, self.0.size, offset, bytes) | 227 | read_blocking(self.0.base, self.0.size, offset, bytes) |
| 281 | } | 228 | } |
| 282 | 229 | ||
| 283 | pub fn try_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { | 230 | pub fn write_blocking(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { |
| 284 | let _guard = REGION_ACCESS.try_lock().map_err(|_| Error::TryLockError)?; | 231 | unsafe { write_blocking(self.0.base, self.0.size, offset, bytes, write_chunk_with_critical_section) } |
| 285 | unsafe { write_chunked_blocking(self.0.base, self.0.size, offset, bytes) } | ||
| 286 | } | 232 | } |
| 287 | 233 | ||
| 288 | pub fn try_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { | 234 | pub fn erase_blocking(&mut self, from: u32, to: u32) -> Result<(), Error> { |
| 289 | let _guard = REGION_ACCESS.try_lock().map_err(|_| Error::TryLockError)?; | 235 | unsafe { erase_blocking(self.0.base, from, to, erase_sector_with_critical_section) } |
| 290 | unsafe { erase_sectored_blocking(self.0.base, from, to) } | ||
| 291 | } | 236 | } |
| 292 | } | 237 | } |
| 293 | 238 | ||
| 294 | impl embedded_storage::nor_flash::ErrorType for crate::_generated::flash_regions::$type_name<'_> { | 239 | impl<MODE> embedded_storage::nor_flash::ErrorType for crate::_generated::flash_regions::$type_name<'_, MODE> { |
| 295 | type Error = Error; | 240 | type Error = Error; |
| 296 | } | 241 | } |
| 297 | 242 | ||
| 298 | impl embedded_storage::nor_flash::ReadNorFlash for crate::_generated::flash_regions::$type_name<'_> { | 243 | impl embedded_storage::nor_flash::ReadNorFlash for crate::_generated::flash_regions::$type_name<'_, Blocking> { |
| 299 | const READ_SIZE: usize = READ_SIZE; | 244 | const READ_SIZE: usize = READ_SIZE; |
| 300 | 245 | ||
| 301 | fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> { | 246 | fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> { |
| 302 | self.read(offset, bytes) | 247 | self.read_blocking(offset, bytes) |
| 303 | } | 248 | } |
| 304 | 249 | ||
| 305 | fn capacity(&self) -> usize { | 250 | fn capacity(&self) -> usize { |
| 306 | self.0.size as usize | 251 | self.0.size as usize |
| 307 | } | 252 | } |
| 308 | } | 253 | } |
| 254 | |||
| 255 | impl embedded_storage::nor_flash::NorFlash for crate::_generated::flash_regions::$type_name<'_, Blocking> { | ||
| 256 | const WRITE_SIZE: usize = $write_size; | ||
| 257 | const ERASE_SIZE: usize = $erase_size; | ||
| 258 | |||
| 259 | fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> { | ||
| 260 | self.write_blocking(offset, bytes) | ||
| 261 | } | ||
| 262 | |||
| 263 | fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> { | ||
| 264 | self.erase_blocking(from, to) | ||
| 265 | } | ||
| 266 | } | ||
| 309 | }; | 267 | }; |
| 310 | } | 268 | } |
