diff options
| author | Rasmus Melchior Jacobsen <[email protected]> | 2023-03-30 15:13:44 +0200 |
|---|---|---|
| committer | Rasmus Melchior Jacobsen <[email protected]> | 2023-03-30 15:13:44 +0200 |
| commit | f3dcb5eb22dfac21024e77023d1967b4eaa0b176 (patch) | |
| tree | 5a2a05997263c5e2431c5d8659c92e36b81d1c68 /embassy-stm32/src/flash | |
| parent | a78e10e00362bf0f5649e200fa62c75d6f3808d0 (diff) | |
Wrap write/erase operations in cs
Diffstat (limited to 'embassy-stm32/src/flash')
| -rw-r--r-- | embassy-stm32/src/flash/common.rs | 205 |
1 files changed, 89 insertions, 116 deletions
diff --git a/embassy-stm32/src/flash/common.rs b/embassy-stm32/src/flash/common.rs index 47e94f753..59429e344 100644 --- a/embassy-stm32/src/flash/common.rs +++ b/embassy-stm32/src/flash/common.rs | |||
| @@ -1,7 +1,5 @@ | |||
| 1 | use embassy_hal_common::drop::OnDrop; | 1 | use embassy_hal_common::drop::OnDrop; |
| 2 | use embassy_hal_common::{into_ref, PeripheralRef}; | 2 | use embassy_hal_common::{into_ref, PeripheralRef}; |
| 3 | use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex; | ||
| 4 | use embassy_sync::mutex::{Mutex, MutexGuard}; | ||
| 5 | 3 | ||
| 6 | use super::{family, Error, FlashLayout, FlashRegion, FlashSector, FLASH_BASE, FLASH_SIZE, WRITE_SIZE}; | 4 | use super::{family, Error, FlashLayout, FlashRegion, FlashSector, FLASH_BASE, FLASH_SIZE, WRITE_SIZE}; |
| 7 | use crate::flash::FlashBank; | 5 | use crate::flash::FlashBank; |
| @@ -22,96 +20,21 @@ impl<'d> Flash<'d> { | |||
| 22 | } | 20 | } |
| 23 | 21 | ||
| 24 | pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { | 22 | pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { |
| 25 | Self::blocking_read_inner(FLASH_BASE as u32 + offset, bytes) | 23 | let start_address = FLASH_BASE as u32 + offset; |
| 26 | } | 24 | blocking_read(start_address, bytes) |
| 27 | |||
| 28 | fn blocking_read_inner(start_address: u32, bytes: &mut [u8]) -> Result<(), Error> { | ||
| 29 | assert!(start_address >= FLASH_BASE as u32); | ||
| 30 | if start_address as usize + bytes.len() > FLASH_BASE + FLASH_SIZE { | ||
| 31 | return Err(Error::Size); | ||
| 32 | } | ||
| 33 | |||
| 34 | let flash_data = unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) }; | ||
| 35 | bytes.copy_from_slice(flash_data); | ||
| 36 | Ok(()) | ||
| 37 | } | 25 | } |
| 38 | 26 | ||
| 39 | pub fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> { | 27 | pub fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> { |
| 40 | let start_address = FLASH_BASE as u32 + offset; | 28 | let start_address = FLASH_BASE as u32 + offset; |
| 41 | 29 | ||
| 42 | // No need to take lock here as we only have one mut flash reference. | 30 | unsafe { blocking_write(start_address, buf) } |
| 43 | |||
| 44 | unsafe { Flash::blocking_write_inner(start_address, buf) } | ||
| 45 | } | ||
| 46 | |||
| 47 | unsafe fn blocking_write_inner(start_address: u32, buf: &[u8]) -> Result<(), Error> { | ||
| 48 | assert!(start_address >= FLASH_BASE as u32); | ||
| 49 | if start_address as usize + buf.len() > FLASH_BASE + FLASH_SIZE { | ||
| 50 | return Err(Error::Size); | ||
| 51 | } | ||
| 52 | if (start_address as usize - FLASH_BASE) % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 { | ||
| 53 | return Err(Error::Unaligned); | ||
| 54 | } | ||
| 55 | |||
| 56 | trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address); | ||
| 57 | |||
| 58 | family::clear_all_err(); | ||
| 59 | family::unlock(); | ||
| 60 | family::begin_write(); | ||
| 61 | |||
| 62 | let _ = OnDrop::new(|| { | ||
| 63 | family::end_write(); | ||
| 64 | family::lock(); | ||
| 65 | }); | ||
| 66 | |||
| 67 | let mut address = start_address; | ||
| 68 | for chunk in buf.chunks(WRITE_SIZE) { | ||
| 69 | unsafe { family::blocking_write(address, chunk.try_into().unwrap())? }; | ||
| 70 | address += WRITE_SIZE as u32; | ||
| 71 | } | ||
| 72 | Ok(()) | ||
| 73 | } | 31 | } |
| 74 | 32 | ||
| 75 | pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { | 33 | pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { |
| 76 | let start_address = FLASH_BASE as u32 + from; | 34 | let start_address = FLASH_BASE as u32 + from; |
| 77 | let end_address = FLASH_BASE as u32 + to; | 35 | let end_address = FLASH_BASE as u32 + to; |
| 78 | 36 | ||
| 79 | unsafe { Flash::blocking_erase_inner(start_address, end_address) } | 37 | unsafe { blocking_erase(start_address, end_address) } |
| 80 | } | ||
| 81 | |||
| 82 | unsafe fn blocking_erase_inner(start_address: u32, end_address: u32) -> Result<(), Error> { | ||
| 83 | let regions = family::get_flash_regions(); | ||
| 84 | |||
| 85 | // Test if the address range is aligned at sector base addresses | ||
| 86 | let mut address = start_address; | ||
| 87 | while address < end_address { | ||
| 88 | let sector = get_sector(address, regions); | ||
| 89 | if sector.start != address { | ||
| 90 | return Err(Error::Unaligned); | ||
| 91 | } | ||
| 92 | address += sector.size; | ||
| 93 | } | ||
| 94 | if address != end_address { | ||
| 95 | return Err(Error::Unaligned); | ||
| 96 | } | ||
| 97 | |||
| 98 | trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address); | ||
| 99 | |||
| 100 | family::clear_all_err(); | ||
| 101 | family::unlock(); | ||
| 102 | |||
| 103 | let _ = OnDrop::new(|| { | ||
| 104 | family::lock(); | ||
| 105 | }); | ||
| 106 | |||
| 107 | let mut address = start_address; | ||
| 108 | while address < end_address { | ||
| 109 | let sector = get_sector(address, regions); | ||
| 110 | trace!("Erasing sector: {}", sector); | ||
| 111 | family::blocking_erase_sector(&sector)?; | ||
| 112 | address += sector.size; | ||
| 113 | } | ||
| 114 | Ok(()) | ||
| 115 | } | 38 | } |
| 116 | 39 | ||
| 117 | pub(crate) fn release(self) -> PeripheralRef<'d, crate::peripherals::FLASH> { | 40 | pub(crate) fn release(self) -> PeripheralRef<'d, crate::peripherals::FLASH> { |
| @@ -132,14 +55,79 @@ impl Drop for FlashLayout<'_> { | |||
| 132 | } | 55 | } |
| 133 | } | 56 | } |
| 134 | 57 | ||
| 135 | static REGION_LOCK: Mutex<CriticalSectionRawMutex, ()> = Mutex::new(()); | 58 | fn blocking_read(start_address: u32, bytes: &mut [u8]) -> Result<(), Error> { |
| 59 | assert!(start_address >= FLASH_BASE as u32); | ||
| 60 | if start_address as usize + bytes.len() > FLASH_BASE + FLASH_SIZE { | ||
| 61 | return Err(Error::Size); | ||
| 62 | } | ||
| 63 | |||
| 64 | let flash_data = unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) }; | ||
| 65 | bytes.copy_from_slice(flash_data); | ||
| 66 | Ok(()) | ||
| 67 | } | ||
| 68 | |||
| 69 | unsafe fn blocking_write(start_address: u32, buf: &[u8]) -> Result<(), Error> { | ||
| 70 | assert!(start_address >= FLASH_BASE as u32); | ||
| 71 | if start_address as usize + buf.len() > FLASH_BASE + FLASH_SIZE { | ||
| 72 | return Err(Error::Size); | ||
| 73 | } | ||
| 74 | if (start_address as usize - FLASH_BASE) % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 { | ||
| 75 | return Err(Error::Unaligned); | ||
| 76 | } | ||
| 77 | |||
| 78 | trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address); | ||
| 136 | 79 | ||
| 137 | fn take_lock_spin() -> MutexGuard<'static, CriticalSectionRawMutex, ()> { | 80 | let mut address = start_address; |
| 138 | loop { | 81 | for chunk in buf.chunks(WRITE_SIZE) { |
| 139 | if let Ok(guard) = REGION_LOCK.try_lock() { | 82 | critical_section::with(|_| { |
| 140 | return guard; | 83 | family::clear_all_err(); |
| 84 | family::unlock(); | ||
| 85 | family::begin_write(); | ||
| 86 | let _ = OnDrop::new(|| { | ||
| 87 | family::end_write(); | ||
| 88 | family::lock(); | ||
| 89 | }); | ||
| 90 | family::blocking_write(address, chunk.try_into().unwrap()) | ||
| 91 | })?; | ||
| 92 | address += WRITE_SIZE as u32; | ||
| 93 | } | ||
| 94 | Ok(()) | ||
| 95 | } | ||
| 96 | |||
| 97 | unsafe fn blocking_erase(start_address: u32, end_address: u32) -> Result<(), Error> { | ||
| 98 | let regions = family::get_flash_regions(); | ||
| 99 | |||
| 100 | // Test if the address range is aligned at sector base addresses | ||
| 101 | let mut address = start_address; | ||
| 102 | while address < end_address { | ||
| 103 | let sector = get_sector(address, regions); | ||
| 104 | if sector.start != address { | ||
| 105 | return Err(Error::Unaligned); | ||
| 141 | } | 106 | } |
| 107 | address += sector.size; | ||
| 108 | } | ||
| 109 | if address != end_address { | ||
| 110 | return Err(Error::Unaligned); | ||
| 142 | } | 111 | } |
| 112 | |||
| 113 | trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address); | ||
| 114 | |||
| 115 | let mut address = start_address; | ||
| 116 | while address < end_address { | ||
| 117 | let sector = get_sector(address, regions); | ||
| 118 | trace!("Erasing sector: {}", sector); | ||
| 119 | |||
| 120 | critical_section::with(|_| { | ||
| 121 | family::clear_all_err(); | ||
| 122 | family::unlock(); | ||
| 123 | let _ = OnDrop::new(|| { | ||
| 124 | family::lock(); | ||
| 125 | }); | ||
| 126 | family::blocking_erase_sector(&sector) | ||
| 127 | })?; | ||
| 128 | address += sector.size; | ||
| 129 | } | ||
| 130 | Ok(()) | ||
| 143 | } | 131 | } |
| 144 | 132 | ||
| 145 | pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector { | 133 | pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector { |
| @@ -169,38 +157,19 @@ pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector | |||
| 169 | 157 | ||
| 170 | impl FlashRegion { | 158 | impl FlashRegion { |
| 171 | pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { | 159 | pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { |
| 172 | unsafe { self.blocking_read_inner(offset, bytes) } | 160 | let start_address = self.base + offset; |
| 161 | blocking_read(start_address, bytes) | ||
| 173 | } | 162 | } |
| 174 | 163 | ||
| 175 | pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { | 164 | pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { |
| 176 | unsafe { self.blocking_write_inner(offset, bytes) } | ||
| 177 | } | ||
| 178 | |||
| 179 | pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { | ||
| 180 | unsafe { self.blocking_erase_inner(from, to) } | ||
| 181 | } | ||
| 182 | |||
| 183 | unsafe fn blocking_read_inner(&self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { | ||
| 184 | Flash::blocking_read_inner(self.base + offset, bytes) | ||
| 185 | } | ||
| 186 | |||
| 187 | unsafe fn blocking_write_inner(&self, offset: u32, bytes: &[u8]) -> Result<(), Error> { | ||
| 188 | let start_address = self.base + offset; | 165 | let start_address = self.base + offset; |
| 189 | 166 | unsafe { blocking_write(start_address, bytes) } | |
| 190 | // Protect agains simultaneous write/erase to multiple regions. | ||
| 191 | let _guard = take_lock_spin(); | ||
| 192 | |||
| 193 | Flash::blocking_write_inner(start_address, bytes) | ||
| 194 | } | 167 | } |
| 195 | 168 | ||
| 196 | unsafe fn blocking_erase_inner(&self, from: u32, to: u32) -> Result<(), Error> { | 169 | pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { |
| 197 | let start_address = self.base + from; | 170 | let start_address = self.base + from; |
| 198 | let end_address = self.base + to; | 171 | let end_address = self.base + to; |
| 199 | 172 | unsafe { blocking_erase(start_address, end_address) } | |
| 200 | // Protect agains simultaneous write/erase to multiple regions. | ||
| 201 | let _guard = take_lock_spin(); | ||
| 202 | |||
| 203 | Flash::blocking_erase_inner(start_address, end_address) | ||
| 204 | } | 173 | } |
| 205 | } | 174 | } |
| 206 | 175 | ||
| @@ -208,15 +177,19 @@ foreach_flash_region! { | |||
| 208 | ($type_name:ident, $write_size:literal, $erase_size:literal) => { | 177 | ($type_name:ident, $write_size:literal, $erase_size:literal) => { |
| 209 | impl crate::_generated::flash_regions::$type_name { | 178 | impl crate::_generated::flash_regions::$type_name { |
| 210 | pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { | 179 | pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> { |
| 211 | unsafe { self.0.blocking_read_inner(offset, bytes) } | 180 | let start_address = self.0.base + offset; |
| 181 | blocking_read(start_address, bytes) | ||
| 212 | } | 182 | } |
| 213 | 183 | ||
| 214 | pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { | 184 | pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> { |
| 215 | unsafe { self.0.blocking_write_inner(offset, bytes) } | 185 | let start_address = self.0.base + offset; |
| 186 | unsafe { blocking_write(start_address, bytes) } | ||
| 216 | } | 187 | } |
| 217 | 188 | ||
| 218 | pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { | 189 | pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { |
| 219 | unsafe { self.0.blocking_erase_inner(from, to) } | 190 | let start_address = self.0.base + from; |
| 191 | let end_address = self.0.base + to; | ||
| 192 | unsafe { blocking_erase(start_address, end_address) } | ||
| 220 | } | 193 | } |
| 221 | } | 194 | } |
| 222 | 195 | ||
| @@ -228,7 +201,7 @@ foreach_flash_region! { | |||
| 228 | const READ_SIZE: usize = 1; | 201 | const READ_SIZE: usize = 1; |
| 229 | 202 | ||
| 230 | fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> { | 203 | fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> { |
| 231 | unsafe { self.0.blocking_read_inner(offset, bytes) } | 204 | self.blocking_read(offset, bytes) |
| 232 | } | 205 | } |
| 233 | 206 | ||
| 234 | fn capacity(&self) -> usize { | 207 | fn capacity(&self) -> usize { |
| @@ -241,11 +214,11 @@ foreach_flash_region! { | |||
| 241 | const ERASE_SIZE: usize = $erase_size; | 214 | const ERASE_SIZE: usize = $erase_size; |
| 242 | 215 | ||
| 243 | fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> { | 216 | fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> { |
| 244 | unsafe { self.0.blocking_write_inner(offset, bytes) } | 217 | self.blocking_write(offset, bytes) |
| 245 | } | 218 | } |
| 246 | 219 | ||
| 247 | fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> { | 220 | fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> { |
| 248 | unsafe { self.0.blocking_erase_inner(from, to) } | 221 | self.blocking_erase(from, to) |
| 249 | } | 222 | } |
| 250 | } | 223 | } |
| 251 | }; | 224 | }; |
