diff options
| author | Dario Nieuwenhuis <[email protected]> | 2022-01-13 19:27:10 +0100 |
|---|---|---|
| committer | Dario Nieuwenhuis <[email protected]> | 2022-01-13 19:53:12 +0100 |
| commit | 7086642ce43de7c2fe476da94ec53ed6282087ec (patch) | |
| tree | 9e5eee224e7bcad9159b2142ad9be631d269084c | |
| parent | 167af012114cca8747e65d0dc5157f066bb2ed5d (diff) | |
nrf/spim: share code between blocking+async.
| -rw-r--r-- | embassy-nrf/src/spim.rs | 209 | ||||
| -rw-r--r-- | embassy-nrf/src/util.rs | 22 |
2 files changed, 89 insertions(+), 142 deletions(-)
diff --git a/embassy-nrf/src/spim.rs b/embassy-nrf/src/spim.rs index bc5823f6a..8159cefe8 100644 --- a/embassy-nrf/src/spim.rs +++ b/embassy-nrf/src/spim.rs | |||
| @@ -15,6 +15,7 @@ use crate::gpio; | |||
| 15 | use crate::gpio::sealed::Pin as _; | 15 | use crate::gpio::sealed::Pin as _; |
| 16 | use crate::gpio::{OptionalPin, Pin as GpioPin}; | 16 | use crate::gpio::{OptionalPin, Pin as GpioPin}; |
| 17 | use crate::interrupt::Interrupt; | 17 | use crate::interrupt::Interrupt; |
| 18 | use crate::util::{slice_ptr_parts, slice_ptr_parts_mut}; | ||
| 18 | use crate::{pac, util::slice_in_ram_or}; | 19 | use crate::{pac, util::slice_in_ram_or}; |
| 19 | 20 | ||
| 20 | pub use embedded_hal::spi::{Mode, Phase, Polarity, MODE_0, MODE_1, MODE_2, MODE_3}; | 21 | pub use embedded_hal::spi::{Mode, Phase, Polarity, MODE_0, MODE_1, MODE_2, MODE_3}; |
| @@ -157,6 +158,74 @@ impl<'d, T: Instance> Spim<'d, T> { | |||
| 157 | r.intenclr.write(|w| w.end().clear()); | 158 | r.intenclr.write(|w| w.end().clear()); |
| 158 | } | 159 | } |
| 159 | } | 160 | } |
| 161 | |||
| 162 | fn start_transfer(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> { | ||
| 163 | slice_in_ram_or(tx, Error::DMABufferNotInDataMemory)?; | ||
| 164 | // NOTE: RAM slice check for rx is not necessary, as a mutable | ||
| 165 | // slice can only be built from data located in RAM. | ||
| 166 | |||
| 167 | // Conservative compiler fence to prevent optimizations that do not | ||
| 168 | // take in to account actions by DMA. The fence has been placed here, | ||
| 169 | // before any DMA action has started. | ||
| 170 | compiler_fence(Ordering::SeqCst); | ||
| 171 | |||
| 172 | let r = T::regs(); | ||
| 173 | |||
| 174 | // Set up the DMA write. | ||
| 175 | let (ptr, len) = slice_ptr_parts(tx); | ||
| 176 | r.txd.ptr.write(|w| unsafe { w.ptr().bits(ptr as _) }); | ||
| 177 | r.txd.maxcnt.write(|w| unsafe { w.maxcnt().bits(len as _) }); | ||
| 178 | |||
| 179 | // Set up the DMA read. | ||
| 180 | let (ptr, len) = slice_ptr_parts_mut(rx); | ||
| 181 | r.rxd.ptr.write(|w| unsafe { w.ptr().bits(ptr as _) }); | ||
| 182 | r.rxd.maxcnt.write(|w| unsafe { w.maxcnt().bits(len as _) }); | ||
| 183 | |||
| 184 | // Reset and enable the event | ||
| 185 | r.events_end.reset(); | ||
| 186 | r.intenset.write(|w| w.end().set()); | ||
| 187 | |||
| 188 | // Start SPI transaction. | ||
| 189 | r.tasks_start.write(|w| unsafe { w.bits(1) }); | ||
| 190 | |||
| 191 | Ok(()) | ||
| 192 | } | ||
| 193 | |||
| 194 | fn blocking_transfer(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> { | ||
| 195 | self.start_transfer(rx, tx)?; | ||
| 196 | |||
| 197 | // Wait for 'end' event. | ||
| 198 | while T::regs().events_end.read().bits() == 0 {} | ||
| 199 | |||
| 200 | // Conservative compiler fence to prevent optimizations that do not | ||
| 201 | // take in to account actions by DMA. The fence has been placed here, | ||
| 202 | // after all possible DMA actions have completed. | ||
| 203 | compiler_fence(Ordering::SeqCst); | ||
| 204 | |||
| 205 | Ok(()) | ||
| 206 | } | ||
| 207 | |||
| 208 | async fn async_transfer(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> { | ||
| 209 | self.start_transfer(rx, tx)?; | ||
| 210 | |||
| 211 | // Wait for 'end' event. | ||
| 212 | poll_fn(|cx| { | ||
| 213 | T::state().end_waker.register(cx.waker()); | ||
| 214 | if T::regs().events_end.read().bits() != 0 { | ||
| 215 | return Poll::Ready(()); | ||
| 216 | } | ||
| 217 | |||
| 218 | Poll::Pending | ||
| 219 | }) | ||
| 220 | .await; | ||
| 221 | |||
| 222 | // Conservative compiler fence to prevent optimizations that do not | ||
| 223 | // take in to account actions by DMA. The fence has been placed here, | ||
| 224 | // after all possible DMA actions have completed. | ||
| 225 | compiler_fence(Ordering::SeqCst); | ||
| 226 | |||
| 227 | Ok(()) | ||
| 228 | } | ||
| 160 | } | 229 | } |
| 161 | 230 | ||
| 162 | impl<'d, T: Instance> Drop for Spim<'d, T> { | 231 | impl<'d, T: Instance> Drop for Spim<'d, T> { |
| @@ -210,108 +279,14 @@ impl<'d, T: Instance> FullDuplex<u8> for Spim<'d, T> { | |||
| 210 | = impl Future<Output = Result<(), Self::Error>> + 'a; | 279 | = impl Future<Output = Result<(), Self::Error>> + 'a; |
| 211 | 280 | ||
| 212 | fn read_write<'a>(&'a mut self, rx: &'a mut [u8], tx: &'a [u8]) -> Self::WriteReadFuture<'a> { | 281 | fn read_write<'a>(&'a mut self, rx: &'a mut [u8], tx: &'a [u8]) -> Self::WriteReadFuture<'a> { |
| 213 | async move { | 282 | self.async_transfer(rx, tx) |
| 214 | slice_in_ram_or(tx, Error::DMABufferNotInDataMemory)?; | ||
| 215 | // NOTE: RAM slice check for rx is not necessary, as a mutable | ||
| 216 | // slice can only be built from data located in RAM. | ||
| 217 | |||
| 218 | // Conservative compiler fence to prevent optimizations that do not | ||
| 219 | // take in to account actions by DMA. The fence has been placed here, | ||
| 220 | // before any DMA action has started. | ||
| 221 | compiler_fence(Ordering::SeqCst); | ||
| 222 | |||
| 223 | let r = T::regs(); | ||
| 224 | let s = T::state(); | ||
| 225 | |||
| 226 | // Set up the DMA write. | ||
| 227 | r.txd | ||
| 228 | .ptr | ||
| 229 | .write(|w| unsafe { w.ptr().bits(tx.as_ptr() as u32) }); | ||
| 230 | r.txd | ||
| 231 | .maxcnt | ||
| 232 | .write(|w| unsafe { w.maxcnt().bits(tx.len() as _) }); | ||
| 233 | |||
| 234 | // Set up the DMA read. | ||
| 235 | r.rxd | ||
| 236 | .ptr | ||
| 237 | .write(|w| unsafe { w.ptr().bits(rx.as_mut_ptr() as u32) }); | ||
| 238 | r.rxd | ||
| 239 | .maxcnt | ||
| 240 | .write(|w| unsafe { w.maxcnt().bits(rx.len() as _) }); | ||
| 241 | |||
| 242 | // Reset and enable the event | ||
| 243 | r.events_end.reset(); | ||
| 244 | r.intenset.write(|w| w.end().set()); | ||
| 245 | |||
| 246 | // Start SPI transaction. | ||
| 247 | r.tasks_start.write(|w| unsafe { w.bits(1) }); | ||
| 248 | |||
| 249 | // Conservative compiler fence to prevent optimizations that do not | ||
| 250 | // take in to account actions by DMA. The fence has been placed here, | ||
| 251 | // after all possible DMA actions have completed. | ||
| 252 | compiler_fence(Ordering::SeqCst); | ||
| 253 | |||
| 254 | // Wait for 'end' event. | ||
| 255 | poll_fn(|cx| { | ||
| 256 | s.end_waker.register(cx.waker()); | ||
| 257 | if r.events_end.read().bits() != 0 { | ||
| 258 | return Poll::Ready(()); | ||
| 259 | } | ||
| 260 | |||
| 261 | Poll::Pending | ||
| 262 | }) | ||
| 263 | .await; | ||
| 264 | |||
| 265 | Ok(()) | ||
| 266 | } | ||
| 267 | } | 283 | } |
| 268 | } | 284 | } |
| 269 | 285 | ||
| 270 | // Blocking functions are provided by implementing `embedded_hal` traits. | ||
| 271 | // | ||
| 272 | // Code could be shared between traits to reduce code size. | ||
| 273 | impl<'d, T: Instance> embedded_hal::blocking::spi::Transfer<u8> for Spim<'d, T> { | 286 | impl<'d, T: Instance> embedded_hal::blocking::spi::Transfer<u8> for Spim<'d, T> { |
| 274 | type Error = Error; | 287 | type Error = Error; |
| 275 | fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> { | 288 | fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> { |
| 276 | slice_in_ram_or(words, Error::DMABufferNotInDataMemory)?; | 289 | self.blocking_transfer(words, words)?; |
| 277 | |||
| 278 | // Conservative compiler fence to prevent optimizations that do not | ||
| 279 | // take in to account actions by DMA. The fence has been placed here, | ||
| 280 | // before any DMA action has started. | ||
| 281 | compiler_fence(Ordering::SeqCst); | ||
| 282 | |||
| 283 | let r = T::regs(); | ||
| 284 | |||
| 285 | // Set up the DMA write. | ||
| 286 | r.txd | ||
| 287 | .ptr | ||
| 288 | .write(|w| unsafe { w.ptr().bits(words.as_ptr() as u32) }); | ||
| 289 | r.txd | ||
| 290 | .maxcnt | ||
| 291 | .write(|w| unsafe { w.maxcnt().bits(words.len() as _) }); | ||
| 292 | |||
| 293 | // Set up the DMA read. | ||
| 294 | r.rxd | ||
| 295 | .ptr | ||
| 296 | .write(|w| unsafe { w.ptr().bits(words.as_mut_ptr() as u32) }); | ||
| 297 | r.rxd | ||
| 298 | .maxcnt | ||
| 299 | .write(|w| unsafe { w.maxcnt().bits(words.len() as _) }); | ||
| 300 | |||
| 301 | // Disable the end event since we are busy-polling. | ||
| 302 | r.events_end.reset(); | ||
| 303 | |||
| 304 | // Start SPI transaction. | ||
| 305 | r.tasks_start.write(|w| unsafe { w.bits(1) }); | ||
| 306 | |||
| 307 | // Wait for 'end' event. | ||
| 308 | while r.events_end.read().bits() == 0 {} | ||
| 309 | |||
| 310 | // Conservative compiler fence to prevent optimizations that do not | ||
| 311 | // take in to account actions by DMA. The fence has been placed here, | ||
| 312 | // after all possible DMA actions have completed. | ||
| 313 | compiler_fence(Ordering::SeqCst); | ||
| 314 | |||
| 315 | Ok(words) | 290 | Ok(words) |
| 316 | } | 291 | } |
| 317 | } | 292 | } |
| @@ -320,47 +295,7 @@ impl<'d, T: Instance> embedded_hal::blocking::spi::Write<u8> for Spim<'d, T> { | |||
| 320 | type Error = Error; | 295 | type Error = Error; |
| 321 | 296 | ||
| 322 | fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { | 297 | fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { |
| 323 | slice_in_ram_or(words, Error::DMABufferNotInDataMemory)?; | 298 | self.blocking_transfer(&mut [], words) |
| 324 | let recv: &mut [u8] = &mut []; | ||
| 325 | |||
| 326 | // Conservative compiler fence to prevent optimizations that do not | ||
| 327 | // take in to account actions by DMA. The fence has been placed here, | ||
| 328 | // before any DMA action has started. | ||
| 329 | compiler_fence(Ordering::SeqCst); | ||
| 330 | |||
| 331 | let r = T::regs(); | ||
| 332 | |||
| 333 | // Set up the DMA write. | ||
| 334 | r.txd | ||
| 335 | .ptr | ||
| 336 | .write(|w| unsafe { w.ptr().bits(words.as_ptr() as u32) }); | ||
| 337 | r.txd | ||
| 338 | .maxcnt | ||
| 339 | .write(|w| unsafe { w.maxcnt().bits(words.len() as _) }); | ||
| 340 | |||
| 341 | // Set up the DMA read. | ||
| 342 | r.rxd | ||
| 343 | .ptr | ||
| 344 | .write(|w| unsafe { w.ptr().bits(recv.as_mut_ptr() as u32) }); | ||
| 345 | r.rxd | ||
| 346 | .maxcnt | ||
| 347 | .write(|w| unsafe { w.maxcnt().bits(recv.len() as _) }); | ||
| 348 | |||
| 349 | // Disable the end event since we are busy-polling. | ||
| 350 | r.events_end.reset(); | ||
| 351 | |||
| 352 | // Start SPI transaction. | ||
| 353 | r.tasks_start.write(|w| unsafe { w.bits(1) }); | ||
| 354 | |||
| 355 | // Wait for 'end' event. | ||
| 356 | while r.events_end.read().bits() == 0 {} | ||
| 357 | |||
| 358 | // Conservative compiler fence to prevent optimizations that do not | ||
| 359 | // take in to account actions by DMA. The fence has been placed here, | ||
| 360 | // after all possible DMA actions have completed. | ||
| 361 | compiler_fence(Ordering::SeqCst); | ||
| 362 | |||
| 363 | Ok(()) | ||
| 364 | } | 299 | } |
| 365 | } | 300 | } |
| 366 | 301 | ||
diff --git a/embassy-nrf/src/util.rs b/embassy-nrf/src/util.rs index 2fd0bc5a8..76162b701 100644 --- a/embassy-nrf/src/util.rs +++ b/embassy-nrf/src/util.rs | |||
| @@ -1,16 +1,28 @@ | |||
| 1 | use core::mem; | ||
| 2 | |||
| 1 | const SRAM_LOWER: usize = 0x2000_0000; | 3 | const SRAM_LOWER: usize = 0x2000_0000; |
| 2 | const SRAM_UPPER: usize = 0x3000_0000; | 4 | const SRAM_UPPER: usize = 0x3000_0000; |
| 3 | 5 | ||
| 6 | // TODO: replace transmutes with core::ptr::metadata once it's stable | ||
| 7 | |||
| 8 | pub(crate) fn slice_ptr_parts<T>(slice: *const [T]) -> (usize, usize) { | ||
| 9 | unsafe { mem::transmute(slice) } | ||
| 10 | } | ||
| 11 | |||
| 12 | pub(crate) fn slice_ptr_parts_mut<T>(slice: *mut [T]) -> (usize, usize) { | ||
| 13 | unsafe { mem::transmute(slice) } | ||
| 14 | } | ||
| 15 | |||
| 4 | /// Does this slice reside entirely within RAM? | 16 | /// Does this slice reside entirely within RAM? |
| 5 | pub(crate) fn slice_in_ram<T>(slice: &[T]) -> bool { | 17 | pub(crate) fn slice_in_ram<T>(slice: *const [T]) -> bool { |
| 6 | let ptr = slice.as_ptr() as usize; | 18 | let (ptr, len) = slice_ptr_parts(slice); |
| 7 | ptr >= SRAM_LOWER && (ptr + slice.len() * core::mem::size_of::<T>()) < SRAM_UPPER | 19 | ptr >= SRAM_LOWER && (ptr + len * core::mem::size_of::<T>()) < SRAM_UPPER |
| 8 | } | 20 | } |
| 9 | 21 | ||
| 10 | /// Return an error if slice is not in RAM. | 22 | /// Return an error if slice is not in RAM. |
| 11 | #[cfg(not(feature = "nrf51"))] | 23 | #[cfg(not(feature = "nrf51"))] |
| 12 | pub(crate) fn slice_in_ram_or<T, E>(slice: &[T], err: E) -> Result<(), E> { | 24 | pub(crate) fn slice_in_ram_or<T, E>(slice: *const [T], err: E) -> Result<(), E> { |
| 13 | if slice.is_empty() || slice_in_ram(slice) { | 25 | if slice_in_ram(slice) { |
| 14 | Ok(()) | 26 | Ok(()) |
| 15 | } else { | 27 | } else { |
| 16 | Err(err) | 28 | Err(err) |
