diff options
| author | Brian Schwind <[email protected]> | 2025-10-04 13:32:46 +0900 |
|---|---|---|
| committer | Brian Schwind <[email protected]> | 2025-10-04 20:14:39 +0900 |
| commit | 64d0fdf1d1b2ce1a3d90b312e009dfc17171086a (patch) | |
| tree | c0345a19df50dcc63f9734e8a3900fd19adb7d60 | |
| parent | f471f72d3173f91ebbc1b6eb797278e7ff988e4e (diff) | |
xspi: properly respect the max DMA transfer size when reading and writing
| -rw-r--r-- | embassy-stm32/src/xspi/mod.rs | 73 |
1 file changed, 41 insertions, 32 deletions
diff --git a/embassy-stm32/src/xspi/mod.rs b/embassy-stm32/src/xspi/mod.rs index 1f051bffe..6f224ab99 100644 --- a/embassy-stm32/src/xspi/mod.rs +++ b/embassy-stm32/src/xspi/mod.rs | |||
| @@ -1162,16 +1162,18 @@ impl<'d, T: Instance> Xspi<'d, T, Async> { | |||
| 1162 | T::REGS.ar().write(|v| v.set_address(current_address)); | 1162 | T::REGS.ar().write(|v| v.set_address(current_address)); |
| 1163 | } | 1163 | } |
| 1164 | 1164 | ||
| 1165 | let transfer = unsafe { | 1165 | for chunk in buf.chunks_mut(0xFFFF / W::size().bytes()) { |
| 1166 | self.dma | 1166 | let transfer = unsafe { |
| 1167 | .as_mut() | 1167 | self.dma |
| 1168 | .unwrap() | 1168 | .as_mut() |
| 1169 | .read(T::REGS.dr().as_ptr() as *mut W, buf, Default::default()) | 1169 | .unwrap() |
| 1170 | }; | 1170 | .read(T::REGS.dr().as_ptr() as *mut W, chunk, Default::default()) |
| 1171 | }; | ||
| 1171 | 1172 | ||
| 1172 | T::REGS.cr().modify(|w| w.set_dmaen(true)); | 1173 | T::REGS.cr().modify(|w| w.set_dmaen(true)); |
| 1173 | 1174 | ||
| 1174 | transfer.blocking_wait(); | 1175 | transfer.blocking_wait(); |
| 1176 | } | ||
| 1175 | 1177 | ||
| 1176 | finish_dma(T::REGS); | 1178 | finish_dma(T::REGS); |
| 1177 | 1179 | ||
| @@ -1193,16 +1195,18 @@ impl<'d, T: Instance> Xspi<'d, T, Async> { | |||
| 1193 | .cr() | 1195 | .cr() |
| 1194 | .modify(|v| v.set_fmode(Fmode::from_bits(XspiMode::IndirectWrite.into()))); | 1196 | .modify(|v| v.set_fmode(Fmode::from_bits(XspiMode::IndirectWrite.into()))); |
| 1195 | 1197 | ||
| 1196 | let transfer = unsafe { | 1198 | for chunk in buf.chunks(0xFFFF / W::size().bytes()) { |
| 1197 | self.dma | 1199 | let transfer = unsafe { |
| 1198 | .as_mut() | 1200 | self.dma |
| 1199 | .unwrap() | 1201 | .as_mut() |
| 1200 | .write(buf, T::REGS.dr().as_ptr() as *mut W, Default::default()) | 1202 | .unwrap() |
| 1201 | }; | 1203 | .write(chunk, T::REGS.dr().as_ptr() as *mut W, Default::default()) |
| 1204 | }; | ||
| 1202 | 1205 | ||
| 1203 | T::REGS.cr().modify(|w| w.set_dmaen(true)); | 1206 | T::REGS.cr().modify(|w| w.set_dmaen(true)); |
| 1204 | 1207 | ||
| 1205 | transfer.blocking_wait(); | 1208 | transfer.blocking_wait(); |
| 1209 | } | ||
| 1206 | 1210 | ||
| 1207 | finish_dma(T::REGS); | 1211 | finish_dma(T::REGS); |
| 1208 | 1212 | ||
| @@ -1234,16 +1238,18 @@ impl<'d, T: Instance> Xspi<'d, T, Async> { | |||
| 1234 | T::REGS.ar().write(|v| v.set_address(current_address)); | 1238 | T::REGS.ar().write(|v| v.set_address(current_address)); |
| 1235 | } | 1239 | } |
| 1236 | 1240 | ||
| 1237 | let transfer = unsafe { | 1241 | for chunk in buf.chunks_mut(0xFFFF / W::size().bytes()) { |
| 1238 | self.dma | 1242 | let transfer = unsafe { |
| 1239 | .as_mut() | 1243 | self.dma |
| 1240 | .unwrap() | 1244 | .as_mut() |
| 1241 | .read(T::REGS.dr().as_ptr() as *mut W, buf, Default::default()) | 1245 | .unwrap() |
| 1242 | }; | 1246 | .read(T::REGS.dr().as_ptr() as *mut W, chunk, Default::default()) |
| 1247 | }; | ||
| 1243 | 1248 | ||
| 1244 | T::REGS.cr().modify(|w| w.set_dmaen(true)); | 1249 | T::REGS.cr().modify(|w| w.set_dmaen(true)); |
| 1245 | 1250 | ||
| 1246 | transfer.await; | 1251 | transfer.await; |
| 1252 | } | ||
| 1247 | 1253 | ||
| 1248 | finish_dma(T::REGS); | 1254 | finish_dma(T::REGS); |
| 1249 | 1255 | ||
| @@ -1265,16 +1271,19 @@ impl<'d, T: Instance> Xspi<'d, T, Async> { | |||
| 1265 | .cr() | 1271 | .cr() |
| 1266 | .modify(|v| v.set_fmode(Fmode::from_bits(XspiMode::IndirectWrite.into()))); | 1272 | .modify(|v| v.set_fmode(Fmode::from_bits(XspiMode::IndirectWrite.into()))); |
| 1267 | 1273 | ||
| 1268 | let transfer = unsafe { | 1274 | // TODO: implement this using a LinkedList DMA to offload the whole transfer off the CPU. |
| 1269 | self.dma | 1275 | for chunk in buf.chunks(0xFFFF / W::size().bytes()) { |
| 1270 | .as_mut() | 1276 | let transfer = unsafe { |
| 1271 | .unwrap() | 1277 | self.dma |
| 1272 | .write(buf, T::REGS.dr().as_ptr() as *mut W, Default::default()) | 1278 | .as_mut() |
| 1273 | }; | 1279 | .unwrap() |
| 1280 | .write(chunk, T::REGS.dr().as_ptr() as *mut W, Default::default()) | ||
| 1281 | }; | ||
| 1274 | 1282 | ||
| 1275 | T::REGS.cr().modify(|w| w.set_dmaen(true)); | 1283 | T::REGS.cr().modify(|w| w.set_dmaen(true)); |
| 1276 | 1284 | ||
| 1277 | transfer.await; | 1285 | transfer.await; |
| 1286 | } | ||
| 1278 | 1287 | ||
| 1279 | finish_dma(T::REGS); | 1288 | finish_dma(T::REGS); |
| 1280 | 1289 | ||
