aboutsummaryrefslogtreecommitdiff
path: root/embassy-stm32/src/hash
diff options
context:
space:
mode:
authorCaleb Garrett <[email protected]>2024-02-11 11:32:29 -0500
committerCaleb Garrett <[email protected]>2024-02-11 11:32:29 -0500
commiteb64d71247dd7c217c7ead98635610fdd8a104e3 (patch)
tree3173d8df9f869b9a20ef91625b814a23801c6e36 /embassy-stm32/src/hash
parent0c9661a66164736aae9f4c9df250494c17379f5c (diff)
Consolidated hash drivers.
Diffstat (limited to 'embassy-stm32/src/hash')
-rw-r--r--embassy-stm32/src/hash/mod.rs553
-rw-r--r--embassy-stm32/src/hash/v1v3v4.rs399
-rw-r--r--embassy-stm32/src/hash/v2.rs389
3 files changed, 545 insertions, 796 deletions
diff --git a/embassy-stm32/src/hash/mod.rs b/embassy-stm32/src/hash/mod.rs
index 64c1a0a8c..f0c2c839a 100644
--- a/embassy-stm32/src/hash/mod.rs
+++ b/embassy-stm32/src/hash/mod.rs
@@ -1,8 +1,545 @@
1//! Hash Accelerator (HASH) 1//! Hash generator (HASH)
2#[cfg_attr(hash_v1, path = "v1v3v4.rs")] 2use core::cmp::min;
3#[cfg_attr(hash_v2, path = "v2.rs")] 3#[cfg(hash_v2)]
4#[cfg_attr(hash_v3, path = "v1v3v4.rs")] 4use core::future::poll_fn;
5#[cfg_attr(hash_v4, path = "v1v3v4.rs")] 5use core::marker::PhantomData;
6mod _version; 6#[cfg(hash_v2)]
7 7use core::ptr;
8pub use _version::*; 8#[cfg(hash_v2)]
9use core::task::Poll;
10
11use embassy_hal_internal::{into_ref, PeripheralRef};
12use embassy_sync::waitqueue::AtomicWaker;
13use stm32_metapac::hash::regs::*;
14
15use crate::dma::NoDma;
16#[cfg(hash_v2)]
17use crate::dma::Transfer;
18use crate::interrupt::typelevel::Interrupt;
19use crate::peripherals::HASH;
20use crate::rcc::sealed::RccPeripheral;
21use crate::{interrupt, pac, peripherals, Peripheral};
22
23#[cfg(hash_v1)]
24const NUM_CONTEXT_REGS: usize = 51;
25#[cfg(hash_v3)]
26const NUM_CONTEXT_REGS: usize = 103;
27#[cfg(any(hash_v2, hash_v4))]
28const NUM_CONTEXT_REGS: usize = 54;
29
30const HASH_BUFFER_LEN: usize = 132;
31const DIGEST_BLOCK_SIZE: usize = 128;
32
33static HASH_WAKER: AtomicWaker = AtomicWaker::new();
34
35/// HASH interrupt handler.
36pub struct InterruptHandler<T: Instance> {
37 _phantom: PhantomData<T>,
38}
39
40impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
41 unsafe fn on_interrupt() {
42 let bits = T::regs().sr().read();
43 if bits.dinis() {
44 T::regs().imr().modify(|reg| reg.set_dinie(false));
45 HASH_WAKER.wake();
46 }
47 if bits.dcis() {
48 T::regs().imr().modify(|reg| reg.set_dcie(false));
49 HASH_WAKER.wake();
50 }
51 }
52}
53
54///Hash algorithm selection
55#[derive(Clone, Copy, PartialEq)]
56pub enum Algorithm {
57 /// SHA-1 Algorithm
58 SHA1 = 0,
59
60 #[cfg(any(hash_v1, hash_v2, hash_v4))]
61 /// MD5 Algorithm
62 MD5 = 1,
63
64 /// SHA-224 Algorithm
65 SHA224 = 2,
66
67 /// SHA-256 Algorithm
68 SHA256 = 3,
69
70 #[cfg(hash_v3)]
71 /// SHA-384 Algorithm
72 SHA384 = 12,
73
74 #[cfg(hash_v3)]
75 /// SHA-512/224 Algorithm
76 SHA512_224 = 13,
77
78 #[cfg(hash_v3)]
79 /// SHA-512/256 Algorithm
80 SHA512_256 = 14,
81
82 #[cfg(hash_v3)]
83 /// SHA-256 Algorithm
84 SHA512 = 15,
85}
86
87/// Input data width selection
88#[repr(u8)]
89#[derive(Clone, Copy)]
90pub enum DataType {
91 ///32-bit data, no data is swapped.
92 Width32 = 0,
93 ///16-bit data, each half-word is swapped.
94 Width16 = 1,
95 ///8-bit data, all bytes are swapped.
96 Width8 = 2,
97 ///1-bit data, all bits are swapped.
98 Width1 = 3,
99}
100
101/// Stores the state of the HASH peripheral for suspending/resuming
102/// digest calculation.
103pub struct Context {
104 first_word_sent: bool,
105 buffer: [u8; HASH_BUFFER_LEN],
106 buflen: usize,
107 algo: Algorithm,
108 format: DataType,
109 imr: u32,
110 str: u32,
111 cr: u32,
112 csr: [u32; NUM_CONTEXT_REGS],
113}
114
115/// HASH driver.
116pub struct Hash<'d, T: Instance, D = NoDma> {
117 _peripheral: PeripheralRef<'d, T>,
118 #[allow(dead_code)]
119 dma: PeripheralRef<'d, D>,
120}
121
122impl<'d, T: Instance, D> Hash<'d, T, D> {
123 /// Instantiates, resets, and enables the HASH peripheral.
124 pub fn new(
125 peripheral: impl Peripheral<P = T> + 'd,
126 dma: impl Peripheral<P = D> + 'd,
127 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
128 ) -> Self {
129 HASH::enable_and_reset();
130 into_ref!(peripheral, dma);
131 let instance = Self {
132 _peripheral: peripheral,
133 dma: dma,
134 };
135
136 T::Interrupt::unpend();
137 unsafe { T::Interrupt::enable() };
138
139 instance
140 }
141
142 /// Starts computation of a new hash and returns the saved peripheral state.
143 pub fn start(&mut self, algorithm: Algorithm, format: DataType) -> Context {
144 // Define a context for this new computation.
145 let mut ctx = Context {
146 first_word_sent: false,
147 buffer: [0; HASH_BUFFER_LEN],
148 buflen: 0,
149 algo: algorithm,
150 format: format,
151 imr: 0,
152 str: 0,
153 cr: 0,
154 csr: [0; NUM_CONTEXT_REGS],
155 };
156
157 // Set the data type in the peripheral.
158 T::regs().cr().modify(|w| w.set_datatype(ctx.format as u8));
159
160 // Select the algorithm.
161 #[cfg(hash_v1)]
162 if ctx.algo == Algorithm::MD5 {
163 T::regs().cr().modify(|w| w.set_algo(true));
164 }
165
166 #[cfg(hash_v2)]
167 {
168 // Select the algorithm.
169 let mut algo0 = false;
170 let mut algo1 = false;
171 if ctx.algo == Algorithm::MD5 || ctx.algo == Algorithm::SHA256 {
172 algo0 = true;
173 }
174 if ctx.algo == Algorithm::SHA224 || ctx.algo == Algorithm::SHA256 {
175 algo1 = true;
176 }
177 T::regs().cr().modify(|w| w.set_algo0(algo0));
178 T::regs().cr().modify(|w| w.set_algo1(algo1));
179 }
180
181 #[cfg(any(hash_v3, hash_v4))]
182 T::regs().cr().modify(|w| w.set_algo(ctx.algo as u8));
183
184 T::regs().cr().modify(|w| w.set_init(true));
185
186 // Store and return the state of the peripheral.
187 self.store_context(&mut ctx);
188 ctx
189 }
190
191 /// Restores the peripheral state using the given context,
192 /// then updates the state with the provided data.
193 /// Peripheral state is saved upon return.
194 pub fn update_blocking(&mut self, ctx: &mut Context, input: &[u8]) {
195 let mut data_waiting = input.len() + ctx.buflen;
196 if data_waiting < DIGEST_BLOCK_SIZE || (data_waiting < ctx.buffer.len() && !ctx.first_word_sent) {
197 // There isn't enough data to digest a block, so append it to the buffer.
198 ctx.buffer[ctx.buflen..ctx.buflen + input.len()].copy_from_slice(input);
199 ctx.buflen += input.len();
200 return;
201 }
202
203 // Restore the peripheral state.
204 self.load_context(&ctx);
205
206 let mut ilen_remaining = input.len();
207 let mut input_start = 0;
208
209 // Handle first block.
210 if !ctx.first_word_sent {
211 let empty_len = ctx.buffer.len() - ctx.buflen;
212 let copy_len = min(empty_len, ilen_remaining);
213 // Fill the buffer.
214 if copy_len > 0 {
215 ctx.buffer[ctx.buflen..ctx.buflen + copy_len].copy_from_slice(&input[0..copy_len]);
216 ctx.buflen += copy_len;
217 ilen_remaining -= copy_len;
218 input_start += copy_len;
219 }
220 self.accumulate_blocking(ctx.buffer.as_slice());
221 data_waiting -= ctx.buflen;
222 ctx.buflen = 0;
223 ctx.first_word_sent = true;
224 }
225
226 if data_waiting < DIGEST_BLOCK_SIZE {
227 // There isn't enough data remaining to process another block, so store it.
228 ctx.buffer[0..ilen_remaining].copy_from_slice(&input[input_start..input_start + ilen_remaining]);
229 ctx.buflen += ilen_remaining;
230 } else {
231 // First ingest the data in the buffer.
232 let empty_len = DIGEST_BLOCK_SIZE - ctx.buflen;
233 if empty_len > 0 {
234 let copy_len = min(empty_len, ilen_remaining);
235 ctx.buffer[ctx.buflen..ctx.buflen + copy_len]
236 .copy_from_slice(&input[input_start..input_start + copy_len]);
237 ctx.buflen += copy_len;
238 ilen_remaining -= copy_len;
239 input_start += copy_len;
240 }
241 self.accumulate_blocking(&ctx.buffer[0..DIGEST_BLOCK_SIZE]);
242 ctx.buflen = 0;
243
244 // Move any extra data to the now-empty buffer.
245 let leftovers = ilen_remaining % 64;
246 if leftovers > 0 {
247 ctx.buffer[0..leftovers].copy_from_slice(&input[input.len() - leftovers..input.len()]);
248 ctx.buflen += leftovers;
249 ilen_remaining -= leftovers;
250 }
251
252 // Hash the remaining data.
253 self.accumulate_blocking(&input[input_start..input_start + ilen_remaining]);
254 }
255
256 // Save the peripheral context.
257 self.store_context(ctx);
258 }
259
260 /// Restores the peripheral state using the given context,
261 /// then updates the state with the provided data.
262 /// Peripheral state is saved upon return.
263 #[cfg(hash_v2)]
264 pub async fn update(&mut self, ctx: &mut Context, input: &[u8])
265 where
266 D: crate::hash::Dma<T>,
267 {
268 let data_waiting = input.len() + ctx.buflen;
269 if data_waiting < DIGEST_BLOCK_SIZE {
270 // There isn't enough data to digest a block, so append it to the buffer.
271 ctx.buffer[ctx.buflen..ctx.buflen + input.len()].copy_from_slice(input);
272 ctx.buflen += input.len();
273 return;
274 }
275
276 // Restore the peripheral state.
277 self.load_context(&ctx);
278
279 // Enable multiple DMA transfers.
280 T::regs().cr().modify(|w| w.set_mdmat(true));
281
282 let mut ilen_remaining = input.len();
283 let mut input_start = 0;
284
285 // First ingest the data in the buffer.
286 let empty_len = DIGEST_BLOCK_SIZE - ctx.buflen;
287 if empty_len > 0 {
288 let copy_len = min(empty_len, ilen_remaining);
289 ctx.buffer[ctx.buflen..ctx.buflen + copy_len].copy_from_slice(&input[input_start..input_start + copy_len]);
290 ctx.buflen += copy_len;
291 ilen_remaining -= copy_len;
292 input_start += copy_len;
293 }
294 self.accumulate(&ctx.buffer[..DIGEST_BLOCK_SIZE]).await;
295 ctx.buflen = 0;
296
297 // Move any extra data to the now-empty buffer.
298 let leftovers = ilen_remaining % DIGEST_BLOCK_SIZE;
299 if leftovers > 0 {
300 assert!(ilen_remaining >= leftovers);
301 ctx.buffer[0..leftovers].copy_from_slice(&input[input.len() - leftovers..input.len()]);
302 ctx.buflen += leftovers;
303 ilen_remaining -= leftovers;
304 } else {
305 ctx.buffer
306 .copy_from_slice(&input[input.len() - DIGEST_BLOCK_SIZE..input.len()]);
307 ctx.buflen += DIGEST_BLOCK_SIZE;
308 ilen_remaining -= DIGEST_BLOCK_SIZE;
309 }
310
311 // Hash the remaining data.
312 self.accumulate(&input[input_start..input_start + ilen_remaining]).await;
313
314 // Save the peripheral context.
315 self.store_context(ctx);
316 }
317
318 /// Computes a digest for the given context.
319 /// The digest buffer must be large enough to accomodate a digest for the selected algorithm.
320 /// The largest returned digest size is 128 bytes for SHA-512.
321 /// Panics if the supplied digest buffer is too short.
322 pub fn finish_blocking(&mut self, mut ctx: Context, digest: &mut [u8]) -> usize {
323 // Restore the peripheral state.
324 self.load_context(&ctx);
325
326 // Hash the leftover bytes, if any.
327 self.accumulate_blocking(&ctx.buffer[0..ctx.buflen]);
328 ctx.buflen = 0;
329
330 //Start the digest calculation.
331 T::regs().str().write(|w| w.set_dcal(true));
332
333 // Block waiting for digest.
334 while !T::regs().sr().read().dcis() {}
335
336 // Return the digest.
337 let digest_words = match ctx.algo {
338 Algorithm::SHA1 => 5,
339 #[cfg(any(hash_v1, hash_v2, hash_v4))]
340 Algorithm::MD5 => 4,
341 Algorithm::SHA224 => 7,
342 Algorithm::SHA256 => 8,
343 #[cfg(hash_v3)]
344 Algorithm::SHA384 => 12,
345 #[cfg(hash_v3)]
346 Algorithm::SHA512_224 => 7,
347 #[cfg(hash_v3)]
348 Algorithm::SHA512_256 => 8,
349 #[cfg(hash_v3)]
350 Algorithm::SHA512 => 16,
351 };
352
353 let digest_len_bytes = digest_words * 4;
354 // Panics if the supplied digest buffer is too short.
355 if digest.len() < digest_len_bytes {
356 panic!("Digest buffer must be at least {} bytes long.", digest_words * 4);
357 }
358
359 let mut i = 0;
360 while i < digest_words {
361 let word = T::regs().hr(i).read();
362 digest[(i * 4)..((i * 4) + 4)].copy_from_slice(word.to_be_bytes().as_slice());
363 i += 1;
364 }
365 digest_len_bytes
366 }
367
368 /// Computes a digest for the given context.
369 /// The digest buffer must be large enough to accomodate a digest for the selected algorithm.
370 /// The largest returned digest size is 128 bytes for SHA-512.
371 /// Panics if the supplied digest buffer is too short.
372 #[cfg(hash_v2)]
373 pub async fn finish(&mut self, mut ctx: Context, digest: &mut [u8]) -> usize
374 where
375 D: crate::hash::Dma<T>,
376 {
377 // Restore the peripheral state.
378 self.load_context(&ctx);
379
380 // Must be cleared prior to the last DMA transfer.
381 T::regs().cr().modify(|w| w.set_mdmat(false));
382
383 // Hash the leftover bytes, if any.
384 self.accumulate(&ctx.buffer[0..ctx.buflen]).await;
385 ctx.buflen = 0;
386
387 // Wait for completion.
388 poll_fn(|cx| {
389 // Check if already done.
390 let bits = T::regs().sr().read();
391 if bits.dcis() {
392 return Poll::Ready(());
393 }
394 // Register waker, then enable interrupts.
395 HASH_WAKER.register(cx.waker());
396 T::regs().imr().modify(|reg| reg.set_dcie(true));
397 // Check for completion.
398 let bits = T::regs().sr().read();
399 if bits.dcis() {
400 Poll::Ready(())
401 } else {
402 Poll::Pending
403 }
404 })
405 .await;
406
407 // Return the digest.
408 let digest_words = match ctx.algo {
409 Algorithm::SHA1 => 5,
410 #[cfg(any(hash_v1, hash_v2, hash_v4))]
411 Algorithm::MD5 => 4,
412 Algorithm::SHA224 => 7,
413 Algorithm::SHA256 => 8,
414 #[cfg(hash_v3)]
415 Algorithm::SHA384 => 12,
416 #[cfg(hash_v3)]
417 Algorithm::SHA512_224 => 7,
418 #[cfg(hash_v3)]
419 Algorithm::SHA512_256 => 8,
420 #[cfg(hash_v3)]
421 Algorithm::SHA512 => 16,
422 };
423
424 let digest_len_bytes = digest_words * 4;
425 // Panics if the supplied digest buffer is too short.
426 if digest.len() < digest_len_bytes {
427 panic!("Digest buffer must be at least {} bytes long.", digest_words * 4);
428 }
429
430 let mut i = 0;
431 while i < digest_words {
432 let word = T::regs().hr(i).read();
433 digest[(i * 4)..((i * 4) + 4)].copy_from_slice(word.to_be_bytes().as_slice());
434 i += 1;
435 }
436 digest_len_bytes
437 }
438
439 /// Push data into the hash core.
440 fn accumulate_blocking(&mut self, input: &[u8]) {
441 // Set the number of valid bits.
442 let num_valid_bits: u8 = (8 * (input.len() % 4)) as u8;
443 T::regs().str().modify(|w| w.set_nblw(num_valid_bits));
444
445 let mut i = 0;
446 while i < input.len() {
447 let mut word: [u8; 4] = [0; 4];
448 let copy_idx = min(i + 4, input.len());
449 word[0..copy_idx - i].copy_from_slice(&input[i..copy_idx]);
450 T::regs().din().write_value(u32::from_ne_bytes(word));
451 i += 4;
452 }
453 }
454
455 /// Push data into the hash core.
456 #[cfg(hash_v2)]
457 async fn accumulate(&mut self, input: &[u8])
458 where
459 D: crate::hash::Dma<T>,
460 {
461 // Ignore an input length of 0.
462 if input.len() == 0 {
463 return;
464 }
465
466 // Set the number of valid bits.
467 let num_valid_bits: u8 = (8 * (input.len() % 4)) as u8;
468 T::regs().str().modify(|w| w.set_nblw(num_valid_bits));
469
470 // Configure DMA to transfer input to hash core.
471 let dma_request = self.dma.request();
472 let dst_ptr = T::regs().din().as_ptr();
473 let mut num_words = input.len() / 4;
474 if input.len() % 4 > 0 {
475 num_words += 1;
476 }
477 let src_ptr = ptr::slice_from_raw_parts(input.as_ptr().cast(), num_words);
478 let dma_transfer =
479 unsafe { Transfer::new_write_raw(&mut self.dma, dma_request, src_ptr, dst_ptr, Default::default()) };
480 T::regs().cr().modify(|w| w.set_dmae(true));
481
482 // Wait for the transfer to complete.
483 dma_transfer.await;
484 }
485
486 /// Save the peripheral state to a context.
487 fn store_context(&mut self, ctx: &mut Context) {
488 // Block waiting for data in ready.
489 while !T::regs().sr().read().dinis() {}
490
491 // Store peripheral context.
492 ctx.imr = T::regs().imr().read().0;
493 ctx.str = T::regs().str().read().0;
494 ctx.cr = T::regs().cr().read().0;
495 let mut i = 0;
496 while i < NUM_CONTEXT_REGS {
497 ctx.csr[i] = T::regs().csr(i).read();
498 i += 1;
499 }
500 }
501
502 /// Restore the peripheral state from a context.
503 fn load_context(&mut self, ctx: &Context) {
504 // Restore the peripheral state from the context.
505 T::regs().imr().write_value(Imr { 0: ctx.imr });
506 T::regs().str().write_value(Str { 0: ctx.str });
507 T::regs().cr().write_value(Cr { 0: ctx.cr });
508 T::regs().cr().modify(|w| w.set_init(true));
509 let mut i = 0;
510 while i < NUM_CONTEXT_REGS {
511 T::regs().csr(i).write_value(ctx.csr[i]);
512 i += 1;
513 }
514 }
515}
516
517pub(crate) mod sealed {
518 use super::*;
519
520 pub trait Instance {
521 fn regs() -> pac::hash::Hash;
522 }
523}
524
525/// HASH instance trait.
526pub trait Instance: sealed::Instance + Peripheral<P = Self> + crate::rcc::RccPeripheral + 'static + Send {
527 /// Interrupt for this HASH instance.
528 type Interrupt: interrupt::typelevel::Interrupt;
529}
530
531foreach_interrupt!(
532 ($inst:ident, hash, HASH, GLOBAL, $irq:ident) => {
533 impl Instance for peripherals::$inst {
534 type Interrupt = crate::interrupt::typelevel::$irq;
535 }
536
537 impl sealed::Instance for peripherals::$inst {
538 fn regs() -> crate::pac::hash::Hash {
539 crate::pac::$inst
540 }
541 }
542 };
543);
544
545dma_trait!(Dma, Instance);
diff --git a/embassy-stm32/src/hash/v1v3v4.rs b/embassy-stm32/src/hash/v1v3v4.rs
deleted file mode 100644
index 771144b11..000000000
--- a/embassy-stm32/src/hash/v1v3v4.rs
+++ /dev/null
@@ -1,399 +0,0 @@
1//! Hash generator (HASH)
2use core::cmp::min;
3use core::future::poll_fn;
4use core::marker::PhantomData;
5use core::task::Poll;
6
7use embassy_hal_internal::{into_ref, PeripheralRef};
8use embassy_sync::waitqueue::AtomicWaker;
9use stm32_metapac::hash::regs::*;
10
11use crate::interrupt::typelevel::Interrupt;
12use crate::peripherals::HASH;
13use crate::rcc::sealed::RccPeripheral;
14use crate::{interrupt, pac, peripherals, Peripheral};
15
16#[cfg(hash_v1)]
17const NUM_CONTEXT_REGS: usize = 51;
18#[cfg(hash_v3)]
19const NUM_CONTEXT_REGS: usize = 103;
20#[cfg(hash_v4)]
21const NUM_CONTEXT_REGS: usize = 54;
22
23const HASH_BUFFER_LEN: usize = 132;
24const DIGEST_BLOCK_SIZE: usize = 128;
25const MAX_DIGEST_SIZE: usize = 128;
26
27static HASH_WAKER: AtomicWaker = AtomicWaker::new();
28
29/// HASH interrupt handler.
30pub struct InterruptHandler<T: Instance> {
31 _phantom: PhantomData<T>,
32}
33
34impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
35 unsafe fn on_interrupt() {
36 let bits = T::regs().sr().read();
37 if bits.dinis() {
38 T::regs().imr().modify(|reg| reg.set_dinie(false));
39 HASH_WAKER.wake();
40 }
41 if bits.dcis() {
42 T::regs().imr().modify(|reg| reg.set_dcie(false));
43 HASH_WAKER.wake();
44 }
45 }
46}
47
48///Hash algorithm selection
49#[derive(Clone, Copy, PartialEq)]
50pub enum Algorithm {
51 /// SHA-1 Algorithm
52 SHA1 = 0,
53
54 #[cfg(any(hash_v1, hash_v4))]
55 /// MD5 Algorithm
56 MD5 = 1,
57
58 /// SHA-224 Algorithm
59 SHA224 = 2,
60
61 /// SHA-256 Algorithm
62 SHA256 = 3,
63
64 #[cfg(hash_v3)]
65 /// SHA-384 Algorithm
66 SHA384 = 12,
67
68 #[cfg(hash_v3)]
69 /// SHA-512/224 Algorithm
70 SHA512_224 = 13,
71
72 #[cfg(hash_v3)]
73 /// SHA-512/256 Algorithm
74 SHA512_256 = 14,
75
76 #[cfg(hash_v3)]
77 /// SHA-256 Algorithm
78 SHA512 = 15,
79}
80
81/// Input data width selection
82#[repr(u8)]
83#[derive(Clone, Copy)]
84pub enum DataType {
85 ///32-bit data, no data is swapped.
86 Width32 = 0,
87 ///16-bit data, each half-word is swapped.
88 Width16 = 1,
89 ///8-bit data, all bytes are swapped.
90 Width8 = 2,
91 ///1-bit data, all bits are swapped.
92 Width1 = 3,
93}
94
95/// Stores the state of the HASH peripheral for suspending/resuming
96/// digest calculation.
97pub struct Context {
98 first_word_sent: bool,
99 buffer: [u8; HASH_BUFFER_LEN],
100 buflen: usize,
101 algo: Algorithm,
102 format: DataType,
103 imr: u32,
104 str: u32,
105 cr: u32,
106 csr: [u32; NUM_CONTEXT_REGS],
107}
108
109/// HASH driver.
110pub struct Hash<'d, T: Instance> {
111 _peripheral: PeripheralRef<'d, T>,
112}
113
114impl<'d, T: Instance> Hash<'d, T> {
115 /// Instantiates, resets, and enables the HASH peripheral.
116 pub fn new(
117 peripheral: impl Peripheral<P = T> + 'd,
118 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
119 ) -> Self {
120 HASH::enable_and_reset();
121 into_ref!(peripheral);
122 let instance = Self {
123 _peripheral: peripheral,
124 };
125
126 T::Interrupt::unpend();
127 unsafe { T::Interrupt::enable() };
128
129 instance
130 }
131
132 /// Starts computation of a new hash and returns the saved peripheral state.
133 pub async fn start(&mut self, algorithm: Algorithm, format: DataType) -> Context {
134 // Define a context for this new computation.
135 let mut ctx = Context {
136 first_word_sent: false,
137 buffer: [0; HASH_BUFFER_LEN],
138 buflen: 0,
139 algo: algorithm,
140 format: format,
141 imr: 0,
142 str: 0,
143 cr: 0,
144 csr: [0; NUM_CONTEXT_REGS],
145 };
146
147 // Set the data type in the peripheral.
148 T::regs().cr().modify(|w| w.set_datatype(ctx.format as u8));
149
150 // Select the algorithm.
151 #[cfg(hash_v1)]
152 if ctx.algo == Algorithm::MD5 {
153 T::regs().cr().modify(|w| w.set_algo(true));
154 }
155
156 #[cfg(hash_v2)]
157 {
158 // Select the algorithm.
159 let mut algo0 = false;
160 let mut algo1 = false;
161 if ctx.algo == Algorithm::MD5 || ctx.algo == Algorithm::SHA256 {
162 algo0 = true;
163 }
164 if ctx.algo == Algorithm::SHA224 || ctx.algo == Algorithm::SHA256 {
165 algo1 = true;
166 }
167 T::regs().cr().modify(|w| w.set_algo0(algo0));
168 T::regs().cr().modify(|w| w.set_algo1(algo1));
169 }
170
171 #[cfg(any(hash_v3, hash_v4))]
172 T::regs().cr().modify(|w| w.set_algo(ctx.algo as u8));
173
174 T::regs().cr().modify(|w| w.set_init(true));
175
176 // Store and return the state of the peripheral.
177 self.store_context(&mut ctx).await;
178 ctx
179 }
180
181 /// Restores the peripheral state using the given context,
182 /// then updates the state with the provided data.
183 /// Peripheral state is saved upon return.
184 pub async fn update(&mut self, ctx: &mut Context, input: &[u8]) {
185 let mut data_waiting = input.len() + ctx.buflen;
186 if data_waiting < DIGEST_BLOCK_SIZE || (data_waiting < ctx.buffer.len() && !ctx.first_word_sent) {
187 // There isn't enough data to digest a block, so append it to the buffer.
188 ctx.buffer[ctx.buflen..ctx.buflen + input.len()].copy_from_slice(input);
189 ctx.buflen += input.len();
190 return;
191 }
192
193 // Restore the peripheral state.
194 self.load_context(&ctx);
195
196 let mut ilen_remaining = input.len();
197 let mut input_start = 0;
198
199 // Handle first block.
200 if !ctx.first_word_sent {
201 let empty_len = ctx.buffer.len() - ctx.buflen;
202 let copy_len = min(empty_len, ilen_remaining);
203 // Fill the buffer.
204 if copy_len > 0 {
205 ctx.buffer[ctx.buflen..ctx.buflen + copy_len].copy_from_slice(&input[0..copy_len]);
206 ctx.buflen += copy_len;
207 ilen_remaining -= copy_len;
208 input_start += copy_len;
209 }
210 self.accumulate(ctx.buffer.as_slice());
211 data_waiting -= ctx.buflen;
212 ctx.buflen = 0;
213 ctx.first_word_sent = true;
214 }
215
216 if data_waiting < DIGEST_BLOCK_SIZE {
217 // There isn't enough data remaining to process another block, so store it.
218 ctx.buffer[0..ilen_remaining].copy_from_slice(&input[input_start..input_start + ilen_remaining]);
219 ctx.buflen += ilen_remaining;
220 } else {
221 // First ingest the data in the buffer.
222 let empty_len = DIGEST_BLOCK_SIZE - ctx.buflen;
223 if empty_len > 0 {
224 let copy_len = min(empty_len, ilen_remaining);
225 ctx.buffer[ctx.buflen..ctx.buflen + copy_len]
226 .copy_from_slice(&input[input_start..input_start + copy_len]);
227 ctx.buflen += copy_len;
228 ilen_remaining -= copy_len;
229 input_start += copy_len;
230 }
231 self.accumulate(&ctx.buffer[0..DIGEST_BLOCK_SIZE]);
232 ctx.buflen = 0;
233
234 // Move any extra data to the now-empty buffer.
235 let leftovers = ilen_remaining % 64;
236 if leftovers > 0 {
237 ctx.buffer[0..leftovers].copy_from_slice(&input[input.len() - leftovers..input.len()]);
238 ctx.buflen += leftovers;
239 ilen_remaining -= leftovers;
240 }
241
242 // Hash the remaining data.
243 self.accumulate(&input[input_start..input_start + ilen_remaining]);
244 }
245
246 // Save the peripheral context.
247 self.store_context(ctx).await;
248 }
249
250 /// Computes a digest for the given context. A slice of the provided digest buffer is returned.
251 /// The length of the returned slice is dependent on the digest length of the selected algorithm.
252 pub async fn finish<'a>(&mut self, mut ctx: Context, digest: &'a mut [u8; MAX_DIGEST_SIZE]) -> &'a [u8] {
253 // Restore the peripheral state.
254 self.load_context(&ctx);
255
256 // Hash the leftover bytes, if any.
257 self.accumulate(&ctx.buffer[0..ctx.buflen]);
258 ctx.buflen = 0;
259
260 //Start the digest calculation.
261 T::regs().str().write(|w| w.set_dcal(true));
262
263 // Wait for completion.
264 poll_fn(|cx| {
265 // Check if already done.
266 let bits = T::regs().sr().read();
267 if bits.dcis() {
268 return Poll::Ready(());
269 }
270 // Register waker, then enable interrupts.
271 HASH_WAKER.register(cx.waker());
272 T::regs().imr().modify(|reg| reg.set_dcie(true));
273 // Check for completion.
274 let bits = T::regs().sr().read();
275 if bits.dcis() {
276 Poll::Ready(())
277 } else {
278 Poll::Pending
279 }
280 })
281 .await;
282
283 // Return the digest.
284 let digest_words = match ctx.algo {
285 Algorithm::SHA1 => 5,
286 #[cfg(any(hash_v1, hash_v4))]
287 Algorithm::MD5 => 4,
288 Algorithm::SHA224 => 7,
289 Algorithm::SHA256 => 8,
290 #[cfg(hash_v3)]
291 Algorithm::SHA384 => 12,
292 #[cfg(hash_v3)]
293 Algorithm::SHA512_224 => 7,
294 #[cfg(hash_v3)]
295 Algorithm::SHA512_256 => 8,
296 #[cfg(hash_v3)]
297 Algorithm::SHA512 => 16,
298 };
299 let mut i = 0;
300 while i < digest_words {
301 let word = T::regs().hr(i).read();
302 digest[(i * 4)..((i * 4) + 4)].copy_from_slice(word.to_be_bytes().as_slice());
303 i += 1;
304 }
305 &digest[0..digest_words * 4]
306 }
307
308 /// Push data into the hash core.
309 fn accumulate(&mut self, input: &[u8]) {
310 // Set the number of valid bits.
311 let num_valid_bits: u8 = (8 * (input.len() % 4)) as u8;
312 T::regs().str().modify(|w| w.set_nblw(num_valid_bits));
313
314 let mut i = 0;
315 while i < input.len() {
316 let mut word: [u8; 4] = [0; 4];
317 let copy_idx = min(i + 4, input.len());
318 word[0..copy_idx - i].copy_from_slice(&input[i..copy_idx]);
319 T::regs().din().write_value(u32::from_ne_bytes(word));
320 i += 4;
321 }
322 }
323
324 /// Save the peripheral state to a context.
325 async fn store_context(&mut self, ctx: &mut Context) {
326 // Wait for interrupt.
327 poll_fn(|cx| {
328 // Check if already done.
329 let bits = T::regs().sr().read();
330 if bits.dinis() {
331 return Poll::Ready(());
332 }
333 // Register waker, then enable interrupts.
334 HASH_WAKER.register(cx.waker());
335 T::regs().imr().modify(|reg| reg.set_dinie(true));
336 // Check for completion.
337 let bits = T::regs().sr().read();
338 if bits.dinis() {
339 Poll::Ready(())
340 } else {
341 Poll::Pending
342 }
343 })
344 .await;
345
346 ctx.imr = T::regs().imr().read().0;
347 ctx.str = T::regs().str().read().0;
348 ctx.cr = T::regs().cr().read().0;
349 let mut i = 0;
350 while i < NUM_CONTEXT_REGS {
351 ctx.csr[i] = T::regs().csr(i).read();
352 i += 1;
353 }
354 }
355
356 /// Restore the peripheral state from a context.
357 fn load_context(&mut self, ctx: &Context) {
358 // Restore the peripheral state from the context.
359 T::regs().imr().write_value(Imr { 0: ctx.imr });
360 T::regs().str().write_value(Str { 0: ctx.str });
361 T::regs().cr().write_value(Cr { 0: ctx.cr });
362 T::regs().cr().modify(|w| w.set_init(true));
363 let mut i = 0;
364 while i < NUM_CONTEXT_REGS {
365 T::regs().csr(i).write_value(ctx.csr[i]);
366 i += 1;
367 }
368 }
369}
370
371pub(crate) mod sealed {
372 use super::*;
373
374 pub trait Instance {
375 fn regs() -> pac::hash::Hash;
376 }
377}
378
379/// HASH instance trait.
380pub trait Instance: sealed::Instance + Peripheral<P = Self> + crate::rcc::RccPeripheral + 'static + Send {
381 /// Interrupt for this HASH instance.
382 type Interrupt: interrupt::typelevel::Interrupt;
383}
384
385foreach_interrupt!(
386 ($inst:ident, hash, HASH, GLOBAL, $irq:ident) => {
387 impl Instance for peripherals::$inst {
388 type Interrupt = crate::interrupt::typelevel::$irq;
389 }
390
391 impl sealed::Instance for peripherals::$inst {
392 fn regs() -> crate::pac::hash::Hash {
393 crate::pac::$inst
394 }
395 }
396 };
397);
398
399dma_trait!(Dma, Instance);
diff --git a/embassy-stm32/src/hash/v2.rs b/embassy-stm32/src/hash/v2.rs
deleted file mode 100644
index b8104c825..000000000
--- a/embassy-stm32/src/hash/v2.rs
+++ /dev/null
@@ -1,389 +0,0 @@
1//! Hash generator (HASH)
2use core::cmp::min;
3use core::future::poll_fn;
4use core::marker::PhantomData;
5use core::ptr;
6use core::task::Poll;
7
8use embassy_hal_internal::{into_ref, PeripheralRef};
9use embassy_sync::waitqueue::AtomicWaker;
10use stm32_metapac::hash::regs::*;
11
12use crate::dma::Transfer;
13use crate::interrupt::typelevel::Interrupt;
14use crate::peripherals::HASH;
15use crate::rcc::sealed::RccPeripheral;
16use crate::{interrupt, pac, peripherals, Peripheral};
17
18#[cfg(hash_v2)]
19const NUM_CONTEXT_REGS: usize = 54;
20#[cfg(hash_v3)]
21const NUM_CONTEXT_REGS: usize = 103;
22const DIGEST_BLOCK_SIZE: usize = 64;
23const MAX_DIGEST_SIZE: usize = 64;
24
25static HASH_WAKER: AtomicWaker = AtomicWaker::new();
26
27/// HASH interrupt handler.
28pub struct InterruptHandler<T: Instance> {
29 _phantom: PhantomData<T>,
30}
31
32impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
33 unsafe fn on_interrupt() {
34 let bits = T::regs().sr().read();
35 if bits.dinis() {
36 T::regs().imr().modify(|reg| reg.set_dinie(false));
37 HASH_WAKER.wake();
38 }
39 if bits.dcis() {
40 T::regs().imr().modify(|reg| reg.set_dcie(false));
41 HASH_WAKER.wake();
42 }
43 }
44}
45
46///Hash algorithm selection
47#[derive(Clone, Copy, PartialEq)]
48pub enum Algorithm {
49 /// SHA-1 Algorithm
50 SHA1 = 0,
51
52 #[cfg(hash_v2)]
53 /// MD5 Algorithm
54 MD5 = 1,
55
56 /// SHA-224 Algorithm
57 SHA224 = 2,
58
59 /// SHA-256 Algorithm
60 SHA256 = 3,
61
62 #[cfg(hash_v3)]
63 /// SHA-384 Algorithm
64 SHA384 = 12,
65
66 #[cfg(hash_v3)]
67 /// SHA-512/224 Algorithm
68 SHA512_224 = 13,
69
70 #[cfg(hash_v3)]
71 /// SHA-512/256 Algorithm
72 SHA512_256 = 14,
73
74 #[cfg(hash_v3)]
 75 /// SHA-512 Algorithm
76 SHA512 = 15,
77}
78
79/// Input data width selection
80#[repr(u8)]
81#[derive(Clone, Copy)]
82pub enum DataType {
83 ///32-bit data, no data is swapped.
84 Width32 = 0,
85 ///16-bit data, each half-word is swapped.
86 Width16 = 1,
87 ///8-bit data, all bytes are swapped.
88 Width8 = 2,
89 ///1-bit data, all bits are swapped.
90 Width1 = 3,
91}
92
93/// Stores the state of the HASH peripheral for suspending/resuming
94/// digest calculation.
95pub struct Context {
96 buffer: [u8; DIGEST_BLOCK_SIZE],
97 buflen: usize,
98 algo: Algorithm,
99 format: DataType,
100 imr: u32,
101 str: u32,
102 cr: u32,
103 csr: [u32; NUM_CONTEXT_REGS],
104}
105
106/// HASH driver.
107pub struct Hash<'d, T: Instance, D: Dma<T>> {
108 _peripheral: PeripheralRef<'d, T>,
109 dma: PeripheralRef<'d, D>,
110}
111
112impl<'d, T: Instance, D: Dma<T>> Hash<'d, T, D> {
113 /// Instantiates, resets, and enables the HASH peripheral.
114 pub fn new(
115 peripheral: impl Peripheral<P = T> + 'd,
116 dma: impl Peripheral<P = D> + 'd,
117 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
118 ) -> Self {
119 HASH::enable_and_reset();
120 into_ref!(peripheral, dma);
121 let instance = Self {
122 _peripheral: peripheral,
123 dma: dma,
124 };
125
126 T::Interrupt::unpend();
127 unsafe { T::Interrupt::enable() };
128
129 instance
130 }
131
132 /// Starts computation of a new hash and returns the saved peripheral state.
133 pub async fn start(&mut self, algorithm: Algorithm, format: DataType) -> Context {
134 // Define a context for this new computation.
135 let mut ctx = Context {
136 buffer: [0; DIGEST_BLOCK_SIZE],
137 buflen: 0,
138 algo: algorithm,
139 format: format,
140 imr: 0,
141 str: 0,
142 cr: 0,
143 csr: [0; NUM_CONTEXT_REGS],
144 };
145
146 // Set the data type in the peripheral.
147 T::regs().cr().modify(|w| w.set_datatype(ctx.format as u8));
148
149 #[cfg(hash_v2)]
150 {
151 // Select the algorithm.
152 let mut algo0 = false;
153 let mut algo1 = false;
154 if ctx.algo == Algorithm::MD5 || ctx.algo == Algorithm::SHA256 {
155 algo0 = true;
156 }
157 if ctx.algo == Algorithm::SHA224 || ctx.algo == Algorithm::SHA256 {
158 algo1 = true;
159 }
160 T::regs().cr().modify(|w| w.set_algo0(algo0));
161 T::regs().cr().modify(|w| w.set_algo1(algo1));
162 }
163
164 #[cfg(hash_v3)]
165 T::regs().cr().modify(|w| w.set_algo(ctx.algo as u8));
166
167 // Enable multiple DMA transfers.
168 T::regs().cr().modify(|w| w.set_mdmat(true));
169
170 // Set init to load the context registers. Necessary before storing context.
171 T::regs().cr().modify(|w| w.set_init(true));
172
173 // Store and return the state of the peripheral.
174 self.store_context(&mut ctx).await;
175 ctx
176 }
177
178 /// Restores the peripheral state using the given context,
179 /// then updates the state with the provided data.
180 /// Peripheral state is saved upon return.
181 pub async fn update(&mut self, ctx: &mut Context, input: &[u8]) {
182 let data_waiting = input.len() + ctx.buflen;
183 if data_waiting < DIGEST_BLOCK_SIZE {
184 // There isn't enough data to digest a block, so append it to the buffer.
185 ctx.buffer[ctx.buflen..ctx.buflen + input.len()].copy_from_slice(input);
186 ctx.buflen += input.len();
187 return;
188 }
189
190 // Restore the peripheral state.
191 self.load_context(&ctx);
192
193 let mut ilen_remaining = input.len();
194 let mut input_start = 0;
195
196 // First ingest the data in the buffer.
197 let empty_len = DIGEST_BLOCK_SIZE - ctx.buflen;
198 if empty_len > 0 {
199 let copy_len = min(empty_len, ilen_remaining);
200 ctx.buffer[ctx.buflen..ctx.buflen + copy_len].copy_from_slice(&input[input_start..input_start + copy_len]);
201 ctx.buflen += copy_len;
202 ilen_remaining -= copy_len;
203 input_start += copy_len;
204 }
205 self.accumulate(&ctx.buffer).await;
206 ctx.buflen = 0;
207
208 // Move any extra data to the now-empty buffer.
209 let leftovers = ilen_remaining % DIGEST_BLOCK_SIZE;
210 if leftovers > 0 {
211 assert!(ilen_remaining >= leftovers);
212 ctx.buffer[0..leftovers].copy_from_slice(&input[input.len() - leftovers..input.len()]);
213 ctx.buflen += leftovers;
214 ilen_remaining -= leftovers;
215 } else {
216 ctx.buffer
217 .copy_from_slice(&input[input.len() - DIGEST_BLOCK_SIZE..input.len()]);
218 ctx.buflen += DIGEST_BLOCK_SIZE;
219 ilen_remaining -= DIGEST_BLOCK_SIZE;
220 }
221
222 // Hash the remaining data.
223 self.accumulate(&input[input_start..input_start + ilen_remaining]).await;
224
225 // Save the peripheral context.
226 self.store_context(ctx).await;
227 }
228
229 /// Computes a digest for the given context. A slice of the provided digest buffer is returned.
230 /// The length of the returned slice is dependent on the digest length of the selected algorithm.
231 pub async fn finish<'a>(&mut self, mut ctx: Context, digest: &'a mut [u8; MAX_DIGEST_SIZE]) -> &'a [u8] {
232 // Restore the peripheral state.
233 self.load_context(&ctx);
234
235 // Must be cleared prior to the last DMA transfer.
236 T::regs().cr().modify(|w| w.set_mdmat(false));
237
238 // Hash the leftover bytes, if any.
239 self.accumulate(&ctx.buffer[0..ctx.buflen]).await;
240 ctx.buflen = 0;
241
242 // Wait for completion.
243 poll_fn(|cx| {
244 // Check if already done.
245 let bits = T::regs().sr().read();
246 if bits.dcis() {
247 return Poll::Ready(());
248 }
249 // Register waker, then enable interrupts.
250 HASH_WAKER.register(cx.waker());
251 T::regs().imr().modify(|reg| reg.set_dcie(true));
252 // Check for completion.
253 let bits = T::regs().sr().read();
254 if bits.dcis() {
255 Poll::Ready(())
256 } else {
257 Poll::Pending
258 }
259 })
260 .await;
261
262 // Return the digest.
263 let digest_words = match ctx.algo {
264 Algorithm::SHA1 => 5,
265 #[cfg(hash_v2)]
266 Algorithm::MD5 => 4,
267 Algorithm::SHA224 => 7,
268 Algorithm::SHA256 => 8,
269 #[cfg(hash_v3)]
270 Algorithm::SHA384 => 12,
271 #[cfg(hash_v3)]
272 Algorithm::SHA512_224 => 7,
273 #[cfg(hash_v3)]
274 Algorithm::SHA512_256 => 8,
275 #[cfg(hash_v3)]
276 Algorithm::SHA512 => 16,
277 };
278 let mut i = 0;
279 while i < digest_words {
280 let word = T::regs().hr(i).read();
281 digest[(i * 4)..((i * 4) + 4)].copy_from_slice(word.to_be_bytes().as_slice());
282 i += 1;
283 }
284 &digest[0..digest_words * 4]
285 }
286
287 /// Push data into the hash core.
288 async fn accumulate(&mut self, input: &[u8]) {
289 // Ignore an input length of 0.
290 if input.len() == 0 {
291 return;
292 }
293
294 // Set the number of valid bits.
295 let num_valid_bits: u8 = (8 * (input.len() % 4)) as u8;
296 T::regs().str().modify(|w| w.set_nblw(num_valid_bits));
297
298 // Configure DMA to transfer input to hash core.
299 let dma_request = self.dma.request();
300 let dst_ptr = T::regs().din().as_ptr();
301 let mut num_words = input.len() / 4;
302 if input.len() % 4 > 0 {
303 num_words += 1;
304 }
305 let src_ptr = ptr::slice_from_raw_parts(input.as_ptr().cast(), num_words);
306 let dma_transfer =
307 unsafe { Transfer::new_write_raw(&mut self.dma, dma_request, src_ptr, dst_ptr, Default::default()) };
308 T::regs().cr().modify(|w| w.set_dmae(true));
309
310 // Wait for the transfer to complete.
311 dma_transfer.await;
312 }
313
314 /// Save the peripheral state to a context.
315 async fn store_context(&mut self, ctx: &mut Context) {
316 // Wait for interrupt.
317 poll_fn(|cx| {
318 // Check if already done.
319 let bits = T::regs().sr().read();
320 if bits.dinis() {
321 return Poll::Ready(());
322 }
323 // Register waker, then enable interrupts.
324 HASH_WAKER.register(cx.waker());
325 T::regs().imr().modify(|reg| reg.set_dinie(true));
326 // Check for completion.
327 let bits = T::regs().sr().read();
328 if bits.dinis() {
329 Poll::Ready(())
330 } else {
331 Poll::Pending
332 }
333 })
334 .await;
335
336 ctx.imr = T::regs().imr().read().0;
337 ctx.str = T::regs().str().read().0;
338 ctx.cr = T::regs().cr().read().0;
339 let mut i = 0;
340 while i < NUM_CONTEXT_REGS {
341 ctx.csr[i] = T::regs().csr(i).read();
342 i += 1;
343 }
344 }
345
346 /// Restore the peripheral state from a context.
347 fn load_context(&mut self, ctx: &Context) {
348 // Restore the peripheral state from the context.
349 T::regs().imr().write_value(Imr { 0: ctx.imr });
350 T::regs().str().write_value(Str { 0: ctx.str });
351 T::regs().cr().write_value(Cr { 0: ctx.cr });
352 T::regs().cr().modify(|w| w.set_init(true));
353 let mut i = 0;
354 while i < NUM_CONTEXT_REGS {
355 T::regs().csr(i).write_value(ctx.csr[i]);
356 i += 1;
357 }
358 }
359}
360
361pub(crate) mod sealed {
362 use super::*;
363
364 pub trait Instance {
365 fn regs() -> pac::hash::Hash;
366 }
367}
368
369/// HASH instance trait.
370pub trait Instance: sealed::Instance + Peripheral<P = Self> + crate::rcc::RccPeripheral + 'static + Send {
371 /// Interrupt for this HASH instance.
372 type Interrupt: interrupt::typelevel::Interrupt;
373}
374
375foreach_interrupt!(
376 ($inst:ident, hash, HASH, GLOBAL, $irq:ident) => {
377 impl Instance for peripherals::$inst {
378 type Interrupt = crate::interrupt::typelevel::$irq;
379 }
380
381 impl sealed::Instance for peripherals::$inst {
382 fn regs() -> crate::pac::hash::Hash {
383 crate::pac::$inst
384 }
385 }
386 };
387);
388
389dma_trait!(Dma, Instance);