diff options
| author | Dario Nieuwenhuis <[email protected]> | 2025-03-25 22:18:21 +0100 |
|---|---|---|
| committer | Dario Nieuwenhuis <[email protected]> | 2025-03-25 22:18:21 +0100 |
| commit | a592acb8068011787453944963b384fd43a52e65 (patch) | |
| tree | a24154b18817b34cbb80b3b253fe3b7a1bc7cb6f | |
| parent | db86aba841851d1a25f9bec7a1686db34c94c885 (diff) | |
stm32/cryp: remove DMA generic param.
| -rw-r--r-- | embassy-stm32/src/cryp/mod.rs | 877 | ||||
| -rw-r--r-- | embassy-stm32/src/dma/mod.rs | 11 |
2 files changed, 435 insertions, 453 deletions
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 6afe68a39..54d2c30e5 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs | |||
| @@ -7,8 +7,9 @@ use core::ptr; | |||
| 7 | use embassy_hal_internal::{into_ref, PeripheralRef}; | 7 | use embassy_hal_internal::{into_ref, PeripheralRef}; |
| 8 | use embassy_sync::waitqueue::AtomicWaker; | 8 | use embassy_sync::waitqueue::AtomicWaker; |
| 9 | 9 | ||
| 10 | use crate::dma::{NoDma, Transfer, TransferOptions}; | 10 | use crate::dma::{ChannelAndRequest, TransferOptions}; |
| 11 | use crate::interrupt::typelevel::Interrupt; | 11 | use crate::interrupt::typelevel::Interrupt; |
| 12 | use crate::mode::{Async, Blocking, Mode}; | ||
| 12 | use crate::{interrupt, pac, peripherals, rcc, Peripheral}; | 13 | use crate::{interrupt, pac, peripherals, rcc, Peripheral}; |
| 13 | 14 | ||
| 14 | const DES_BLOCK_SIZE: usize = 8; // 64 bits | 15 | const DES_BLOCK_SIZE: usize = 8; // 64 bits |
| @@ -57,15 +58,10 @@ pub trait Cipher<'c> { | |||
| 57 | fn prepare_key(&self, _p: pac::cryp::Cryp) {} | 58 | fn prepare_key(&self, _p: pac::cryp::Cryp) {} |
| 58 | 59 | ||
| 59 | /// Performs any cipher-specific initialization. | 60 | /// Performs any cipher-specific initialization. |
| 60 | fn init_phase_blocking<T: Instance, DmaIn, DmaOut>(&self, _p: pac::cryp::Cryp, _cryp: &Cryp<T, DmaIn, DmaOut>) {} | 61 | fn init_phase_blocking<T: Instance, M: Mode>(&self, _p: pac::cryp::Cryp, _cryp: &Cryp<T, M>) {} |
| 61 | 62 | ||
| 62 | /// Performs any cipher-specific initialization. | 63 | /// Performs any cipher-specific initialization. |
| 63 | async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, _p: pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) | 64 | async fn init_phase<T: Instance>(&self, _p: pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, Async>) {} |
| 64 | where | ||
| 65 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 66 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 67 | { | ||
| 68 | } | ||
| 69 | 65 | ||
| 70 | /// Called prior to processing the last data block for cipher-specific operations. | 66 | /// Called prior to processing the last data block for cipher-specific operations. |
| 71 | fn pre_final(&self, _p: pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] { | 67 | fn pre_final(&self, _p: pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] { |
| @@ -73,10 +69,10 @@ pub trait Cipher<'c> { | |||
| 73 | } | 69 | } |
| 74 | 70 | ||
| 75 | /// Called after processing the last data block for cipher-specific operations. | 71 | /// Called after processing the last data block for cipher-specific operations. |
| 76 | fn post_final_blocking<T: Instance, DmaIn, DmaOut>( | 72 | fn post_final_blocking<T: Instance, M: Mode>( |
| 77 | &self, | 73 | &self, |
| 78 | _p: pac::cryp::Cryp, | 74 | _p: pac::cryp::Cryp, |
| 79 | _cryp: &Cryp<T, DmaIn, DmaOut>, | 75 | _cryp: &Cryp<T, M>, |
| 80 | _dir: Direction, | 76 | _dir: Direction, |
| 81 | _int_data: &mut [u8; AES_BLOCK_SIZE], | 77 | _int_data: &mut [u8; AES_BLOCK_SIZE], |
| 82 | _temp1: [u32; 4], | 78 | _temp1: [u32; 4], |
| @@ -85,18 +81,15 @@ pub trait Cipher<'c> { | |||
| 85 | } | 81 | } |
| 86 | 82 | ||
| 87 | /// Called after processing the last data block for cipher-specific operations. | 83 | /// Called after processing the last data block for cipher-specific operations. |
| 88 | async fn post_final<T: Instance, DmaIn, DmaOut>( | 84 | async fn post_final<T: Instance>( |
| 89 | &self, | 85 | &self, |
| 90 | _p: pac::cryp::Cryp, | 86 | _p: pac::cryp::Cryp, |
| 91 | _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, | 87 | _cryp: &mut Cryp<'_, T, Async>, |
| 92 | _dir: Direction, | 88 | _dir: Direction, |
| 93 | _int_data: &mut [u8; AES_BLOCK_SIZE], | 89 | _int_data: &mut [u8; AES_BLOCK_SIZE], |
| 94 | _temp1: [u32; 4], | 90 | _temp1: [u32; 4], |
| 95 | _padding_mask: [u8; 16], | 91 | _padding_mask: [u8; 16], |
| 96 | ) where | 92 | ) { |
| 97 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 98 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 99 | { | ||
| 100 | } | 93 | } |
| 101 | 94 | ||
| 102 | /// Returns the AAD header block as required by the cipher. | 95 | /// Returns the AAD header block as required by the cipher. |
| @@ -474,13 +467,13 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { | |||
| 474 | p.cr().modify(|w| w.set_algomode3(true)); | 467 | p.cr().modify(|w| w.set_algomode3(true)); |
| 475 | } | 468 | } |
| 476 | 469 | ||
| 477 | fn init_phase_blocking<T: Instance, DmaIn, DmaOut>(&self, p: pac::cryp::Cryp, _cryp: &Cryp<T, DmaIn, DmaOut>) { | 470 | fn init_phase_blocking<T: Instance, M: Mode>(&self, p: pac::cryp::Cryp, _cryp: &Cryp<T, M>) { |
| 478 | p.cr().modify(|w| w.set_gcm_ccmph(0)); | 471 | p.cr().modify(|w| w.set_gcm_ccmph(0)); |
| 479 | p.cr().modify(|w| w.set_crypen(true)); | 472 | p.cr().modify(|w| w.set_crypen(true)); |
| 480 | while p.cr().read().crypen() {} | 473 | while p.cr().read().crypen() {} |
| 481 | } | 474 | } |
| 482 | 475 | ||
| 483 | async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, p: pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) { | 476 | async fn init_phase<T: Instance>(&self, p: pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, Async>) { |
| 484 | p.cr().modify(|w| w.set_gcm_ccmph(0)); | 477 | p.cr().modify(|w| w.set_gcm_ccmph(0)); |
| 485 | p.cr().modify(|w| w.set_crypen(true)); | 478 | p.cr().modify(|w| w.set_crypen(true)); |
| 486 | while p.cr().read().crypen() {} | 479 | while p.cr().read().crypen() {} |
| @@ -508,10 +501,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { | |||
| 508 | } | 501 | } |
| 509 | 502 | ||
| 510 | #[cfg(cryp_v2)] | 503 | #[cfg(cryp_v2)] |
| 511 | fn post_final_blocking<T: Instance, DmaIn, DmaOut>( | 504 | fn post_final_blocking<T: Instance, M: Mode>( |
| 512 | &self, | 505 | &self, |
| 513 | p: pac::cryp::Cryp, | 506 | p: pac::cryp::Cryp, |
| 514 | cryp: &Cryp<T, DmaIn, DmaOut>, | 507 | cryp: &Cryp<T, M>, |
| 515 | dir: Direction, | 508 | dir: Direction, |
| 516 | int_data: &mut [u8; AES_BLOCK_SIZE], | 509 | int_data: &mut [u8; AES_BLOCK_SIZE], |
| 517 | _temp1: [u32; 4], | 510 | _temp1: [u32; 4], |
| @@ -534,18 +527,15 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { | |||
| 534 | } | 527 | } |
| 535 | 528 | ||
| 536 | #[cfg(cryp_v2)] | 529 | #[cfg(cryp_v2)] |
| 537 | async fn post_final<T: Instance, DmaIn, DmaOut>( | 530 | async fn post_final<T: Instance>( |
| 538 | &self, | 531 | &self, |
| 539 | p: pac::cryp::Cryp, | 532 | p: pac::cryp::Cryp, |
| 540 | cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, | 533 | cryp: &mut Cryp<'_, T, Async>, |
| 541 | dir: Direction, | 534 | dir: Direction, |
| 542 | int_data: &mut [u8; AES_BLOCK_SIZE], | 535 | int_data: &mut [u8; AES_BLOCK_SIZE], |
| 543 | _temp1: [u32; 4], | 536 | _temp1: [u32; 4], |
| 544 | padding_mask: [u8; AES_BLOCK_SIZE], | 537 | padding_mask: [u8; AES_BLOCK_SIZE], |
| 545 | ) where | 538 | ) { |
| 546 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 547 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 548 | { | ||
| 549 | if dir == Direction::Encrypt { | 539 | if dir == Direction::Encrypt { |
| 550 | // Handle special GCM partial block process. | 540 | // Handle special GCM partial block process. |
| 551 | p.cr().modify(|w| w.set_crypen(false)); | 541 | p.cr().modify(|w| w.set_crypen(false)); |
| @@ -559,8 +549,8 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { | |||
| 559 | 549 | ||
| 560 | let mut out_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; | 550 | let mut out_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; |
| 561 | 551 | ||
| 562 | let read = Cryp::<T, DmaIn, DmaOut>::read_bytes(&mut cryp.outdma, Self::BLOCK_SIZE, &mut out_data); | 552 | let read = Cryp::<T, Async>::read_bytes(cryp.outdma.as_mut().unwrap(), Self::BLOCK_SIZE, &mut out_data); |
| 563 | let write = Cryp::<T, DmaIn, DmaOut>::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, int_data); | 553 | let write = Cryp::<T, Async>::write_bytes(cryp.indma.as_mut().unwrap(), Self::BLOCK_SIZE, int_data); |
| 564 | 554 | ||
| 565 | embassy_futures::join::join(read, write).await; | 555 | embassy_futures::join::join(read, write).await; |
| 566 | 556 | ||
| @@ -615,13 +605,13 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { | |||
| 615 | p.cr().modify(|w| w.set_algomode3(true)); | 605 | p.cr().modify(|w| w.set_algomode3(true)); |
| 616 | } | 606 | } |
| 617 | 607 | ||
| 618 | fn init_phase_blocking<T: Instance, DmaIn, DmaOut>(&self, p: pac::cryp::Cryp, _cryp: &Cryp<T, DmaIn, DmaOut>) { | 608 | fn init_phase_blocking<T: Instance, M: Mode>(&self, p: pac::cryp::Cryp, _cryp: &Cryp<T, M>) { |
| 619 | p.cr().modify(|w| w.set_gcm_ccmph(0)); | 609 | p.cr().modify(|w| w.set_gcm_ccmph(0)); |
| 620 | p.cr().modify(|w| w.set_crypen(true)); | 610 | p.cr().modify(|w| w.set_crypen(true)); |
| 621 | while p.cr().read().crypen() {} | 611 | while p.cr().read().crypen() {} |
| 622 | } | 612 | } |
| 623 | 613 | ||
| 624 | async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, p: pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) { | 614 | async fn init_phase<T: Instance>(&self, p: pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, Async>) { |
| 625 | p.cr().modify(|w| w.set_gcm_ccmph(0)); | 615 | p.cr().modify(|w| w.set_gcm_ccmph(0)); |
| 626 | p.cr().modify(|w| w.set_crypen(true)); | 616 | p.cr().modify(|w| w.set_crypen(true)); |
| 627 | while p.cr().read().crypen() {} | 617 | while p.cr().read().crypen() {} |
| @@ -649,10 +639,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { | |||
| 649 | } | 639 | } |
| 650 | 640 | ||
| 651 | #[cfg(cryp_v2)] | 641 | #[cfg(cryp_v2)] |
| 652 | fn post_final_blocking<T: Instance, DmaIn, DmaOut>( | 642 | fn post_final_blocking<T: Instance, M: Mode>( |
| 653 | &self, | 643 | &self, |
| 654 | p: pac::cryp::Cryp, | 644 | p: pac::cryp::Cryp, |
| 655 | cryp: &Cryp<T, DmaIn, DmaOut>, | 645 | cryp: &Cryp<T, M>, |
| 656 | dir: Direction, | 646 | dir: Direction, |
| 657 | int_data: &mut [u8; AES_BLOCK_SIZE], | 647 | int_data: &mut [u8; AES_BLOCK_SIZE], |
| 658 | _temp1: [u32; 4], | 648 | _temp1: [u32; 4], |
| @@ -675,18 +665,15 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { | |||
| 675 | } | 665 | } |
| 676 | 666 | ||
| 677 | #[cfg(cryp_v2)] | 667 | #[cfg(cryp_v2)] |
| 678 | async fn post_final<T: Instance, DmaIn, DmaOut>( | 668 | async fn post_final<T: Instance>( |
| 679 | &self, | 669 | &self, |
| 680 | p: pac::cryp::Cryp, | 670 | p: pac::cryp::Cryp, |
| 681 | cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, | 671 | cryp: &mut Cryp<'_, T, Async>, |
| 682 | dir: Direction, | 672 | dir: Direction, |
| 683 | int_data: &mut [u8; AES_BLOCK_SIZE], | 673 | int_data: &mut [u8; AES_BLOCK_SIZE], |
| 684 | _temp1: [u32; 4], | 674 | _temp1: [u32; 4], |
| 685 | padding_mask: [u8; AES_BLOCK_SIZE], | 675 | padding_mask: [u8; AES_BLOCK_SIZE], |
| 686 | ) where | 676 | ) { |
| 687 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 688 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 689 | { | ||
| 690 | if dir == Direction::Encrypt { | 677 | if dir == Direction::Encrypt { |
| 691 | // Handle special GCM partial block process. | 678 | // Handle special GCM partial block process. |
| 692 | p.cr().modify(|w| w.set_crypen(false)); | 679 | p.cr().modify(|w| w.set_crypen(false)); |
| @@ -700,8 +687,8 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { | |||
| 700 | 687 | ||
| 701 | let mut out_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; | 688 | let mut out_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; |
| 702 | 689 | ||
| 703 | let read = Cryp::<T, DmaIn, DmaOut>::read_bytes(&mut cryp.outdma, Self::BLOCK_SIZE, &mut out_data); | 690 | let read = Cryp::<T, Async>::read_bytes(cryp.outdma.as_mut().unwrap(), Self::BLOCK_SIZE, &mut out_data); |
| 704 | let write = Cryp::<T, DmaIn, DmaOut>::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, int_data); | 691 | let write = Cryp::<T, Async>::write_bytes(cryp.indma.as_mut().unwrap(), Self::BLOCK_SIZE, int_data); |
| 705 | 692 | ||
| 706 | embassy_futures::join::join(read, write).await; | 693 | embassy_futures::join::join(read, write).await; |
| 707 | } | 694 | } |
| @@ -812,7 +799,7 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip | |||
| 812 | p.cr().modify(|w| w.set_algomode3(true)); | 799 | p.cr().modify(|w| w.set_algomode3(true)); |
| 813 | } | 800 | } |
| 814 | 801 | ||
| 815 | fn init_phase_blocking<T: Instance, DmaIn, DmaOut>(&self, p: pac::cryp::Cryp, cryp: &Cryp<T, DmaIn, DmaOut>) { | 802 | fn init_phase_blocking<T: Instance, M: Mode>(&self, p: pac::cryp::Cryp, cryp: &Cryp<T, M>) { |
| 816 | p.cr().modify(|w| w.set_gcm_ccmph(0)); | 803 | p.cr().modify(|w| w.set_gcm_ccmph(0)); |
| 817 | 804 | ||
| 818 | cryp.write_bytes_blocking(Self::BLOCK_SIZE, &self.block0); | 805 | cryp.write_bytes_blocking(Self::BLOCK_SIZE, &self.block0); |
| @@ -821,14 +808,10 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip | |||
| 821 | while p.cr().read().crypen() {} | 808 | while p.cr().read().crypen() {} |
| 822 | } | 809 | } |
| 823 | 810 | ||
| 824 | async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, p: pac::cryp::Cryp, cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) | 811 | async fn init_phase<T: Instance>(&self, p: pac::cryp::Cryp, cryp: &mut Cryp<'_, T, Async>) { |
| 825 | where | ||
| 826 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 827 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 828 | { | ||
| 829 | p.cr().modify(|w| w.set_gcm_ccmph(0)); | 812 | p.cr().modify(|w| w.set_gcm_ccmph(0)); |
| 830 | 813 | ||
| 831 | Cryp::<T, DmaIn, DmaOut>::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, &self.block0).await; | 814 | Cryp::<T, Async>::write_bytes(cryp.indma.as_mut().unwrap(), Self::BLOCK_SIZE, &self.block0).await; |
| 832 | 815 | ||
| 833 | p.cr().modify(|w| w.set_crypen(true)); | 816 | p.cr().modify(|w| w.set_crypen(true)); |
| 834 | while p.cr().read().crypen() {} | 817 | while p.cr().read().crypen() {} |
| @@ -865,10 +848,10 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip | |||
| 865 | } | 848 | } |
| 866 | 849 | ||
| 867 | #[cfg(cryp_v2)] | 850 | #[cfg(cryp_v2)] |
| 868 | fn post_final_blocking<T: Instance, DmaIn, DmaOut>( | 851 | fn post_final_blocking<T: Instance, M: Mode>( |
| 869 | &self, | 852 | &self, |
| 870 | p: pac::cryp::Cryp, | 853 | p: pac::cryp::Cryp, |
| 871 | cryp: &Cryp<T, DmaIn, DmaOut>, | 854 | cryp: &Cryp<T, M>, |
| 872 | dir: Direction, | 855 | dir: Direction, |
| 873 | int_data: &mut [u8; AES_BLOCK_SIZE], | 856 | int_data: &mut [u8; AES_BLOCK_SIZE], |
| 874 | temp1: [u32; 4], | 857 | temp1: [u32; 4], |
| @@ -902,18 +885,15 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip | |||
| 902 | } | 885 | } |
| 903 | 886 | ||
| 904 | #[cfg(cryp_v2)] | 887 | #[cfg(cryp_v2)] |
| 905 | async fn post_final<T: Instance, DmaIn, DmaOut>( | 888 | async fn post_final<T: Instance>( |
| 906 | &self, | 889 | &self, |
| 907 | p: pac::cryp::Cryp, | 890 | p: pac::cryp::Cryp, |
| 908 | cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, | 891 | cryp: &mut Cryp<'_, T, Async>, |
| 909 | dir: Direction, | 892 | dir: Direction, |
| 910 | int_data: &mut [u8; AES_BLOCK_SIZE], | 893 | int_data: &mut [u8; AES_BLOCK_SIZE], |
| 911 | temp1: [u32; 4], | 894 | temp1: [u32; 4], |
| 912 | padding_mask: [u8; 16], | 895 | padding_mask: [u8; 16], |
| 913 | ) where | 896 | ) { |
| 914 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 915 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 916 | { | ||
| 917 | if dir == Direction::Decrypt { | 897 | if dir == Direction::Decrypt { |
| 918 | //Handle special CCM partial block process. | 898 | //Handle special CCM partial block process. |
| 919 | let mut temp2 = [0; 4]; | 899 | let mut temp2 = [0; 4]; |
| @@ -937,7 +917,7 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip | |||
| 937 | in_data[i] = int_word; | 917 | in_data[i] = int_word; |
| 938 | in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i]; | 918 | in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i]; |
| 939 | } | 919 | } |
| 940 | Cryp::<T, DmaIn, DmaOut>::write_words(&mut cryp.indma, Self::BLOCK_SIZE, &in_data).await; | 920 | Cryp::<T, Async>::write_words(cryp.indma.as_mut().unwrap(), Self::BLOCK_SIZE, &in_data).await; |
| 941 | } | 921 | } |
| 942 | } | 922 | } |
| 943 | } | 923 | } |
| @@ -1007,26 +987,26 @@ pub enum Direction { | |||
| 1007 | } | 987 | } |
| 1008 | 988 | ||
| 1009 | /// Crypto Accelerator Driver | 989 | /// Crypto Accelerator Driver |
| 1010 | pub struct Cryp<'d, T: Instance, DmaIn = NoDma, DmaOut = NoDma> { | 990 | pub struct Cryp<'d, T: Instance, M: Mode> { |
| 1011 | _peripheral: PeripheralRef<'d, T>, | 991 | _peripheral: PeripheralRef<'d, T>, |
| 1012 | indma: PeripheralRef<'d, DmaIn>, | 992 | _phantom: PhantomData<M>, |
| 1013 | outdma: PeripheralRef<'d, DmaOut>, | 993 | indma: Option<ChannelAndRequest<'d>>, |
| 994 | outdma: Option<ChannelAndRequest<'d>>, | ||
| 1014 | } | 995 | } |
| 1015 | 996 | ||
| 1016 | impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | 997 | impl<'d, T: Instance> Cryp<'d, T, Blocking> { |
| 1017 | /// Create a new CRYP driver. | 998 | /// Create a new CRYP driver in blocking mode. |
| 1018 | pub fn new( | 999 | pub fn new_blocking( |
| 1019 | peri: impl Peripheral<P = T> + 'd, | 1000 | peri: impl Peripheral<P = T> + 'd, |
| 1020 | indma: impl Peripheral<P = DmaIn> + 'd, | ||
| 1021 | outdma: impl Peripheral<P = DmaOut> + 'd, | ||
| 1022 | _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd, | 1001 | _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd, |
| 1023 | ) -> Self { | 1002 | ) -> Self { |
| 1024 | rcc::enable_and_reset::<T>(); | 1003 | rcc::enable_and_reset::<T>(); |
| 1025 | into_ref!(peri, indma, outdma); | 1004 | into_ref!(peri); |
| 1026 | let instance = Self { | 1005 | let instance = Self { |
| 1027 | _peripheral: peri, | 1006 | _peripheral: peri, |
| 1028 | indma: indma, | 1007 | _phantom: PhantomData, |
| 1029 | outdma: outdma, | 1008 | indma: None, |
| 1009 | outdma: None, | ||
| 1030 | }; | 1010 | }; |
| 1031 | 1011 | ||
| 1032 | T::Interrupt::unpend(); | 1012 | T::Interrupt::unpend(); |
| @@ -1034,7 +1014,9 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1034 | 1014 | ||
| 1035 | instance | 1015 | instance |
| 1036 | } | 1016 | } |
| 1017 | } | ||
| 1037 | 1018 | ||
| 1019 | impl<'d, T: Instance, M: Mode> Cryp<'d, T, M> { | ||
| 1038 | /// Start a new encrypt or decrypt operation for the given cipher. | 1020 | /// Start a new encrypt or decrypt operation for the given cipher. |
| 1039 | pub fn start_blocking<'c, C: Cipher<'c> + CipherSized + IVSized>( | 1021 | pub fn start_blocking<'c, C: Cipher<'c> + CipherSized + IVSized>( |
| 1040 | &self, | 1022 | &self, |
| @@ -1114,89 +1096,6 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1114 | ctx | 1096 | ctx |
| 1115 | } | 1097 | } |
| 1116 | 1098 | ||
| 1117 | /// Start a new encrypt or decrypt operation for the given cipher. | ||
| 1118 | pub async fn start<'c, C: Cipher<'c> + CipherSized + IVSized>( | ||
| 1119 | &mut self, | ||
| 1120 | cipher: &'c C, | ||
| 1121 | dir: Direction, | ||
| 1122 | ) -> Context<'c, C> | ||
| 1123 | where | ||
| 1124 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 1125 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 1126 | { | ||
| 1127 | let mut ctx: Context<'c, C> = Context { | ||
| 1128 | dir, | ||
| 1129 | last_block_processed: false, | ||
| 1130 | cr: 0, | ||
| 1131 | iv: [0; 4], | ||
| 1132 | csgcmccm: [0; 8], | ||
| 1133 | csgcm: [0; 8], | ||
| 1134 | aad_complete: false, | ||
| 1135 | header_len: 0, | ||
| 1136 | payload_len: 0, | ||
| 1137 | cipher: cipher, | ||
| 1138 | phantom_data: PhantomData, | ||
| 1139 | header_processed: false, | ||
| 1140 | aad_buffer: [0; 16], | ||
| 1141 | aad_buffer_len: 0, | ||
| 1142 | }; | ||
| 1143 | |||
| 1144 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1145 | |||
| 1146 | let key = ctx.cipher.key(); | ||
| 1147 | |||
| 1148 | if key.len() == (128 / 8) { | ||
| 1149 | T::regs().cr().modify(|w| w.set_keysize(0)); | ||
| 1150 | } else if key.len() == (192 / 8) { | ||
| 1151 | T::regs().cr().modify(|w| w.set_keysize(1)); | ||
| 1152 | } else if key.len() == (256 / 8) { | ||
| 1153 | T::regs().cr().modify(|w| w.set_keysize(2)); | ||
| 1154 | } | ||
| 1155 | |||
| 1156 | self.load_key(key); | ||
| 1157 | |||
| 1158 | // Set data type to 8-bit. This will match software implementations. | ||
| 1159 | T::regs().cr().modify(|w| w.set_datatype(2)); | ||
| 1160 | |||
| 1161 | ctx.cipher.prepare_key(T::regs()); | ||
| 1162 | |||
| 1163 | ctx.cipher.set_algomode(T::regs()); | ||
| 1164 | |||
| 1165 | // Set encrypt/decrypt | ||
| 1166 | if dir == Direction::Encrypt { | ||
| 1167 | T::regs().cr().modify(|w| w.set_algodir(false)); | ||
| 1168 | } else { | ||
| 1169 | T::regs().cr().modify(|w| w.set_algodir(true)); | ||
| 1170 | } | ||
| 1171 | |||
| 1172 | // Load the IV into the registers. | ||
| 1173 | let iv = ctx.cipher.iv(); | ||
| 1174 | let mut full_iv: [u8; 16] = [0; 16]; | ||
| 1175 | full_iv[0..iv.len()].copy_from_slice(iv); | ||
| 1176 | let mut iv_idx = 0; | ||
| 1177 | let mut iv_word: [u8; 4] = [0; 4]; | ||
| 1178 | iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); | ||
| 1179 | iv_idx += 4; | ||
| 1180 | T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word)); | ||
| 1181 | iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); | ||
| 1182 | iv_idx += 4; | ||
| 1183 | T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); | ||
| 1184 | iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); | ||
| 1185 | iv_idx += 4; | ||
| 1186 | T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); | ||
| 1187 | iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); | ||
| 1188 | T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); | ||
| 1189 | |||
| 1190 | // Flush in/out FIFOs | ||
| 1191 | T::regs().cr().modify(|w| w.fflush()); | ||
| 1192 | |||
| 1193 | ctx.cipher.init_phase(T::regs(), self).await; | ||
| 1194 | |||
| 1195 | self.store_context(&mut ctx); | ||
| 1196 | |||
| 1197 | ctx | ||
| 1198 | } | ||
| 1199 | |||
| 1200 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | 1099 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] |
| 1201 | /// Controls the header phase of cipher processing. | 1100 | /// Controls the header phase of cipher processing. |
| 1202 | /// This function is only valid for authenticated ciphers including GCM, CCM, and GMAC. | 1101 | /// This function is only valid for authenticated ciphers including GCM, CCM, and GMAC. |
| @@ -1294,101 +1193,6 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1294 | self.store_context(ctx); | 1193 | self.store_context(ctx); |
| 1295 | } | 1194 | } |
| 1296 | 1195 | ||
| 1297 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | ||
| 1298 | /// Controls the header phase of cipher processing. | ||
| 1299 | /// This function is only valid for authenticated ciphers including GCM, CCM, and GMAC. | ||
| 1300 | /// All additional associated data (AAD) must be supplied to this function prior to starting the payload phase with `payload`. | ||
| 1301 | /// The AAD must be supplied in multiples of the block size (128-bits for AES, 64-bits for DES), except when supplying the last block. | ||
| 1302 | /// When supplying the last block of AAD, `last_aad_block` must be `true`. | ||
| 1303 | pub async fn aad<'c, const TAG_SIZE: usize, C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>>( | ||
| 1304 | &mut self, | ||
| 1305 | ctx: &mut Context<'c, C>, | ||
| 1306 | aad: &[u8], | ||
| 1307 | last_aad_block: bool, | ||
| 1308 | ) where | ||
| 1309 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 1310 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 1311 | { | ||
| 1312 | self.load_context(ctx); | ||
| 1313 | |||
| 1314 | // Perform checks for correctness. | ||
| 1315 | if ctx.aad_complete { | ||
| 1316 | panic!("Cannot update AAD after starting payload!") | ||
| 1317 | } | ||
| 1318 | |||
| 1319 | ctx.header_len += aad.len() as u64; | ||
| 1320 | |||
| 1321 | // Header phase | ||
| 1322 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1323 | T::regs().cr().modify(|w| w.set_gcm_ccmph(1)); | ||
| 1324 | T::regs().cr().modify(|w| w.set_crypen(true)); | ||
| 1325 | |||
| 1326 | // First write the header B1 block if not yet written. | ||
| 1327 | if !ctx.header_processed { | ||
| 1328 | ctx.header_processed = true; | ||
| 1329 | let header = ctx.cipher.get_header_block(); | ||
| 1330 | ctx.aad_buffer[0..header.len()].copy_from_slice(header); | ||
| 1331 | ctx.aad_buffer_len += header.len(); | ||
| 1332 | } | ||
| 1333 | |||
| 1334 | // Fill the header block to make a full block. | ||
| 1335 | let len_to_copy = min(aad.len(), C::BLOCK_SIZE - ctx.aad_buffer_len); | ||
| 1336 | ctx.aad_buffer[ctx.aad_buffer_len..ctx.aad_buffer_len + len_to_copy].copy_from_slice(&aad[..len_to_copy]); | ||
| 1337 | ctx.aad_buffer_len += len_to_copy; | ||
| 1338 | ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); | ||
| 1339 | let mut aad_len_remaining = aad.len() - len_to_copy; | ||
| 1340 | |||
| 1341 | if ctx.aad_buffer_len < C::BLOCK_SIZE { | ||
| 1342 | // The buffer isn't full and this is the last buffer, so process it as is (already padded). | ||
| 1343 | if last_aad_block { | ||
| 1344 | Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; | ||
| 1345 | assert_eq!(T::regs().sr().read().ifem(), true); | ||
| 1346 | |||
| 1347 | // Switch to payload phase. | ||
| 1348 | ctx.aad_complete = true; | ||
| 1349 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1350 | T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); | ||
| 1351 | T::regs().cr().modify(|w| w.fflush()); | ||
| 1352 | } else { | ||
| 1353 | // Just return because we don't yet have a full block to process. | ||
| 1354 | return; | ||
| 1355 | } | ||
| 1356 | } else { | ||
| 1357 | // Load the full block from the buffer. | ||
| 1358 | Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; | ||
| 1359 | assert_eq!(T::regs().sr().read().ifem(), true); | ||
| 1360 | } | ||
| 1361 | |||
| 1362 | // Handle a partial block that is passed in. | ||
| 1363 | ctx.aad_buffer_len = 0; | ||
| 1364 | let leftovers = aad_len_remaining % C::BLOCK_SIZE; | ||
| 1365 | ctx.aad_buffer[..leftovers].copy_from_slice(&aad[aad.len() - leftovers..aad.len()]); | ||
| 1366 | ctx.aad_buffer_len += leftovers; | ||
| 1367 | ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); | ||
| 1368 | aad_len_remaining -= leftovers; | ||
| 1369 | assert_eq!(aad_len_remaining % C::BLOCK_SIZE, 0); | ||
| 1370 | |||
| 1371 | // Load full data blocks into core. | ||
| 1372 | let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE; | ||
| 1373 | let start_index = len_to_copy; | ||
| 1374 | let end_index = start_index + (C::BLOCK_SIZE * num_full_blocks); | ||
| 1375 | Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &aad[start_index..end_index]).await; | ||
| 1376 | |||
| 1377 | if last_aad_block { | ||
| 1378 | if leftovers > 0 { | ||
| 1379 | Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; | ||
| 1380 | assert_eq!(T::regs().sr().read().ifem(), true); | ||
| 1381 | } | ||
| 1382 | // Switch to payload phase. | ||
| 1383 | ctx.aad_complete = true; | ||
| 1384 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1385 | T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); | ||
| 1386 | T::regs().cr().modify(|w| w.fflush()); | ||
| 1387 | } | ||
| 1388 | |||
| 1389 | self.store_context(ctx); | ||
| 1390 | } | ||
| 1391 | |||
| 1392 | /// Performs encryption/decryption on the provided context. | 1196 | /// Performs encryption/decryption on the provided context. |
| 1393 | /// The context determines algorithm, mode, and state of the crypto accelerator. | 1197 | /// The context determines algorithm, mode, and state of the crypto accelerator. |
| 1394 | /// When the last piece of data is supplied, `last_block` should be `true`. | 1198 | /// When the last piece of data is supplied, `last_block` should be `true`. |
| @@ -1478,105 +1282,6 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1478 | self.store_context(ctx); | 1282 | self.store_context(ctx); |
| 1479 | } | 1283 | } |
| 1480 | 1284 | ||
| 1481 | /// Performs encryption/decryption on the provided context. | ||
| 1482 | /// The context determines algorithm, mode, and state of the crypto accelerator. | ||
| 1483 | /// When the last piece of data is supplied, `last_block` should be `true`. | ||
| 1484 | /// This function panics under various mismatches of parameters. | ||
| 1485 | /// Output buffer must be at least as long as the input buffer. | ||
| 1486 | /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. | ||
| 1487 | /// Padding or ciphertext stealing must be managed by the application for these modes. | ||
| 1488 | /// Data must also be a multiple of block size unless `last_block` is `true`. | ||
| 1489 | pub async fn payload<'c, C: Cipher<'c> + CipherSized + IVSized>( | ||
| 1490 | &mut self, | ||
| 1491 | ctx: &mut Context<'c, C>, | ||
| 1492 | input: &[u8], | ||
| 1493 | output: &mut [u8], | ||
| 1494 | last_block: bool, | ||
| 1495 | ) where | ||
| 1496 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 1497 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 1498 | { | ||
| 1499 | self.load_context(ctx); | ||
| 1500 | |||
| 1501 | let last_block_remainder = input.len() % C::BLOCK_SIZE; | ||
| 1502 | |||
| 1503 | // Perform checks for correctness. | ||
| 1504 | if !ctx.aad_complete && ctx.header_len > 0 { | ||
| 1505 | panic!("Additional associated data must be processed first!"); | ||
| 1506 | } else if !ctx.aad_complete { | ||
| 1507 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | ||
| 1508 | { | ||
| 1509 | ctx.aad_complete = true; | ||
| 1510 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1511 | T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); | ||
| 1512 | T::regs().cr().modify(|w| w.fflush()); | ||
| 1513 | T::regs().cr().modify(|w| w.set_crypen(true)); | ||
| 1514 | } | ||
| 1515 | } | ||
| 1516 | if ctx.last_block_processed { | ||
| 1517 | panic!("The last block has already been processed!"); | ||
| 1518 | } | ||
| 1519 | if input.len() > output.len() { | ||
| 1520 | panic!("Output buffer length must match input length."); | ||
| 1521 | } | ||
| 1522 | if !last_block { | ||
| 1523 | if last_block_remainder != 0 { | ||
| 1524 | panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE); | ||
| 1525 | } | ||
| 1526 | } | ||
| 1527 | if C::REQUIRES_PADDING { | ||
| 1528 | if last_block_remainder != 0 { | ||
| 1529 | panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", C::BLOCK_SIZE); | ||
| 1530 | } | ||
| 1531 | } | ||
| 1532 | if last_block { | ||
| 1533 | ctx.last_block_processed = true; | ||
| 1534 | } | ||
| 1535 | |||
| 1536 | // Load data into core, block by block. | ||
| 1537 | let num_full_blocks = input.len() / C::BLOCK_SIZE; | ||
| 1538 | for block in 0..num_full_blocks { | ||
| 1539 | let index = block * C::BLOCK_SIZE; | ||
| 1540 | // Read block out | ||
| 1541 | let read = Self::read_bytes( | ||
| 1542 | &mut self.outdma, | ||
| 1543 | C::BLOCK_SIZE, | ||
| 1544 | &mut output[index..index + C::BLOCK_SIZE], | ||
| 1545 | ); | ||
| 1546 | // Write block in | ||
| 1547 | let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &input[index..index + C::BLOCK_SIZE]); | ||
| 1548 | embassy_futures::join::join(read, write).await; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | // Handle the final block, which is incomplete. | ||
| 1552 | if last_block_remainder > 0 { | ||
| 1553 | let padding_len = C::BLOCK_SIZE - last_block_remainder; | ||
| 1554 | let temp1 = ctx.cipher.pre_final(T::regs(), ctx.dir, padding_len); | ||
| 1555 | |||
| 1556 | let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; | ||
| 1557 | let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; | ||
| 1558 | last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); | ||
| 1559 | let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut intermediate_data); | ||
| 1560 | let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &last_block); | ||
| 1561 | embassy_futures::join::join(read, write).await; | ||
| 1562 | |||
| 1563 | // Handle the last block depending on mode. | ||
| 1564 | let output_len = output.len(); | ||
| 1565 | output[output_len - last_block_remainder..output_len] | ||
| 1566 | .copy_from_slice(&intermediate_data[0..last_block_remainder]); | ||
| 1567 | |||
| 1568 | let mut mask: [u8; 16] = [0; 16]; | ||
| 1569 | mask[..last_block_remainder].fill(0xFF); | ||
| 1570 | ctx.cipher | ||
| 1571 | .post_final(T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask) | ||
| 1572 | .await; | ||
| 1573 | } | ||
| 1574 | |||
| 1575 | ctx.payload_len += input.len() as u64; | ||
| 1576 | |||
| 1577 | self.store_context(ctx); | ||
| 1578 | } | ||
| 1579 | |||
| 1580 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | 1285 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] |
| 1581 | /// Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC. | 1286 | /// Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC. |
| 1582 | /// Called after all the data has been encrypted/decrypted by `payload`. | 1287 | /// Called after all the data has been encrypted/decrypted by `payload`. |
| @@ -1623,57 +1328,6 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1623 | tag | 1328 | tag |
| 1624 | } | 1329 | } |
| 1625 | 1330 | ||
| 1626 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | ||
| 1627 | /// Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC. | ||
| 1628 | /// Called after all the data has been encrypted/decrypted by `payload`. | ||
| 1629 | pub async fn finish< | ||
| 1630 | 'c, | ||
| 1631 | const TAG_SIZE: usize, | ||
| 1632 | C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>, | ||
| 1633 | >( | ||
| 1634 | &mut self, | ||
| 1635 | mut ctx: Context<'c, C>, | ||
| 1636 | ) -> [u8; TAG_SIZE] | ||
| 1637 | where | ||
| 1638 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 1639 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 1640 | { | ||
| 1641 | self.load_context(&mut ctx); | ||
| 1642 | |||
| 1643 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1644 | T::regs().cr().modify(|w| w.set_gcm_ccmph(3)); | ||
| 1645 | T::regs().cr().modify(|w| w.set_crypen(true)); | ||
| 1646 | |||
| 1647 | let headerlen1: u32 = ((ctx.header_len * 8) >> 32) as u32; | ||
| 1648 | let headerlen2: u32 = (ctx.header_len * 8) as u32; | ||
| 1649 | let payloadlen1: u32 = ((ctx.payload_len * 8) >> 32) as u32; | ||
| 1650 | let payloadlen2: u32 = (ctx.payload_len * 8) as u32; | ||
| 1651 | |||
| 1652 | #[cfg(cryp_v2)] | ||
| 1653 | let footer: [u32; 4] = [ | ||
| 1654 | headerlen1.swap_bytes(), | ||
| 1655 | headerlen2.swap_bytes(), | ||
| 1656 | payloadlen1.swap_bytes(), | ||
| 1657 | payloadlen2.swap_bytes(), | ||
| 1658 | ]; | ||
| 1659 | #[cfg(any(cryp_v3, cryp_v4))] | ||
| 1660 | let footer: [u32; 4] = [headerlen1, headerlen2, payloadlen1, payloadlen2]; | ||
| 1661 | |||
| 1662 | let write = Self::write_words(&mut self.indma, C::BLOCK_SIZE, &footer); | ||
| 1663 | |||
| 1664 | let mut full_tag: [u8; 16] = [0; 16]; | ||
| 1665 | let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut full_tag); | ||
| 1666 | |||
| 1667 | embassy_futures::join::join(read, write).await; | ||
| 1668 | |||
| 1669 | let mut tag: [u8; TAG_SIZE] = [0; TAG_SIZE]; | ||
| 1670 | tag.copy_from_slice(&full_tag[0..TAG_SIZE]); | ||
| 1671 | |||
| 1672 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1673 | |||
| 1674 | tag | ||
| 1675 | } | ||
| 1676 | |||
| 1677 | fn load_key(&self, key: &[u8]) { | 1331 | fn load_key(&self, key: &[u8]) { |
| 1678 | // Load the key into the registers. | 1332 | // Load the key into the registers. |
| 1679 | let mut keyidx = 0; | 1333 | let mut keyidx = 0; |
| @@ -1774,17 +1428,393 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1774 | } | 1428 | } |
| 1775 | } | 1429 | } |
| 1776 | 1430 | ||
| 1777 | async fn write_bytes(dma: &mut PeripheralRef<'_, DmaIn>, block_size: usize, blocks: &[u8]) | 1431 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] |
| 1778 | where | 1432 | fn write_words_blocking(&self, block_size: usize, blocks: &[u32]) { |
| 1779 | DmaIn: crate::cryp::DmaIn<T>, | 1433 | assert_eq!((blocks.len() * 4) % block_size, 0); |
| 1780 | { | 1434 | let mut byte_counter: usize = 0; |
| 1435 | for word in blocks { | ||
| 1436 | T::regs().din().write_value(*word); | ||
| 1437 | byte_counter += 4; | ||
| 1438 | if byte_counter % block_size == 0 { | ||
| 1439 | // Block until input FIFO is empty. | ||
| 1440 | while !T::regs().sr().read().ifem() {} | ||
| 1441 | } | ||
| 1442 | } | ||
| 1443 | } | ||
| 1444 | |||
| 1445 | fn read_bytes_blocking(&self, block_size: usize, blocks: &mut [u8]) { | ||
| 1446 | // Block until there is output to read. | ||
| 1447 | while !T::regs().sr().read().ofne() {} | ||
| 1448 | // Ensure input is a multiple of block size. | ||
| 1449 | assert_eq!(blocks.len() % block_size, 0); | ||
| 1450 | // Read block out | ||
| 1451 | let mut index = 0; | ||
| 1452 | let end_index = blocks.len(); | ||
| 1453 | while index < end_index { | ||
| 1454 | let out_word: u32 = T::regs().dout().read(); | ||
| 1455 | blocks[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); | ||
| 1456 | index += 4; | ||
| 1457 | } | ||
| 1458 | } | ||
| 1459 | } | ||
| 1460 | |||
| 1461 | impl<'d, T: Instance> Cryp<'d, T, Async> { | ||
| 1462 | /// Create a new CRYP driver. | ||
| 1463 | pub fn new( | ||
| 1464 | peri: impl Peripheral<P = T> + 'd, | ||
| 1465 | indma: impl Peripheral<P = impl DmaIn<T>> + 'd, | ||
| 1466 | outdma: impl Peripheral<P = impl DmaOut<T>> + 'd, | ||
| 1467 | _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd, | ||
| 1468 | ) -> Self { | ||
| 1469 | rcc::enable_and_reset::<T>(); | ||
| 1470 | into_ref!(peri, indma, outdma); | ||
| 1471 | let instance = Self { | ||
| 1472 | _peripheral: peri, | ||
| 1473 | _phantom: PhantomData, | ||
| 1474 | indma: new_dma!(indma), | ||
| 1475 | outdma: new_dma!(outdma), | ||
| 1476 | }; | ||
| 1477 | |||
| 1478 | T::Interrupt::unpend(); | ||
| 1479 | unsafe { T::Interrupt::enable() }; | ||
| 1480 | |||
| 1481 | instance | ||
| 1482 | } | ||
| 1483 | |||
| 1484 | /// Start a new encrypt or decrypt operation for the given cipher. | ||
| 1485 | pub async fn start<'c, C: Cipher<'c> + CipherSized + IVSized>( | ||
| 1486 | &mut self, | ||
| 1487 | cipher: &'c C, | ||
| 1488 | dir: Direction, | ||
| 1489 | ) -> Context<'c, C> { | ||
| 1490 | let mut ctx: Context<'c, C> = Context { | ||
| 1491 | dir, | ||
| 1492 | last_block_processed: false, | ||
| 1493 | cr: 0, | ||
| 1494 | iv: [0; 4], | ||
| 1495 | csgcmccm: [0; 8], | ||
| 1496 | csgcm: [0; 8], | ||
| 1497 | aad_complete: false, | ||
| 1498 | header_len: 0, | ||
| 1499 | payload_len: 0, | ||
| 1500 | cipher: cipher, | ||
| 1501 | phantom_data: PhantomData, | ||
| 1502 | header_processed: false, | ||
| 1503 | aad_buffer: [0; 16], | ||
| 1504 | aad_buffer_len: 0, | ||
| 1505 | }; | ||
| 1506 | |||
| 1507 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1508 | |||
| 1509 | let key = ctx.cipher.key(); | ||
| 1510 | |||
| 1511 | if key.len() == (128 / 8) { | ||
| 1512 | T::regs().cr().modify(|w| w.set_keysize(0)); | ||
| 1513 | } else if key.len() == (192 / 8) { | ||
| 1514 | T::regs().cr().modify(|w| w.set_keysize(1)); | ||
| 1515 | } else if key.len() == (256 / 8) { | ||
| 1516 | T::regs().cr().modify(|w| w.set_keysize(2)); | ||
| 1517 | } | ||
| 1518 | |||
| 1519 | self.load_key(key); | ||
| 1520 | |||
| 1521 | // Set data type to 8-bit. This will match software implementations. | ||
| 1522 | T::regs().cr().modify(|w| w.set_datatype(2)); | ||
| 1523 | |||
| 1524 | ctx.cipher.prepare_key(T::regs()); | ||
| 1525 | |||
| 1526 | ctx.cipher.set_algomode(T::regs()); | ||
| 1527 | |||
| 1528 | // Set encrypt/decrypt | ||
| 1529 | if dir == Direction::Encrypt { | ||
| 1530 | T::regs().cr().modify(|w| w.set_algodir(false)); | ||
| 1531 | } else { | ||
| 1532 | T::regs().cr().modify(|w| w.set_algodir(true)); | ||
| 1533 | } | ||
| 1534 | |||
| 1535 | // Load the IV into the registers. | ||
| 1536 | let iv = ctx.cipher.iv(); | ||
| 1537 | let mut full_iv: [u8; 16] = [0; 16]; | ||
| 1538 | full_iv[0..iv.len()].copy_from_slice(iv); | ||
| 1539 | let mut iv_idx = 0; | ||
| 1540 | let mut iv_word: [u8; 4] = [0; 4]; | ||
| 1541 | iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); | ||
| 1542 | iv_idx += 4; | ||
| 1543 | T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word)); | ||
| 1544 | iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); | ||
| 1545 | iv_idx += 4; | ||
| 1546 | T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); | ||
| 1547 | iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); | ||
| 1548 | iv_idx += 4; | ||
| 1549 | T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); | ||
| 1550 | iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); | ||
| 1551 | T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); | ||
| 1552 | |||
| 1553 | // Flush in/out FIFOs | ||
| 1554 | T::regs().cr().modify(|w| w.fflush()); | ||
| 1555 | |||
| 1556 | ctx.cipher.init_phase(T::regs(), self).await; | ||
| 1557 | |||
| 1558 | self.store_context(&mut ctx); | ||
| 1559 | |||
| 1560 | ctx | ||
| 1561 | } | ||
| 1562 | |||
| 1563 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | ||
| 1564 | /// Controls the header phase of cipher processing. | ||
| 1565 | /// This function is only valid for authenticated ciphers including GCM, CCM, and GMAC. | ||
| 1566 | /// All additional associated data (AAD) must be supplied to this function prior to starting the payload phase with `payload`. | ||
| 1567 | /// The AAD must be supplied in multiples of the block size (128-bits for AES, 64-bits for DES), except when supplying the last block. | ||
| 1568 | /// When supplying the last block of AAD, `last_aad_block` must be `true`. | ||
| 1569 | pub async fn aad< | ||
| 1570 | 'c, | ||
| 1571 | const TAG_SIZE: usize, | ||
| 1572 | C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>, | ||
| 1573 | >( | ||
| 1574 | &mut self, | ||
| 1575 | ctx: &mut Context<'c, C>, | ||
| 1576 | aad: &[u8], | ||
| 1577 | last_aad_block: bool, | ||
| 1578 | ) { | ||
| 1579 | self.load_context(ctx); | ||
| 1580 | |||
| 1581 | // Perform checks for correctness. | ||
| 1582 | if ctx.aad_complete { | ||
| 1583 | panic!("Cannot update AAD after starting payload!") | ||
| 1584 | } | ||
| 1585 | |||
| 1586 | ctx.header_len += aad.len() as u64; | ||
| 1587 | |||
| 1588 | // Header phase | ||
| 1589 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1590 | T::regs().cr().modify(|w| w.set_gcm_ccmph(1)); | ||
| 1591 | T::regs().cr().modify(|w| w.set_crypen(true)); | ||
| 1592 | |||
| 1593 | // First write the header B1 block if not yet written. | ||
| 1594 | if !ctx.header_processed { | ||
| 1595 | ctx.header_processed = true; | ||
| 1596 | let header = ctx.cipher.get_header_block(); | ||
| 1597 | ctx.aad_buffer[0..header.len()].copy_from_slice(header); | ||
| 1598 | ctx.aad_buffer_len += header.len(); | ||
| 1599 | } | ||
| 1600 | |||
| 1601 | // Fill the header block to make a full block. | ||
| 1602 | let len_to_copy = min(aad.len(), C::BLOCK_SIZE - ctx.aad_buffer_len); | ||
| 1603 | ctx.aad_buffer[ctx.aad_buffer_len..ctx.aad_buffer_len + len_to_copy].copy_from_slice(&aad[..len_to_copy]); | ||
| 1604 | ctx.aad_buffer_len += len_to_copy; | ||
| 1605 | ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); | ||
| 1606 | let mut aad_len_remaining = aad.len() - len_to_copy; | ||
| 1607 | |||
| 1608 | if ctx.aad_buffer_len < C::BLOCK_SIZE { | ||
| 1609 | // The buffer isn't full and this is the last buffer, so process it as is (already padded). | ||
| 1610 | if last_aad_block { | ||
| 1611 | Self::write_bytes(self.indma.as_mut().unwrap(), C::BLOCK_SIZE, &ctx.aad_buffer).await; | ||
| 1612 | assert_eq!(T::regs().sr().read().ifem(), true); | ||
| 1613 | |||
| 1614 | // Switch to payload phase. | ||
| 1615 | ctx.aad_complete = true; | ||
| 1616 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1617 | T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); | ||
| 1618 | T::regs().cr().modify(|w| w.fflush()); | ||
| 1619 | } else { | ||
| 1620 | // Just return because we don't yet have a full block to process. | ||
| 1621 | return; | ||
| 1622 | } | ||
| 1623 | } else { | ||
| 1624 | // Load the full block from the buffer. | ||
| 1625 | Self::write_bytes(self.indma.as_mut().unwrap(), C::BLOCK_SIZE, &ctx.aad_buffer).await; | ||
| 1626 | assert_eq!(T::regs().sr().read().ifem(), true); | ||
| 1627 | } | ||
| 1628 | |||
| 1629 | // Handle a partial block that is passed in. | ||
| 1630 | ctx.aad_buffer_len = 0; | ||
| 1631 | let leftovers = aad_len_remaining % C::BLOCK_SIZE; | ||
| 1632 | ctx.aad_buffer[..leftovers].copy_from_slice(&aad[aad.len() - leftovers..aad.len()]); | ||
| 1633 | ctx.aad_buffer_len += leftovers; | ||
| 1634 | ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); | ||
| 1635 | aad_len_remaining -= leftovers; | ||
| 1636 | assert_eq!(aad_len_remaining % C::BLOCK_SIZE, 0); | ||
| 1637 | |||
| 1638 | // Load full data blocks into core. | ||
| 1639 | let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE; | ||
| 1640 | let start_index = len_to_copy; | ||
| 1641 | let end_index = start_index + (C::BLOCK_SIZE * num_full_blocks); | ||
| 1642 | Self::write_bytes( | ||
| 1643 | self.indma.as_mut().unwrap(), | ||
| 1644 | C::BLOCK_SIZE, | ||
| 1645 | &aad[start_index..end_index], | ||
| 1646 | ) | ||
| 1647 | .await; | ||
| 1648 | |||
| 1649 | if last_aad_block { | ||
| 1650 | if leftovers > 0 { | ||
| 1651 | Self::write_bytes(self.indma.as_mut().unwrap(), C::BLOCK_SIZE, &ctx.aad_buffer).await; | ||
| 1652 | assert_eq!(T::regs().sr().read().ifem(), true); | ||
| 1653 | } | ||
| 1654 | // Switch to payload phase. | ||
| 1655 | ctx.aad_complete = true; | ||
| 1656 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1657 | T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); | ||
| 1658 | T::regs().cr().modify(|w| w.fflush()); | ||
| 1659 | } | ||
| 1660 | |||
| 1661 | self.store_context(ctx); | ||
| 1662 | } | ||
| 1663 | |||
| 1664 | /// Performs encryption/decryption on the provided context. | ||
| 1665 | /// The context determines algorithm, mode, and state of the crypto accelerator. | ||
| 1666 | /// When the last piece of data is supplied, `last_block` should be `true`. | ||
| 1667 | /// This function panics under various mismatches of parameters. | ||
| 1668 | /// Output buffer must be at least as long as the input buffer. | ||
| 1669 | /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. | ||
| 1670 | /// Padding or ciphertext stealing must be managed by the application for these modes. | ||
| 1671 | /// Data must also be a multiple of block size unless `last_block` is `true`. | ||
| 1672 | pub async fn payload<'c, C: Cipher<'c> + CipherSized + IVSized>( | ||
| 1673 | &mut self, | ||
| 1674 | ctx: &mut Context<'c, C>, | ||
| 1675 | input: &[u8], | ||
| 1676 | output: &mut [u8], | ||
| 1677 | last_block: bool, | ||
| 1678 | ) { | ||
| 1679 | self.load_context(ctx); | ||
| 1680 | |||
| 1681 | let last_block_remainder = input.len() % C::BLOCK_SIZE; | ||
| 1682 | |||
| 1683 | // Perform checks for correctness. | ||
| 1684 | if !ctx.aad_complete && ctx.header_len > 0 { | ||
| 1685 | panic!("Additional associated data must be processed first!"); | ||
| 1686 | } else if !ctx.aad_complete { | ||
| 1687 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | ||
| 1688 | { | ||
| 1689 | ctx.aad_complete = true; | ||
| 1690 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1691 | T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); | ||
| 1692 | T::regs().cr().modify(|w| w.fflush()); | ||
| 1693 | T::regs().cr().modify(|w| w.set_crypen(true)); | ||
| 1694 | } | ||
| 1695 | } | ||
| 1696 | if ctx.last_block_processed { | ||
| 1697 | panic!("The last block has already been processed!"); | ||
| 1698 | } | ||
| 1699 | if input.len() > output.len() { | ||
| 1700 | panic!("Output buffer length must match input length."); | ||
| 1701 | } | ||
| 1702 | if !last_block { | ||
| 1703 | if last_block_remainder != 0 { | ||
| 1704 | panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE); | ||
| 1705 | } | ||
| 1706 | } | ||
| 1707 | if C::REQUIRES_PADDING { | ||
| 1708 | if last_block_remainder != 0 { | ||
| 1709 | panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", C::BLOCK_SIZE); | ||
| 1710 | } | ||
| 1711 | } | ||
| 1712 | if last_block { | ||
| 1713 | ctx.last_block_processed = true; | ||
| 1714 | } | ||
| 1715 | |||
| 1716 | // Load data into core, block by block. | ||
| 1717 | let num_full_blocks = input.len() / C::BLOCK_SIZE; | ||
| 1718 | for block in 0..num_full_blocks { | ||
| 1719 | let index = block * C::BLOCK_SIZE; | ||
| 1720 | // Read block out | ||
| 1721 | let read = Self::read_bytes( | ||
| 1722 | self.outdma.as_mut().unwrap(), | ||
| 1723 | C::BLOCK_SIZE, | ||
| 1724 | &mut output[index..index + C::BLOCK_SIZE], | ||
| 1725 | ); | ||
| 1726 | // Write block in | ||
| 1727 | let write = Self::write_bytes( | ||
| 1728 | self.indma.as_mut().unwrap(), | ||
| 1729 | C::BLOCK_SIZE, | ||
| 1730 | &input[index..index + C::BLOCK_SIZE], | ||
| 1731 | ); | ||
| 1732 | embassy_futures::join::join(read, write).await; | ||
| 1733 | } | ||
| 1734 | |||
| 1735 | // Handle the final block, which is incomplete. | ||
| 1736 | if last_block_remainder > 0 { | ||
| 1737 | let padding_len = C::BLOCK_SIZE - last_block_remainder; | ||
| 1738 | let temp1 = ctx.cipher.pre_final(T::regs(), ctx.dir, padding_len); | ||
| 1739 | |||
| 1740 | let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; | ||
| 1741 | let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; | ||
| 1742 | last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); | ||
| 1743 | let read = Self::read_bytes(self.outdma.as_mut().unwrap(), C::BLOCK_SIZE, &mut intermediate_data); | ||
| 1744 | let write = Self::write_bytes(self.indma.as_mut().unwrap(), C::BLOCK_SIZE, &last_block); | ||
| 1745 | embassy_futures::join::join(read, write).await; | ||
| 1746 | |||
| 1747 | // Handle the last block depending on mode. | ||
| 1748 | let output_len = output.len(); | ||
| 1749 | output[output_len - last_block_remainder..output_len] | ||
| 1750 | .copy_from_slice(&intermediate_data[0..last_block_remainder]); | ||
| 1751 | |||
| 1752 | let mut mask: [u8; 16] = [0; 16]; | ||
| 1753 | mask[..last_block_remainder].fill(0xFF); | ||
| 1754 | ctx.cipher | ||
| 1755 | .post_final(T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask) | ||
| 1756 | .await; | ||
| 1757 | } | ||
| 1758 | |||
| 1759 | ctx.payload_len += input.len() as u64; | ||
| 1760 | |||
| 1761 | self.store_context(ctx); | ||
| 1762 | } | ||
| 1763 | |||
| 1764 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | ||
| 1765 | /// Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC. | ||
| 1766 | /// Called after all the data has been encrypted/decrypted by `payload`. | ||
| 1767 | pub async fn finish< | ||
| 1768 | 'c, | ||
| 1769 | const TAG_SIZE: usize, | ||
| 1770 | C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>, | ||
| 1771 | >( | ||
| 1772 | &mut self, | ||
| 1773 | mut ctx: Context<'c, C>, | ||
| 1774 | ) -> [u8; TAG_SIZE] { | ||
| 1775 | self.load_context(&mut ctx); | ||
| 1776 | |||
| 1777 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1778 | T::regs().cr().modify(|w| w.set_gcm_ccmph(3)); | ||
| 1779 | T::regs().cr().modify(|w| w.set_crypen(true)); | ||
| 1780 | |||
| 1781 | let headerlen1: u32 = ((ctx.header_len * 8) >> 32) as u32; | ||
| 1782 | let headerlen2: u32 = (ctx.header_len * 8) as u32; | ||
| 1783 | let payloadlen1: u32 = ((ctx.payload_len * 8) >> 32) as u32; | ||
| 1784 | let payloadlen2: u32 = (ctx.payload_len * 8) as u32; | ||
| 1785 | |||
| 1786 | #[cfg(cryp_v2)] | ||
| 1787 | let footer: [u32; 4] = [ | ||
| 1788 | headerlen1.swap_bytes(), | ||
| 1789 | headerlen2.swap_bytes(), | ||
| 1790 | payloadlen1.swap_bytes(), | ||
| 1791 | payloadlen2.swap_bytes(), | ||
| 1792 | ]; | ||
| 1793 | #[cfg(any(cryp_v3, cryp_v4))] | ||
| 1794 | let footer: [u32; 4] = [headerlen1, headerlen2, payloadlen1, payloadlen2]; | ||
| 1795 | |||
| 1796 | let write = Self::write_words(self.indma.as_mut().unwrap(), C::BLOCK_SIZE, &footer); | ||
| 1797 | |||
| 1798 | let mut full_tag: [u8; 16] = [0; 16]; | ||
| 1799 | let read = Self::read_bytes(self.outdma.as_mut().unwrap(), C::BLOCK_SIZE, &mut full_tag); | ||
| 1800 | |||
| 1801 | embassy_futures::join::join(read, write).await; | ||
| 1802 | |||
| 1803 | let mut tag: [u8; TAG_SIZE] = [0; TAG_SIZE]; | ||
| 1804 | tag.copy_from_slice(&full_tag[0..TAG_SIZE]); | ||
| 1805 | |||
| 1806 | T::regs().cr().modify(|w| w.set_crypen(false)); | ||
| 1807 | |||
| 1808 | tag | ||
| 1809 | } | ||
| 1810 | |||
| 1811 | async fn write_bytes(dma: &mut ChannelAndRequest<'d>, block_size: usize, blocks: &[u8]) { | ||
| 1781 | if blocks.len() == 0 { | 1812 | if blocks.len() == 0 { |
| 1782 | return; | 1813 | return; |
| 1783 | } | 1814 | } |
| 1784 | // Ensure input is a multiple of block size. | 1815 | // Ensure input is a multiple of block size. |
| 1785 | assert_eq!(blocks.len() % block_size, 0); | 1816 | assert_eq!(blocks.len() % block_size, 0); |
| 1786 | // Configure DMA to transfer input to crypto core. | 1817 | // Configure DMA to transfer input to crypto core. |
| 1787 | let dma_request = dma.request(); | ||
| 1788 | let dst_ptr: *mut u32 = T::regs().din().as_ptr(); | 1818 | let dst_ptr: *mut u32 = T::regs().din().as_ptr(); |
| 1789 | let num_words = blocks.len() / 4; | 1819 | let num_words = blocks.len() / 4; |
| 1790 | let src_ptr: *const [u8] = ptr::slice_from_raw_parts(blocks.as_ptr().cast(), num_words); | 1820 | let src_ptr: *const [u8] = ptr::slice_from_raw_parts(blocks.as_ptr().cast(), num_words); |
| @@ -1793,38 +1823,20 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1793 | priority: crate::dma::Priority::High, | 1823 | priority: crate::dma::Priority::High, |
| 1794 | ..Default::default() | 1824 | ..Default::default() |
| 1795 | }; | 1825 | }; |
| 1796 | let dma_transfer = unsafe { Transfer::new_write_raw(dma, dma_request, src_ptr, dst_ptr, options) }; | 1826 | let dma_transfer = unsafe { dma.write_raw(src_ptr, dst_ptr, options) }; |
| 1797 | T::regs().dmacr().modify(|w| w.set_dien(true)); | 1827 | T::regs().dmacr().modify(|w| w.set_dien(true)); |
| 1798 | // Wait for the transfer to complete. | 1828 | // Wait for the transfer to complete. |
| 1799 | dma_transfer.await; | 1829 | dma_transfer.await; |
| 1800 | } | 1830 | } |
| 1801 | 1831 | ||
| 1802 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | 1832 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] |
| 1803 | fn write_words_blocking(&self, block_size: usize, blocks: &[u32]) { | 1833 | async fn write_words(dma: &mut ChannelAndRequest<'d>, block_size: usize, blocks: &[u32]) { |
| 1804 | assert_eq!((blocks.len() * 4) % block_size, 0); | ||
| 1805 | let mut byte_counter: usize = 0; | ||
| 1806 | for word in blocks { | ||
| 1807 | T::regs().din().write_value(*word); | ||
| 1808 | byte_counter += 4; | ||
| 1809 | if byte_counter % block_size == 0 { | ||
| 1810 | // Block until input FIFO is empty. | ||
| 1811 | while !T::regs().sr().read().ifem() {} | ||
| 1812 | } | ||
| 1813 | } | ||
| 1814 | } | ||
| 1815 | |||
| 1816 | #[cfg(any(cryp_v2, cryp_v3, cryp_v4))] | ||
| 1817 | async fn write_words(dma: &mut PeripheralRef<'_, DmaIn>, block_size: usize, blocks: &[u32]) | ||
| 1818 | where | ||
| 1819 | DmaIn: crate::cryp::DmaIn<T>, | ||
| 1820 | { | ||
| 1821 | if blocks.len() == 0 { | 1834 | if blocks.len() == 0 { |
| 1822 | return; | 1835 | return; |
| 1823 | } | 1836 | } |
| 1824 | // Ensure input is a multiple of block size. | 1837 | // Ensure input is a multiple of block size. |
| 1825 | assert_eq!((blocks.len() * 4) % block_size, 0); | 1838 | assert_eq!((blocks.len() * 4) % block_size, 0); |
| 1826 | // Configure DMA to transfer input to crypto core. | 1839 | // Configure DMA to transfer input to crypto core. |
| 1827 | let dma_request = dma.request(); | ||
| 1828 | let dst_ptr: *mut u32 = T::regs().din().as_ptr(); | 1840 | let dst_ptr: *mut u32 = T::regs().din().as_ptr(); |
| 1829 | let num_words = blocks.len(); | 1841 | let num_words = blocks.len(); |
| 1830 | let src_ptr: *const [u32] = ptr::slice_from_raw_parts(blocks.as_ptr().cast(), num_words); | 1842 | let src_ptr: *const [u32] = ptr::slice_from_raw_parts(blocks.as_ptr().cast(), num_words); |
| @@ -1833,38 +1845,19 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1833 | priority: crate::dma::Priority::High, | 1845 | priority: crate::dma::Priority::High, |
| 1834 | ..Default::default() | 1846 | ..Default::default() |
| 1835 | }; | 1847 | }; |
| 1836 | let dma_transfer = unsafe { Transfer::new_write_raw(dma, dma_request, src_ptr, dst_ptr, options) }; | 1848 | let dma_transfer = unsafe { dma.write_raw(src_ptr, dst_ptr, options) }; |
| 1837 | T::regs().dmacr().modify(|w| w.set_dien(true)); | 1849 | T::regs().dmacr().modify(|w| w.set_dien(true)); |
| 1838 | // Wait for the transfer to complete. | 1850 | // Wait for the transfer to complete. |
| 1839 | dma_transfer.await; | 1851 | dma_transfer.await; |
| 1840 | } | 1852 | } |
| 1841 | 1853 | ||
| 1842 | fn read_bytes_blocking(&self, block_size: usize, blocks: &mut [u8]) { | 1854 | async fn read_bytes(dma: &mut ChannelAndRequest<'d>, block_size: usize, blocks: &mut [u8]) { |
| 1843 | // Block until there is output to read. | ||
| 1844 | while !T::regs().sr().read().ofne() {} | ||
| 1845 | // Ensure input is a multiple of block size. | ||
| 1846 | assert_eq!(blocks.len() % block_size, 0); | ||
| 1847 | // Read block out | ||
| 1848 | let mut index = 0; | ||
| 1849 | let end_index = blocks.len(); | ||
| 1850 | while index < end_index { | ||
| 1851 | let out_word: u32 = T::regs().dout().read(); | ||
| 1852 | blocks[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); | ||
| 1853 | index += 4; | ||
| 1854 | } | ||
| 1855 | } | ||
| 1856 | |||
| 1857 | async fn read_bytes(dma: &mut PeripheralRef<'_, DmaOut>, block_size: usize, blocks: &mut [u8]) | ||
| 1858 | where | ||
| 1859 | DmaOut: crate::cryp::DmaOut<T>, | ||
| 1860 | { | ||
| 1861 | if blocks.len() == 0 { | 1855 | if blocks.len() == 0 { |
| 1862 | return; | 1856 | return; |
| 1863 | } | 1857 | } |
| 1864 | // Ensure input is a multiple of block size. | 1858 | // Ensure input is a multiple of block size. |
| 1865 | assert_eq!(blocks.len() % block_size, 0); | 1859 | assert_eq!(blocks.len() % block_size, 0); |
| 1866 | // Configure DMA to get output from crypto core. | 1860 | // Configure DMA to get output from crypto core. |
| 1867 | let dma_request = dma.request(); | ||
| 1868 | let src_ptr = T::regs().dout().as_ptr(); | 1861 | let src_ptr = T::regs().dout().as_ptr(); |
| 1869 | let num_words = blocks.len() / 4; | 1862 | let num_words = blocks.len() / 4; |
| 1870 | let dst_ptr = ptr::slice_from_raw_parts_mut(blocks.as_mut_ptr().cast(), num_words); | 1863 | let dst_ptr = ptr::slice_from_raw_parts_mut(blocks.as_mut_ptr().cast(), num_words); |
| @@ -1873,7 +1866,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { | |||
| 1873 | priority: crate::dma::Priority::VeryHigh, | 1866 | priority: crate::dma::Priority::VeryHigh, |
| 1874 | ..Default::default() | 1867 | ..Default::default() |
| 1875 | }; | 1868 | }; |
| 1876 | let dma_transfer = unsafe { Transfer::new_read_raw(dma, dma_request, src_ptr, dst_ptr, options) }; | 1869 | let dma_transfer = unsafe { dma.read_raw(src_ptr, dst_ptr, options) }; |
| 1877 | T::regs().dmacr().modify(|w| w.set_doen(true)); | 1870 | T::regs().dmacr().modify(|w| w.set_doen(true)); |
| 1878 | // Wait for the transfer to complete. | 1871 | // Wait for the transfer to complete. |
| 1879 | dma_transfer.await; | 1872 | dma_transfer.await; |
diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs index 66c4aa53c..ac4a0f98e 100644 --- a/embassy-stm32/src/dma/mod.rs +++ b/embassy-stm32/src/dma/mod.rs | |||
| @@ -108,17 +108,6 @@ impl Channel for AnyChannel {} | |||
| 108 | const CHANNEL_COUNT: usize = crate::_generated::DMA_CHANNELS.len(); | 108 | const CHANNEL_COUNT: usize = crate::_generated::DMA_CHANNELS.len(); |
| 109 | static STATE: [ChannelState; CHANNEL_COUNT] = [ChannelState::NEW; CHANNEL_COUNT]; | 109 | static STATE: [ChannelState; CHANNEL_COUNT] = [ChannelState::NEW; CHANNEL_COUNT]; |
| 110 | 110 | ||
| 111 | /// "No DMA" placeholder. | ||
| 112 | /// | ||
| 113 | /// You may pass this in place of a real DMA channel when creating a driver | ||
| 114 | /// to indicate it should not use DMA. | ||
| 115 | /// | ||
| 116 | /// This often causes async functionality to not be available on the instance, | ||
| 117 | /// leaving only blocking functionality. | ||
| 118 | pub struct NoDma; | ||
| 119 | |||
| 120 | impl_peripheral!(NoDma); | ||
| 121 | |||
| 122 | // safety: must be called only once at startup | 111 | // safety: must be called only once at startup |
| 123 | pub(crate) unsafe fn init( | 112 | pub(crate) unsafe fn init( |
| 124 | cs: critical_section::CriticalSection, | 113 | cs: critical_section::CriticalSection, |
