diff --git a/src/lib/block/aes/aes_armv8/aes_armv8.cpp b/src/lib/block/aes/aes_armv8/aes_armv8.cpp
index a7a834d82b8..5dce2727e12 100644
--- a/src/lib/block/aes/aes_armv8/aes_armv8.cpp
+++ b/src/lib/block/aes/aes_armv8/aes_armv8.cpp
@@ -17,11 +17,11 @@ namespace Botan {
 
 namespace AES_AARCH64 {
 
-BOTAN_FUNC_ISA_INLINE("+crypto") void enc(uint8x16_t& B, uint8x16_t K) {
+BOTAN_FUNC_ISA_INLINE("+crypto,aes") void enc(uint8x16_t& B, uint8x16_t K) {
    B = vaesmcq_u8(vaeseq_u8(B, K));
 }
 
-BOTAN_FUNC_ISA_INLINE("+crypto")
+BOTAN_FUNC_ISA_INLINE("+crypto,aes")
 void enc4(uint8x16_t& B0, uint8x16_t& B1, uint8x16_t& B2, uint8x16_t& B3, uint8x16_t K) {
    B0 = vaesmcq_u8(vaeseq_u8(B0, K));
    B1 = vaesmcq_u8(vaeseq_u8(B1, K));
@@ -29,11 +29,11 @@ void enc4(uint8x16_t& B0, uint8x16_t& B1, uint8x16_t& B2, uint8x16_t& B3, uint8x
    B3 = vaesmcq_u8(vaeseq_u8(B3, K));
 }
 
-BOTAN_FUNC_ISA_INLINE("+crypto") void enc_last(uint8x16_t& B, uint8x16_t K, uint8x16_t K2) {
+BOTAN_FUNC_ISA_INLINE("+crypto,aes") void enc_last(uint8x16_t& B, uint8x16_t K, uint8x16_t K2) {
    B = veorq_u8(vaeseq_u8(B, K), K2);
 }
 
-BOTAN_FUNC_ISA_INLINE("+crypto")
+BOTAN_FUNC_ISA_INLINE("+crypto,aes")
 void enc4_last(uint8x16_t& B0, uint8x16_t& B1, uint8x16_t& B2, uint8x16_t& B3, uint8x16_t K, uint8x16_t K2) {
    B0 = veorq_u8(vaeseq_u8(B0, K), K2);
    B1 = veorq_u8(vaeseq_u8(B1, K), K2);
@@ -41,11 +41,11 @@ void enc4_last(uint8x16_t& B0, uint8x16_t& B1, uint8x16_t& B2, uint8x16_t& B3, u
    B3 = veorq_u8(vaeseq_u8(B3, K), K2);
 }
 
-BOTAN_FUNC_ISA_INLINE("+crypto") void dec(uint8x16_t& B, uint8x16_t K) {
+BOTAN_FUNC_ISA_INLINE("+crypto,aes") void dec(uint8x16_t& B, uint8x16_t K) {
    B = vaesimcq_u8(vaesdq_u8(B, K));
 }
 
-BOTAN_FUNC_ISA_INLINE("+crypto")
+BOTAN_FUNC_ISA_INLINE("+crypto,aes")
 void dec4(uint8x16_t& B0, uint8x16_t& B1, uint8x16_t& B2, uint8x16_t& B3, uint8x16_t K) {
    B0 = vaesimcq_u8(vaesdq_u8(B0, K));
    B1 = vaesimcq_u8(vaesdq_u8(B1, K));
@@ -57,7 +57,7 @@ BOTAN_FUNC_ISA_INLINE("+crypto") void dec_last(uint8x16_t& B, uint8x16_t K, uint
    B = veorq_u8(vaesdq_u8(B, K), K2);
 }
 
-BOTAN_FUNC_ISA_INLINE("+crypto")
+BOTAN_FUNC_ISA_INLINE("+crypto,aes")
 void dec4_last(uint8x16_t& B0, uint8x16_t& B1, uint8x16_t& B2, uint8x16_t& B3, uint8x16_t K, uint8x16_t K2) {
    B0 = veorq_u8(vaesdq_u8(B0, K), K2);
    B1 = veorq_u8(vaesdq_u8(B1, K), K2);
@@ -70,7 +70,7 @@ void dec4_last(uint8x16_t& B0, uint8x16_t& B1, uint8x16_t& B2, uint8x16_t& B3, u
 /*
 * AES-128 Encryption
 */
-BOTAN_FUNC_ISA("+crypto") void AES_128::hw_aes_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
+BOTAN_FUNC_ISA("+crypto,aes") void AES_128::hw_aes_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
    const uint8_t* skey = reinterpret_cast<const uint8_t*>(m_EK.data());
 
    const uint8x16_t K0 = vld1q_u8(skey + 0 * 16);
@@ -133,7 +133,7 @@ BOTAN_FUNC_ISA("+crypto") void AES_128::hw_aes_encrypt_n(const uint8_t in[], uin
 /*
 * AES-128 Decryption
 */
-BOTAN_FUNC_ISA("+crypto") void AES_128::hw_aes_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
+BOTAN_FUNC_ISA("+crypto,aes") void AES_128::hw_aes_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
    const uint8_t* skey = reinterpret_cast<const uint8_t*>(m_DK.data());
 
    const uint8x16_t K0 = vld1q_u8(skey + 0 * 16);
@@ -196,7 +196,7 @@ BOTAN_FUNC_ISA("+crypto") void AES_128::hw_aes_decrypt_n(const uint8_t in[], uin
 /*
 * AES-192 Encryption
 */
-BOTAN_FUNC_ISA("+crypto") void AES_192::hw_aes_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
+BOTAN_FUNC_ISA("+crypto,aes") void AES_192::hw_aes_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
    const uint8_t* skey = reinterpret_cast<const uint8_t*>(m_EK.data());
 
    const uint8x16_t K0 = vld1q_u8(skey + 0 * 16);
@@ -265,7 +265,7 @@ BOTAN_FUNC_ISA("+crypto") void AES_192::hw_aes_encrypt_n(const uint8_t in[], uin
 /*
 * AES-192 Decryption
 */
-BOTAN_FUNC_ISA("+crypto") void AES_192::hw_aes_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
+BOTAN_FUNC_ISA("+crypto,aes") void AES_192::hw_aes_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
    const uint8_t* skey = reinterpret_cast<const uint8_t*>(m_DK.data());
 
    const uint8x16_t K0 = vld1q_u8(skey + 0 * 16);
@@ -334,7 +334,7 @@ BOTAN_FUNC_ISA("+crypto") void AES_192::hw_aes_decrypt_n(const uint8_t in[], uin
 /*
 * AES-256 Encryption
 */
-BOTAN_FUNC_ISA("+crypto") void AES_256::hw_aes_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
+BOTAN_FUNC_ISA("+crypto,aes") void AES_256::hw_aes_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
    const uint8_t* skey = reinterpret_cast<const uint8_t*>(m_EK.data());
 
    const uint8x16_t K0 = vld1q_u8(skey + 0 * 16);
@@ -411,7 +411,7 @@ BOTAN_FUNC_ISA("+crypto") void AES_256::hw_aes_encrypt_n(const uint8_t in[], uin
 /*
 * AES-256 Decryption
 */
-BOTAN_FUNC_ISA("+crypto") void AES_256::hw_aes_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
+BOTAN_FUNC_ISA("+crypto,aes") void AES_256::hw_aes_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
    const uint8_t* skey = reinterpret_cast<const uint8_t*>(m_DK.data());
 
    const uint8x16_t K0 = vld1q_u8(skey + 0 * 16);
diff --git a/src/lib/block/shacal2/shacal2_armv8/shacal2_arvm8.cpp b/src/lib/block/shacal2/shacal2_armv8/shacal2_arvm8.cpp
index 0e17967482f..880246ee4a6 100644
--- a/src/lib/block/shacal2/shacal2_armv8/shacal2_arvm8.cpp
+++ b/src/lib/block/shacal2/shacal2_armv8/shacal2_arvm8.cpp
@@ -13,7 +13,7 @@ namespace Botan {
 Only encryption is supported since the inverse round function would
 require a different instruction
 */
-BOTAN_FUNC_ISA("+crypto") void SHACAL2::armv8_encrypt_blocks(const uint8_t in[], uint8_t out[], size_t blocks) const {
+BOTAN_FUNC_ISA("+crypto,sha2") void SHACAL2::armv8_encrypt_blocks(const uint8_t in[], uint8_t out[], size_t blocks) const {
    const uint32_t* input32 = reinterpret_cast<const uint32_t*>(in);
    uint32_t* output32 = reinterpret_cast<uint32_t*>(out);
 
diff --git a/src/lib/hash/sha1/sha1_armv8/sha1_armv8.cpp b/src/lib/hash/sha1/sha1_armv8/sha1_armv8.cpp
index 913df62559e..38b5f1fbdf4 100644
--- a/src/lib/hash/sha1/sha1_armv8/sha1_armv8.cpp
+++ b/src/lib/hash/sha1/sha1_armv8/sha1_armv8.cpp
@@ -16,7 +16,7 @@ namespace Botan {
 * SHA-1 using CPU instructions in ARMv8
 */
 //static
-BOTAN_FUNC_ISA("+crypto")
+BOTAN_FUNC_ISA("+crypto,sha2")
 void SHA_1::sha1_armv8_compress_n(digest_type& digest, std::span<const uint8_t> input8, size_t blocks) {
    uint32x4_t ABCD;
    uint32_t E0;
diff --git a/src/lib/hash/sha2_32/sha2_32_armv8/sha2_32_armv8.cpp b/src/lib/hash/sha2_32/sha2_32_armv8/sha2_32_armv8.cpp
index e32e3924f87..0036dc59cc0 100644
--- a/src/lib/hash/sha2_32/sha2_32_armv8/sha2_32_armv8.cpp
+++ b/src/lib/hash/sha2_32/sha2_32_armv8/sha2_32_armv8.cpp
@@ -18,7 +18,7 @@ namespace Botan {
 * SHA-256 using CPU instructions in ARMv8
 */
 //static
-BOTAN_FUNC_ISA("+crypto")
+BOTAN_FUNC_ISA("+crypto,sha2")
 void SHA_256::compress_digest_armv8(digest_type& digest, std::span<const uint8_t> input8, size_t blocks) {
    alignas(64) static const uint32_t K[] = {
       0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
diff --git a/src/lib/utils/simd/simd_32.h b/src/lib/utils/simd/simd_32.h
index 5f62d5645b4..557fd5717fe 100644
--- a/src/lib/utils/simd/simd_32.h
+++ b/src/lib/utils/simd/simd_32.h
@@ -38,7 +38,7 @@
 #elif defined(BOTAN_SIMD_USE_NEON)
    #if defined(BOTAN_TARGET_ARCH_IS_ARM64)
       #define BOTAN_SIMD_ISA "+simd"
-      #define BOTAN_CLMUL_ISA "+crypto"
+      #define BOTAN_CLMUL_ISA "+crypto,aes"
    #else
       #define BOTAN_SIMD_ISA "fpu=neon"
    #endif