aes: replace inline ASM with ARMv8 intrinsics
Note: bumps the MSRV for `aes_armv8` from 1.65 -> 1.72

Rust 1.72 stabilized the ARMv8 AES intrinsics, which means we no longer
need to use inline `asm!` "polyfills" for these functions to support
stable Rust.
tarcieri committed Jan 6, 2024
1 parent 6b263c0 commit 433ef1f
Showing 6 changed files with 21 additions and 116 deletions.
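
For context, the deleted `aes/src/armv8/intrinsics.rs` module wrapped the hardware AES instructions in `asm!` blocks so they could be used on stable Rust before 1.72, including fused `vaeseq_u8_and_vaesmcq_u8` / `vaesdq_u8_and_vaesimcq_u8` variants that kept each AESE/AESMC (or AESD/AESIMC) pair inside a single `asm!` block. A minimal sketch of what one such polyfill looks like; this is an illustration of the usual `asm!` pattern, not the verbatim deleted code:

use core::arch::{aarch64::uint8x16_t, asm};

/// AESE: single AES encryption round step (AddRoundKey, SubBytes, ShiftRows),
/// expressed with inline asm instead of the pre-1.72 unstable intrinsic.
/// The `_polyfill` suffix here only marks the name as a hypothetical example.
#[inline]
#[target_feature(enable = "aes")]
unsafe fn vaeseq_u8_polyfill(mut data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
    asm!(
        "aese {d:v}.16b, {k:v}.16b",
        d = inout(vreg) data,
        k = in(vreg) key,
        options(pure, nomem, nostack, preserves_flags),
    );
    data
}
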
2 changes: 1 addition & 1 deletion .github/workflows/aes.yml
@@ -217,7 +217,7 @@ jobs:
       matrix:
         include:
           - target: aarch64-unknown-linux-gnu
-            rust: 1.65.0 # MSRV
+            rust: 1.72.0 # MSRV for `aes_armv8`
     runs-on: ubuntu-latest
     # Cross mounts only current package, i.e. by default it ignores workspace's Cargo.toml
     defaults:
1 change: 0 additions & 1 deletion aes/src/armv8.rs
@@ -14,7 +14,6 @@ pub(crate) mod hazmat;
 
 mod encdec;
 mod expand;
-mod intrinsics;
 #[cfg(test)]
 mod test_expand;
 
34 changes: 20 additions & 14 deletions aes/src/armv8/encdec.rs
@@ -4,12 +4,6 @@ use crate::{Block, Block8};
 use cipher::inout::InOut;
 use core::arch::aarch64::*;
 
-// Stable "polyfills" for unstable core::arch::aarch64 intrinsics
-// TODO(tarcieri): remove when these intrinsics have been stabilized
-use super::intrinsics::{
-    vaesdq_u8, vaesdq_u8_and_vaesimcq_u8, vaeseq_u8, vaeseq_u8_and_vaesmcq_u8,
-};
-
 /// Perform AES encryption using the given expanded keys.
 #[target_feature(enable = "aes")]
 #[target_feature(enable = "neon")]
@@ -25,8 +19,11 @@ pub(super) unsafe fn encrypt1<const N: usize>(
     let mut state = vld1q_u8(in_ptr as *const u8);
 
     for k in expanded_keys.iter().take(rounds - 1) {
-        // AES single round encryption and mix columns
-        state = vaeseq_u8_and_vaesmcq_u8(state, *k);
+        // AES single round encryption
+        state = vaeseq_u8(state, *k);
+
+        // Mix columns
+        state = vaesmcq_u8(state);
     }
 
     // AES single round encryption
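
After this change, the single-block path is the textbook ARMv8 sequence: AESE + AESMC for every round but the last, then one final AESE followed by an EOR with the last round key (no MixColumns in the final round). A condensed sketch of that whole flow, reconstructed from the context above rather than copied verbatim from `encrypt1`:

use core::arch::aarch64::*;

/// Encrypt one 16-byte block in place with `N` expanded round keys
/// (e.g. N = 11 for AES-128, so `rounds` = 10).
#[target_feature(enable = "aes")]
unsafe fn encrypt_block<const N: usize>(expanded_keys: &[uint8x16_t; N], block: &mut [u8; 16]) {
    let rounds = N - 1;
    let mut state = vld1q_u8(block.as_ptr());

    // All but the final round: AESE (AddRoundKey + SubBytes + ShiftRows),
    // then AESMC (MixColumns).
    for k in expanded_keys.iter().take(rounds - 1) {
        state = vaeseq_u8(state, *k);
        state = vaesmcq_u8(state);
    }

    // Final round: AESE with the penultimate key, no MixColumns,
    // then the last round key is applied with a plain XOR.
    state = vaeseq_u8(state, expanded_keys[rounds - 1]);
    state = veorq_u8(state, expanded_keys[rounds]);

    vst1q_u8(block.as_mut_ptr(), state);
}
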
@@ -65,8 +62,11 @@ pub(super) unsafe fn encrypt8<const N: usize>(
 
     for k in expanded_keys.iter().take(rounds - 1) {
         for i in 0..8 {
-            // AES single round encryption and mix columns
-            state[i] = vaeseq_u8_and_vaesmcq_u8(state[i], *k);
+            // AES single round encryption
+            state[i] = vaeseq_u8(state[i], *k);
+
+            // Mix columns
+            state[i] = vaesmcq_u8(state[i]);
         }
     }
 
@@ -95,8 +95,11 @@ pub(super) unsafe fn decrypt1<const N: usize>(
     let mut state = vld1q_u8(in_ptr as *const u8);
 
     for k in expanded_keys.iter().take(rounds - 1) {
-        // AES single round decryption and inverse mix columns
-        state = vaesdq_u8_and_vaesimcq_u8(state, *k);
+        // AES single round decryption
+        state = vaesdq_u8(state, *k);
+
+        // Inverse mix columns
+        state = vaesimcq_u8(state);
     }
 
     // AES single round decryption
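
Decryption mirrors the loop above with `vaesdq_u8` / `vaesimcq_u8`, but it assumes the equivalent-inverse-cipher key schedule: the encryption round keys are reversed and every key except the first and last is run through AESIMC, which is also why `expand.rs` (below) uses `vaesimcq_u8`. A sketch of that preprocessing step, with an invented helper name:

use core::arch::aarch64::*;

/// Derive a decryption key schedule from an encryption one: reverse the keys
/// and apply InvMixColumns (AESIMC) to every key except the first and last.
#[target_feature(enable = "aes")]
unsafe fn inv_expanded_keys<const N: usize>(enc: &[uint8x16_t; N]) -> [uint8x16_t; N] {
    let mut dec = *enc;
    dec.reverse();
    for k in dec.iter_mut().take(N - 1).skip(1) {
        *k = vaesimcq_u8(*k);
    }
    dec
}
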
@@ -135,8 +138,11 @@ pub(super) unsafe fn decrypt8<const N: usize>(
 
     for k in expanded_keys.iter().take(rounds - 1) {
         for i in 0..8 {
-            // AES single round decryption and inverse mix columns
-            state[i] = vaesdq_u8_and_vaesimcq_u8(state[i], *k);
+            // AES single round decryption
+            state[i] = vaesdq_u8(state[i], *k);
+
+            // Inverse mix columns
+            state[i] = vaesimcq_u8(state[i]);
         }
     }
 
4 changes: 0 additions & 4 deletions aes/src/armv8/expand.rs
@@ -2,10 +2,6 @@
 
 use core::{arch::aarch64::*, mem, slice};
 
-// Stable "polyfills" for unstable core::arch::aarch64 intrinsics
-// TODO(tarcieri): remove when these intrinsics have been stabilized
-use super::intrinsics::{vaeseq_u8, vaesimcq_u8};
-
 /// There are 4 AES words in a block.
 const BLOCK_WORDS: usize = 4;
 
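
`expand.rs` keeps using the same two intrinsics, now taken from `core::arch::aarch64` directly. The less obvious use is `vaeseq_u8` in key expansion: broadcasting a key-schedule word into all four lanes and running AESE against an all-zero key reduces the instruction to a pure S-box lookup (AddRoundKey with zero and ShiftRows are both no-ops when every column is identical), which yields SubWord without a software table. A sketch of that trick, assuming this is the shape the crate uses; the helper name is illustrative:

use core::arch::aarch64::*;

/// SubWord step of the AES key schedule via the AESE instruction.
#[target_feature(enable = "aes")]
unsafe fn sub_word(word: u32) -> u32 {
    // Broadcast the word so ShiftRows cannot mix in other data,
    // then AESE with a zero key leaves only SubBytes.
    let input = vreinterpretq_u8_u32(vdupq_n_u32(word));
    let subbed = vaeseq_u8(input, vdupq_n_u8(0));
    vgetq_lane_u32::<0>(vreinterpretq_u32_u8(subbed))
}
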
3 changes: 0 additions & 3 deletions aes/src/armv8/hazmat.rs
@@ -7,9 +7,6 @@
 use crate::{Block, Block8};
 use core::arch::aarch64::*;
 
-// Stable "polyfills" for unstable core::arch::aarch64 intrinsics
-use super::intrinsics::{vaesdq_u8, vaeseq_u8, vaesimcq_u8, vaesmcq_u8};
-
 /// AES cipher (encrypt) round function.
 #[allow(clippy::cast_ptr_alignment)]
 #[target_feature(enable = "aes")]
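
`hazmat.rs` likewise only drops the polyfill import; its exposed round functions compose straight from the stable intrinsics. A sketch of one full cipher round (SubBytes, ShiftRows, MixColumns, AddRoundKey) built that way; the signature is illustrative rather than the crate's exact API:

use core::arch::aarch64::*;

/// One AES cipher round on a single block.
#[target_feature(enable = "aes")]
unsafe fn cipher_round(block: &mut [u8; 16], round_key: &[u8; 16]) {
    let mut state = vld1q_u8(block.as_ptr());
    state = vaeseq_u8(state, vdupq_n_u8(0)); // SubBytes + ShiftRows (zero key adds nothing)
    state = vaesmcq_u8(state); // MixColumns
    state = veorq_u8(state, vld1q_u8(round_key.as_ptr())); // AddRoundKey
    vst1q_u8(block.as_mut_ptr(), state);
}
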
93 changes: 0 additions & 93 deletions aes/src/armv8/intrinsics.rs

This file was deleted.
