From 13c8bbd1f562f9752ccbf51a603beee0a19e21c3 Mon Sep 17 00:00:00 2001 From: SparrowLii Date: Fri, 10 Dec 2021 10:13:28 +0800 Subject: [PATCH] correct fmt --- crates/core_arch/src/arm_shared/neon/mod.rs | 2315 +++++++++++++++---- 1 file changed, 1852 insertions(+), 463 deletions(-) diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index 92306bb7e9..483554df29 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -107,54 +107,90 @@ types! { /// ARM-specific type containing two `int8x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t); /// ARM-specific type containing three `int8x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t); /// ARM-specific type containing four `int8x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t); /// ARM-specific type containing two `int8x16_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t); /// ARM-specific type containing three `int8x16_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t); /// ARM-specific type containing four `int8x16_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_t); /// ARM-specific type containing two `uint8x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t); /// ARM-specific type containing three `uint8x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t); /// ARM-specific type containing four `uint8x8_t` vectors. 
#[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_t); /// ARM-specific type containing two `uint8x16_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t); /// ARM-specific type containing three `uint8x16_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t); /// ARM-specific type containing four `uint8x16_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint8x16x4_t( pub uint8x16_t, pub uint8x16_t, @@ -164,28 +200,46 @@ pub struct uint8x16x4_t( /// ARM-specific type containing two `poly8x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t); /// ARM-specific type containing three `poly8x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t); /// ARM-specific type containing four `poly8x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_t); /// ARM-specific type containing two `poly8x16_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t); /// ARM-specific type containing three `poly8x16_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t); /// ARM-specific type containing four `poly8x16_t` vectors. 
#[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly8x16x4_t( pub poly8x16_t, pub poly8x16_t, @@ -195,41 +249,68 @@ pub struct poly8x16x4_t( /// ARM-specific type containing two `int16x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t); /// ARM-specific type containing three `int16x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t); /// ARM-specific type containing four `int16x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_t); /// ARM-specific type containing two `int16x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t); /// ARM-specific type containing three `int16x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t); /// ARM-specific type containing four `int16x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_t); /// ARM-specific type containing two `uint16x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t); /// ARM-specific type containing three `uint16x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t); /// ARM-specific type containing four `uint16x4_t` vectors. 
#[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint16x4x4_t( pub uint16x4_t, pub uint16x4_t, @@ -239,15 +320,24 @@ pub struct uint16x4x4_t( /// ARM-specific type containing two `uint16x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t); /// ARM-specific type containing three `uint16x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t); /// ARM-specific type containing four `uint16x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint16x8x4_t( pub uint16x8_t, pub uint16x8_t, @@ -257,15 +347,24 @@ pub struct uint16x8x4_t( /// ARM-specific type containing two `poly16x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t); /// ARM-specific type containing three `poly16x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t); /// ARM-specific type containing four `poly16x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly16x4x4_t( pub poly16x4_t, pub poly16x4_t, @@ -275,15 +374,24 @@ pub struct poly16x4x4_t( /// ARM-specific type containing two `poly16x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t); /// ARM-specific type containing three `poly16x8_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t); /// ARM-specific type containing four `poly16x8_t` vectors. 
#[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly16x8x4_t( pub poly16x8_t, pub poly16x8_t, @@ -293,41 +401,68 @@ pub struct poly16x8x4_t( /// ARM-specific type containing two `int32x2_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t); /// ARM-specific type containing three `int32x2_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t); /// ARM-specific type containing four `int32x2_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_t); /// ARM-specific type containing two `int32x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t); /// ARM-specific type containing three `int32x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t); /// ARM-specific type containing four `int32x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_t); /// ARM-specific type containing two `uint32x2_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t); /// ARM-specific type containing three `uint32x2_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t); /// ARM-specific type containing four `uint32x2_t` vectors. 
#[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint32x2x4_t( pub uint32x2_t, pub uint32x2_t, @@ -337,15 +472,24 @@ pub struct uint32x2x4_t( /// ARM-specific type containing two `uint32x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t); /// ARM-specific type containing three `uint32x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t); /// ARM-specific type containing four `uint32x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct uint32x4x4_t( pub uint32x4_t, pub uint32x4_t, @@ -355,15 +499,24 @@ pub struct uint32x4x4_t( /// ARM-specific type containing two `float32x2_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t); /// ARM-specific type containing three `float32x2_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t); /// ARM-specific type containing four `float32x2_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct float32x2x4_t( pub float32x2_t, pub float32x2_t, @@ -373,15 +526,24 @@ pub struct float32x2x4_t( /// ARM-specific type containing two `float32x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t); /// ARM-specific type containing three `float32x4_t` vectors. #[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t); /// ARM-specific type containing four `float32x4_t` vectors. 
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct float32x4x4_t(
     pub float32x4_t,
     pub float32x4_t,
@@ -391,41 +553,68 @@ pub struct float32x4x4_t(
 /// ARM-specific type containing two `int64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t);
 /// ARM-specific type containing three `int64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t);
 /// ARM-specific type containing four `int64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_t);
 /// ARM-specific type containing two `int64x2_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t);
 /// ARM-specific type containing three `int64x2_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t);
 /// ARM-specific type containing four `int64x2_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_t);
 /// ARM-specific type containing two `uint64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t);
 /// ARM-specific type containing three `uint64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t);
 /// ARM-specific type containing four `uint64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct uint64x1x4_t(
     pub uint64x1_t,
     pub uint64x1_t,
@@ -435,15 +624,24 @@ pub struct uint64x1x4_t(
 /// ARM-specific type containing two `uint64x2_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t);
 /// ARM-specific type containing three `uint64x2_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t);
 /// ARM-specific type containing four `uint64x2_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct uint64x2x4_t(
     pub uint64x2_t,
     pub uint64x2_t,
@@ -453,15 +651,24 @@ pub struct uint64x2x4_t(
 /// ARM-specific type containing two `poly64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t);
 /// ARM-specific type containing three `poly64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t);
 /// ARM-specific type containing four `poly64x1_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct poly64x1x4_t(
     pub poly64x1_t,
     pub poly64x1_t,
@@ -471,15 +678,24 @@ pub struct poly64x1x4_t(
 /// ARM-specific type containing two `poly64x2_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t);
 /// ARM-specific type containing three `poly64x2_t` vectors.
 #[derive(Copy, Clone, Debug)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t);
 /// ARM-specific type containing four `poly64x2_t` vectors.
#[derive(Copy, Clone, Debug)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub struct poly64x2x4_t( pub poly64x2_t, pub poly64x2_t, @@ -682,7 +898,10 @@ extern "unadjusted" { #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> int8x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -695,7 +914,10 @@ pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> in #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> int8x16_t { static_assert_imm4!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -708,7 +930,10 @@ pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -721,7 +946,10 @@ pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -734,7 +962,10 @@ pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -747,7 +978,10 @@ pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -760,7 +994,10 @@ pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> int64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -773,7 +1010,10 @@ pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) -> int64x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -786,7 +1026,10 @@ pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> uint8x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -799,7 +1042,10 @@ pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> u #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { static_assert_imm4!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -812,7 +1058,10 @@ pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -825,7 +1074,10 @@ pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vld1.16", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -838,7 +1090,10 @@ pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -851,7 +1106,10 @@ pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -864,7 +1122,10 @@ pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -877,7 +1138,10 @@ pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -890,7 +1154,10 @@ pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> poly8x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -903,7 +1170,10 @@ pub unsafe fn 
vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> p #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> poly8x16_t { static_assert_imm4!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -916,7 +1186,10 @@ pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) -> poly16x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -929,7 +1202,10 @@ pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) -> poly16x8_t { static_assert_imm3!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -942,7 +1218,10 @@ pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) -> poly64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -955,7 +1234,10 @@ pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) -> poly64x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -968,7 +1250,10 @@ pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_lane_f32(ptr: *const f32, src: 
float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -981,7 +1266,10 @@ pub unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) #[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); simd_insert(src, LANE as u32, *ptr) @@ -993,7 +1281,10 @@ pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1005,7 +1296,10 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0))); simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -1017,7 +1311,10 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1029,7 +1326,10 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1041,7 +1341,10 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -1053,7 +1356,10 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1065,7 +1371,10 @@ pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { #[cfg(target_arch = "aarch64")] { @@ -1083,7 +1392,10 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -1095,7 +1407,10 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1107,7 +1422,10 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0))); simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -1119,7 +1437,10 @@ pub unsafe fn vld1q_dup_u8(ptr: *const 
u8) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1131,7 +1452,10 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1143,7 +1467,10 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -1155,7 +1482,10 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1167,7 +1497,10 @@ pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { #[cfg(target_arch = "aarch64")] { @@ -1185,7 +1518,10 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_u64(ptr: 
*const u64) -> uint64x2_t { let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -1197,7 +1533,10 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1209,7 +1548,10 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0))); simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -1221,7 +1563,10 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1233,7 +1578,10 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0))); simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1245,7 +1593,10 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.))); simd_shuffle2!(x, x, [0, 0]) @@ -1257,7 +1608,10 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { #[cfg(target_arch = "aarch64")] { @@ -1275,7 +1629,10 @@ pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0))); simd_shuffle2!(x, x, [0, 0]) @@ -1287,7 +1644,10 @@ pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.))); simd_shuffle4!(x, x, [0, 0, 0, 0]) @@ -1299,7 +1659,10 @@ pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_add(a, vabd_s8(b, c)) } @@ -1308,7 +1671,10 @@ pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_add(a, vabd_s16(b, c)) } @@ -1317,7 +1683,10 @@ pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_add(a, vabd_s32(b, c)) } @@ -1326,7 +1695,10 @@ pub unsafe fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", 
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
     simd_add(a, vabd_u8(b, c))
 }
@@ -1335,7 +1707,10 @@ pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
     simd_add(a, vabd_u16(b, c))
 }
@@ -1344,7 +1719,10 @@ pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
     simd_add(a, vabd_u32(b, c))
 }
@@ -1354,7 +1732,10 @@ pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
     simd_add(a, vabdq_s8(b, c))
 }
@@ -1363,7 +1744,10 @@ pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
     simd_add(a, vabdq_s16(b, c))
 }
@@ -1372,7 +1756,10 @@ pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("saba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
     simd_add(a, vabdq_s32(b, c))
 }
@@ -1381,7 +1768,10 @@ pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
     simd_add(a, vabdq_u8(b, c))
 }
@@ -1390,7 +1780,10 @@ pub unsafe fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
     simd_add(a, vabdq_u16(b, c))
 }
@@ -1399,7 +1792,10 @@ pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("uaba"))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
     simd_add(a, vabdq_u32(b, c))
 }
@@ -1410,7 +1806,10 @@ pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t {
     vabs_s8_(a)
 }
@@ -1420,7 +1819,10 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t {
     vabs_s16_(a)
 }
@@ -1430,7 +1832,10 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t {
     vabs_s32_(a)
 }
@@ -1440,7 +1845,10 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t {
     vabsq_s8_(a)
 }
@@ -1450,7 +1858,10 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t {
     vabsq_s16_(a)
 }
@@ -1460,7 +1871,10 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(abs))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t {
     vabsq_s32_(a)
 }
@@ -1471,7 +1885,10 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     vpadd_s16_(a, b)
 }
@@ -1481,7 +1898,10 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     vpadd_s32_(a, b)
 }
@@ -1491,7 +1911,10 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     vpadd_s8_(a, b)
 }
@@ -1501,7 +1924,10 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     transmute(vpadd_s16_(transmute(a), transmute(b)))
 }
@@ -1511,7 +1937,10 @@ pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     transmute(vpadd_s32_(transmute(a), transmute(b)))
 }
@@ -1521,7 +1950,10 @@ pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     transmute(vpadd_s8_(transmute(a), transmute(b)))
 }
@@ -1532,7 +1964,10 @@ pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     simd_add(a, b)
 }
@@ -1543,7 +1978,10 @@ pub unsafe fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     simd_add(a, b)
 }
@@ -1554,7 +1992,10 @@ pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     simd_add(a, b)
 }
@@ -1565,7 +2006,10 @@ pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
since = "1.59.0") +)] pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_add(a, b) } @@ -1576,7 +2020,10 @@ pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_add(a, b) } @@ -1587,7 +2034,10 @@ pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_add(a, b) } @@ -1598,7 +2048,10 @@ pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_add(a, b) } @@ -1609,7 +2062,10 @@ pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_add(a, b) } @@ -1620,7 +2076,10 @@ pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_add(a, b) } @@ -1631,7 +2090,10 @@ pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_add(a, b) } @@ -1642,7 +2104,10 @@ pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) 
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_add(a, b)
 }
@@ -1653,7 +2118,10 @@ pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_add(a, b)
 }
@@ -1664,7 +2132,10 @@ pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_add(a, b)
 }
@@ -1675,7 +2146,10 @@ pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(add))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_add(a, b)
 }
@@ -1686,7 +2160,10 @@ pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fadd))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     simd_add(a, b)
 }
@@ -1697,7 +2174,10 @@ pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fadd))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     simd_add(a, b)
 }
@@ -1708,7 +2188,10 @@ pub unsafe fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
     let a: int16x8_t = simd_cast(a);
     let b: int16x8_t = simd_cast(b);
@@ -1721,7 +2204,10 @@ pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
     let a: int32x4_t = simd_cast(a);
     let b: int32x4_t = simd_cast(b);
@@ -1734,7 +2220,10 @@ pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
     let a: int64x2_t = simd_cast(a);
     let b: int64x2_t = simd_cast(b);
@@ -1747,7 +2236,10 @@ pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
     let a: uint16x8_t = simd_cast(a);
     let b: uint16x8_t = simd_cast(b);
@@ -1760,7 +2252,10 @@ pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
     let a: uint32x4_t = simd_cast(a);
     let b: uint32x4_t = simd_cast(b);
@@ -1773,7 +2268,10 @@ pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
     let a: uint64x2_t = simd_cast(a);
     let b: uint64x2_t = simd_cast(b);
@@ -1786,7 +2284,10 @@ pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
     let a: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
     let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
@@ -1801,7 +2302,10 @@ pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
     let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
     let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
@@ -1816,7 +2320,10 @@ pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddl2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
     let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
     let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
@@ -1831,7 +2338,10 @@ pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
     let a: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
     let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
@@ -1846,7 +2356,10 @@ pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
     let a: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
     let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
@@ -1861,7 +2374,10 @@ pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))]
"arm"), assert_instr(vaddl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddl2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let a: uint32x2_t = simd_shuffle2!(a, a, [2, 3]); let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]); @@ -1876,7 +2392,10 @@ pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { let b: int16x8_t = simd_cast(b); simd_add(a, b) @@ -1888,7 +2407,10 @@ pub unsafe fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { let b: int32x4_t = simd_cast(b); simd_add(a, b) @@ -1900,7 +2422,10 @@ pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { let b: int64x2_t = simd_cast(b); simd_add(a, b) @@ -1912,7 +2437,10 @@ pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { let b: uint16x8_t = simd_cast(b); simd_add(a, b) @@ -1924,7 +2452,10 @@ pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { let b: uint32x4_t = simd_cast(b); simd_add(a, b) @@ -1936,7 +2467,10 @@ pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { let b: uint64x2_t = simd_cast(b); simd_add(a, b) @@ -1948,7 +2482,10 @@ pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let b: int16x8_t = simd_cast(b); @@ -1961,7 +2498,10 @@ pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]); let b: int32x4_t = simd_cast(b); @@ -1974,7 +2514,10 @@ pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddw2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]); let b: int64x2_t = simd_cast(b); @@ -1987,7 +2530,10 @@ pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let b: uint16x8_t = simd_cast(b); @@ -2000,7 +2546,10 @@ pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", 
 pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
     let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
     let b: uint32x4_t = simd_cast(b);
@@ -2013,7 +2562,10 @@ pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddw2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
     let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
     let b: uint64x2_t = simd_cast(b);
@@ -2026,7 +2578,10 @@ pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
     simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8)))
 }
@@ -2037,7 +2592,10 @@ pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
     simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16)))
 }
@@ -2048,7 +2606,10 @@ pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
     simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32)))
 }
@@ -2059,7 +2620,10 @@ pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
     simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8)))
 }
@@ -2070,7 +2634,10 @@ pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
     simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16)))
 }
@@ -2081,7 +2648,10 @@ pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
     simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32)))
 }
@@ -2092,7 +2662,10 @@ pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t {
     let x = simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8)));
     simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
@@ -2104,7 +2677,10 @@ pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x1
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t {
     let x = simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16)));
     simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
@@ -2116,7 +2692,10 @@ pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t {
     let x = simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32)));
     simd_shuffle4!(r, x, [0, 1, 2, 3])
@@ -2128,7 +2707,10 @@ pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t {
     let x = simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8)));
     simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
@@ -2140,7 +2722,10 @@ pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uin
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t {
     let x = simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16)));
     simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
@@ -2152,7 +2737,10 @@ pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> ui
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(addhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t {
     let x = simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32)));
     simd_shuffle4!(r, x, [0, 1, 2, 3])
@@ -2164,7 +2752,10 @@ pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> ui
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
     vraddhn_s16_(a, b)
 }
@@ -2175,7 +2766,10 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
     vraddhn_s32_(a, b)
 }
@@ -2186,7 +2780,10 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
     vraddhn_s64_(a, b)
 }
@@ -2197,7 +2794,10 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
     transmute(vraddhn_s16_(transmute(a), transmute(b)))
 }
@@ -2208,7 +2808,10 @@ pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
     transmute(vraddhn_s32_(transmute(a), transmute(b)))
 }
@@ -2219,7 +2822,10 @@ pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
     transmute(vraddhn_s64_(transmute(a), transmute(b)))
 }
@@ -2230,7 +2836,10 @@ pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t {
     let x = vraddhn_s16_(a, b);
     simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
@@ -2242,7 +2851,10 @@ pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t {
     let x = vraddhn_s32_(a, b);
     simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
@@ -2254,7 +2866,10 @@ pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int1
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
"1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { let x = vraddhn_s64_(a, b); simd_shuffle4!(r, x, [0, 1, 2, 3]) @@ -2266,7 +2881,10 @@ pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { let x: uint8x8_t = transmute(vraddhn_s16_(transmute(a), transmute(b))); simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -2278,7 +2896,10 @@ pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> ui #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { let x: uint16x4_t = transmute(vraddhn_s32_(transmute(a), transmute(b))); simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -2290,7 +2911,10 @@ pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> u #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(raddhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { let x: uint32x2_t = transmute(vraddhn_s64_(transmute(a), transmute(b))); simd_shuffle4!(r, x, [0, 1, 2, 3]) @@ -2302,7 +2926,10 @@ pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> u #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { vpaddl_s8_(a) } @@ -2313,7 +2940,10 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { vpaddl_s16_(a) } @@ -2324,7 
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t {
     vpaddl_s32_(a)
 }
@@ -2335,7 +2968,10 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t {
     vpaddlq_s8_(a)
 }
@@ -2346,7 +2982,10 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t {
     vpaddlq_s16_(a)
 }
@@ -2357,7 +2996,10 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(saddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t {
     vpaddlq_s32_(a)
 }
@@ -2368,7 +3010,10 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t {
     vpaddl_u8_(a)
 }
@@ -2379,7 +3024,10 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t {
     vpaddl_u16_(a)
 }
@@ -2390,7 +3038,10 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t {
     vpaddl_u32_(a)
 }
@@ -2401,7 +3052,10 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t {
     vpaddlq_u8_(a)
 }
@@ -2412,7 +3066,10 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t {
     vpaddlq_u16_(a)
 }
@@ -2423,7 +3080,10 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uaddlp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t {
     vpaddlq_u32_(a)
 }
@@ -2434,7 +3094,10 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t {
     simd_cast(a)
 }
@@ -2445,7 +3108,10 @@ pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t {
     simd_cast(a)
 }
@@ -2456,7 +3122,10 @@ pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t {
     simd_cast(a)
 }
@@ -2467,7 +3136,10 @@ pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t {
vmovn_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { simd_cast(a) } @@ -2478,7 +3150,10 @@ pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { simd_cast(a) } @@ -2489,7 +3164,10 @@ pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(xtn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { simd_cast(a) } @@ -2500,7 +3178,10 @@ pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t { simd_cast(a) } @@ -2511,7 +3192,10 @@ pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t { simd_cast(a) } @@ -2522,7 +3206,10 @@ pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sxtl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { simd_cast(a) } @@ -2533,7 +3220,10 @@ pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + 
target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { simd_cast(a) } @@ -2544,7 +3234,10 @@ pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { simd_cast(a) } @@ -2555,7 +3248,10 @@ pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uxtl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { simd_cast(a) } @@ -2566,7 +3262,10 @@ pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { let b = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_xor(a, b) @@ -2578,7 +3277,10 @@ pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { let b = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -2592,7 +3294,10 @@ pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { let b = int16x4_t(-1, -1, -1, -1); simd_xor(a, b) @@ -2604,7 +3309,10 @@ pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { let b = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_xor(a, b) @@ 
-2616,7 +3324,10 @@ pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { let b = int32x2_t(-1, -1); simd_xor(a, b) @@ -2628,7 +3339,10 @@ pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { let b = int32x4_t(-1, -1, -1, -1); simd_xor(a, b) @@ -2640,7 +3354,10 @@ pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { let b = uint8x8_t(255, 255, 255, 255, 255, 255, 255, 255); simd_xor(a, b) @@ -2652,7 +3369,10 @@ pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { let b = uint8x16_t( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, @@ -2666,7 +3386,10 @@ pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { let b = uint16x4_t(65_535, 65_535, 65_535, 65_535); simd_xor(a, b) @@ -2678,7 +3401,10 @@ pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { let b = uint16x8_t( 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, @@ -2692,7 +3418,10 @@ pub unsafe fn 
vmvnq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { let b = uint32x2_t(4_294_967_295, 4_294_967_295); simd_xor(a, b) @@ -2704,7 +3433,10 @@ pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { let b = uint32x4_t(4_294_967_295, 4_294_967_295, 4_294_967_295, 4_294_967_295); simd_xor(a, b) @@ -2716,7 +3448,10 @@ pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { let b = poly8x8_t(255, 255, 255, 255, 255, 255, 255, 255); simd_xor(a, b) @@ -2728,7 +3463,10 @@ pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mvn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { let b = poly8x16_t( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, @@ -2742,7 +3480,10 @@ pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -2754,7 +3495,10 @@ pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -2768,7 +3512,10 @@ pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -2780,7 +3527,10 @@ pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -2792,7 +3542,10 @@ pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let c = int32x2_t(-1, -1); simd_and(simd_xor(b, c), a) @@ -2804,7 +3557,10 @@ pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -2816,7 +3572,10 @@ pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { let c = int64x1_t(-1); simd_and(simd_xor(b, c), a) @@ -2828,7 +3587,10 @@ pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub 
unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let c = int64x2_t(-1, -1); simd_and(simd_xor(b, c), a) @@ -2840,7 +3602,10 @@ pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2852,7 +3617,10 @@ pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -2866,7 +3634,10 @@ pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2878,7 +3649,10 @@ pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2890,7 +3664,10 @@ pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c = int32x2_t(-1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2902,7 +3679,10 @@ pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] 
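An illustrative aside rather than part of the patch: every `vbic`/`vbicq` body above encodes the same operation, `a AND NOT b`, as `simd_and(simd_xor(b, all_ones), a)`, because XOR with an all-ones constant is bitwise NOT (the unsigned variants just transmute the signed all-ones constant). A minimal plain-Rust model of that lane-wise semantics, runnable on any target; `bic_u8x8` is a hypothetical helper name, not a stdarch API:

// Plain-Rust model of vbic_u8 semantics: out[i] = a[i] & !b[i].
fn bic_u8x8(a: [u8; 8], b: [u8; 8]) -> [u8; 8] {
    let mut out = [0u8; 8];
    for i in 0..8 {
        // XOR with 0xFF is bitwise NOT, matching simd_xor(b, all_ones).
        out[i] = a[i] & (b[i] ^ 0xFF);
    }
    out
}

fn main() {
    let a = [0b1111_0000u8; 8];
    let b = [0b1010_1010u8; 8];
    assert_eq!(bic_u8x8(a, b), [0b0101_0000u8; 8]);
}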
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2914,7 +3694,10 @@ pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { let c = int64x1_t(-1); simd_and(simd_xor(b, transmute(c)), a) @@ -2926,7 +3709,10 @@ pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bic))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let c = int64x2_t(-1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -2942,7 +3728,10 @@ pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_select(transmute::<_, int8x8_t>(a), b, c) } @@ -2953,7 +3742,10 @@ pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_select(transmute::<_, int16x4_t>(a), b, c) } @@ -2964,7 +3756,10 @@ pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_select(transmute::<_, int32x2_t>(a), b, c) } @@ -2975,7 +3770,10 @@ pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { simd_select(transmute::<_, int64x1_t>(a), b, c) } @@ -2986,7 +3784,10 @@ pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_select(transmute::<_, int8x8_t>(a), b, c) } @@ -2997,7 +3798,10 @@ pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_select(transmute::<_, int16x4_t>(a), b, c) } @@ -3008,7 +3812,10 @@ pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_select(transmute::<_, int32x2_t>(a), b, c) } @@ -3019,7 +3826,10 @@ pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { simd_select(transmute::<_, int64x1_t>(a), b, c) } @@ -3030,7 +3840,10 @@ pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> 
float32x2_t { simd_select(transmute::<_, int32x2_t>(a), b, c) } @@ -3041,7 +3854,10 @@ pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { simd_select(transmute::<_, int8x8_t>(a), b, c) } @@ -3052,7 +3868,10 @@ pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { simd_select(transmute::<_, int16x4_t>(a), b, c) } @@ -3063,7 +3882,10 @@ pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_select(transmute::<_, int8x16_t>(a), b, c) } @@ -3074,7 +3896,10 @@ pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_select(transmute::<_, int16x8_t>(a), b, c) } @@ -3085,7 +3910,10 @@ pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_select(transmute::<_, int32x4_t>(a), b, c) } @@ -3096,7 +3924,10 @@ pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since 
= "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { simd_select(transmute::<_, int64x2_t>(a), b, c) } @@ -3107,7 +3938,10 @@ pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_select(transmute::<_, int8x16_t>(a), b, c) } @@ -3118,7 +3952,10 @@ pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_select(transmute::<_, int16x8_t>(a), b, c) } @@ -3129,7 +3966,10 @@ pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_select(transmute::<_, int32x4_t>(a), b, c) } @@ -3140,7 +3980,10 @@ pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { simd_select(transmute::<_, int64x2_t>(a), b, c) } @@ -3151,7 +3994,10 @@ pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { simd_select(transmute::<_, int8x16_t>(a), b, c) } @@ -3162,7 +4008,10 @@ pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { simd_select(transmute::<_, int16x8_t>(a), b, c) } @@ -3173,7 +4022,10 @@ pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(bsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_select(transmute::<_, int32x4_t>(a), b, c) } @@ -3184,7 +4036,10 @@ pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -3196,7 +4051,10 @@ pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -3210,7 +4068,10 @@ pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -3222,7 +4083,10 @@ pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); 
simd_or(simd_xor(b, c), a) @@ -3234,7 +4098,10 @@ pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let c = int32x2_t(-1, -1); simd_or(simd_xor(b, c), a) @@ -3246,7 +4113,10 @@ pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -3258,7 +4128,10 @@ pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { let c = int64x1_t(-1); simd_or(simd_xor(b, c), a) @@ -3270,7 +4143,10 @@ pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let c = int64x2_t(-1, -1); simd_or(simd_xor(b, c), a) @@ -3282,7 +4158,10 @@ pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3294,7 +4173,10 @@ pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn 
vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -3308,7 +4190,10 @@ pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3320,7 +4205,10 @@ pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3332,7 +4220,10 @@ pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c = int32x2_t(-1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3344,7 +4235,10 @@ pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3356,7 +4250,10 @@ pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { let c = int64x1_t(-1); simd_or(simd_xor(b, transmute(c)), a) @@ -3368,7 +4265,10 @@ pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orn))] 
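An illustrative aside rather than part of the patch: the `vorn`/`vornq` bodies follow the same pattern as `vbic`, but with OR in place of AND: `a OR NOT b`, written as `simd_or(simd_xor(b, all_ones), a)`. A plain-Rust model; `orn_u32x2` is a hypothetical helper name, not a stdarch API:

// Plain-Rust model of vorn_u32 semantics: out[i] = a[i] | !b[i].
fn orn_u32x2(a: [u32; 2], b: [u32; 2]) -> [u32; 2] {
    [a[0] | !b[0], a[1] | !b[1]]
}

fn main() {
    // !u32::MAX == 0, so the first lane stays 0; !0 == u32::MAX fills the second.
    assert_eq!(orn_u32x2([0, 0], [u32::MAX, 0]), [0, u32::MAX]);
}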
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let c = int64x2_t(-1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -3380,7 +4280,10 @@ pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vpmins_v8i8(a, b) } @@ -3391,7 +4294,10 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { vpmins_v4i16(a, b) } @@ -3402,7 +4308,10 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sminp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { vpmins_v2i32(a, b) } @@ -3413,7 +4322,10 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { vpminu_v8i8(a, b) } @@ -3424,7 +4336,10 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { vpminu_v4i16(a, b) } @@ -3435,7 +4350,10 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uminp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
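An illustrative aside rather than part of the patch: the `vpmin_*` intrinsics above (and the `vpmax_*` ones that follow) are pairwise reductions per the NEON definition: adjacent pairs of `a` are reduced into the low half of the result and adjacent pairs of `b` into the high half. A plain-Rust model; `pmin_u16x4` is a hypothetical helper name, not a stdarch API:

// Plain-Rust model of vpmin_u16 pairwise semantics:
// [min(a0,a1), min(a2,a3), min(b0,b1), min(b2,b3)].
fn pmin_u16x4(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    [a[0].min(a[1]), a[2].min(a[3]), b[0].min(b[1]), b[2].min(b[3])]
}

fn main() {
    assert_eq!(pmin_u16x4([1, 7, 3, 4], [9, 2, 5, 6]), [1, 3, 2, 5]);
}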
+#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { vpminu_v2i32(a, b) } @@ -3446,7 +4364,10 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { vpminf_v2f32(a, b) } @@ -3457,7 +4378,10 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vpmaxs_v8i8(a, b) } @@ -3468,7 +4392,10 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { vpmaxs_v4i16(a, b) } @@ -3479,7 +4406,10 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smaxp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { vpmaxs_v2i32(a, b) } @@ -3490,7 +4420,10 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { vpmaxu_v8i8(a, b) } @@ -3501,7 +4434,10 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_u16(a: 
uint16x4_t, b: uint16x4_t) -> uint16x4_t { vpmaxu_v4i16(a, b) } @@ -3512,7 +4448,10 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umaxp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { vpmaxu_v2i32(a, b) } @@ -3523,7 +4462,10 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { vpmaxf_v2f32(a, b) } @@ -3534,7 +4476,10 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3546,7 +4491,10 @@ pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 0))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { static_assert!(IMM5 : i32 where IMM5 == 0); simd_extract(v, 0) @@ -3558,7 +4506,10 @@ pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3570,7 +4521,10 @@ pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3582,7 +4536,10 @@ pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(nop, IMM5 = 2))] 
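An illustrative aside rather than part of the patch: the `vget_lane_*`/`vgetq_lane_*` hunks above all pair a `rustc_legacy_const_generics` lane parameter with a compile-time bound check (`static_assert_imm1!`/`static_assert_imm2!` and so on) before calling `simd_extract`. A plain-Rust analogue of that shape, assuming Rust 1.79+ for the inline-const assert; `get_lane_u16` here is a hypothetical stand-in, not the stdarch intrinsic:

// Const-generic lane index, validated at compile time, then extracted.
fn get_lane_u16<const LANE: usize>(v: [u16; 4]) -> u16 {
    // Stand-in for static_assert_imm2!: rejects LANE > 3 during compilation.
    const { assert!(LANE < 4) };
    v[LANE]
}

fn main() {
    assert_eq!(get_lane_u16::<2>([10, 11, 12, 13]), 12);
}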
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_p16<const IMM5: i32>(v: poly16x4_t) -> p16 {
     static_assert_imm2!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3594,7 +4551,10 @@ pub unsafe fn vget_lane_p16<const IMM5: i32>(v: poly16x4_t) -> p16 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 {
     static_assert_imm1!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3606,7 +4566,10 @@ pub unsafe fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_s32<const IMM5: i32>(v: int32x2_t) -> i32 {
     static_assert_imm1!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3618,7 +4581,10 @@ pub unsafe fn vget_lane_s32<const IMM5: i32>(v: int32x2_t) -> i32 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_f32<const IMM5: i32>(v: float32x2_t) -> f32 {
     static_assert_imm1!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3630,7 +4596,10 @@ pub unsafe fn vget_lane_f32<const IMM5: i32>(v: float32x2_t) -> f32 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 {
     static_assert_imm2!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3642,7 +4611,10 @@ pub unsafe fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_p64<const IMM5: i32>(v: poly64x1_t) -> p64 {
     static_assert!(IMM5 : i32 where IMM5 == 0);
     simd_extract(v, IMM5 as u32)
@@ -3654,7 +4626,10 @@ pub unsafe fn vget_lane_p64<const IMM5: i32>(v: poly64x1_t) -> p64 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 {
     static_assert_imm1!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3666,7 +4641,10 @@ pub unsafe fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_s64<const IMM5: i32>(v: int64x1_t) -> i64 {
     static_assert!(IMM5 : i32 where IMM5 == 0);
     simd_extract(v, IMM5 as u32)
@@ -3678,7 +4656,10 @@ pub unsafe fn vget_lane_s64<const IMM5: i32>(v: int64x1_t) -> i64 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 {
     static_assert_imm1!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3690,7 +4671,10 @@ pub unsafe fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 {
     static_assert_imm3!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3702,7 +4686,10 @@ pub unsafe fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 {
     static_assert_imm2!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3714,7 +4701,10 @@ pub unsafe fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_s16<const IMM5: i32>(v: int16x8_t) -> i16 {
     static_assert_imm3!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3726,7 +4716,10 @@ pub unsafe fn vgetq_lane_s16<const IMM5: i32>(v: int16x8_t) -> i16 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_p16<const IMM5: i32>(v: poly16x8_t) -> p16 {
     static_assert_imm3!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3738,7 +4731,10 @@ pub unsafe fn vgetq_lane_p16<const IMM5: i32>(v: poly16x8_t) -> p16 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_s32<const IMM5: i32>(v: int32x4_t) -> i32 {
     static_assert_imm2!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3750,7 +4746,10 @@ pub unsafe fn vgetq_lane_s32<const IMM5: i32>(v: int32x4_t) -> i32 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 {
     static_assert_imm3!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3762,7 +4761,10 @@ pub unsafe fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_s8<const IMM5: i32>(v: int8x8_t) -> i8 {
     static_assert_imm3!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3774,7 +4776,10 @@ pub unsafe fn vget_lane_s8<const IMM5: i32>(v: int8x8_t) -> i8 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_lane_p8<const IMM5: i32>(v: poly8x8_t) -> p8 {
     static_assert_imm3!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3786,7 +4791,10 @@ pub unsafe fn vget_lane_p8<const IMM5: i32>(v: poly8x8_t) -> p8 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_u8<const IMM5: i32>(v: uint8x16_t) -> u8 {
     static_assert_imm4!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3798,7 +4806,10 @@ pub unsafe fn vgetq_lane_u8<const IMM5: i32>(v: uint8x16_t) -> u8 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_s8<const IMM5: i32>(v: int8x16_t) -> i8 {
     static_assert_imm4!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3810,7 +4821,10 @@ pub unsafe fn vgetq_lane_s8<const IMM5: i32>(v: int8x16_t) -> i8 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(test, assert_instr(nop, IMM5 = 2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vgetq_lane_p8<const IMM5: i32>(v: poly8x16_t) -> p8 {
     static_assert_imm4!(IMM5);
     simd_extract(v, IMM5 as u32)
@@ -3822,7 +4836,10 @@ pub unsafe fn vgetq_lane_p8<const IMM5: i32>(v: poly8x16_t) -> p8 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t {
     simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
 }
@@ -3833,7 +4850,10 @@ pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t {
     simd_shuffle4!(a, a, [4, 5, 6, 7])
 }
@@ -3844,7 +4864,10 @@ pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t {
     simd_shuffle2!(a, a, [2, 3])
 }
@@ -3855,7 +4878,10 @@ pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t {
     int64x1_t(simd_extract(a, 1))
 }
@@ -3866,7 +4892,10 @@ pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t {
     simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
 }
@@ -3877,7 +4906,10 @@ pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t {
     simd_shuffle4!(a, a, [4, 5, 6, 7])
 }
@@ -3888,7 +4920,10 @@ pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t {
     simd_shuffle2!(a, a, [2, 3])
 }
@@ -3899,7 +4934,10 @@ pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t {
     uint64x1_t(simd_extract(a, 1))
 }
@@ -3910,7 +4948,10 @@ pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t {
     simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
 }
@@ -3921,7 +4962,10 @@ pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t {
     simd_shuffle4!(a, a, [4, 5, 6, 7])
 }
@@ -3932,7 +4976,10 @@ pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t {
     simd_shuffle2!(a, a, [2, 3])
 }
@@ -3951,7 +4998,10 @@ pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t {
     simd_shuffle4!(a, a, [0, 1, 2, 3])
 }
@@ -3961,7 +5011,10 @@ pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t {
     simd_shuffle2!(a, a, [0, 1])
 }
@@ -3971,7 +5024,10 @@ pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t {
     int64x1_t(simd_extract(a, 0))
 }
@@ -3981,7 +5037,10 @@ pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
     simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
 }
@@ -3991,7 +5050,10 @@ pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t {
     simd_shuffle4!(a, a, [0, 1, 2, 3])
 }
@@ -4001,7 +5063,10 @@ pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t {
     simd_shuffle2!(a, a, [0, 1])
 }
@@ -4011,7 +5076,10 @@ pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t {
     uint64x1_t(simd_extract(a, 0))
 }
@@ -4021,7 +5089,10 @@ pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t {
     simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
 }
@@ -4031,7 +5102,10 @@ pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t {
     simd_shuffle4!(a, a, [0, 1, 2, 3])
 }
@@ -4041,7 +5115,10 @@ pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(test, assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t {
     simd_shuffle2!(a, a, [0, 1])
 }
@@ -4052,7 +5129,10 @@ pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t {
     int8x16_t(
         value, value, value, value, value, value, value, value, value, value, value, value, value,
@@ -4066,7 +5146,10 @@ pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t {
     int16x8_t(value, value, value, value, value, value, value, value)
 }
@@ -4077,7 +5160,10 @@ pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t {
     int32x4_t(value, value, value, value)
 }
@@ -4088,7 +5174,10 @@ pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t {
     int64x2_t(value, value)
 }
@@ -4099,7 +5188,10 @@ pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t {
     uint8x16_t(
         value, value, value, value, value, value, value, value, value, value, value, value, value,
@@ -4113,7 +5205,10 @@ pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t {
     uint16x8_t(value, value, value, value, value, value, value, value)
 }
@@ -4124,7 +5219,10 @@ pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t {
     uint32x4_t(value, value, value, value)
 }
@@ -4135,7 +5233,10 @@ pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t {
     uint64x2_t(value, value)
 }
@@ -4146,7 +5247,10 @@ pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t {
     poly8x16_t(
         value, value, value, value, value, value, value, value, value, value, value, value, value,
@@ -4160,7 +5264,10 @@ pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t {
     poly16x8_t(value, value, value, value, value, value, value, value)
 }
@@ -4171,7 +5278,10 @@ pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t {
     float32x4_t(value, value, value, value)
 }
@@ -4185,7 +5295,10 @@ pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t {
     float32x4_t(value, value, value, value)
 }
@@ -4196,7 +5309,10 @@ unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t {
     int8x8_t(value, value, value, value, value, value, value, value)
 }
@@ -4207,7 +5323,10 @@ pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t {
     int16x4_t(value, value, value, value)
 }
@@ -4218,7 +5337,10 @@ pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t {
     int32x2_t(value, value)
 }
@@ -4229,7 +5351,10 @@ pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t {
     int64x1_t(value)
 }
@@ -4240,7 +5365,10 @@ pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t {
     uint8x8_t(value, value, value, value, value, value, value, value)
 }
@@ -4251,7 +5379,10 @@ pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t {
     uint16x4_t(value, value, value, value)
 }
@@ -4262,7 +5393,10 @@ pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t {
     uint32x2_t(value, value)
 }
@@ -4273,7 +5407,10 @@ pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t {
     uint64x1_t(value)
 }
@@ -4284,7 +5421,10 @@ pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t {
     poly8x8_t(value, value, value, value, value, value, value, value)
 }
@@ -4295,7 +5435,10 @@ pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t {
     poly16x4_t(value, value, value, value)
 }
@@ -4306,7 +5449,10 @@ pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t {
     float32x2_t(value, value)
 }
@@ -4320,7 +5466,10 @@ pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t {
     float32x2_t(value, value)
 }
@@ -4331,7 +5480,10 @@ unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vldrq_p128(a: *const p128) -> p128 {
     *a
 }
@@ -4342,7 +5494,10 @@ pub unsafe fn vldrq_p128(a: *const p128) -> p128 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vstrq_p128(a: *mut p128, b: p128) {
     *a = b;
 }
@@ -4353,7 +5508,10 @@ pub unsafe fn vstrq_p128(a: *mut p128, b: p128) {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t {
     vdup_n_s8(value)
 }
@@ -4364,7 +5522,10 @@ pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t {
     vdup_n_s16(value)
 }
@@ -4375,7 +5536,10 @@ pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t {
     vdup_n_s32(value)
 }
@@ -4386,7 +5550,10 @@ pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t {
     vdup_n_s64(value)
 }
@@ -4397,7 +5564,10 @@ pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t {
     vdup_n_u8(value)
 }
@@ -4408,7 +5578,10 @@ pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t {
     vdup_n_u16(value)
 }
@@ -4419,7 +5592,10 @@ pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t {
     vdup_n_u32(value)
 }
@@ -4430,7 +5606,10 @@ pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t {
     vdup_n_u64(value)
 }
@@ -4441,7 +5620,10 @@ pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t {
     vdup_n_p8(value)
 }
@@ -4452,7 +5634,10 @@ pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t {
     vdup_n_p16(value)
 }
@@ -4463,7 +5648,10 @@ pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t {
     vdup_n_f32(value)
 }
@@ -4474,7 +5662,10 @@ pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t {
     vdupq_n_s8(value)
 }
@@ -4485,7 +5676,10 @@ pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t {
     vdupq_n_s16(value)
 }
@@ -4496,7 +5690,10 @@ pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t {
     vdupq_n_s32(value)
 }
@@ -4507,7 +5704,10 @@ pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t {
     vdupq_n_s64(value)
 }
@@ -4518,7 +5718,10 @@ pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t {
     vdupq_n_u8(value)
 }
@@ -4529,7 +5732,10 @@ pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t {
     vdupq_n_u16(value)
 }
@@ -4540,7 +5746,10 @@ pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t {
     vdupq_n_u32(value)
 }
@@ -4551,7 +5760,10 @@ pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t {
     vdupq_n_u64(value)
 }
@@ -4562,7 +5774,10 @@ pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t {
     vdupq_n_p8(value)
 }
@@ -4573,7 +5788,10 @@ pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t {
     vdupq_n_p16(value)
 }
@@ -4584,7 +5802,10 @@ pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t {
     vdupq_n_f32(value)
 }
@@ -4596,7 +5817,10 @@ pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("nop", N = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t {
     if N != 0 {
         unreachable_unchecked()
@@ -4611,7 +5835,10 @@ pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("nop", N = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vext_u64<const N: i32>(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t {
     if N != 0 {
         unreachable_unchecked()
@@ -4625,7 +5852,10 @@ pub unsafe fn vext_u64<const N: i32>(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t {
     vcnt_s8_(a)
 }
@@ -4635,7 +5865,10 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t {
     vcntq_s8_(a)
 }
@@ -4645,7 +5878,10 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t {
     transmute(vcnt_s8_(transmute(a)))
 }
@@ -4655,7 +5891,10 @@ pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t {
     transmute(vcntq_s8_(transmute(a)))
 }
@@ -4665,7 +5904,10 @@ pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t {
     transmute(vcnt_s8_(transmute(a)))
 }
@@ -4675,7 +5917,10 @@ pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cnt))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
     transmute(vcntq_s8_(transmute(a)))
 }
@@ -4686,7 +5931,10 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t {
     simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
 }
@@ -4697,7 +5945,10 @@ pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t {
     simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
 }
@@ -4708,7 +5959,10 @@ pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t {
     simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
 }
@@ -4719,7 +5973,10 @@ pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t {
     simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
 }
@@ -4730,7 +5987,10 @@ pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t {
     simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
 }
@@ -4741,7 +6001,10 @@ pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev16))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t {
     simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
 }
@@ -4752,7 +6015,10 @@ pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t {
     simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
 }
@@ -4763,7 +6029,10 @@ pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t {
     simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
 }
@@ -4774,7 +6043,10 @@ pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t {
     simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
 }
@@ -4785,7 +6057,10 @@ pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t {
     simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
 }
@@ -4796,7 +6071,10 @@ pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t {
     simd_shuffle4!(a, a, [1, 0, 3, 2])
 }
@@ -4807,7 +6085,10 @@ pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t {
     simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
 }
@@ -4818,7 +6099,10 @@ pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t {
     simd_shuffle4!(a, a, [1, 0, 3, 2])
 }
@@ -4829,7 +6113,10 @@ pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t {
     simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
 }
@@ -4840,7 +6127,10 @@ pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t {
     simd_shuffle4!(a, a, [1, 0, 3, 2])
 }
@@ -4851,7 +6141,10 @@ pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t {
     simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
 }
@@ -4862,7 +6155,10 @@ pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t {
     simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
 }
@@ -4873,7 +6169,10 @@ pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev32))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t {
     simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
 }
@@ -4884,7 +6183,10 @@ pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t {
     simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
 }
@@ -4895,7 +6197,10 @@ pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t {
     simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
 }
@@ -4906,7 +6211,10 @@ pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t {
     simd_shuffle4!(a, a, [3, 2, 1, 0])
 }
@@ -4917,7 +6225,10 @@ pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t {
     simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
 }
@@ -4928,7 +6239,10 @@ pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t {
     simd_shuffle2!(a, a, [1, 0])
 }
@@ -4939,7 +6253,10 @@ pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t {
     simd_shuffle4!(a, a, [1, 0, 3, 2])
 }
@@ -4950,7 +6267,10 @@ pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(
+    target_arch = "aarch64",
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
 pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
     simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
 }
@@ -4961,7 +6281,10 @@ pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } @@ -4972,7 +6295,10 @@ pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { simd_shuffle4!(a, a, [3, 2, 1, 0]) } @@ -4983,7 +6309,10 @@ pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -4994,7 +6323,10 @@ pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { simd_shuffle2!(a, a, [1, 0]) } @@ -5005,7 +6337,10 @@ pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { simd_shuffle4!(a, a, [1, 0, 3, 2]) } @@ -5016,7 +6351,10 @@ pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t { simd_shuffle2!(a, a, [1, 0]) } @@ -5027,7 +6365,10 @@ pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t { simd_shuffle4!(a, a, [1, 0, 3, 2]) } @@ -5038,7 +6379,10 @@ pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -5049,7 +6393,10 @@ pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } @@ -5060,7 +6407,10 @@ pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { simd_shuffle4!(a, a, [3, 2, 1, 0]) } @@ -5071,7 +6421,10 @@ pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rev64))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -5082,7 +6435,10 @@ pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { #[cfg(target_arch = "arm")] { @@ -5100,7 +6456,10 @@ pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + 
stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { #[cfg(target_arch = "arm")] { @@ -5118,7 +6477,10 @@ pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[cfg(target_arch = "arm")] { @@ -5136,7 +6498,10 @@ pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg(target_arch = "arm")] { @@ -5154,7 +6519,10 @@ pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg(target_arch = "arm")] { @@ -5172,7 +6540,10 @@ pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg(target_arch = "arm")] { @@ -5190,7 +6561,10 @@ pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { #[cfg(target_arch = "arm")] { @@ -5208,7 +6582,10 @@ pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { #[cfg(target_arch = "arm")] { @@ -5226,7 +6603,10 @@ pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { #[cfg(target_arch = "arm")] { @@ -5244,7 +6624,10 @@ pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u8))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[cfg(target_arch = "arm")] { @@ -5262,7 +6645,10 @@ pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u16))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { #[cfg(target_arch = "arm")] { @@ -5280,7 +6666,10 @@ pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u32))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uadalp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { #[cfg(target_arch = "arm")] {