diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
index 8139b8cd6f3cf..231d28c67bb89 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
@@ -4117,7 +4117,7 @@ pub fn _mm_mask_cmpneq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mma
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm512_cmp_epu16_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask32 {
     unsafe {
@@ -4144,7 +4144,7 @@ pub fn _mm512_cmp_epu16_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm512_mask_cmp_epu16_mask<const IMM8: i32>(
     k1: __mmask32,
@@ -4176,7 +4176,7 @@ pub fn _mm512_mask_cmp_epu16_mask<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm256_cmp_epu16_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask16 {
     unsafe {
@@ -4203,7 +4203,7 @@ pub fn _mm256_cmp_epu16_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm256_mask_cmp_epu16_mask<const IMM8: i32>(
     k1: __mmask16,
@@ -4235,7 +4235,7 @@ pub fn _mm256_mask_cmp_epu16_mask<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm_cmp_epu16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask8 {
     unsafe {
@@ -4262,7 +4262,7 @@ pub fn _mm_cmp_epu16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm_mask_cmp_epu16_mask<const IMM8: i32>(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 {
     unsafe {
@@ -4290,7 +4290,7 @@ pub fn _mm_mask_cmp_epu16_mask<const IMM8: i32>(k1: __mmask8, a: __m128i, b: __m
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm512_cmp_epu8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask64 {
     unsafe {
@@ -4317,7 +4317,7 @@ pub fn _mm512_cmp_epu8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask6
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm512_mask_cmp_epu8_mask<const IMM8: i32>(
     k1: __mmask64,
@@ -4349,7 +4349,7 @@ pub fn _mm512_mask_cmp_epu8_mask<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm256_cmp_epu8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask32 {
     unsafe {
@@ -4376,7 +4376,7 @@ pub fn _mm256_cmp_epu8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask3
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm256_mask_cmp_epu8_mask<const IMM8: i32>(
     k1: __mmask32,
@@ -4408,7 +4408,7 @@ pub fn _mm256_mask_cmp_epu8_mask<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm_cmp_epu8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask16 {
     unsafe {
@@ -4435,7 +4435,7 @@ pub fn _mm_cmp_epu8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask16 {
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm_mask_cmp_epu8_mask<const IMM8: i32>(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 {
     unsafe {
@@ -4463,7 +4463,7 @@ pub fn _mm_mask_cmp_epu8_mask<const IMM8: i32>(k1: __mmask16, a: __m128i, b: __m
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm512_cmp_epi16_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask32 {
     unsafe {
@@ -4490,7 +4490,7 @@ pub fn _mm512_cmp_epi16_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm512_mask_cmp_epi16_mask<const IMM8: i32>(
     k1: __mmask32,
@@ -4522,7 +4522,7 @@ pub fn _mm512_mask_cmp_epi16_mask<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm256_cmp_epi16_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask16 {
     unsafe {
@@ -4549,7 +4549,7 @@ pub fn _mm256_cmp_epi16_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm256_mask_cmp_epi16_mask<const IMM8: i32>(
     k1: __mmask16,
@@ -4581,7 +4581,7 @@ pub fn _mm256_mask_cmp_epi16_mask<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm_cmp_epi16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask8 {
     unsafe {
@@ -4608,7 +4608,7 @@ pub fn _mm_cmp_epi16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm_mask_cmp_epi16_mask<const IMM8: i32>(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 {
     unsafe {
@@ -4636,7 +4636,7 @@ pub fn _mm_mask_cmp_epi16_mask<const IMM8: i32>(k1: __mmask8, a: __m128i, b: __m
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm512_cmp_epi8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask64 {
     unsafe {
@@ -4663,7 +4663,7 @@ pub fn _mm512_cmp_epi8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask6
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm512_mask_cmp_epi8_mask<const IMM8: i32>(
     k1: __mmask64,
@@ -4695,7 +4695,7 @@ pub fn _mm512_mask_cmp_epi8_mask<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm256_cmp_epi8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask32 {
     unsafe {
@@ -4722,7 +4722,7 @@ pub fn _mm256_cmp_epi8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask3
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm256_mask_cmp_epi8_mask<const IMM8: i32>(
     k1: __mmask32,
@@ -4754,7 +4754,7 @@ pub fn _mm256_mask_cmp_epi8_mask<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm_cmp_epi8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask16 {
     unsafe {
@@ -4781,7 +4781,7 @@ pub fn _mm_cmp_epi8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask16 {
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
 pub fn _mm_mask_cmp_epi8_mask<const IMM8: i32>(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 {
     unsafe {
@@ -6699,7 +6699,7 @@ pub fn _mm_maskz_sll_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_slli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -6718,7 +6718,7 @@ pub fn _mm512_slli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_slli_epi16<const IMM8: u32>(src: __m512i, k: __mmask32, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -6738,7 +6738,7 @@ pub fn _mm512_mask_slli_epi16<const IMM8: u32>(src: __m512i, k: __mmask32, a: __
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_slli_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -6758,7 +6758,7 @@ pub fn _mm512_maskz_slli_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_mask_slli_epi16<const IMM8: u32>(src: __m256i, k: __mmask16, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -6778,7 +6778,7 @@ pub fn _mm256_mask_slli_epi16<const IMM8: u32>(src: __m256i, k: __mmask16, a: __
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_maskz_slli_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -6798,7 +6798,7 @@ pub fn _mm256_maskz_slli_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_mask_slli_epi16<const IMM8: u32>(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -6818,7 +6818,7 @@ pub fn _mm_mask_slli_epi16<const IMM8: u32>(src: __m128i, k: __mmask8, a: __m128
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_maskz_slli_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7050,7 +7050,7 @@ pub fn _mm_maskz_srl_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_srli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7069,7 +7069,7 @@ pub fn _mm512_srli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_srli_epi16<const IMM8: u32>(src: __m512i, k: __mmask32, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7089,7 +7089,7 @@ pub fn _mm512_mask_srli_epi16<const IMM8: u32>(src: __m512i, k: __mmask32, a: __
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_srli_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7110,7 +7110,7 @@ pub fn _mm512_maskz_srli_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_mask_srli_epi16<const IMM8: u32>(src: __m256i, k: __mmask16, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7126,7 +7126,7 @@ pub fn _mm256_mask_srli_epi16<const IMM8: u32>(src: __m256i, k: __mmask16, a: __
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_maskz_srli_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7142,7 +7142,7 @@ pub fn _mm256_maskz_srli_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_mask_srli_epi16<const IMM8: u32>(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7158,7 +7158,7 @@ pub fn _mm_mask_srli_epi16<const IMM8: u32>(src: __m128i, k: __mmask8, a: __m128
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_maskz_srli_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7386,7 +7386,7 @@ pub fn _mm_maskz_sra_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_srai_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7401,7 +7401,7 @@ pub fn _mm512_srai_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_srai_epi16<const IMM8: u32>(src: __m512i, k: __mmask32, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7417,7 +7417,7 @@ pub fn _mm512_mask_srai_epi16<const IMM8: u32>(src: __m512i, k: __mmask32, a: __
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_srai_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7433,7 +7433,7 @@ pub fn _mm512_maskz_srai_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_mask_srai_epi16<const IMM8: u32>(src: __m256i, k: __mmask16, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7449,7 +7449,7 @@ pub fn _mm256_mask_srai_epi16<const IMM8: u32>(src: __m256i, k: __mmask16, a: __
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_maskz_srai_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7465,7 +7465,7 @@ pub fn _mm256_maskz_srai_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_mask_srai_epi16<const IMM8: u32>(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -7481,7 +7481,7 @@ pub fn _mm_mask_srai_epi16<const IMM8: u32>(src: __m128i, k: __mmask8, a: __m128
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_maskz_srai_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9017,7 +9017,7 @@ pub fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_shufflelo_epi16<const IMM8: i32>(a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9071,7 +9071,7 @@ pub fn _mm512_shufflelo_epi16<const IMM8: i32>(a: __m512i) -> __m512i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_shufflelo_epi16<const IMM8: i32>(
     src: __m512i,
     k: __mmask32,
@@ -9091,7 +9091,7 @@ pub fn _mm512_mask_shufflelo_epi16<const IMM8: i32>(
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask32, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9107,7 +9107,7 @@ pub fn _mm512_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask32, a: __m512i) -
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_mask_shufflelo_epi16<const IMM8: i32>(
     src: __m256i,
     k: __mmask16,
@@ -9127,7 +9127,7 @@ pub fn _mm256_mask_shufflelo_epi16<const IMM8: i32>(
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask16, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9143,7 +9143,7 @@ pub fn _mm256_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask16, a: __m256i) -
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_mask_shufflelo_epi16<const IMM8: i32>(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9159,7 +9159,7 @@ pub fn _mm_mask_shufflelo_epi16<const IMM8: i32>(src: __m128i, k: __mmask8, a: _
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9175,7 +9175,7 @@ pub fn _mm_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask8, a: __m128i) -> __
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_shufflehi_epi16<const IMM8: i32>(a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9229,7 +9229,7 @@ pub fn _mm512_shufflehi_epi16<const IMM8: i32>(a: __m512i) -> __m512i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_shufflehi_epi16<const IMM8: i32>(
     src: __m512i,
     k: __mmask32,
@@ -9249,7 +9249,7 @@ pub fn _mm512_mask_shufflehi_epi16<const IMM8: i32>(
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask32, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9265,7 +9265,7 @@ pub fn _mm512_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask32, a: __m512i) -
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_mask_shufflehi_epi16<const IMM8: i32>(
     src: __m256i,
     k: __mmask16,
@@ -9285,7 +9285,7 @@ pub fn _mm256_mask_shufflehi_epi16<const IMM8: i32>(
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask16, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9301,7 +9301,7 @@ pub fn _mm256_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask16, a: __m256i) -
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_mask_shufflehi_epi16<const IMM8: i32>(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9317,7 +9317,7 @@ pub fn _mm_mask_shufflehi_epi16<const IMM8: i32>(src: __m128i, k: __mmask8, a: _
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -9794,7 +9794,7 @@ pub fn _mm512_sad_epu8(a: __m512i, b: __m512i) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm512_dbsad_epu8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     unsafe {
@@ -9812,7 +9812,7 @@ pub fn _mm512_dbsad_epu8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(4)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm512_mask_dbsad_epu8<const IMM8: i32>(
     src: __m512i,
@@ -9835,7 +9835,7 @@ pub fn _mm512_mask_dbsad_epu8<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm512_maskz_dbsad_epu8<const IMM8: i32>(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
@@ -9853,7 +9853,7 @@ pub fn _mm512_maskz_dbsad_epu8<const IMM8: i32>(k: __mmask32, a: __m512i, b: __m
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm256_dbsad_epu8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
     unsafe {
@@ -9871,7 +9871,7 @@ pub fn _mm256_dbsad_epu8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(4)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm256_mask_dbsad_epu8<const IMM8: i32>(
     src: __m256i,
@@ -9894,7 +9894,7 @@ pub fn _mm256_mask_dbsad_epu8<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm256_maskz_dbsad_epu8<const IMM8: i32>(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
@@ -9912,7 +9912,7 @@ pub fn _mm256_maskz_dbsad_epu8<const IMM8: i32>(k: __mmask16, a: __m256i, b: __m
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm_dbsad_epu8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
     unsafe {
@@ -9930,7 +9930,7 @@ pub fn _mm_dbsad_epu8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(4)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm_mask_dbsad_epu8<const IMM8: i32>(
     src: __m128i,
@@ -9953,7 +9953,7 @@ pub fn _mm_mask_dbsad_epu8<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
 pub fn _mm_maskz_dbsad_epu8<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
@@ -10424,7 +10424,7 @@ pub fn _kortestz_mask64_u8(a: __mmask64, b: __mmask64) -> u8 {
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kshiftli_mask32)
 #[inline]
 #[target_feature(enable = "avx512bw")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _kshiftli_mask32<const COUNT: u32>(a: __mmask32) -> __mmask32 {
     a << COUNT
@@ -10435,7 +10435,7 @@ pub fn _kshiftli_mask32<const COUNT: u32>(a: __mmask32) -> __mmask32 {
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kshiftli_mask64)
 #[inline]
 #[target_feature(enable = "avx512bw")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _kshiftli_mask64<const COUNT: u32>(a: __mmask64) -> __mmask64 {
     a << COUNT
@@ -10446,7 +10446,7 @@ pub fn _kshiftli_mask64<const COUNT: u32>(a: __mmask64) -> __mmask64 {
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kshiftri_mask32)
 #[inline]
 #[target_feature(enable = "avx512bw")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _kshiftri_mask32<const COUNT: u32>(a: __mmask32) -> __mmask32 {
     a >> COUNT
@@ -10457,7 +10457,7 @@ pub fn _kshiftri_mask32<const COUNT: u32>(a: __mmask32) -> __mmask32 {
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kshiftri_mask64)
 #[inline]
 #[target_feature(enable = "avx512bw")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _kshiftri_mask64<const COUNT: u32>(a: __mmask64) -> __mmask64 {
     a >> COUNT
@@ -11107,7 +11107,7 @@ pub fn _mm_maskz_cvtepu8_epi16(k: __mmask8, a: __m128i) -> __m128i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_bslli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -11202,7 +11202,7 @@ pub fn _mm512_bslli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 3))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -11299,7 +11299,7 @@ pub fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     const fn mask(shift: u32, i: u32) -> u32 {
         let shift = shift % 16;
@@ -11409,7 +11409,7 @@ pub fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_alignr_epi8<const IMM8: i32>(
     src: __m512i,
     k: __mmask64,
@@ -11430,7 +11430,7 @@ pub fn _mm512_mask_alignr_epi8<const IMM8: i32>(
 #[target_feature(enable = "avx512bw")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_alignr_epi8<const IMM8: i32>(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -11445,7 +11445,7 @@ pub fn _mm512_maskz_alignr_epi8<const IMM8: i32>(k: __mmask64, a: __m512i, b: __
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(4)]
+
 #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))]
 pub fn _mm256_mask_alignr_epi8<const IMM8: i32>(
     src: __m256i,
@@ -11466,7 +11466,7 @@ pub fn _mm256_mask_alignr_epi8<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))]
 pub fn _mm256_maskz_alignr_epi8<const IMM8: i32>(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
@@ -11482,7 +11482,7 @@ pub fn _mm256_maskz_alignr_epi8<const IMM8: i32>(k: __mmask32, a: __m256i, b: __
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(4)]
+
 #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))]
 pub fn _mm_mask_alignr_epi8<const IMM8: i32>(
     src: __m128i,
@@ -11503,7 +11503,7 @@ pub fn _mm_mask_alignr_epi8<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))]
 pub fn _mm_maskz_alignr_epi8<const IMM8: i32>(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512dq.rs b/library/stdarch/crates/core_arch/src/x86/avx512dq.rs
index c90ec894f2174..a4f631ff107a7 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512dq.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512dq.rs
@@ -1355,7 +1355,7 @@ pub fn _mm512_maskz_broadcast_i64x2(k: __mmask8, a: __m128i) -> __m512i {
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extractf32x8_ps&ig_expand=2946)
 #[inline]
 #[target_feature(enable = "avx512dq")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_extractf32x8_ps<const IMM8: i32>(a: __m512) -> __m256 {
     unsafe {
@@ -1375,7 +1375,7 @@ pub fn _mm512_extractf32x8_ps<const IMM8: i32>(a: __m512) -> __m256 {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vextractf32x8, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_extractf32x8_ps<const IMM8: i32>(src: __m256, k: __mmask8, a: __m512) -> __m256 {
     unsafe {
@@ -1393,7 +1393,7 @@ pub fn _mm512_mask_extractf32x8_ps<const IMM8: i32>(src: __m256, k: __mmask8, a:
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vextractf32x8, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_extractf32x8_ps<const IMM8: i32>(k: __mmask8, a: __m512) -> __m256 {
     unsafe {
@@ -1409,7 +1409,7 @@ pub fn _mm512_maskz_extractf32x8_ps<const IMM8: i32>(k: __mmask8, a: __m512) ->
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extractf64x2_pd&ig_expand=2949)
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_extractf64x2_pd<const IMM8: i32>(a: __m256d) -> __m128d {
     unsafe {
@@ -1429,7 +1429,7 @@ pub fn _mm256_extractf64x2_pd<const IMM8: i32>(a: __m256d) -> __m128d {
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
 #[cfg_attr(test, assert_instr(vextractf64x2, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_mask_extractf64x2_pd<const IMM8: i32>(
     src: __m128d,
@@ -1451,7 +1451,7 @@ pub fn _mm256_mask_extractf64x2_pd<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
 #[cfg_attr(test, assert_instr(vextractf64x2, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_maskz_extractf64x2_pd<const IMM8: i32>(k: __mmask8, a: __m256d) -> __m128d {
     unsafe {
@@ -1467,7 +1467,7 @@ pub fn _mm256_maskz_extractf64x2_pd<const IMM8: i32>(k: __mmask8, a: __m256d) ->
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extractf64x2_pd&ig_expand=2952)
 #[inline]
 #[target_feature(enable = "avx512dq")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_extractf64x2_pd<const IMM8: i32>(a: __m512d) -> __m128d {
     unsafe {
@@ -1489,7 +1489,7 @@ pub fn _mm512_extractf64x2_pd<const IMM8: i32>(a: __m512d) -> __m128d {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vextractf64x2, IMM8 = 3))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_extractf64x2_pd<const IMM8: i32>(
     src: __m128d,
@@ -1511,7 +1511,7 @@ pub fn _mm512_mask_extractf64x2_pd<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vextractf64x2, IMM8 = 3))]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_extractf64x2_pd<const IMM8: i32>(k: __mmask8, a: __m512d) -> __m128d {
     unsafe {
@@ -1527,7 +1527,7 @@ pub fn _mm512_maskz_extractf64x2_pd<const IMM8: i32>(k: __mmask8, a: __m512d) ->
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extracti32x8_epi32&ig_expand=2965)
 #[inline]
 #[target_feature(enable = "avx512dq")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_extracti32x8_epi32<const IMM8: i32>(a: __m512i) -> __m256i {
     unsafe {
@@ -1548,7 +1548,7 @@ pub fn _mm512_extracti32x8_epi32<const IMM8: i32>(a: __m512i) -> __m256i {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vextracti32x8, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_extracti32x8_epi32<const IMM8: i32>(
     src: __m256i,
@@ -1569,7 +1569,7 @@ pub fn _mm512_mask_extracti32x8_epi32<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vextracti32x8, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_extracti32x8_epi32<const IMM8: i32>(k: __mmask8, a: __m512i) -> __m256i {
     unsafe {
@@ -1585,7 +1585,7 @@ pub fn _mm512_maskz_extracti32x8_epi32<const IMM8: i32>(k: __mmask8, a: __m512i)
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extracti64x2_epi64&ig_expand=2968)
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_extracti64x2_epi64<const IMM8: i32>(a: __m256i) -> __m128i {
     unsafe {
@@ -1605,7 +1605,7 @@ pub fn _mm256_extracti64x2_epi64<const IMM8: i32>(a: __m256i) -> __m128i {
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
 #[cfg_attr(test, assert_instr(vextracti64x2, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_mask_extracti64x2_epi64<const IMM8: i32>(
     src: __m128i,
@@ -1626,7 +1626,7 @@ pub fn _mm256_mask_extracti64x2_epi64<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
 #[cfg_attr(test, assert_instr(vextracti64x2, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m256i) -> __m128i {
     unsafe {
@@ -1642,7 +1642,7 @@ pub fn _mm256_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m256i)
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extracti64x2_epi64&ig_expand=2971)
 #[inline]
 #[target_feature(enable = "avx512dq")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_extracti64x2_epi64<const IMM8: i32>(a: __m512i) -> __m128i {
     unsafe {
@@ -1664,7 +1664,7 @@ pub fn _mm512_extracti64x2_epi64<const IMM8: i32>(a: __m512i) -> __m128i {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vextracti64x2, IMM8 = 3))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_extracti64x2_epi64<const IMM8: i32>(
     src: __m128i,
@@ -1685,7 +1685,7 @@ pub fn _mm512_mask_extracti64x2_epi64<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vextracti64x2, IMM8 = 3))]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m512i) -> __m128i {
     unsafe {
@@ -1703,7 +1703,7 @@ pub fn _mm512_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m512i)
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_insertf32x8&ig_expand=3850)
 #[inline]
 #[target_feature(enable = "avx512dq")]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_insertf32x8<const IMM8: i32>(a: __m512, b: __m256) -> __m512 {
     unsafe {
@@ -1736,7 +1736,7 @@ pub fn _mm512_insertf32x8<const IMM8: i32>(a: __m512, b: __m256) -> __m512 {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vinsertf32x8, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_insertf32x8<const IMM8: i32>(
     src: __m512,
@@ -1759,7 +1759,7 @@ pub fn _mm512_mask_insertf32x8<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vinsertf32x8, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_insertf32x8<const IMM8: i32>(k: __mmask16, a: __m512, b: __m256) -> __m512 {
     unsafe {
@@ -1775,7 +1775,7 @@ pub fn _mm512_maskz_insertf32x8<const IMM8: i32>(k: __mmask16, a: __m512, b: __m
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insertf64x2&ig_expand=3853)
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_insertf64x2<const IMM8: i32>(a: __m256d, b: __m128d) -> __m256d {
     unsafe {
@@ -1796,7 +1796,7 @@ pub fn _mm256_insertf64x2<const IMM8: i32>(a: __m256d, b: __m128d) -> __m256d {
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
 #[cfg_attr(test, assert_instr(vinsertf64x2, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_mask_insertf64x2<const IMM8: i32>(
     src: __m256d,
@@ -1819,7 +1819,7 @@ pub fn _mm256_mask_insertf64x2<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
 #[cfg_attr(test, assert_instr(vinsertf64x2, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_maskz_insertf64x2<const IMM8: i32>(k: __mmask8, a: __m256d, b: __m128d) -> __m256d {
     unsafe {
@@ -1835,7 +1835,7 @@ pub fn _mm256_maskz_insertf64x2<const IMM8: i32>(k: __mmask8, a: __m256d, b: __m
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_insertf64x2&ig_expand=3856)
 #[inline]
 #[target_feature(enable = "avx512dq")]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_insertf64x2<const IMM8: i32>(a: __m512d, b: __m128d) -> __m512d {
     unsafe {
@@ -1858,7 +1858,7 @@ pub fn _mm512_insertf64x2<const IMM8: i32>(a: __m512d, b: __m128d) -> __m512d {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vinsertf64x2, IMM8 = 3))]
-#[rustc_legacy_const_generics(4)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_insertf64x2<const IMM8: i32>(
     src: __m512d,
@@ -1881,7 +1881,7 @@ pub fn _mm512_mask_insertf64x2<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vinsertf64x2, IMM8 = 3))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_insertf64x2<const IMM8: i32>(k: __mmask8, a: __m512d, b: __m128d) -> __m512d {
     unsafe {
@@ -1897,7 +1897,7 @@ pub fn _mm512_maskz_insertf64x2<const IMM8: i32>(k: __mmask8, a: __m512d, b: __m
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_inserti32x8&ig_expand=3869)
 #[inline]
 #[target_feature(enable = "avx512dq")]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_inserti32x8<const IMM8: i32>(a: __m512i, b: __m256i) -> __m512i {
     unsafe {
@@ -1932,7 +1932,7 @@ pub fn _mm512_inserti32x8<const IMM8: i32>(a: __m512i, b: __m256i) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vinserti32x8, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_inserti32x8<const IMM8: i32>(
     src: __m512i,
@@ -1955,7 +1955,7 @@ pub fn _mm512_mask_inserti32x8<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vinserti32x8, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_inserti32x8<const IMM8: i32>(k: __mmask16, a: __m512i, b: __m256i) -> __m512i {
     unsafe {
@@ -1971,7 +1971,7 @@ pub fn _mm512_maskz_inserti32x8<const IMM8: i32>(k: __mmask16, a: __m512i, b: __
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_inserti64x2&ig_expand=3872)
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_inserti64x2<const IMM8: i32>(a: __m256i, b: __m128i) -> __m256i {
     unsafe {
@@ -1993,7 +1993,7 @@ pub fn _mm256_inserti64x2<const IMM8: i32>(a: __m256i, b: __m128i) -> __m256i {
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
 #[cfg_attr(test, assert_instr(vinserti64x2, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_mask_inserti64x2<const IMM8: i32>(
     src: __m256i,
@@ -2016,7 +2016,7 @@ pub fn _mm256_mask_inserti64x2<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq,avx512vl")]
 #[cfg_attr(test, assert_instr(vinserti64x2, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm256_maskz_inserti64x2<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m128i) -> __m256i {
     unsafe {
@@ -2032,7 +2032,7 @@ pub fn _mm256_maskz_inserti64x2<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_inserti64x2&ig_expand=3875)
 #[inline]
 #[target_feature(enable = "avx512dq")]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_inserti64x2<const IMM8: i32>(a: __m512i, b: __m128i) -> __m512i {
     unsafe {
@@ -2056,7 +2056,7 @@ pub fn _mm512_inserti64x2<const IMM8: i32>(a: __m512i, b: __m128i) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vinserti64x2, IMM8 = 3))]
-#[rustc_legacy_const_generics(4)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_inserti64x2<const IMM8: i32>(
     src: __m512i,
@@ -2079,7 +2079,7 @@ pub fn _mm512_mask_inserti64x2<const IMM8: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vinserti64x2, IMM8 = 3))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_inserti64x2<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m128i) -> __m512i {
     unsafe {
@@ -2104,7 +2104,7 @@ pub fn _mm512_maskz_inserti64x2<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vcvtqq2pd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_cvt_roundepi64_pd<const ROUNDING: i32>(a: __m512i) -> __m512d {
     unsafe {
@@ -2127,7 +2127,7 @@ pub fn _mm512_cvt_roundepi64_pd<const ROUNDING: i32>(a: __m512i) -> __m512d {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vcvtqq2pd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_cvt_roundepi64_pd<const ROUNDING: i32>(
     src: __m512d,
@@ -2155,7 +2155,7 @@ pub fn _mm512_mask_cvt_roundepi64_pd<const ROUNDING: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vcvtqq2pd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_cvt_roundepi64_pd<const ROUNDING: i32>(k: __mmask8, a: __m512i) -> __m512d {
     unsafe {
@@ -2307,7 +2307,7 @@ pub fn _mm512_maskz_cvtepi64_pd(k: __mmask8, a: __m512i) -> __m512d {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vcvtqq2ps, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_cvt_roundepi64_ps<const ROUNDING: i32>(a: __m512i) -> __m256 {
     unsafe {
@@ -2330,7 +2330,7 @@ pub fn _mm512_cvt_roundepi64_ps<const ROUNDING: i32>(a: __m512i) -> __m256 {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vcvtqq2ps, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_mask_cvt_roundepi64_ps<const ROUNDING: i32>(
     src: __m256,
@@ -2358,7 +2358,7 @@ pub fn _mm512_mask_cvt_roundepi64_ps<const ROUNDING: i32>(
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vcvtqq2ps, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _mm512_maskz_cvt_roundepi64_ps<const ROUNDING: i32>(k: __mmask8, a: __m512i) -> __m256 {
     unsafe {
@@ -2504,7 +2504,7 @@ pub fn _mm512_maskz_cvtepi64_ps(k: __mmask8, a: __m512i) -> __m256 {
 #[inline]
 #[target_feature(enable = "avx512dq")]
 #[cfg_attr(test, assert_instr(vcvtuqq2pd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvt_roundepu64_pd(a: __m512i) -> __m512d { unsafe { @@ -2527,7 +2527,7 @@ pub fn _mm512_cvt_roundepu64_pd(a: __m512i) -> __m512d { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtuqq2pd, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvt_roundepu64_pd( src: __m512d, @@ -2555,7 +2555,7 @@ pub fn _mm512_mask_cvt_roundepu64_pd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtuqq2pd, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvt_roundepu64_pd(k: __mmask8, a: __m512i) -> __m512d { unsafe { @@ -2707,7 +2707,7 @@ pub fn _mm512_maskz_cvtepu64_pd(k: __mmask8, a: __m512i) -> __m512d { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtuqq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvt_roundepu64_ps(a: __m512i) -> __m256 { unsafe { @@ -2730,7 +2730,7 @@ pub fn _mm512_cvt_roundepu64_ps(a: __m512i) -> __m256 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtuqq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvt_roundepu64_ps( src: __m256, @@ -2758,7 +2758,7 @@ pub fn _mm512_mask_cvt_roundepu64_ps( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtuqq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvt_roundepu64_ps(k: __mmask8, a: __m512i) -> __m256 { unsafe { @@ -2904,7 +2904,7 @@ pub fn _mm512_maskz_cvtepu64_ps(k: __mmask8, a: __m512i) -> __m256 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtpd2qq, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvt_roundpd_epi64(a: __m512d) -> __m512i { static_assert_rounding!(ROUNDING); @@ -2925,7 +2925,7 @@ pub fn _mm512_cvt_roundpd_epi64(a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtpd2qq, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvt_roundpd_epi64( src: __m512i, @@ -2952,7 +2952,7 @@ pub fn _mm512_mask_cvt_roundpd_epi64( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtpd2qq, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvt_roundpd_epi64(k: __mmask8, a: __m512d) -> __m512i { static_assert_rounding!(ROUNDING); @@ -3090,7 +3090,7 @@ pub fn _mm512_maskz_cvtpd_epi64(k: __mmask8, a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtps2qq, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvt_roundps_epi64(a: __m256) -> __m512i { static_assert_rounding!(ROUNDING); @@ -3111,7 +3111,7 @@ pub fn _mm512_cvt_roundps_epi64(a: __m256) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtps2qq, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvt_roundps_epi64( src: __m512i, @@ -3138,7 +3138,7 @@ pub fn _mm512_mask_cvt_roundps_epi64( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtps2qq, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvt_roundps_epi64(k: __mmask8, a: __m256) -> __m512i { static_assert_rounding!(ROUNDING); @@ -3276,7 +3276,7 @@ pub fn _mm512_maskz_cvtps_epi64(k: __mmask8, a: __m256) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtpd2uqq, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvt_roundpd_epu64(a: __m512d) -> __m512i { static_assert_rounding!(ROUNDING); @@ -3297,7 +3297,7 @@ pub fn _mm512_cvt_roundpd_epu64(a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtpd2uqq, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvt_roundpd_epu64( src: __m512i, @@ -3324,7 +3324,7 @@ pub fn _mm512_mask_cvt_roundpd_epu64( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtpd2uqq, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvt_roundpd_epu64(k: __mmask8, a: __m512d) -> __m512i { static_assert_rounding!(ROUNDING); @@ -3462,7 +3462,7 @@ pub fn _mm512_maskz_cvtpd_epu64(k: __mmask8, a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtps2uqq, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvt_roundps_epu64(a: __m256) -> __m512i { static_assert_rounding!(ROUNDING); @@ -3483,7 +3483,7 @@ pub fn _mm512_cvt_roundps_epu64(a: __m256) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtps2uqq, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvt_roundps_epu64( src: __m512i, @@ -3510,7 +3510,7 @@ pub fn _mm512_mask_cvt_roundps_epu64( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvtps2uqq, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvt_roundps_epu64(k: __mmask8, a: __m256) -> __m512i { static_assert_rounding!(ROUNDING); @@ -3643,7 +3643,7 @@ pub fn _mm512_maskz_cvtps_epu64(k: __mmask8, a: __m256) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttpd2qq, SAE = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvtt_roundpd_epi64(a: __m512d) -> __m512i { static_assert_sae!(SAE); @@ -3658,7 +3658,7 @@ pub fn _mm512_cvtt_roundpd_epi64(a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttpd2qq, SAE = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvtt_roundpd_epi64( src: __m512i, @@ -3679,7 +3679,7 @@ pub fn _mm512_mask_cvtt_roundpd_epi64( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttpd2qq, SAE = 8))] -#[rustc_legacy_const_generics(2)] + 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvtt_roundpd_epi64(k: __mmask8, a: __m512d) -> __m512i { static_assert_sae!(SAE); @@ -3815,7 +3815,7 @@ pub fn _mm512_maskz_cvttpd_epi64(k: __mmask8, a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttps2qq, SAE = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvtt_roundps_epi64(a: __m256) -> __m512i { static_assert_sae!(SAE); @@ -3830,7 +3830,7 @@ pub fn _mm512_cvtt_roundps_epi64(a: __m256) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttps2qq, SAE = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvtt_roundps_epi64( src: __m512i, @@ -3851,7 +3851,7 @@ pub fn _mm512_mask_cvtt_roundps_epi64( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttps2qq, SAE = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvtt_roundps_epi64(k: __mmask8, a: __m256) -> __m512i { static_assert_sae!(SAE); @@ -3987,7 +3987,7 @@ pub fn _mm512_maskz_cvttps_epi64(k: __mmask8, a: __m256) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttpd2uqq, SAE = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvtt_roundpd_epu64(a: __m512d) -> __m512i { static_assert_sae!(SAE); @@ -4002,7 +4002,7 @@ pub fn _mm512_cvtt_roundpd_epu64(a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttpd2uqq, SAE = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvtt_roundpd_epu64( src: __m512i, @@ -4023,7 +4023,7 @@ pub fn _mm512_mask_cvtt_roundpd_epu64( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttpd2uqq, SAE = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvtt_roundpd_epu64(k: __mmask8, a: __m512d) -> __m512i { static_assert_sae!(SAE); @@ -4159,7 +4159,7 @@ pub fn _mm512_maskz_cvttpd_epu64(k: __mmask8, a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttps2uqq, SAE = 8))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_cvtt_roundps_epu64(a: __m256) -> __m512i { static_assert_sae!(SAE); @@ -4174,7 +4174,7 @@ pub fn _mm512_cvtt_roundps_epu64(a: __m256) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttps2uqq, SAE = 8))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_cvtt_roundps_epu64( src: __m512i, @@ -4195,7 +4195,7 @@ pub fn _mm512_mask_cvtt_roundps_epu64( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vcvttps2uqq, SAE = 8))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_cvtt_roundps_epu64(k: __mmask8, a: __m256) -> __m512i { static_assert_sae!(SAE); @@ -4599,7 +4599,7 @@ pub fn _kortestz_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { /// [Intel's 
Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kshiftli_mask8&ig_expand=3945) #[inline] #[target_feature(enable = "avx512dq")] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _kshiftli_mask8(a: __mmask8) -> __mmask8 { a << COUNT @@ -4610,7 +4610,7 @@ pub fn _kshiftli_mask8(a: __mmask8) -> __mmask8 { /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kshiftri_mask8&ig_expand=3949) #[inline] #[target_feature(enable = "avx512dq")] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _kshiftri_mask8(a: __mmask8) -> __mmask8 { a >> COUNT @@ -4870,7 +4870,7 @@ pub fn _mm512_movm_epi64(k: __mmask8) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_range_round_pd(a: __m512d, b: __m512d) -> __m512d { static_assert_uimm_bits!(IMM8, 4); @@ -4891,7 +4891,7 @@ pub fn _mm512_range_round_pd(a: __m512d, b: __m #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_range_round_pd( src: __m512d, @@ -4926,7 +4926,7 @@ pub fn _mm512_mask_range_round_pd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_range_round_pd( k: __mmask8, @@ -4949,7 +4949,7 @@ pub fn _mm512_maskz_range_round_pd( #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_range_pd(a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 4); @@ -4968,7 +4968,7 @@ pub fn _mm_range_pd(a: __m128d, b: __m128d) -> __m128d { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_range_pd( src: __m128d, @@ -5000,7 +5000,7 @@ pub fn _mm_mask_range_pd( #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_range_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 4); @@ -5018,7 +5018,7 @@ pub fn _mm_maskz_range_pd(k: __mmask8, a: __m128d, b: __m128d) #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_range_pd(a: __m256d, b: __m256d) -> __m256d { static_assert_uimm_bits!(IMM8, 4); @@ -5037,7 +5037,7 @@ pub fn _mm256_range_pd(a: __m256d, b: __m256d) -> __m256d { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_mask_range_pd( src: __m256d, @@ -5069,7 +5069,7 @@ pub fn 
_mm256_mask_range_pd( #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_maskz_range_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { static_assert_uimm_bits!(IMM8, 4); @@ -5087,7 +5087,7 @@ pub fn _mm256_maskz_range_pd(k: __mmask8, a: __m256d, b: __m256 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_range_pd(a: __m512d, b: __m512d) -> __m512d { static_assert_uimm_bits!(IMM8, 4); @@ -5106,7 +5106,7 @@ pub fn _mm512_range_pd(a: __m512d, b: __m512d) -> __m512d { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_range_pd( src: __m512d, @@ -5139,7 +5139,7 @@ pub fn _mm512_mask_range_pd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangepd, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_range_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { static_assert_uimm_bits!(IMM8, 4); @@ -5158,7 +5158,7 @@ pub fn _mm512_maskz_range_pd(k: __mmask8, a: __m512d, b: __m512 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_range_round_ps(a: __m512, b: __m512) -> __m512 { static_assert_uimm_bits!(IMM8, 4); @@ -5178,7 +5178,7 @@ pub fn _mm512_range_round_ps(a: __m512, b: __m5 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_range_round_ps( src: __m512, @@ -5212,7 +5212,7 @@ pub fn _mm512_mask_range_round_ps( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_range_round_ps( k: __mmask16, @@ -5235,7 +5235,7 @@ pub fn _mm512_maskz_range_round_ps( #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_range_ps(a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 4); @@ -5254,7 +5254,7 @@ pub fn _mm_range_ps(a: __m128, b: __m128) -> __m128 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_range_ps( src: __m128, @@ -5286,7 +5286,7 @@ pub fn _mm_mask_range_ps( #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_range_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 4); @@ -5304,7 +5304,7 @@ pub fn _mm_maskz_range_ps(k: __mmask8, a: __m128, b: __m128) -> 
#[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_range_ps(a: __m256, b: __m256) -> __m256 { static_assert_uimm_bits!(IMM8, 4); @@ -5323,7 +5323,7 @@ pub fn _mm256_range_ps(a: __m256, b: __m256) -> __m256 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_mask_range_ps( src: __m256, @@ -5355,7 +5355,7 @@ pub fn _mm256_mask_range_ps( #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_maskz_range_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { static_assert_uimm_bits!(IMM8, 4); @@ -5373,7 +5373,7 @@ pub fn _mm256_maskz_range_ps(k: __mmask8, a: __m256, b: __m256) #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_range_ps(a: __m512, b: __m512) -> __m512 { static_assert_uimm_bits!(IMM8, 4); @@ -5392,7 +5392,7 @@ pub fn _mm512_range_ps(a: __m512, b: __m512) -> __m512 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_range_ps( src: __m512, @@ -5425,7 +5425,7 @@ pub fn _mm512_mask_range_ps( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangeps, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_range_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { static_assert_uimm_bits!(IMM8, 4); @@ -5445,7 +5445,7 @@ pub fn _mm512_maskz_range_ps(k: __mmask16, a: __m512, b: __m512 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangesd, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_range_round_sd(a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 4); @@ -5467,7 +5467,7 @@ pub fn _mm_range_round_sd(a: __m128d, b: __m128 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangesd, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_range_round_sd( src: __m128d, @@ -5503,7 +5503,7 @@ pub fn _mm_mask_range_round_sd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangesd, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_range_round_sd( k: __mmask8, @@ -5528,7 +5528,7 @@ pub fn _mm_maskz_range_round_sd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangesd, IMM8 = 5))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_range_sd( src: __m128d, @@ -5562,7 +5562,7 @@ pub fn _mm_mask_range_sd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangesd, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + 
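// How the attribute removed throughout this patch was used: with
// #[rustc_legacy_const_generics(N)], rustc rewrites a C-style call such as
// _mm512_range_pd(a, b, 5) into the const-generic call
// _mm512_range_pd::<5>(a, b); without it, only the turbofish form compiles.
// A minimal usage sketch, assuming an x86_64 target with AVX-512DQ detected
// at runtime, and IMM8 semantics per Intel's VRANGEPD docs (bits 1:0 pick
// min/max/abs-min/abs-max, bits 3:2 pick the sign source):
#[cfg(target_arch = "x86_64")]
fn range_sketch() {
    if is_x86_feature_detected!("avx512dq") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm512_set1_pd(-1.5);
            let b = _mm512_set1_pd(2.5);
            // IMM8 = 0b0101, the value the assert_instr tests above use:
            // per-lane max, sign taken from the selected operand -> 2.5 here.
            let _max = _mm512_range_pd::<0b0101>(a, b);
        }
    }
}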
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_range_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 4); @@ -5582,7 +5582,7 @@ pub fn _mm_maskz_range_sd(k: __mmask8, a: __m128d, b: __m128d) #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangess, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_range_round_ss(a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 4); @@ -5604,7 +5604,7 @@ pub fn _mm_range_round_ss(a: __m128, b: __m128) #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangess, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_range_round_ss( src: __m128, @@ -5640,7 +5640,7 @@ pub fn _mm_mask_range_round_ss( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangess, IMM8 = 5, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_range_round_ss( k: __mmask8, @@ -5665,7 +5665,7 @@ pub fn _mm_maskz_range_round_ss( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangess, IMM8 = 5))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_range_ss( src: __m128, @@ -5699,7 +5699,7 @@ pub fn _mm_mask_range_ss( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vrangess, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_range_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 4); @@ -5724,7 +5724,7 @@ pub fn _mm_maskz_range_ss(k: __mmask8, a: __m128, b: __m128) -> #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(1, 2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_reduce_round_pd(a: __m512d) -> __m512d { static_assert_uimm_bits!(IMM8, 8); @@ -5749,7 +5749,7 @@ pub fn _mm512_reduce_round_pd(a: __m512d) -> __ #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_reduce_round_pd( src: __m512d, @@ -5780,7 +5780,7 @@ pub fn _mm512_mask_reduce_round_pd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_reduce_round_pd( k: __mmask8, @@ -5805,7 +5805,7 @@ pub fn _mm512_maskz_reduce_round_pd( #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_reduce_pd(a: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 8); @@ -5827,7 +5827,7 @@ pub fn _mm_reduce_pd(a: __m128d) -> __m128d { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_reduce_pd(src: __m128d, k: __mmask8, a: __m128d) -> 
__m128d { unsafe { @@ -5851,7 +5851,7 @@ pub fn _mm_mask_reduce_pd(src: __m128d, k: __mmask8, a: __m128d #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_reduce_pd(k: __mmask8, a: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 8); @@ -5872,7 +5872,7 @@ pub fn _mm_maskz_reduce_pd(k: __mmask8, a: __m128d) -> __m128d #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_reduce_pd(a: __m256d) -> __m256d { static_assert_uimm_bits!(IMM8, 8); @@ -5894,7 +5894,7 @@ pub fn _mm256_reduce_pd(a: __m256d) -> __m256d { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_mask_reduce_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { unsafe { @@ -5918,7 +5918,7 @@ pub fn _mm256_mask_reduce_pd(src: __m256d, k: __mmask8, a: __m2 #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_maskz_reduce_pd(k: __mmask8, a: __m256d) -> __m256d { static_assert_uimm_bits!(IMM8, 8); @@ -5939,7 +5939,7 @@ pub fn _mm256_maskz_reduce_pd(k: __mmask8, a: __m256d) -> __m25 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_reduce_pd(a: __m512d) -> __m512d { static_assert_uimm_bits!(IMM8, 8); @@ -5961,7 +5961,7 @@ pub fn _mm512_reduce_pd(a: __m512d) -> __m512d { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_reduce_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { unsafe { @@ -5991,7 +5991,7 @@ pub fn _mm512_mask_reduce_pd(src: __m512d, k: __mmask8, a: __m5 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducepd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_reduce_pd(k: __mmask8, a: __m512d) -> __m512d { static_assert_uimm_bits!(IMM8, 8); @@ -6014,7 +6014,7 @@ pub fn _mm512_maskz_reduce_pd(k: __mmask8, a: __m512d) -> __m51 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(1, 2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_reduce_round_ps(a: __m512) -> __m512 { static_assert_uimm_bits!(IMM8, 8); @@ -6039,7 +6039,7 @@ pub fn _mm512_reduce_round_ps(a: __m512) -> __m #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_reduce_round_ps( src: __m512, @@ -6070,7 +6070,7 @@ pub fn _mm512_mask_reduce_round_ps( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0, SAE = 8))] 
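// A minimal sketch of what vreduce* computes, assuming the semantics in
// Intel's VREDUCEPD documentation: dst = a - round(a * 2^M) / 2^M, with the
// scale M in IMM8[7:4] and the rounding mode in IMM8[1:0]. IMM8 = 0, as in
// the asserts above, therefore strips off the nearest integer:
#[cfg(target_arch = "x86_64")]
fn reduce_sketch() {
    if is_x86_feature_detected!("avx512dq") && is_x86_feature_detected!("avx512vl") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_set1_pd(2.75);
            // 2.75 - 3.0 = -0.25 in both lanes.
            let _frac = _mm_reduce_pd::<0>(a);
        }
    }
}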
-#[rustc_legacy_const_generics(2, 3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_reduce_round_ps( k: __mmask16, @@ -6095,7 +6095,7 @@ pub fn _mm512_maskz_reduce_round_ps( #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_reduce_ps(a: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 8); @@ -6117,7 +6117,7 @@ pub fn _mm_reduce_ps(a: __m128) -> __m128 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_reduce_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { unsafe { @@ -6141,7 +6141,7 @@ pub fn _mm_mask_reduce_ps(src: __m128, k: __mmask8, a: __m128) #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_reduce_ps(k: __mmask8, a: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 8); @@ -6162,7 +6162,7 @@ pub fn _mm_maskz_reduce_ps(k: __mmask8, a: __m128) -> __m128 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_reduce_ps(a: __m256) -> __m256 { static_assert_uimm_bits!(IMM8, 8); @@ -6184,7 +6184,7 @@ pub fn _mm256_reduce_ps(a: __m256) -> __m256 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_mask_reduce_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { unsafe { @@ -6208,7 +6208,7 @@ pub fn _mm256_mask_reduce_ps(src: __m256, k: __mmask8, a: __m25 #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_maskz_reduce_ps(k: __mmask8, a: __m256) -> __m256 { static_assert_uimm_bits!(IMM8, 8); @@ -6229,7 +6229,7 @@ pub fn _mm256_maskz_reduce_ps(k: __mmask8, a: __m256) -> __m256 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_reduce_ps(a: __m512) -> __m512 { static_assert_uimm_bits!(IMM8, 8); @@ -6251,7 +6251,7 @@ pub fn _mm512_reduce_ps(a: __m512) -> __m512 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_reduce_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { unsafe { @@ -6281,7 +6281,7 @@ pub fn _mm512_mask_reduce_ps(src: __m512, k: __mmask16, a: __m5 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreduceps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_maskz_reduce_ps(k: __mmask16, a: __m512) -> __m512 { static_assert_uimm_bits!(IMM8, 8); @@ -6305,7 +6305,7 @@ pub fn _mm512_maskz_reduce_ps(k: __mmask16, a: __m512) -> 
__m51 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducesd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_reduce_round_sd(a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 8); @@ -6331,7 +6331,7 @@ pub fn _mm_reduce_round_sd(a: __m128d, b: __m12 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducesd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_reduce_round_sd( src: __m128d, @@ -6371,7 +6371,7 @@ pub fn _mm_mask_reduce_round_sd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducesd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_reduce_round_sd( k: __mmask8, @@ -6399,7 +6399,7 @@ pub fn _mm_maskz_reduce_round_sd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducesd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_reduce_sd(a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 8); @@ -6422,7 +6422,7 @@ pub fn _mm_reduce_sd(a: __m128d, b: __m128d) -> __m128d { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducesd, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_reduce_sd( src: __m128d, @@ -6459,7 +6459,7 @@ pub fn _mm_mask_reduce_sd( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducesd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_reduce_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(IMM8, 8); @@ -6484,7 +6484,7 @@ pub fn _mm_maskz_reduce_sd(k: __mmask8, a: __m128d, b: __m128d) #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducess, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_reduce_round_ss(a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 8); @@ -6510,7 +6510,7 @@ pub fn _mm_reduce_round_ss(a: __m128, b: __m128 #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducess, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_reduce_round_ss( src: __m128, @@ -6550,7 +6550,7 @@ pub fn _mm_mask_reduce_round_ss( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducess, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_reduce_round_ss( k: __mmask8, @@ -6578,7 +6578,7 @@ pub fn _mm_maskz_reduce_round_ss( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducess, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_reduce_ss(a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 8); @@ -6601,7 +6601,7 @@ pub fn _mm_reduce_ss(a: __m128, b: __m128) -> __m128 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducess, IMM8 = 0))] 
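// The scalar *_round_* forms take a second const, SAE; the SAE = 8 in the
// asserts above is _MM_FROUND_NO_EXC ("suppress all exceptions"). A hedged
// usage sketch, assuming AVX-512DQ is available at runtime:
#[cfg(target_arch = "x86_64")]
fn reduce_round_sd_sketch() {
    if is_x86_feature_detected!("avx512dq") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_set1_pd(1.0);
            let b = _mm_set1_pd(2.75);
            // Lower lane: fractional remainder of b (-0.25); the upper lane
            // is copied from a, as with other sd intrinsics.
            let _r = _mm_reduce_round_sd::<0, _MM_FROUND_NO_EXC>(a, b);
        }
    }
}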
-#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_reduce_ss( src: __m128, @@ -6638,7 +6638,7 @@ pub fn _mm_mask_reduce_ss( #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vreducess, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_reduce_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 8); @@ -6664,7 +6664,7 @@ pub fn _mm_maskz_reduce_ss(k: __mmask8, a: __m128, b: __m128) - #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vfpclasspd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_fpclass_pd_mask(a: __m128d) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); @@ -6689,7 +6689,7 @@ pub fn _mm_fpclass_pd_mask(a: __m128d) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vfpclasspd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_fpclass_pd_mask(k1: __mmask8, a: __m128d) -> __mmask8 { unsafe { @@ -6715,7 +6715,7 @@ pub fn _mm_mask_fpclass_pd_mask(k1: __mmask8, a: __m128d) -> __ #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vfpclasspd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_fpclass_pd_mask(a: __m256d) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); @@ -6740,7 +6740,7 @@ pub fn _mm256_fpclass_pd_mask(a: __m256d) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vfpclasspd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_mask_fpclass_pd_mask(k1: __mmask8, a: __m256d) -> __mmask8 { unsafe { @@ -6766,7 +6766,7 @@ pub fn _mm256_mask_fpclass_pd_mask(k1: __mmask8, a: __m256d) -> #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vfpclasspd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_fpclass_pd_mask(a: __m512d) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); @@ -6791,7 +6791,7 @@ pub fn _mm512_fpclass_pd_mask(a: __m512d) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vfpclasspd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_fpclass_pd_mask(k1: __mmask8, a: __m512d) -> __mmask8 { unsafe { @@ -6817,7 +6817,7 @@ pub fn _mm512_mask_fpclass_pd_mask(k1: __mmask8, a: __m512d) -> #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vfpclassps, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_fpclass_ps_mask(a: __m128) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); @@ -6842,7 +6842,7 @@ pub fn _mm_fpclass_ps_mask(a: __m128) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vfpclassps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_fpclass_ps_mask(k1: __mmask8, a: __m128) -> __mmask8 { unsafe { @@ -6868,7 +6868,7 @@ pub fn _mm_mask_fpclass_ps_mask(k1: __mmask8, a: __m128) -> __m 
#[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vfpclassps, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_fpclass_ps_mask(a: __m256) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); @@ -6893,7 +6893,7 @@ pub fn _mm256_fpclass_ps_mask(a: __m256) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vfpclassps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_mask_fpclass_ps_mask(k1: __mmask8, a: __m256) -> __mmask8 { unsafe { @@ -6919,7 +6919,7 @@ pub fn _mm256_mask_fpclass_ps_mask(k1: __mmask8, a: __m256) -> #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vfpclassps, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_fpclass_ps_mask(a: __m512) -> __mmask16 { static_assert_uimm_bits!(IMM8, 8); @@ -6944,7 +6944,7 @@ pub fn _mm512_fpclass_ps_mask(a: __m512) -> __mmask16 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vfpclassps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_fpclass_ps_mask(k1: __mmask16, a: __m512) -> __mmask16 { unsafe { @@ -6970,7 +6970,7 @@ pub fn _mm512_mask_fpclass_ps_mask(k1: __mmask16, a: __m512) -> #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vfpclasssd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_fpclass_sd_mask(a: __m128d) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); @@ -6995,7 +6995,7 @@ pub fn _mm_fpclass_sd_mask(a: __m128d) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vfpclasssd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_fpclass_sd_mask(k1: __mmask8, a: __m128d) -> __mmask8 { unsafe { @@ -7021,7 +7021,7 @@ pub fn _mm_mask_fpclass_sd_mask(k1: __mmask8, a: __m128d) -> __ #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vfpclassss, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_fpclass_ss_mask(a: __m128) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); @@ -7046,7 +7046,7 @@ pub fn _mm_fpclass_ss_mask(a: __m128) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vfpclassss, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_fpclass_ss_mask(k1: __mmask8, a: __m128) -> __mmask8 { unsafe { diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs index d53f83c0a10bc..a0c036a54190a 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -5534,7 +5534,7 @@ pub fn _mm_maskz_getexp_pd(k: __mmask8, a: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_roundscale_ps(a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5563,7 +5563,7 @@ pub fn _mm512_roundscale_ps(a: 
__m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_roundscale_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5587,7 +5587,7 @@ pub fn _mm512_mask_roundscale_ps(src: __m512, k: __mmask16, a: #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_roundscale_ps(k: __mmask16, a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5610,7 +5610,7 @@ pub fn _mm512_maskz_roundscale_ps(k: __mmask16, a: __m512) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 250))] -#[rustc_legacy_const_generics(1)] + pub fn _mm256_roundscale_ps(a: __m256) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5633,7 +5633,7 @@ pub fn _mm256_roundscale_ps(a: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_roundscale_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5657,7 +5657,7 @@ pub fn _mm256_mask_roundscale_ps(src: __m256, k: __mmask8, a: _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_roundscale_ps(k: __mmask8, a: __m256) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5680,7 +5680,7 @@ pub fn _mm256_maskz_roundscale_ps(k: __mmask8, a: __m256) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 250))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_roundscale_ps(a: __m128) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5703,7 +5703,7 @@ pub fn _mm_roundscale_ps(a: __m128) -> __m128 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_roundscale_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5727,7 +5727,7 @@ pub fn _mm_mask_roundscale_ps(src: __m128, k: __mmask8, a: __m1 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_roundscale_ps(k: __mmask8, a: __m128) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5750,7 +5750,7 @@ pub fn _mm_maskz_roundscale_ps(k: __mmask8, a: __m128) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_roundscale_pd(a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5773,7 +5773,7 @@ pub fn _mm512_roundscale_pd(a: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] 
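// For vrndscale*, IMM8 packs a scale M into bits 7:4 and the rounding
// control into the low nibble, rounding to 2^-M precision (assumption: per
// Intel's VRNDSCALEPS docs; the 250 = 0xFA in the _mm256_/_mm_roundscale_ps
// asserts is M = 15, round-up, precision exception suppressed). A sketch,
// assuming avx512f + avx512vl at runtime:
#[cfg(target_arch = "x86_64")]
fn roundscale_sketch() {
    if is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl") {
        use std::arch::x86_64::*;
        unsafe {
            let x = _mm_set1_ps(1.3);
            let _int = _mm_roundscale_ps::<0x00>(x); // nearest integer: 1.0
            let _half = _mm_roundscale_ps::<0x10>(x); // nearest 2^-1: 1.5
        }
    }
}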
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_roundscale_pd( src: __m512d, k: __mmask8, @@ -5801,7 +5801,7 @@ pub fn _mm512_mask_roundscale_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_roundscale_pd(k: __mmask8, a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5824,7 +5824,7 @@ pub fn _mm512_maskz_roundscale_pd(k: __mmask8, a: __m512d) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + pub fn _mm256_roundscale_pd(a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5847,7 +5847,7 @@ pub fn _mm256_roundscale_pd(a: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_roundscale_pd( src: __m256d, k: __mmask8, @@ -5875,7 +5875,7 @@ pub fn _mm256_mask_roundscale_pd( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_roundscale_pd(k: __mmask8, a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5898,7 +5898,7 @@ pub fn _mm256_maskz_roundscale_pd(k: __mmask8, a: __m256d) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_roundscale_pd(a: __m128d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5921,7 +5921,7 @@ pub fn _mm_roundscale_pd(a: __m128d) -> __m128d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_roundscale_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -5945,7 +5945,7 @@ pub fn _mm_mask_roundscale_pd(src: __m128d, k: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_roundscale_pd(k: __mmask8, a: __m128d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6236,7 +6236,7 @@ pub fn _mm_maskz_scalef_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fixupimm_ps(a: __m512, b: __m512, c: __m512i) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6255,7 +6255,7 @@ pub fn _mm512_fixupimm_ps(a: __m512, b: __m512, c: __m512i) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fixupimm_ps( a: __m512, k: __mmask16, 
@@ -6279,7 +6279,7 @@ pub fn _mm512_mask_fixupimm_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fixupimm_ps( k: __mmask16, a: __m512, @@ -6303,7 +6303,7 @@ pub fn _mm512_maskz_fixupimm_ps( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_fixupimm_ps(a: __m256, b: __m256, c: __m256i) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6322,7 +6322,7 @@ pub fn _mm256_fixupimm_ps(a: __m256, b: __m256, c: __m256i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_mask_fixupimm_ps( a: __m256, k: __mmask8, @@ -6346,7 +6346,7 @@ pub fn _mm256_mask_fixupimm_ps( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_maskz_fixupimm_ps( k: __mmask8, a: __m256, @@ -6370,7 +6370,7 @@ pub fn _mm256_maskz_fixupimm_ps( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_fixupimm_ps(a: __m128, b: __m128, c: __m128i) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6389,7 +6389,7 @@ pub fn _mm_fixupimm_ps(a: __m128, b: __m128, c: __m128i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_mask_fixupimm_ps( a: __m128, k: __mmask8, @@ -6413,7 +6413,7 @@ pub fn _mm_mask_fixupimm_ps( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_maskz_fixupimm_ps( k: __mmask8, a: __m128, @@ -6437,7 +6437,7 @@ pub fn _mm_maskz_fixupimm_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fixupimm_pd(a: __m512d, b: __m512d, c: __m512i) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6456,7 +6456,7 @@ pub fn _mm512_fixupimm_pd(a: __m512d, b: __m512d, c: __m512i) - #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fixupimm_pd( a: __m512d, k: __mmask8, @@ -6480,7 +6480,7 @@ pub fn _mm512_mask_fixupimm_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fixupimm_pd( k: __mmask8, a: __m512d, @@ -6504,7 +6504,7 @@ pub fn _mm512_maskz_fixupimm_pd( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] 
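// A heavily hedged sketch of vfixupimm*: each element of b is classified
// (zero, one, NaN, infinity, ...) and the matching 4-bit field of the
// 32-bit per-element table in c selects a replacement; IMM8 only tunes
// exception reporting, which is why the asserts above all use IMM8 = 0
// (assumption: encoding per Intel's VFIXUPIMMPD documentation).
#[cfg(target_arch = "x86_64")]
fn fixupimm_sketch() {
    if is_x86_feature_detected!("avx512f") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm512_set1_pd(1.0);
            let b = _mm512_set1_pd(f64::NAN);
            // An all-zero table makes every token response "keep dest", so
            // the result should simply be a; real tables pack one response
            // per input class into c.
            let _r = _mm512_fixupimm_pd::<0>(a, b, _mm512_setzero_si512());
        }
    }
}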
-#[rustc_legacy_const_generics(3)] + pub fn _mm256_fixupimm_pd(a: __m256d, b: __m256d, c: __m256i) -> __m256d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6523,7 +6523,7 @@ pub fn _mm256_fixupimm_pd(a: __m256d, b: __m256d, c: __m256i) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_mask_fixupimm_pd( a: __m256d, k: __mmask8, @@ -6547,7 +6547,7 @@ pub fn _mm256_mask_fixupimm_pd( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_maskz_fixupimm_pd( k: __mmask8, a: __m256d, @@ -6571,7 +6571,7 @@ pub fn _mm256_maskz_fixupimm_pd( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_fixupimm_pd(a: __m128d, b: __m128d, c: __m128i) -> __m128d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6590,7 +6590,7 @@ pub fn _mm_fixupimm_pd(a: __m128d, b: __m128d, c: __m128i) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_mask_fixupimm_pd( a: __m128d, k: __mmask8, @@ -6614,7 +6614,7 @@ pub fn _mm_mask_fixupimm_pd( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_maskz_fixupimm_pd( k: __mmask8, a: __m128d, @@ -6638,7 +6638,7 @@ pub fn _mm_maskz_fixupimm_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_ternarylogic_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6657,7 +6657,7 @@ pub fn _mm512_ternarylogic_epi32(a: __m512i, b: __m512i, c: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_ternarylogic_epi32( src: __m512i, k: __mmask16, @@ -6681,7 +6681,7 @@ pub fn _mm512_mask_ternarylogic_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_ternarylogic_epi32( k: __mmask16, a: __m512i, @@ -6705,7 +6705,7 @@ pub fn _mm512_maskz_ternarylogic_epi32( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_ternarylogic_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6724,7 +6724,7 @@ pub fn _mm256_ternarylogic_epi32(a: __m256i, b: __m256i, c: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_mask_ternarylogic_epi32( src: 
__m256i, k: __mmask8, @@ -6748,7 +6748,7 @@ pub fn _mm256_mask_ternarylogic_epi32( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_maskz_ternarylogic_epi32( k: __mmask8, a: __m256i, @@ -6772,7 +6772,7 @@ pub fn _mm256_maskz_ternarylogic_epi32( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_ternarylogic_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6791,7 +6791,7 @@ pub fn _mm_ternarylogic_epi32(a: __m128i, b: __m128i, c: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_mask_ternarylogic_epi32( src: __m128i, k: __mmask8, @@ -6815,7 +6815,7 @@ pub fn _mm_mask_ternarylogic_epi32( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_maskz_ternarylogic_epi32( k: __mmask8, a: __m128i, @@ -6839,7 +6839,7 @@ pub fn _mm_maskz_ternarylogic_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_ternarylogic_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6858,7 +6858,7 @@ pub fn _mm512_ternarylogic_epi64(a: __m512i, b: __m512i, c: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_ternarylogic_epi64( src: __m512i, k: __mmask8, @@ -6882,7 +6882,7 @@ pub fn _mm512_mask_ternarylogic_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_ternarylogic_epi64( k: __mmask8, a: __m512i, @@ -6906,7 +6906,7 @@ pub fn _mm512_maskz_ternarylogic_epi64( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_ternarylogic_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6925,7 +6925,7 @@ pub fn _mm256_ternarylogic_epi64(a: __m256i, b: __m256i, c: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_mask_ternarylogic_epi64( src: __m256i, k: __mmask8, @@ -6949,7 +6949,7 @@ pub fn _mm256_mask_ternarylogic_epi64( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_maskz_ternarylogic_epi64( k: __mmask8, a: __m256i, @@ -6973,7 +6973,7 @@ pub fn _mm256_maskz_ternarylogic_epi64( 
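// For vpternlog*, IMM8 is literally the truth table of a three-input boolean
// function: at each bit position, result = IMM8[(a << 2) | (b << 1) | c].
// The 114 = 0x72 above is just the table the tests assert against; 0xE8
// (bits 3, 5, 6, 7 set) encodes bitwise majority, for example:
#[cfg(target_arch = "x86_64")]
fn ternarylogic_sketch() {
    if is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_set1_epi32(0b1100);
            let b = _mm_set1_epi32(0b1010);
            let c = _mm_set1_epi32(0b1001);
            // Per-bit majority of (a, b, c): 0b1000 in every lane.
            let _maj = _mm_ternarylogic_epi32::<0xE8>(a, b, c);
        }
    }
}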
#[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_ternarylogic_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -6992,7 +6992,7 @@ pub fn _mm_ternarylogic_epi64(a: __m128i, b: __m128i, c: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_mask_ternarylogic_epi64( src: __m128i, k: __mmask8, @@ -7016,7 +7016,7 @@ pub fn _mm_mask_ternarylogic_epi64( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_maskz_ternarylogic_epi64( k: __mmask8, a: __m128i, @@ -7049,7 +7049,7 @@ pub fn _mm_maskz_ternarylogic_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] -#[rustc_legacy_const_generics(1, 2)] + pub fn _mm512_getmant_ps( a: __m512, ) -> __m512 { @@ -7085,7 +7085,7 @@ pub fn _mm512_getmant_ps( a: __m256, ) -> __m256 { @@ -7188,7 +7188,7 @@ pub fn _mm256_getmant_ps( a: __m128, ) -> __m128 { @@ -7285,7 +7285,7 @@ pub fn _mm_getmant_ps( a: __m512d, ) -> __m512d { @@ -7389,7 +7389,7 @@ pub fn _mm512_getmant_pd( a: __m256d, ) -> __m256d { @@ -7492,7 +7492,7 @@ pub fn _mm256_getmant_pd( a: __m128d, ) -> __m128d { @@ -7589,7 +7589,7 @@ pub fn _mm_getmant_pd(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -7680,7 +7680,7 @@ pub fn _mm512_add_round_ps(a: __m512, b: __m512) -> __m512 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddps, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_add_round_ps( src: __m512, k: __mmask16, @@ -7710,7 +7710,7 @@ pub fn _mm512_mask_add_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_add_round_ps( k: __mmask16, a: __m512, @@ -7739,7 +7739,7 @@ pub fn _mm512_maskz_add_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_add_round_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_rounding!(ROUNDING); @@ -7764,7 +7764,7 @@ pub fn _mm512_add_round_pd(a: __m512d, b: __m512d) -> __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_add_round_pd( src: __m512d, k: __mmask8, @@ -7794,7 +7794,7 @@ pub fn _mm512_mask_add_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_add_round_pd( k: __mmask8, a: __m512d, @@ -7823,7 +7823,7 @@ pub fn _mm512_maskz_add_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
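// Every *_round arithmetic intrinsic takes ROUNDING as a const parameter;
// static_assert_rounding! accepts one of the four rounding modes OR'ed with
// _MM_FROUND_NO_EXC, or _MM_FROUND_CUR_DIRECTION. The ROUNDING = 8 in the
// asserts is _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
#[cfg(target_arch = "x86_64")]
fn add_round_sketch() {
    if is_x86_feature_detected!("avx512f") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm512_set1_ps(1.0);
            let b = _mm512_set1_ps(2.0);
            let _sum =
                _mm512_add_round_ps::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
        }
    }
}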
#[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_sub_round_ps(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -7848,7 +7848,7 @@ pub fn _mm512_sub_round_ps(a: __m512, b: __m512) -> __m512 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_sub_round_ps( src: __m512, k: __mmask16, @@ -7878,7 +7878,7 @@ pub fn _mm512_mask_sub_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_sub_round_ps( k: __mmask16, a: __m512, @@ -7907,7 +7907,7 @@ pub fn _mm512_maskz_sub_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_sub_round_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_rounding!(ROUNDING); @@ -7932,7 +7932,7 @@ pub fn _mm512_sub_round_pd(a: __m512d, b: __m512d) -> __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_sub_round_pd( src: __m512d, k: __mmask8, @@ -7962,7 +7962,7 @@ pub fn _mm512_mask_sub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_sub_round_pd( k: __mmask8, a: __m512d, @@ -7991,7 +7991,7 @@ pub fn _mm512_maskz_sub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_mul_round_ps(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -8016,7 +8016,7 @@ pub fn _mm512_mul_round_ps(a: __m512, b: __m512) -> __m512 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_mul_round_ps( src: __m512, k: __mmask16, @@ -8046,7 +8046,7 @@ pub fn _mm512_mask_mul_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_mul_round_ps( k: __mmask16, a: __m512, @@ -8075,7 +8075,7 @@ pub fn _mm512_maskz_mul_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_mul_round_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_rounding!(ROUNDING); @@ -8100,7 +8100,7 @@ pub fn _mm512_mul_round_pd(a: __m512d, b: __m512d) -> __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_mul_round_pd( src: __m512d, k: __mmask8, @@ -8130,7 +8130,7 @@ pub fn _mm512_mask_mul_round_pd( #[target_feature(enable = 
"avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_mul_round_pd( k: __mmask8, a: __m512d, @@ -8159,7 +8159,7 @@ pub fn _mm512_maskz_mul_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_div_round_ps(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -8184,7 +8184,7 @@ pub fn _mm512_div_round_ps(a: __m512, b: __m512) -> __m512 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_div_round_ps( src: __m512, k: __mmask16, @@ -8214,7 +8214,7 @@ pub fn _mm512_mask_div_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_div_round_ps( k: __mmask16, a: __m512, @@ -8243,7 +8243,7 @@ pub fn _mm512_maskz_div_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_div_round_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_rounding!(ROUNDING); @@ -8268,7 +8268,7 @@ pub fn _mm512_div_round_pd(a: __m512d, b: __m512d) -> __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_div_round_pd( src: __m512d, k: __mmask8, @@ -8298,7 +8298,7 @@ pub fn _mm512_mask_div_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_div_round_pd( k: __mmask8, a: __m512d, @@ -8327,7 +8327,7 @@ pub fn _mm512_maskz_div_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_sqrt_round_ps(a: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -8351,7 +8351,7 @@ pub fn _mm512_sqrt_round_ps(a: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_sqrt_round_ps( src: __m512, k: __mmask16, @@ -8379,7 +8379,7 @@ pub fn _mm512_mask_sqrt_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_sqrt_round_ps(k: __mmask16, a: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -8403,7 +8403,7 @@ pub fn _mm512_maskz_sqrt_round_ps(k: __mmask16, a: __m512) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_sqrt_round_pd(a: __m512d) -> __m512d { unsafe { 
        static_assert_rounding!(ROUNDING);
@@ -8427,7 +8427,7 @@ pub fn _mm512_sqrt_round_pd<const ROUNDING: i32>(a: __m512d) -> __m512d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_sqrt_round_pd<const ROUNDING: i32>(
     src: __m512d,
     k: __mmask8,
@@ -8455,7 +8455,7 @@ pub fn _mm512_mask_sqrt_round_pd<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_sqrt_round_pd<const ROUNDING: i32>(k: __mmask8, a: __m512d) -> __m512d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -8479,7 +8479,7 @@ pub fn _mm512_maskz_sqrt_round_pd<const ROUNDING: i32>(k: __mmask8, a: __m512d)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_fmadd_round_ps<const ROUNDING: i32>(a: __m512, b: __m512, c: __m512) -> __m512 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -8501,7 +8501,7 @@ pub fn _mm512_fmadd_round_ps<const ROUNDING: i32>(a: __m512, b: __m512, c: __m51
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_fmadd_round_ps<const ROUNDING: i32>(
     a: __m512,
     k: __mmask16,
@@ -8528,7 +8528,7 @@ pub fn _mm512_mask_fmadd_round_ps<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_maskz_fmadd_round_ps<const ROUNDING: i32>(
     k: __mmask16,
     a: __m512,
@@ -8555,7 +8555,7 @@ pub fn _mm512_maskz_fmadd_round_ps<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask3_fmadd_round_ps<const ROUNDING: i32>(
     a: __m512,
     b: __m512,
@@ -8582,7 +8582,7 @@ pub fn _mm512_mask3_fmadd_round_ps<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_fmadd_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d, c: __m512d) -> __m512d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -8604,7 +8604,7 @@ pub fn _mm512_fmadd_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d, c: __m
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_fmadd_round_pd<const ROUNDING: i32>(
     a: __m512d,
     k: __mmask8,
@@ -8631,7 +8631,7 @@ pub fn _mm512_mask_fmadd_round_pd<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_maskz_fmadd_round_pd<const ROUNDING: i32>(
     k: __mmask8,
     a: __m512d,
@@ -8658,7 +8658,7 @@ pub fn _mm512_maskz_fmadd_round_pd<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask3_fmadd_round_pd<const ROUNDING: i32>(
     a: __m512d,
     b: __m512d,
@@ -8685,7 +8685,7 @@ pub fn _mm512_mask3_fmadd_round_pd<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_fmsub_round_ps<const ROUNDING: i32>(a: __m512, b: __m512, c: __m512) -> __m512 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -8707,7 +8707,7 @@ pub fn _mm512_fmsub_round_ps<const ROUNDING: i32>(a: __m512, b: __m512, c: __m51
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_fmsub_round_ps<const ROUNDING: i32>(
     a: __m512,
     k: __mmask16,
@@ -8735,7 +8735,7 @@ pub fn _mm512_mask_fmsub_round_ps<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_maskz_fmsub_round_ps<const ROUNDING: i32>(
     k: __mmask16,
     a: __m512,
@@ -8763,7 +8763,7 @@ pub fn _mm512_maskz_fmsub_round_ps<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask3_fmsub_round_ps<const ROUNDING: i32>(
     a: __m512,
     b: __m512,
@@ -8791,7 +8791,7 @@ pub fn _mm512_mask3_fmsub_round_ps<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_fmsub_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d, c: __m512d) -> __m512d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -8813,7 +8813,7 @@ pub fn _mm512_fmsub_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d, c: __m
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_fmsub_round_pd<const ROUNDING: i32>(
     a: __m512d,
     k: __mmask8,
@@ -8841,7 +8841,7 @@ pub fn _mm512_mask_fmsub_round_pd<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_maskz_fmsub_round_pd<const ROUNDING: i32>(
     k: __mmask8,
     a: __m512d,
@@ -8869,7 +8869,7 @@ pub fn _mm512_maskz_fmsub_round_pd<const ROUNDING: i32>(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd.
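Reviewer note, not part of the patch: with `#[rustc_legacy_const_generics(..)]` gone, the rounding mode of these FMA intrinsics can no longer be written as a trailing value argument in the C style; callers spell it as an explicit const generic. A minimal sketch, assuming AVX-512F is available; the helper name `fma_round_to_zero` is invented for illustration:

```rust
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

// Invented helper, for illustration only.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn fma_round_to_zero(a: __m512d, b: __m512d, c: __m512d) -> __m512d {
    // With the legacy attribute the call could be written C-style:
    //     _mm512_fmadd_round_pd(a, b, c, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)
    // Now the rounding mode goes in the turbofish. static_assert_rounding!
    // accepts a direction ORed with _MM_FROUND_NO_EXC, or
    // _MM_FROUND_CUR_DIRECTION on its own.
    _mm512_fmadd_round_pd::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, b, c)
}
```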
clang generates fmadd, gcc generates fmsub -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fmsub_round_pd( a: __m512d, b: __m512d, @@ -8897,7 +8897,7 @@ pub fn _mm512_mask3_fmsub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fmaddsub_round_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -8919,7 +8919,7 @@ pub fn _mm512_fmaddsub_round_ps(a: __m512, b: __m512, c: __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fmaddsub_round_ps( a: __m512, k: __mmask16, @@ -8946,7 +8946,7 @@ pub fn _mm512_mask_fmaddsub_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fmaddsub_round_ps( k: __mmask16, a: __m512, @@ -8973,7 +8973,7 @@ pub fn _mm512_maskz_fmaddsub_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fmaddsub_round_ps( a: __m512, b: __m512, @@ -9000,7 +9000,7 @@ pub fn _mm512_mask3_fmaddsub_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fmaddsub_round_pd( a: __m512d, b: __m512d, @@ -9026,7 +9026,7 @@ pub fn _mm512_fmaddsub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fmaddsub_round_pd( a: __m512d, k: __mmask8, @@ -9053,7 +9053,7 @@ pub fn _mm512_mask_fmaddsub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fmaddsub_round_pd( k: __mmask8, a: __m512d, @@ -9080,7 +9080,7 @@ pub fn _mm512_maskz_fmaddsub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fmaddsub_round_pd( a: __m512d, b: __m512d, @@ -9107,7 +9107,7 @@ pub fn _mm512_mask3_fmaddsub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fmsubadd_round_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { 
static_assert_rounding!(ROUNDING); @@ -9129,7 +9129,7 @@ pub fn _mm512_fmsubadd_round_ps(a: __m512, b: __m512, c: __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fmsubadd_round_ps( a: __m512, k: __mmask16, @@ -9157,7 +9157,7 @@ pub fn _mm512_mask_fmsubadd_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fmsubadd_round_ps( k: __mmask16, a: __m512, @@ -9185,7 +9185,7 @@ pub fn _mm512_maskz_fmsubadd_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fmsubadd_round_ps( a: __m512, b: __m512, @@ -9213,7 +9213,7 @@ pub fn _mm512_mask3_fmsubadd_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fmsubadd_round_pd( a: __m512d, b: __m512d, @@ -9239,7 +9239,7 @@ pub fn _mm512_fmsubadd_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fmsubadd_round_pd( a: __m512d, k: __mmask8, @@ -9267,7 +9267,7 @@ pub fn _mm512_mask_fmsubadd_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fmsubadd_round_pd( k: __mmask8, a: __m512d, @@ -9295,7 +9295,7 @@ pub fn _mm512_maskz_fmsubadd_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fmsubadd_round_pd( a: __m512d, b: __m512d, @@ -9323,7 +9323,7 @@ pub fn _mm512_mask3_fmsubadd_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fnmadd_round_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -9345,7 +9345,7 @@ pub fn _mm512_fnmadd_round_ps(a: __m512, b: __m512, c: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fnmadd_round_ps( a: __m512, k: __mmask16, @@ -9373,7 +9373,7 @@ pub fn _mm512_mask_fnmadd_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = 
"1.89")] #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fnmadd_round_ps( k: __mmask16, a: __m512, @@ -9401,7 +9401,7 @@ pub fn _mm512_maskz_fnmadd_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fnmadd_round_ps( a: __m512, b: __m512, @@ -9429,7 +9429,7 @@ pub fn _mm512_mask3_fnmadd_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fnmadd_round_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { static_assert_rounding!(ROUNDING); @@ -9451,7 +9451,7 @@ pub fn _mm512_fnmadd_round_pd(a: __m512d, b: __m512d, c: __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fnmadd_round_pd( a: __m512d, k: __mmask8, @@ -9479,7 +9479,7 @@ pub fn _mm512_mask_fnmadd_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fnmadd_round_pd( k: __mmask8, a: __m512d, @@ -9507,7 +9507,7 @@ pub fn _mm512_maskz_fnmadd_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fnmadd_round_pd( a: __m512d, b: __m512d, @@ -9535,7 +9535,7 @@ pub fn _mm512_mask3_fnmadd_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fnmsub_round_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -9557,7 +9557,7 @@ pub fn _mm512_fnmsub_round_ps(a: __m512, b: __m512, c: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fnmsub_round_ps( a: __m512, k: __mmask16, @@ -9585,7 +9585,7 @@ pub fn _mm512_mask_fnmsub_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fnmsub_round_ps( k: __mmask16, a: __m512, @@ -9613,7 +9613,7 @@ pub fn _mm512_maskz_fnmsub_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fnmsub_round_ps( a: 
__m512, b: __m512, @@ -9641,7 +9641,7 @@ pub fn _mm512_mask3_fnmsub_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -#[rustc_legacy_const_generics(3)] + pub fn _mm512_fnmsub_round_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { static_assert_rounding!(ROUNDING); @@ -9663,7 +9663,7 @@ pub fn _mm512_fnmsub_round_pd(a: __m512d, b: __m512d, c: __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_fnmsub_round_pd( a: __m512d, k: __mmask8, @@ -9691,7 +9691,7 @@ pub fn _mm512_mask_fnmsub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_maskz_fnmsub_round_pd( k: __mmask8, a: __m512d, @@ -9719,7 +9719,7 @@ pub fn _mm512_maskz_fnmsub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask3_fnmsub_round_pd( a: __m512d, b: __m512d, @@ -9741,7 +9741,7 @@ pub fn _mm512_mask3_fnmsub_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmaxps, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_max_round_ps(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -9760,7 +9760,7 @@ pub fn _mm512_max_round_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmaxps, SAE = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_max_round_ps( src: __m512, k: __mmask16, @@ -9784,7 +9784,7 @@ pub fn _mm512_mask_max_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmaxps, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_max_round_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -9803,7 +9803,7 @@ pub fn _mm512_maskz_max_round_ps(k: __mmask16, a: __m512, b: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_max_round_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_sae!(SAE); @@ -9822,7 +9822,7 @@ pub fn _mm512_max_round_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_max_round_pd( src: __m512d, k: __mmask8, @@ -9846,7 +9846,7 @@ pub fn _mm512_mask_max_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_max_round_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { 
unsafe { static_assert_sae!(SAE); @@ -9865,7 +9865,7 @@ pub fn _mm512_maskz_max_round_pd(k: __mmask8, a: __m512d, b: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vminps, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_min_round_ps(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -9884,7 +9884,7 @@ pub fn _mm512_min_round_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vminps, SAE = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_min_round_ps( src: __m512, k: __mmask16, @@ -9908,7 +9908,7 @@ pub fn _mm512_mask_min_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vminps, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_min_round_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -9927,7 +9927,7 @@ pub fn _mm512_maskz_min_round_ps(k: __mmask16, a: __m512, b: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vminpd, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_min_round_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_sae!(SAE); @@ -9946,7 +9946,7 @@ pub fn _mm512_min_round_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vminpd, SAE = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_min_round_pd( src: __m512d, k: __mmask8, @@ -9970,7 +9970,7 @@ pub fn _mm512_mask_min_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vminpd, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_min_round_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_sae!(SAE); @@ -9989,7 +9989,7 @@ pub fn _mm512_maskz_min_round_pd(k: __mmask8, a: __m512d, b: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_getexp_round_ps(a: __m512) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -10007,7 +10007,7 @@ pub fn _mm512_getexp_round_ps(a: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_getexp_round_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -10026,7 +10026,7 @@ pub fn _mm512_mask_getexp_round_ps(src: __m512, k: __mmask16, a: #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_getexp_round_ps(k: __mmask16, a: __m512) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -10044,7 +10044,7 @@ pub fn _mm512_maskz_getexp_round_ps(k: __mmask16, a: __m512) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_getexp_round_pd(a: 
__m512d) -> __m512d { unsafe { static_assert_sae!(SAE); @@ -10062,7 +10062,7 @@ pub fn _mm512_getexp_round_pd(a: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_getexp_round_pd( src: __m512d, k: __mmask8, @@ -10085,7 +10085,7 @@ pub fn _mm512_mask_getexp_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_getexp_round_pd(k: __mmask8, a: __m512d) -> __m512d { unsafe { static_assert_sae!(SAE); @@ -10109,7 +10109,7 @@ pub fn _mm512_maskz_getexp_round_pd(k: __mmask8, a: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(1, 2)] + pub fn _mm512_roundscale_round_ps(a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -10134,7 +10134,7 @@ pub fn _mm512_roundscale_round_ps(a: __m512) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + pub fn _mm512_mask_roundscale_round_ps( src: __m512, k: __mmask16, @@ -10164,7 +10164,7 @@ pub fn _mm512_mask_roundscale_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + pub fn _mm512_maskz_roundscale_round_ps( k: __mmask16, a: __m512, @@ -10192,7 +10192,7 @@ pub fn _mm512_maskz_roundscale_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(1, 2)] + pub fn _mm512_roundscale_round_pd(a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -10217,7 +10217,7 @@ pub fn _mm512_roundscale_round_pd(a: __m512d) - #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + pub fn _mm512_mask_roundscale_round_pd( src: __m512d, k: __mmask8, @@ -10247,7 +10247,7 @@ pub fn _mm512_mask_roundscale_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(2, 3)] + pub fn _mm512_maskz_roundscale_round_pd( k: __mmask8, a: __m512d, @@ -10275,7 +10275,7 @@ pub fn _mm512_maskz_roundscale_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_scalef_round_ps(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -10300,7 +10300,7 @@ pub fn _mm512_scalef_round_ps(a: __m512, b: __m512) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_scalef_round_ps( src: __m512, k: __mmask16, @@ -10331,7 +10331,7 @@ 
pub fn _mm512_mask_scalef_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_scalef_round_ps( k: __mmask16, a: __m512, @@ -10360,7 +10360,7 @@ pub fn _mm512_maskz_scalef_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_scalef_round_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_rounding!(ROUNDING); @@ -10385,7 +10385,7 @@ pub fn _mm512_scalef_round_pd(a: __m512d, b: __m512d) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_scalef_round_pd( src: __m512d, k: __mmask8, @@ -10416,7 +10416,7 @@ pub fn _mm512_mask_scalef_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_scalef_round_pd( k: __mmask8, a: __m512d, @@ -10439,7 +10439,7 @@ pub fn _mm512_maskz_scalef_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + pub fn _mm512_fixupimm_round_ps( a: __m512, b: __m512, @@ -10464,7 +10464,7 @@ pub fn _mm512_fixupimm_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + pub fn _mm512_mask_fixupimm_round_ps( a: __m512, k: __mmask16, @@ -10490,7 +10490,7 @@ pub fn _mm512_mask_fixupimm_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + pub fn _mm512_maskz_fixupimm_round_ps( k: __mmask16, a: __m512, @@ -10516,7 +10516,7 @@ pub fn _mm512_maskz_fixupimm_round_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(3, 4)] + pub fn _mm512_fixupimm_round_pd( a: __m512d, b: __m512d, @@ -10541,7 +10541,7 @@ pub fn _mm512_fixupimm_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + pub fn _mm512_mask_fixupimm_round_pd( a: __m512d, k: __mmask8, @@ -10567,7 +10567,7 @@ pub fn _mm512_mask_fixupimm_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))] -#[rustc_legacy_const_generics(4, 5)] + pub fn _mm512_maskz_fixupimm_round_pd( k: __mmask8, a: __m512d, @@ -10602,7 +10602,7 @@ pub fn _mm512_maskz_fixupimm_round_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))] -#[rustc_legacy_const_generics(1, 2, 3)] + pub fn _mm512_getmant_round_ps< const NORM: 
_MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM, @@ -10637,7 +10637,7 @@ pub fn _mm512_getmant_round_ps< #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))] -#[rustc_legacy_const_generics(3, 4, 5)] + pub fn _mm512_mask_getmant_round_ps< const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM, @@ -10675,7 +10675,7 @@ pub fn _mm512_mask_getmant_round_ps< #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))] -#[rustc_legacy_const_generics(2, 3, 4)] + pub fn _mm512_maskz_getmant_round_ps< const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM, @@ -10711,7 +10711,7 @@ pub fn _mm512_maskz_getmant_round_ps< #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))] -#[rustc_legacy_const_generics(1, 2, 3)] + pub fn _mm512_getmant_round_pd< const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM, @@ -10746,7 +10746,7 @@ pub fn _mm512_getmant_round_pd< #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))] -#[rustc_legacy_const_generics(3, 4, 5)] + pub fn _mm512_mask_getmant_round_pd< const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM, @@ -10784,7 +10784,7 @@ pub fn _mm512_mask_getmant_round_pd< #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))] -#[rustc_legacy_const_generics(2, 3, 4)] + pub fn _mm512_maskz_getmant_round_pd< const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM, @@ -14467,7 +14467,7 @@ pub fn _mm_maskz_cvtusepi64_epi8(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundps_epi32(a: __m512) -> __m512i { unsafe { static_assert_rounding!(ROUNDING); @@ -14491,7 +14491,7 @@ pub fn _mm512_cvt_roundps_epi32(a: __m512) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundps_epi32( src: __m512i, k: __mmask16, @@ -14520,7 +14520,7 @@ pub fn _mm512_mask_cvt_roundps_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundps_epi32(k: __mmask16, a: __m512) -> __m512i { unsafe { static_assert_rounding!(ROUNDING); @@ -14544,7 +14544,7 @@ pub fn _mm512_maskz_cvt_roundps_epi32(k: __mmask16, a: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundps_epu32(a: __m512) -> __m512i { unsafe { static_assert_rounding!(ROUNDING); @@ -14568,7 +14568,7 @@ pub fn _mm512_cvt_roundps_epu32(a: __m512) -> __m512i { #[target_feature(enable = "avx512f")] 
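The same call-site change applies to the rounding conversions here (and to the `getmant` intrinsics above, which carry three const parameters that the removed attribute used to map onto trailing arguments). A minimal sketch with the invented helper `round_to_ints`:

```rust
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

// Invented helper, for illustration only.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn round_to_ints(a: __m512) -> __m512i {
    // Legacy form accepted by the removed attribute:
    //     _mm512_cvt_roundps_epi32(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)
    _mm512_cvt_roundps_epi32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a)
}
```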
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundps_epu32( src: __m512i, k: __mmask16, @@ -14597,7 +14597,7 @@ pub fn _mm512_mask_cvt_roundps_epu32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundps_epu32(k: __mmask16, a: __m512) -> __m512i { unsafe { static_assert_rounding!(ROUNDING); @@ -14615,7 +14615,7 @@ pub fn _mm512_maskz_cvt_roundps_epu32(k: __mmask16, a: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundps_pd(a: __m256) -> __m512d { unsafe { static_assert_sae!(SAE); @@ -14633,7 +14633,7 @@ pub fn _mm512_cvt_roundps_pd(a: __m256) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundps_pd(src: __m512d, k: __mmask8, a: __m256) -> __m512d { unsafe { static_assert_sae!(SAE); @@ -14652,7 +14652,7 @@ pub fn _mm512_mask_cvt_roundps_pd(src: __m512d, k: __mmask8, a: #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundps_pd(k: __mmask8, a: __m256) -> __m512d { unsafe { static_assert_sae!(SAE); @@ -14676,7 +14676,7 @@ pub fn _mm512_maskz_cvt_roundps_pd(k: __mmask8, a: __m256) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2dq, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundpd_epi32(a: __m512d) -> __m256i { unsafe { static_assert_rounding!(ROUNDING); @@ -14700,7 +14700,7 @@ pub fn _mm512_cvt_roundpd_epi32(a: __m512d) -> __m256i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2dq, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundpd_epi32( src: __m256i, k: __mmask8, @@ -14729,7 +14729,7 @@ pub fn _mm512_mask_cvt_roundpd_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2dq, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundpd_epi32(k: __mmask8, a: __m512d) -> __m256i { unsafe { static_assert_rounding!(ROUNDING); @@ -14753,7 +14753,7 @@ pub fn _mm512_maskz_cvt_roundpd_epi32(k: __mmask8, a: __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2udq, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundpd_epu32(a: __m512d) -> __m256i { unsafe { static_assert_rounding!(ROUNDING); @@ -14777,7 +14777,7 @@ pub fn _mm512_cvt_roundpd_epu32(a: __m512d) -> __m256i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2udq, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundpd_epu32( src: __m256i, k: __mmask8, @@ -14806,7 +14806,7 @@ pub fn 
_mm512_mask_cvt_roundpd_epu32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2udq, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundpd_epu32(k: __mmask8, a: __m512d) -> __m256i { unsafe { static_assert_rounding!(ROUNDING); @@ -14830,7 +14830,7 @@ pub fn _mm512_maskz_cvt_roundpd_epu32(k: __mmask8, a: __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundpd_ps(a: __m512d) -> __m256 { unsafe { static_assert_rounding!(ROUNDING); @@ -14854,7 +14854,7 @@ pub fn _mm512_cvt_roundpd_ps(a: __m512d) -> __m256 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundpd_ps( src: __m256, k: __mmask8, @@ -14883,7 +14883,7 @@ pub fn _mm512_mask_cvt_roundpd_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundpd_ps(k: __mmask8, a: __m512d) -> __m256 { unsafe { static_assert_rounding!(ROUNDING); @@ -14907,7 +14907,7 @@ pub fn _mm512_maskz_cvt_roundpd_ps(k: __mmask8, a: __m512d) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundepi32_ps(a: __m512i) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -14931,7 +14931,7 @@ pub fn _mm512_cvt_roundepi32_ps(a: __m512i) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundepi32_ps( src: __m512, k: __mmask16, @@ -14959,7 +14959,7 @@ pub fn _mm512_mask_cvt_roundepi32_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundepi32_ps(k: __mmask16, a: __m512i) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -14983,7 +14983,7 @@ pub fn _mm512_maskz_cvt_roundepi32_ps(k: __mmask16, a: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundepu32_ps(a: __m512i) -> __m512 { unsafe { static_assert_rounding!(ROUNDING); @@ -15007,7 +15007,7 @@ pub fn _mm512_cvt_roundepu32_ps(a: __m512i) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundepu32_ps( src: __m512, k: __mmask16, @@ -15035,7 +15035,7 @@ pub fn _mm512_mask_cvt_roundepu32_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundepu32_ps(k: __mmask16, a: __m512i) -> __m512 { unsafe { 
static_assert_rounding!(ROUNDING); @@ -15063,7 +15063,7 @@ pub fn _mm512_maskz_cvt_roundepu32_ps(k: __mmask16, a: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundps_ph(a: __m512) -> __m256i { unsafe { static_assert_extended_rounding!(ROUNDING); @@ -15091,7 +15091,7 @@ pub fn _mm512_cvt_roundps_ph(a: __m512) -> __m256i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundps_ph( src: __m256i, k: __mmask16, @@ -15124,7 +15124,7 @@ pub fn _mm512_mask_cvt_roundps_ph( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundps_ph(k: __mmask16, a: __m512) -> __m256i { unsafe { static_assert_extended_rounding!(ROUNDING); @@ -15147,7 +15147,7 @@ pub fn _mm512_maskz_cvt_roundps_ph(k: __mmask16, a: __m512) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_cvt_roundps_ph( src: __m128i, k: __mmask8, @@ -15175,7 +15175,7 @@ pub fn _mm256_mask_cvt_roundps_ph( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_cvt_roundps_ph(k: __mmask8, a: __m256) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -15198,7 +15198,7 @@ pub fn _mm256_maskz_cvt_roundps_ph(k: __mmask8, a: __m256) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_cvt_roundps_ph(src: __m128i, k: __mmask8, a: __m128) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -15222,7 +15222,7 @@ pub fn _mm_mask_cvt_roundps_ph(src: __m128i, k: __mmask8, a: __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_cvt_roundps_ph(k: __mmask8, a: __m128) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -15250,7 +15250,7 @@ pub fn _mm_maskz_cvt_roundps_ph(k: __mmask8, a: __m128) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvtps_ph(a: __m512) -> __m256i { unsafe { static_assert_extended_rounding!(ROUNDING); @@ -15278,7 +15278,7 @@ pub fn _mm512_cvtps_ph(a: __m512) -> __m256i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvtps_ph(src: __m256i, k: __mmask16, a: __m512) -> __m256i { unsafe { static_assert_extended_rounding!(ROUNDING); @@ -15307,7 +15307,7 @@ pub fn _mm512_mask_cvtps_ph(src: __m256i, k: __mmask16, a: #[target_feature(enable = "avx512f")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvtps_ph(k: __mmask16, a: __m512) -> __m256i { unsafe { static_assert_extended_rounding!(ROUNDING); @@ -15330,7 +15330,7 @@ pub fn _mm512_maskz_cvtps_ph(k: __mmask16, a: __m512) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_cvtps_ph(src: __m128i, k: __mmask8, a: __m256) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -15354,7 +15354,7 @@ pub fn _mm256_mask_cvtps_ph(src: __m128i, k: __mmask8, a: __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_cvtps_ph(k: __mmask8, a: __m256) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -15377,7 +15377,7 @@ pub fn _mm256_maskz_cvtps_ph(k: __mmask8, a: __m256) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_cvtps_ph(src: __m128i, k: __mmask8, a: __m128) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -15401,7 +15401,7 @@ pub fn _mm_mask_cvtps_ph(src: __m128i, k: __mmask8, a: __m128) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_cvtps_ph(k: __mmask8, a: __m128) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -15419,7 +15419,7 @@ pub fn _mm_maskz_cvtps_ph(k: __mmask8, a: __m128) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvt_roundph_ps(a: __m256i) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -15437,7 +15437,7 @@ pub fn _mm512_cvt_roundph_ps(a: __m256i) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvt_roundph_ps(src: __m512, k: __mmask16, a: __m256i) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -15456,7 +15456,7 @@ pub fn _mm512_mask_cvt_roundph_ps(src: __m512, k: __mmask16, a: #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvt_roundph_ps(k: __mmask16, a: __m256i) -> __m512 { unsafe { static_assert_sae!(SAE); @@ -15577,7 +15577,7 @@ pub fn _mm_maskz_cvtph_ps(k: __mmask8, a: __m128i) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvtt_roundps_epi32(a: __m512) -> __m512i { unsafe { static_assert_sae!(SAE); @@ -15595,7 +15595,7 @@ pub fn _mm512_cvtt_roundps_epi32(a: __m512) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))] 
-#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvtt_roundps_epi32( src: __m512i, k: __mmask16, @@ -15618,7 +15618,7 @@ pub fn _mm512_mask_cvtt_roundps_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvtt_roundps_epi32(k: __mmask16, a: __m512) -> __m512i { unsafe { static_assert_sae!(SAE); @@ -15636,7 +15636,7 @@ pub fn _mm512_maskz_cvtt_roundps_epi32(k: __mmask16, a: __m512) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvtt_roundps_epu32(a: __m512) -> __m512i { unsafe { static_assert_sae!(SAE); @@ -15654,7 +15654,7 @@ pub fn _mm512_cvtt_roundps_epu32(a: __m512) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvtt_roundps_epu32( src: __m512i, k: __mmask16, @@ -15677,7 +15677,7 @@ pub fn _mm512_mask_cvtt_roundps_epu32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvtt_roundps_epu32(k: __mmask16, a: __m512) -> __m512i { unsafe { static_assert_sae!(SAE); @@ -15695,7 +15695,7 @@ pub fn _mm512_maskz_cvtt_roundps_epu32(k: __mmask16, a: __m512) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvtt_roundpd_epi32(a: __m512d) -> __m256i { unsafe { static_assert_sae!(SAE); @@ -15713,7 +15713,7 @@ pub fn _mm512_cvtt_roundpd_epi32(a: __m512d) -> __m256i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvtt_roundpd_epi32( src: __m256i, k: __mmask8, @@ -15736,7 +15736,7 @@ pub fn _mm512_mask_cvtt_roundpd_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvtt_roundpd_epi32(k: __mmask8, a: __m512d) -> __m256i { unsafe { static_assert_sae!(SAE); @@ -15754,7 +15754,7 @@ pub fn _mm512_maskz_cvtt_roundpd_epi32(k: __mmask8, a: __m512d) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttpd2udq, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_cvtt_roundpd_epu32(a: __m512d) -> __m256i { unsafe { static_assert_sae!(SAE); @@ -15772,7 +15772,7 @@ pub fn _mm512_cvtt_roundpd_epu32(a: __m512d) -> __m256i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttpd2udq, SAE = 8))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_cvtt_roundpd_epu32( src: __m256i, k: __mmask8, @@ -16013,7 +16013,7 @@ pub fn _mm_maskz_cvttps_epu32(k: __mmask8, a: __m128) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttpd2udq, SAE = 8))] 
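The truncating conversions take an SAE parameter rather than a full rounding mode; `static_assert_sae!` restricts it to `_MM_FROUND_CUR_DIRECTION` or `_MM_FROUND_NO_EXC`. A small sketch using the invented helper `truncate_unsigned`:

```rust
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

// Invented helper, for illustration only.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn truncate_unsigned(k: __mmask8, a: __m512d) -> __m256i {
    // Truncate 8 f64 lanes to unsigned 32-bit integers; lanes with a zero
    // mask bit are zeroed in the result.
    _mm512_maskz_cvtt_roundpd_epu32::<_MM_FROUND_NO_EXC>(k, a)
}
```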
-#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_cvtt_roundpd_epu32(k: __mmask8, a: __m512d) -> __m256i { unsafe { static_assert_sae!(SAE); @@ -16574,7 +16574,7 @@ pub fn _mm512_setr_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + pub unsafe fn _mm512_i32gather_pd( offsets: __m256i, slice: *const f64, @@ -16595,7 +16595,7 @@ pub unsafe fn _mm512_i32gather_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i32gather_pd( src: __m512d, mask: __mmask8, @@ -16617,7 +16617,7 @@ pub unsafe fn _mm512_mask_i32gather_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + pub unsafe fn _mm512_i64gather_pd( offsets: __m512i, slice: *const f64, @@ -16638,7 +16638,7 @@ pub unsafe fn _mm512_i64gather_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i64gather_pd( src: __m512d, mask: __mmask8, @@ -16660,7 +16660,7 @@ pub unsafe fn _mm512_mask_i64gather_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + pub unsafe fn _mm512_i64gather_ps(offsets: __m512i, slice: *const f32) -> __m256 { static_assert_imm8_scale!(SCALE); let zero = f32x8::ZERO; @@ -16678,7 +16678,7 @@ pub unsafe fn _mm512_i64gather_ps(offsets: __m512i, slice: *co #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i64gather_ps( src: __m256, mask: __mmask8, @@ -16700,7 +16700,7 @@ pub unsafe fn _mm512_mask_i64gather_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + pub unsafe fn _mm512_i32gather_ps(offsets: __m512i, slice: *const f32) -> __m512 { static_assert_imm8_scale!(SCALE); let zero = f32x16::ZERO; @@ -16718,7 +16718,7 @@ pub unsafe fn _mm512_i32gather_ps(offsets: __m512i, slice: *co #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i32gather_ps( src: __m512, mask: __mmask16, @@ -16740,7 +16740,7 @@ pub unsafe fn _mm512_mask_i32gather_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + pub unsafe fn _mm512_i32gather_epi32( offsets: __m512i, slice: *const i32, @@ -16761,7 +16761,7 @@ pub unsafe fn _mm512_i32gather_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i32gather_epi32( src: __m512i, mask: __mmask16, @@ -16784,7 
+16784,7 @@ pub unsafe fn _mm512_mask_i32gather_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + pub unsafe fn _mm512_i32gather_epi64( offsets: __m256i, slice: *const i64, @@ -16805,7 +16805,7 @@ pub unsafe fn _mm512_i32gather_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i32gather_epi64( src: __m512i, mask: __mmask8, @@ -16828,7 +16828,7 @@ pub unsafe fn _mm512_mask_i32gather_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + pub unsafe fn _mm512_i64gather_epi64( offsets: __m512i, slice: *const i64, @@ -16849,7 +16849,7 @@ pub unsafe fn _mm512_i64gather_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i64gather_epi64( src: __m512i, mask: __mmask8, @@ -16872,7 +16872,7 @@ pub unsafe fn _mm512_mask_i64gather_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + pub unsafe fn _mm512_i64gather_epi32( offsets: __m512i, slice: *const i32, @@ -16893,7 +16893,7 @@ pub unsafe fn _mm512_i64gather_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i64gather_epi32( src: __m256i, mask: __mmask8, @@ -16916,7 +16916,7 @@ pub unsafe fn _mm512_mask_i64gather_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm512_i32scatter_pd( slice: *mut f64, offsets: __m256i, @@ -16937,7 +16937,7 @@ pub unsafe fn _mm512_i32scatter_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i32scatter_pd( slice: *mut f64, mask: __mmask8, @@ -16958,7 +16958,7 @@ pub unsafe fn _mm512_mask_i32scatter_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm512_i64scatter_pd( slice: *mut f64, offsets: __m512i, @@ -16979,7 +16979,7 @@ pub unsafe fn _mm512_i64scatter_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i64scatter_pd( slice: *mut f64, mask: __mmask8, @@ -17000,7 +17000,7 @@ pub unsafe fn _mm512_mask_i64scatter_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm512_i32scatter_ps( slice: *mut f32, 
offsets: __m512i, @@ -17021,7 +17021,7 @@ pub unsafe fn _mm512_i32scatter_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i32scatter_ps( slice: *mut f32, mask: __mmask16, @@ -17042,7 +17042,7 @@ pub unsafe fn _mm512_mask_i32scatter_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm512_i64scatter_ps( slice: *mut f32, offsets: __m512i, @@ -17063,7 +17063,7 @@ pub unsafe fn _mm512_i64scatter_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i64scatter_ps( slice: *mut f32, mask: __mmask8, @@ -17084,7 +17084,7 @@ pub unsafe fn _mm512_mask_i64scatter_ps( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm512_i32scatter_epi64( slice: *mut i64, offsets: __m256i, @@ -17105,7 +17105,7 @@ pub unsafe fn _mm512_i32scatter_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i32scatter_epi64( slice: *mut i64, mask: __mmask8, @@ -17127,7 +17127,7 @@ pub unsafe fn _mm512_mask_i32scatter_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm512_i64scatter_epi64( slice: *mut i64, offsets: __m512i, @@ -17148,7 +17148,7 @@ pub unsafe fn _mm512_i64scatter_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i64scatter_epi64( slice: *mut i64, mask: __mmask8, @@ -17170,7 +17170,7 @@ pub unsafe fn _mm512_mask_i64scatter_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm512_i32scatter_epi32( slice: *mut i32, offsets: __m512i, @@ -17191,7 +17191,7 @@ pub unsafe fn _mm512_i32scatter_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub unsafe fn _mm512_mask_i32scatter_epi32( slice: *mut i32, mask: __mmask16, @@ -17213,7 +17213,7 @@ pub unsafe fn _mm512_mask_i32scatter_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm512_i64scatter_epi32( slice: *mut i32, offsets: __m512i, @@ -17234,7 +17234,7 @@ pub unsafe fn _mm512_i64scatter_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + pub 
unsafe fn _mm512_mask_i64scatter_epi32( slice: *mut i32, mask: __mmask8, @@ -17256,7 +17256,7 @@ pub unsafe fn _mm512_mask_i64scatter_epi32( #[inline] #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm512_i32logather_epi64( vindex: __m512i, @@ -17273,7 +17273,7 @@ pub unsafe fn _mm512_i32logather_epi64( #[inline] #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm512_mask_i32logather_epi64( src: __m512i, @@ -17291,7 +17291,7 @@ pub unsafe fn _mm512_mask_i32logather_epi64( #[inline] #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))] -#[rustc_legacy_const_generics(2)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm512_i32logather_pd( vindex: __m512i, @@ -17308,7 +17308,7 @@ pub unsafe fn _mm512_i32logather_pd( #[inline] #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm512_mask_i32logather_pd( src: __m512d, @@ -17326,7 +17326,7 @@ pub unsafe fn _mm512_mask_i32logather_pd( #[inline] #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm512_i32loscatter_epi64( base_addr: *mut i64, @@ -17344,7 +17344,7 @@ pub unsafe fn _mm512_i32loscatter_epi64( #[inline] #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm512_mask_i32loscatter_epi64( base_addr: *mut i64, @@ -17362,7 +17362,7 @@ pub unsafe fn _mm512_mask_i32loscatter_epi64( #[inline] #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm512_i32loscatter_pd( base_addr: *mut f64, @@ -17380,7 +17380,7 @@ pub unsafe fn _mm512_i32loscatter_pd( #[inline] #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm512_mask_i32loscatter_pd( base_addr: *mut f64, @@ -17398,7 +17398,7 @@ pub unsafe fn _mm512_mask_i32loscatter_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_i32scatter_epi32( base_addr: *mut i32, @@ -17417,7 +17417,7 @@ pub unsafe fn _mm256_i32scatter_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mask_i32scatter_epi32( base_addr: *mut i32, @@ -17436,7 +17436,7 @@ pub unsafe fn _mm256_mask_i32scatter_epi32( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpscatterdq, 
SCALE = 1))] -#[rustc_legacy_const_generics(3)] + pub unsafe fn _mm256_i32scatter_epi64( slice: *mut i64, offsets: __m128i, @@ -17457,7 +17457,7 @@ pub unsafe fn _mm256_i32scatter_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mask_i32scatter_epi64( base_addr: *mut i64, @@ -17476,7 +17476,7 @@ pub unsafe fn _mm256_mask_i32scatter_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_i32scatter_pd( base_addr: *mut f64, @@ -17495,7 +17495,7 @@ pub unsafe fn _mm256_i32scatter_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mask_i32scatter_pd( base_addr: *mut f64, @@ -17514,7 +17514,7 @@ pub unsafe fn _mm256_mask_i32scatter_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_i32scatter_ps( base_addr: *mut f32, @@ -17533,7 +17533,7 @@ pub unsafe fn _mm256_i32scatter_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mask_i32scatter_ps( base_addr: *mut f32, @@ -17552,7 +17552,7 @@ pub unsafe fn _mm256_mask_i32scatter_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_i64scatter_epi32( base_addr: *mut i32, @@ -17571,7 +17571,7 @@ pub unsafe fn _mm256_i64scatter_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mask_i64scatter_epi32( base_addr: *mut i32, @@ -17590,7 +17590,7 @@ pub unsafe fn _mm256_mask_i64scatter_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_i64scatter_epi64( base_addr: *mut i64, @@ -17609,7 +17609,7 @@ pub unsafe fn _mm256_i64scatter_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mask_i64scatter_epi64( base_addr: *mut i64, @@ -17628,7 +17628,7 @@ pub unsafe fn _mm256_mask_i64scatter_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_i64scatter_pd( base_addr: *mut f64, @@ -17647,7 +17647,7 @@ pub unsafe fn _mm256_i64scatter_pd( #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mask_i64scatter_pd( base_addr: *mut f64, @@ -17666,7 +17666,7 @@ pub unsafe fn _mm256_mask_i64scatter_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_i64scatter_ps( base_addr: *mut f32, @@ -17685,7 +17685,7 @@ pub unsafe fn _mm256_i64scatter_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mask_i64scatter_ps( base_addr: *mut f32, @@ -17705,7 +17705,7 @@ pub unsafe fn _mm256_mask_i64scatter_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mmask_i32gather_epi32( src: __m256i, @@ -17731,7 +17731,7 @@ pub unsafe fn _mm256_mmask_i32gather_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mmask_i32gather_epi64( src: __m256i, @@ -17757,7 +17757,7 @@ pub unsafe fn _mm256_mmask_i32gather_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mmask_i32gather_pd( src: __m256d, @@ -17783,7 +17783,7 @@ pub unsafe fn _mm256_mmask_i32gather_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mmask_i32gather_ps( src: __m256, @@ -17809,7 +17809,7 @@ pub unsafe fn _mm256_mmask_i32gather_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mmask_i64gather_epi32( src: __m128i, @@ -17835,7 +17835,7 @@ pub unsafe fn _mm256_mmask_i64gather_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mmask_i64gather_epi64( src: __m256i, @@ -17861,7 +17861,7 @@ pub unsafe fn _mm256_mmask_i64gather_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mmask_i64gather_pd( src: __m256d, @@ -17887,7 +17887,7 @@ pub unsafe fn _mm256_mmask_i64gather_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm256_mmask_i64gather_ps( src: __m128, @@ -17912,7 +17912,7 
@@ pub unsafe fn _mm256_mmask_i64gather_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_i32scatter_epi32( base_addr: *mut i32, @@ -17931,7 +17931,7 @@ pub unsafe fn _mm_i32scatter_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mask_i32scatter_epi32( base_addr: *mut i32, @@ -17950,7 +17950,7 @@ pub unsafe fn _mm_mask_i32scatter_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_i32scatter_epi64( base_addr: *mut i64, @@ -17969,7 +17969,7 @@ pub unsafe fn _mm_i32scatter_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mask_i32scatter_epi64( base_addr: *mut i64, @@ -17988,7 +17988,7 @@ pub unsafe fn _mm_mask_i32scatter_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_i32scatter_pd( base_addr: *mut f64, @@ -18007,7 +18007,7 @@ pub unsafe fn _mm_i32scatter_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mask_i32scatter_pd( base_addr: *mut f64, @@ -18026,7 +18026,7 @@ pub unsafe fn _mm_mask_i32scatter_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_i32scatter_ps(base_addr: *mut f32, vindex: __m128i, a: __m128) { static_assert_imm8_scale!(SCALE); @@ -18041,7 +18041,7 @@ pub unsafe fn _mm_i32scatter_ps(base_addr: *mut f32, vindex: _ #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mask_i32scatter_ps( base_addr: *mut f32, @@ -18060,7 +18060,7 @@ pub unsafe fn _mm_mask_i32scatter_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_i64scatter_epi32( base_addr: *mut i32, @@ -18079,7 +18079,7 @@ pub unsafe fn _mm_i64scatter_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mask_i64scatter_epi32( base_addr: *mut i32, @@ -18098,7 +18098,7 @@ pub unsafe fn _mm_mask_i64scatter_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))] 
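// [Editor's sketch — not part of the diff] How the SCALE const generic on the
// scatter intrinsics above is supplied at a call site. The buffer, the index
// values, and the `scatter_demo` name are illustrative assumptions; the caller
// must have verified avx512f/avx512vl support at runtime.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn scatter_demo() {
    use core::arch::x86_64::*;
    let mut buf = [0i32; 16];
    // Lane i of `vals` is stored to base_addr + vindex[i] * SCALE bytes;
    // SCALE must be 1, 2, 4 or 8 — here 4, the byte size of an i32 element.
    let vindex = _mm_set_epi32(12, 8, 4, 0);
    let vals = _mm_set_epi32(40, 30, 20, 10);
    _mm_i32scatter_epi32::<4>(buf.as_mut_ptr(), vindex, vals);
    debug_assert_eq!(buf[0], 10);
    debug_assert_eq!(buf[12], 40);
}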
-#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_i64scatter_epi64( base_addr: *mut i64, @@ -18117,7 +18117,7 @@ pub unsafe fn _mm_i64scatter_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mask_i64scatter_epi64( base_addr: *mut i64, @@ -18136,7 +18136,7 @@ pub unsafe fn _mm_mask_i64scatter_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_i64scatter_pd( base_addr: *mut f64, @@ -18155,7 +18155,7 @@ pub unsafe fn _mm_i64scatter_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mask_i64scatter_pd( base_addr: *mut f64, @@ -18174,7 +18174,7 @@ pub unsafe fn _mm_mask_i64scatter_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))] -#[rustc_legacy_const_generics(3)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_i64scatter_ps(base_addr: *mut f32, vindex: __m128i, a: __m128) { static_assert_imm8_scale!(SCALE); @@ -18188,7 +18188,7 @@ pub unsafe fn _mm_i64scatter_ps(base_addr: *mut f32, vindex: _ #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mask_i64scatter_ps( base_addr: *mut f32, @@ -18208,7 +18208,7 @@ pub unsafe fn _mm_mask_i64scatter_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mmask_i32gather_epi32( src: __m128i, @@ -18234,7 +18234,7 @@ pub unsafe fn _mm_mmask_i32gather_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mmask_i32gather_epi64( src: __m128i, @@ -18260,7 +18260,7 @@ pub unsafe fn _mm_mmask_i32gather_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mmask_i32gather_pd( src: __m128d, @@ -18286,7 +18286,7 @@ pub unsafe fn _mm_mmask_i32gather_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mmask_i32gather_ps( src: __m128, @@ -18312,7 +18312,7 @@ pub unsafe fn _mm_mmask_i32gather_ps( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mmask_i64gather_epi32( src: __m128i, @@ -18338,7 +18338,7 @@ pub unsafe fn 
_mm_mmask_i64gather_epi32( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mmask_i64gather_epi64( src: __m128i, @@ -18364,7 +18364,7 @@ pub unsafe fn _mm_mmask_i64gather_epi64( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mmask_i64gather_pd( src: __m128d, @@ -18390,7 +18390,7 @@ pub unsafe fn _mm_mmask_i64gather_pd( #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))] -#[rustc_legacy_const_generics(4)] + #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub unsafe fn _mm_mmask_i64gather_ps( src: __m128, @@ -19075,7 +19075,7 @@ pub fn _mm_maskz_expand_pd(k: __mmask8, a: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_rol_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19092,7 +19092,7 @@ pub fn _mm512_rol_epi32(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_rol_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19109,7 +19109,7 @@ pub fn _mm512_mask_rol_epi32(src: __m512i, k: __mmask16, a: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19126,7 +19126,7 @@ pub fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm256_rol_epi32(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19143,7 +19143,7 @@ pub fn _mm256_rol_epi32(a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_rol_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19160,7 +19160,7 @@ pub fn _mm256_mask_rol_epi32(src: __m256i, k: __mmask8, a: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19177,7 +19177,7 @@ pub fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_rol_epi32(a: __m128i) -> __m128i { unsafe { 
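// [Editor's note — not in the diff] The assert below only requires that the
// rotate count fit in 8 bits; vprold itself uses the count modulo the 32-bit
// lane width, so IMM8 = 33 behaves like a rotate by 1.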
static_assert_uimm_bits!(IMM8, 8); @@ -19194,7 +19194,7 @@ pub fn _mm_rol_epi32(a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_rol_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19211,7 +19211,7 @@ pub fn _mm_mask_rol_epi32(src: __m128i, k: __mmask8, a: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19228,7 +19228,7 @@ pub fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_ror_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19245,7 +19245,7 @@ pub fn _mm512_ror_epi32(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_ror_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19262,7 +19262,7 @@ pub fn _mm512_mask_ror_epi32(src: __m512i, k: __mmask16, a: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19279,7 +19279,7 @@ pub fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm256_ror_epi32(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19296,7 +19296,7 @@ pub fn _mm256_ror_epi32(a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_ror_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19313,7 +19313,7 @@ pub fn _mm256_mask_ror_epi32(src: __m256i, k: __mmask8, a: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19330,7 +19330,7 @@ pub fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_ror_epi32(a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19347,7 +19347,7 @@ pub fn _mm_ror_epi32(a: __m128i) -> __m128i { 
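// [Editor's sketch — not part of the diff] Call sites name the rotate count
// with a turbofish; the #[rustc_legacy_const_generics(N)] attribute being
// removed on the `-` lines additionally allowed C-style calls that pass the
// immediate as the value argument at position N. `rotate_demo` is an
// illustrative assumption, not stdarch code.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn rotate_demo(a: core::arch::x86_64::__m512i) -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    // For 32-bit lanes, rotating right by 1 equals rotating left by 31, which
    // is why the ror tests above can assert on the vprold mnemonic.
    let left = _mm512_rol_epi32::<31>(a);
    let right = _mm512_ror_epi32::<1>(a);
    debug_assert_eq!(_mm512_cmpeq_epi32_mask(left, right), 0xFFFF);
    right
}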
#[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_ror_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19364,7 +19364,7 @@ pub fn _mm_mask_ror_epi32(src: __m128i, k: __mmask8, a: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19381,7 +19381,7 @@ pub fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_rol_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19398,7 +19398,7 @@ pub fn _mm512_rol_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_rol_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19415,7 +19415,7 @@ pub fn _mm512_mask_rol_epi64(src: __m512i, k: __mmask8, a: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19432,7 +19432,7 @@ pub fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) -> __m51 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm256_rol_epi64(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19449,7 +19449,7 @@ pub fn _mm256_rol_epi64(a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_rol_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19466,7 +19466,7 @@ pub fn _mm256_mask_rol_epi64(src: __m256i, k: __mmask8, a: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19483,7 +19483,7 @@ pub fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_rol_epi64(a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19500,7 +19500,7 @@ pub fn _mm_rol_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_rol_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19517,7 +19517,7 @@ pub fn _mm_mask_rol_epi64(src: __m128i, k: __mmask8, a: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19534,7 +19534,7 @@ pub fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_ror_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19551,7 +19551,7 @@ pub fn _mm512_ror_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_ror_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19568,7 +19568,7 @@ pub fn _mm512_mask_ror_epi64(src: __m512i, k: __mmask8, a: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19585,7 +19585,7 @@ pub fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) -> __m51 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(1)] + pub fn _mm256_ror_epi64(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19602,7 +19602,7 @@ pub fn _mm256_ror_epi64(a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_ror_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19619,7 +19619,7 @@ pub fn _mm256_mask_ror_epi64(src: __m256i, k: __mmask8, a: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19636,7 +19636,7 @@ pub fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_ror_epi64(a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19653,7 +19653,7 @@ pub fn _mm_ror_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_ror_epi64(src: __m128i, k: 
__mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19670,7 +19670,7 @@ pub fn _mm_mask_ror_epi64(src: __m128i, k: __mmask8, a: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19687,7 +19687,7 @@ pub fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_slli_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19706,7 +19706,7 @@ pub fn _mm512_slli_epi32(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_slli_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19726,7 +19726,7 @@ pub fn _mm512_mask_slli_epi32(src: __m512i, k: __mmask16, a: __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_slli_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19746,7 +19746,7 @@ pub fn _mm512_maskz_slli_epi32(k: __mmask16, a: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_slli_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19766,7 +19766,7 @@ pub fn _mm256_mask_slli_epi32(src: __m256i, k: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_slli_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19786,7 +19786,7 @@ pub fn _mm256_maskz_slli_epi32(k: __mmask8, a: __m256i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_slli_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19806,7 +19806,7 @@ pub fn _mm_mask_slli_epi32(src: __m128i, k: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19826,7 +19826,7 @@ pub fn _mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_srli_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 
8); @@ -19845,7 +19845,7 @@ pub fn _mm512_srli_epi32(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_srli_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19865,7 +19865,7 @@ pub fn _mm512_mask_srli_epi32(src: __m512i, k: __mmask16, a: __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_srli_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19885,7 +19885,7 @@ pub fn _mm512_maskz_srli_epi32(k: __mmask16, a: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_srli_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19905,7 +19905,7 @@ pub fn _mm256_mask_srli_epi32(src: __m256i, k: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_srli_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19925,7 +19925,7 @@ pub fn _mm256_maskz_srli_epi32(k: __mmask8, a: __m256i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_srli_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19945,7 +19945,7 @@ pub fn _mm_mask_srli_epi32(src: __m128i, k: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19965,7 +19965,7 @@ pub fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_slli_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -19984,7 +19984,7 @@ pub fn _mm512_slli_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_slli_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20004,7 +20004,7 @@ pub fn _mm512_mask_slli_epi64(src: __m512i, k: __mmask8, a: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_slli_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20024,7 +20024,7 @@ pub fn _mm512_maskz_slli_epi64(k: 
__mmask8, a: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_slli_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20044,7 +20044,7 @@ pub fn _mm256_mask_slli_epi64(src: __m256i, k: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_slli_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20064,7 +20064,7 @@ pub fn _mm256_maskz_slli_epi64(k: __mmask8, a: __m256i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_slli_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20084,7 +20084,7 @@ pub fn _mm_mask_slli_epi64(src: __m128i, k: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20104,7 +20104,7 @@ pub fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_srli_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20123,7 +20123,7 @@ pub fn _mm512_srli_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_srli_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20143,7 +20143,7 @@ pub fn _mm512_mask_srli_epi64(src: __m512i, k: __mmask8, a: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_srli_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20163,7 +20163,7 @@ pub fn _mm512_maskz_srli_epi64(k: __mmask8, a: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_srli_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20183,7 +20183,7 @@ pub fn _mm256_mask_srli_epi64(src: __m256i, k: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_srli_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20203,7 +20203,7 @@ pub fn _mm256_maskz_srli_epi64(k: __mmask8, a: __m256i) -> __m2 
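// [Editor's model — not part of the diff] A scalar sketch of the mask/maskz
// shift-immediate semantics appearing above: `mask` variants keep the `src`
// lane where the mask bit is clear, while `maskz` variants zero it. Logical
// shifts by 32 or more produce zero, mirrored by the guard below. The function
// name and the 4-lane width are illustrative assumptions.
fn masked_srli_epi32_model(
    src: [u32; 4],
    k: u8,
    a: [u32; 4],
    imm8: u32,
) -> ([u32; 4], [u32; 4]) {
    let shifted = a.map(|x| if imm8 >= 32 { 0 } else { x >> imm8 });
    let mut with_src = src;        // models _mm_mask_srli_epi32
    let mut with_zero = [0u32; 4]; // models _mm_maskz_srli_epi32
    for i in 0..4 {
        if k & (1 << i) != 0 {
            with_src[i] = shifted[i];
            with_zero[i] = shifted[i];
        }
    }
    (with_src, with_zero)
}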
#[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_srli_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20223,7 +20223,7 @@ pub fn _mm_mask_srli_epi64(src: __m128i, k: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_srli_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20835,7 +20835,7 @@ pub fn _mm_maskz_sra_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_srai_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20850,7 +20850,7 @@ pub fn _mm512_srai_epi32(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_srai_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20866,7 +20866,7 @@ pub fn _mm512_mask_srai_epi32(src: __m512i, k: __mmask16, a: __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_srai_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20882,7 +20882,7 @@ pub fn _mm512_maskz_srai_epi32(k: __mmask16, a: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_srai_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { let r = simd_shr(a.as_i32x8(), i32x8::splat(IMM8.min(31) as i32)); @@ -20897,7 +20897,7 @@ pub fn _mm256_mask_srai_epi32(src: __m256i, k: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_srai_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { let r = simd_shr(a.as_i32x8(), i32x8::splat(IMM8.min(31) as i32)); @@ -20912,7 +20912,7 @@ pub fn _mm256_maskz_srai_epi32(k: __mmask8, a: __m256i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_srai_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let r = simd_shr(a.as_i32x4(), i32x4::splat(IMM8.min(31) as i32)); @@ -20927,7 +20927,7 @@ pub fn _mm_mask_srai_epi32(src: __m128i, k: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_srai_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let r = simd_shr(a.as_i32x4(), 
i32x4::splat(IMM8.min(31) as i32)); @@ -20942,7 +20942,7 @@ pub fn _mm_maskz_srai_epi32(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_srai_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20957,7 +20957,7 @@ pub fn _mm512_srai_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_srai_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20973,7 +20973,7 @@ pub fn _mm512_mask_srai_epi64(src: __m512i, k: __mmask8, a: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -20989,7 +20989,7 @@ pub fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm256_srai_epi64(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -21004,7 +21004,7 @@ pub fn _mm256_srai_epi64(a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_srai_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -21020,7 +21020,7 @@ pub fn _mm256_mask_srai_epi64(src: __m256i, k: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -21036,7 +21036,7 @@ pub fn _mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_srai_epi64(a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -21051,7 +21051,7 @@ pub fn _mm_srai_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_srai_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -21067,7 +21067,7 @@ pub fn _mm_mask_srai_epi64(src: __m128i, k: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_srai_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -22143,7 +22143,7 @@ pub fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, 
count: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_permute_ps(a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22179,7 +22179,7 @@ pub fn _mm512_permute_ps(a: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_permute_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22195,7 +22195,7 @@ pub fn _mm512_mask_permute_ps(src: __m512, k: __mmask16, a: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_permute_ps(k: __mmask16, a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22211,7 +22211,7 @@ pub fn _mm512_maskz_permute_ps(k: __mmask16, a: __m512) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_permute_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { unsafe { let r = _mm256_permute_ps::<MASK>(a); @@ -22226,7 +22226,7 @@ pub fn _mm256_mask_permute_ps(src: __m256, k: __mmask8, a: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_permute_ps(k: __mmask8, a: __m256) -> __m256 { unsafe { let r = _mm256_permute_ps::<MASK>(a); @@ -22241,7 +22241,7 @@ pub fn _mm256_maskz_permute_ps(k: __mmask8, a: __m256) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_permute_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { unsafe { let r = _mm_permute_ps::<MASK>(a); @@ -22256,7 +22256,7 @@ pub fn _mm_mask_permute_ps(src: __m128, k: __mmask8, a: __m128) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_permute_ps(k: __mmask8, a: __m128) -> __m128 { unsafe { let r = _mm_permute_ps::<MASK>(a); @@ -22271,7 +22271,7 @@ pub fn _mm_maskz_permute_ps(k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))] -#[rustc_legacy_const_generics(1)] + pub fn _mm512_permute_pd(a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22299,7 +22299,7 @@ pub fn _mm512_permute_pd(a: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_permute_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22315,7 +22315,7 @@ pub fn _mm512_mask_permute_pd(src:
__m512d, k: __mmask8, a: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_permute_pd(k: __mmask8, a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22331,7 +22331,7 @@ pub fn _mm512_maskz_permute_pd(k: __mmask8, a: __m512d) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_permute_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 4); @@ -22347,7 +22347,7 @@ pub fn _mm256_mask_permute_pd(src: __m256d, k: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_permute_pd(k: __mmask8, a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 4); @@ -22363,7 +22363,7 @@ pub fn _mm256_maskz_permute_pd(k: __mmask8, a: __m256d) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0b01))] -#[rustc_legacy_const_generics(3)] + pub fn _mm_mask_permute_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM2, 2); @@ -22379,7 +22379,7 @@ pub fn _mm_mask_permute_pd(src: __m128d, k: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0b01))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM2, 2); @@ -22395,7 +22395,7 @@ pub fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> __m128d #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq -#[rustc_legacy_const_generics(1)] + pub fn _mm512_permutex_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22423,7 +22423,7 @@ pub fn _mm512_permutex_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_permutex_epi64( src: __m512i, k: __mmask8, @@ -22443,7 +22443,7 @@ pub fn _mm512_mask_permutex_epi64( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22459,7 +22459,7 @@ pub fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m512i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq -#[rustc_legacy_const_generics(1)] + pub fn _mm256_permutex_epi64(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22483,7 +22483,7 @@ pub fn 
_mm256_permutex_epi64(a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_permutex_epi64( src: __m256i, k: __mmask8, @@ -22503,7 +22503,7 @@ pub fn _mm256_mask_permutex_epi64( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22519,7 +22519,7 @@ pub fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m256i) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd -#[rustc_legacy_const_generics(1)] + pub fn _mm512_permutex_pd(a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22547,7 +22547,7 @@ pub fn _mm512_permutex_pd(a: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd -#[rustc_legacy_const_generics(3)] + pub fn _mm512_mask_permutex_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { unsafe { let r = _mm512_permutex_pd::<MASK>(a); @@ -22562,7 +22562,7 @@ pub fn _mm512_mask_permutex_pd(src: __m512d, k: __mmask8, a: __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd -#[rustc_legacy_const_generics(2)] + pub fn _mm512_maskz_permutex_pd(k: __mmask8, a: __m512d) -> __m512d { unsafe { let r = _mm512_permutex_pd::<MASK>(a); @@ -22577,7 +22577,7 @@ pub fn _mm512_maskz_permutex_pd(k: __mmask8, a: __m512d) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd -#[rustc_legacy_const_generics(1)] + pub fn _mm256_permutex_pd(a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22601,7 +22601,7 @@ pub fn _mm256_permutex_pd(a: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd -#[rustc_legacy_const_generics(3)] + pub fn _mm256_mask_permutex_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -22617,7 +22617,7 @@ pub fn _mm256_mask_permutex_pd(src: __m256d, k: __mmask8, a: __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd -#[rustc_legacy_const_generics(2)] + pub fn _mm256_maskz_permutex_pd(k: __mmask8, a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); @@ -23887,7 +23887,7 @@ pub fn _mm_mask2_permutex2var_pd(a: __m128d, idx: __m128i, k: __mmask8, b: __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 9))] //should be vpshufd -#[rustc_legacy_const_generics(1)] + pub fn _mm512_shuffle_epi32(a:
__m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -23924,7 +23924,7 @@ pub fn _mm512_shuffle_epi32(a: __m512i) -> __m512i {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
     src: __m512i,
     k: __mmask16,
@@ -23944,7 +23944,7 @@ pub fn _mm512_mask_shuffle_epi32(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(k: __mmask16, a: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -23960,7 +23960,7 @@ pub fn _mm512_maskz_shuffle_epi32(k: __mmask16, a: __
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_mask_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
     src: __m256i,
     k: __mmask8,
@@ -23980,7 +23980,7 @@ pub fn _mm256_mask_shuffle_epi32(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(k: __mmask8, a: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -23996,7 +23996,7 @@ pub fn _mm256_maskz_shuffle_epi32(k: __mmask8, a: __m
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_mask_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
     src: __m128i,
     k: __mmask8,
@@ -24016,7 +24016,7 @@ pub fn _mm_mask_shuffle_epi32(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(k: __mmask8, a: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24032,7 +24032,7 @@ pub fn _mm_maskz_shuffle_epi32(k: __mmask8, a: __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shuffle_ps<const MASK: i32>(a: __m512, b: __m512) -> __m512 {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24068,7 +24068,7 @@ pub fn _mm512_shuffle_ps(a: __m512, b: __m512) -> __m512 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shuffle_ps<const MASK: i32>(
     src: __m512,
     k: __mmask16,
@@ -24089,7 +24089,7 @@ pub fn _mm512_mask_shuffle_ps(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shuffle_ps<const MASK: i32>(k: __mmask16, a: __m512, b: __m512) -> __m512 {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24105,7 +24105,7 @@ pub fn _mm512_maskz_shuffle_ps(k: __mmask16, a: __m512, b: __m5
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shuffle_ps<const MASK: i32>(
     src: __m256,
     k: __mmask8,
@@ -24126,7 +24126,7 @@ pub fn _mm256_mask_shuffle_ps(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shuffle_ps<const MASK: i32>(k: __mmask8, a: __m256, b: __m256) -> __m256 {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24142,7 +24142,7 @@ pub fn _mm256_maskz_shuffle_ps(k: __mmask8, a: __m256, b: __m25
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_shuffle_ps<const MASK: i32>(
     src: __m128,
     k: __mmask8,
@@ -24163,7 +24163,7 @@ pub fn _mm_mask_shuffle_ps(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_shuffle_ps<const MASK: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24179,7 +24179,7 @@ pub fn _mm_maskz_shuffle_ps(k: __mmask8, a: __m128, b: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shuffle_pd<const MASK: i32>(a: __m512d, b: __m512d) -> __m512d {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24207,7 +24207,7 @@ pub fn _mm512_shuffle_pd(a: __m512d, b: __m512d) -> __m512d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shuffle_pd<const MASK: i32>(
     src: __m512d,
     k: __mmask8,
@@ -24228,7 +24228,7 @@ pub fn _mm512_mask_shuffle_pd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shuffle_pd<const MASK: i32>(k: __mmask8, a: __m512d, b: __m512d) -> __m512d {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24244,7 +24244,7 @@ pub fn _mm512_maskz_shuffle_pd(k: __mmask8, a: __m512d, b: __m5
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shuffle_pd<const MASK: i32>(
     src: __m256d,
     k: __mmask8,
@@ -24265,7 +24265,7 @@ pub fn _mm256_mask_shuffle_pd(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shuffle_pd<const MASK: i32>(k: __mmask8, a: __m256d, b: __m256d) -> __m256d {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24281,7 +24281,7 @@ pub fn _mm256_maskz_shuffle_pd(k: __mmask8, a: __m256d, b: __m2
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufpd, MASK = 1))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_shuffle_pd<const MASK: i32>(
     src: __m128d,
     k: __mmask8,
@@ -24302,7 +24302,7 @@ pub fn _mm_mask_shuffle_pd(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufpd, MASK = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_shuffle_pd<const MASK: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
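For the vshufps-style shuffles above, the 8-bit MASK is conventionally built with _MM_SHUFFLE; in const-generic position the call needs braces. A hedged sketch — demo code of ours, not taken from the diff:

    #[cfg(target_arch = "x86_64")]
    fn demo_shuffle_ps() {
        use core::arch::x86_64::*;
        if std::arch::is_x86_feature_detected!("avx512f") {
            // SAFETY: avx512f availability was verified at runtime.
            unsafe {
                let a = _mm512_set1_ps(1.0);
                let b = _mm512_set1_ps(2.0);
                // Per 128-bit lane: low two f32 picked from `a`, high two from `b`.
                let r = _mm512_shuffle_ps::<{ _MM_SHUFFLE(3, 2, 1, 0) }>(a, b);
                let _ = r;
            }
        }
    }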
@@ -24318,7 +24318,7 @@ pub fn _mm_maskz_shuffle_pd(k: __mmask8, a: __m128d, b: __m128d
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_01_01_01))] //should be vshufi32x4
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shuffle_i32x4<const MASK: i32>(a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24357,7 +24357,7 @@ pub fn _mm512_shuffle_i32x4(a: __m512i, b: __m512i) -> __m512i
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b10_11_01_01))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shuffle_i32x4<const MASK: i32>(
     src: __m512i,
     k: __mmask16,
@@ -24378,7 +24378,7 @@ pub fn _mm512_mask_shuffle_i32x4(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b10_11_01_01))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shuffle_i32x4<const MASK: i32>(
     k: __mmask16,
     a: __m512i,
@@ -24398,7 +24398,7 @@ pub fn _mm512_maskz_shuffle_i32x4(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vperm, MASK = 0b11))] //should be vshufi32x4
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shuffle_i32x4<const MASK: i32>(a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24429,7 +24429,7 @@ pub fn _mm256_shuffle_i32x4(a: __m256i, b: __m256i) -> __m256i
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b11))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shuffle_i32x4<const MASK: i32>(
     src: __m256i,
     k: __mmask8,
@@ -24450,7 +24450,7 @@ pub fn _mm256_mask_shuffle_i32x4(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b11))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shuffle_i32x4<const MASK: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24466,7 +24466,7 @@ pub fn _mm256_maskz_shuffle_i32x4(k: __mmask8, a: __m256i, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shuffle_i64x2<const MASK: i32>(a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24497,7 +24497,7 @@ pub fn _mm512_shuffle_i64x2(a: __m512i, b: __m512i) -> __m512i
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shuffle_i64x2<const MASK: i32>(
     src: __m512i,
     k: __mmask8,
@@ -24518,7 +24518,7 @@ pub fn _mm512_mask_shuffle_i64x2(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shuffle_i64x2<const MASK: i32>(k: __mmask8, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24534,7 +24534,7 @@ pub fn _mm512_maskz_shuffle_i64x2(k: __mmask8, a: __m512i, b: _
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshufi64x2
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shuffle_i64x2<const MASK: i32>(a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24561,7 +24561,7 @@ pub fn _mm256_shuffle_i64x2(a: __m256i, b: __m256i) -> __m256i
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b11))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shuffle_i64x2<const MASK: i32>(
     src: __m256i,
     k: __mmask8,
@@ -24582,7 +24582,7 @@ pub fn _mm256_mask_shuffle_i64x2(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b11))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shuffle_i64x2<const MASK: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24598,7 +24598,7 @@ pub fn _mm256_maskz_shuffle_i64x2(k: __mmask8, a: __m256i, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b1011))] //should be vshuff32x4, but generate vshuff64x2
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shuffle_f32x4<const MASK: i32>(a: __m512, b: __m512) -> __m512 {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24637,7 +24637,7 @@ pub fn _mm512_shuffle_f32x4(a: __m512, b: __m512) -> __m512 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b1011))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shuffle_f32x4<const MASK: i32>(
     src: __m512,
     k: __mmask16,
@@ -24658,7 +24658,7 @@ pub fn _mm512_mask_shuffle_f32x4(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b1011))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shuffle_f32x4<const MASK: i32>(k: __mmask16, a: __m512, b: __m512) -> __m512 {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24674,7 +24674,7 @@ pub fn _mm512_maskz_shuffle_f32x4(k: __mmask16, a: __m512, b: _
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshuff32x4
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shuffle_f32x4<const MASK: i32>(a: __m256, b: __m256) -> __m256 {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24705,7 +24705,7 @@ pub fn _mm256_shuffle_f32x4(a: __m256, b: __m256) -> __m256 {
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b11))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shuffle_f32x4<const MASK: i32>(
     src: __m256,
     k: __mmask8,
@@ -24726,7 +24726,7 @@ pub fn _mm256_mask_shuffle_f32x4(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b11))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shuffle_f32x4<const MASK: i32>(k: __mmask8, a: __m256, b: __m256) -> __m256 {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
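The *_shuffle_i32x4/i64x2/f32x4 immediates above select whole 128-bit lanes rather than individual elements: in the 512-bit form, two MASK bits per destination lane, with the low two lanes sourced from `a` and the high two from `b`. A sketch under those assumptions (ours, not part of the diff):

    #[cfg(target_arch = "x86_64")]
    fn demo_shuffle_i32x4() {
        use core::arch::x86_64::*;
        if std::arch::is_x86_feature_detected!("avx512f") {
            // SAFETY: avx512f availability was verified at runtime.
            unsafe {
                let a = _mm512_set1_epi32(1);
                let b = _mm512_set1_epi32(2);
                // Lanes a[2], a[3] fill the low half; b[0], b[1] fill the high half.
                let r = _mm512_shuffle_i32x4::<0b01_00_11_10>(a, b);
                let _ = r;
            }
        }
    }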
@@ -24742,7 +24742,7 @@ pub fn _mm256_maskz_shuffle_f32x4(k: __mmask8, a: __m256, b: __
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shuffle_f64x2<const MASK: i32>(a: __m512d, b: __m512d) -> __m512d {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24773,7 +24773,7 @@ pub fn _mm512_shuffle_f64x2(a: __m512d, b: __m512d) -> __m512d
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shuffle_f64x2<const MASK: i32>(
     src: __m512d,
     k: __mmask8,
@@ -24794,7 +24794,7 @@ pub fn _mm512_mask_shuffle_f64x2(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shuffle_f64x2<const MASK: i32>(k: __mmask8, a: __m512d, b: __m512d) -> __m512d {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24810,7 +24810,7 @@ pub fn _mm512_maskz_shuffle_f64x2(k: __mmask8, a: __m512d, b: _
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshuff64x2
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shuffle_f64x2<const MASK: i32>(a: __m256d, b: __m256d) -> __m256d {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24837,7 +24837,7 @@ pub fn _mm256_shuffle_f64x2(a: __m256d, b: __m256d) -> __m256d
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b11))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shuffle_f64x2<const MASK: i32>(
     src: __m256d,
     k: __mmask8,
@@ -24858,7 +24858,7 @@ pub fn _mm256_mask_shuffle_f64x2(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b11))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shuffle_f64x2<const MASK: i32>(k: __mmask8, a: __m256d, b: __m256d) -> __m256d {
     unsafe {
         static_assert_uimm_bits!(MASK, 8);
@@ -24874,7 +24874,7 @@ pub fn _mm256_maskz_shuffle_f64x2(k: __mmask8, a: __m256d, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 3))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_extractf32x4_ps<const IMM8: i32>(a: __m512) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 2);
@@ -24894,7 +24894,7 @@ pub fn _mm512_extractf32x4_ps(a: __m512) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 3))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_extractf32x4_ps<const IMM8: i32>(src: __m128, k: __mmask8, a: __m512) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 2);
@@ -24910,7 +24910,7 @@ pub fn _mm512_mask_extractf32x4_ps(src: __m128, k: __mmask8, a:
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 3))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_extractf32x4_ps<const IMM8: i32>(k: __mmask8, a: __m512) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 2);
@@ -24929,7 +24929,7 @@ pub fn _mm512_maskz_extractf32x4_ps(k: __mmask8, a: __m512) ->
     test,
     assert_instr(vextract, IMM8 = 1) //should be vextractf32x4
 )]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm256_extractf32x4_ps<const IMM8: i32>(a: __m256) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -24947,7 +24947,7 @@ pub fn _mm256_extractf32x4_ps(a: __m256) -> __m128 {
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_mask_extractf32x4_ps<const IMM8: i32>(src: __m128, k: __mmask8, a: __m256) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -24963,7 +24963,7 @@ pub fn _mm256_mask_extractf32x4_ps(src: __m128, k: __mmask8, a:
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_maskz_extractf32x4_ps<const IMM8: i32>(k: __mmask8, a: __m256) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -24982,7 +24982,7 @@ pub fn _mm256_maskz_extractf32x4_ps(k: __mmask8, a: __m256) ->
     test,
     assert_instr(vextractf64x4, IMM1 = 1) //should be vextracti64x4
 )]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_extracti64x4_epi64<const IMM1: i32>(a: __m512i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM1, 1);
@@ -25000,7 +25000,7 @@ pub fn _mm512_extracti64x4_epi64(a: __m512i) -> __m256i {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextracti64x4, IMM1 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_extracti64x4_epi64<const IMM1: i32>(
     src: __m256i,
     k: __mmask8,
@@ -25020,7 +25020,7 @@ pub fn _mm512_mask_extracti64x4_epi64(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextracti64x4, IMM1 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_extracti64x4_epi64<const IMM1: i32>(k: __mmask8, a: __m512i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM1, 1);
@@ -25036,7 +25036,7 @@ pub fn _mm512_maskz_extracti64x4_epi64(k: __mmask8, a: __m512i)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextractf64x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_extractf64x4_pd<const IMM8: i32>(a: __m512d) -> __m256d {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -25054,7 +25054,7 @@ pub fn _mm512_extractf64x4_pd(a: __m512d) -> __m256d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextractf64x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_extractf64x4_pd<const IMM8: i32>(
     src: __m256d,
     k: __mmask8,
@@ -25074,7 +25074,7 @@ pub fn _mm512_mask_extractf64x4_pd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextractf64x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_extractf64x4_pd<const IMM8: i32>(k: __mmask8, a: __m512d) -> __m256d {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -25093,7 +25093,7 @@ pub fn _mm512_maskz_extractf64x4_pd(k: __mmask8, a: __m512d) ->
     test,
     assert_instr(vextractf32x4, IMM2 = 3) //should be vextracti32x4
 )]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm512_extracti32x4_epi32<const IMM2: i32>(a: __m512i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM2, 2);
@@ -25116,7 +25116,7 @@ pub fn _mm512_extracti32x4_epi32(a: __m512i) -> __m128i {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextracti32x4, IMM2 = 3))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_mask_extracti32x4_epi32<const IMM2: i32>(
     src: __m128i,
     k: __mmask8,
@@ -25136,7 +25136,7 @@ pub fn _mm512_mask_extracti32x4_epi32(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextracti32x4, IMM2 = 3))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_maskz_extracti32x4_epi32<const IMM2: i32>(k: __mmask8, a: __m512i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM2, 2);
@@ -25155,7 +25155,7 @@ pub fn _mm512_maskz_extracti32x4_epi32(k: __mmask8, a: __m512i)
     test,
     assert_instr(vextract, IMM1 = 1) //should be vextracti32x4
 )]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm256_extracti32x4_epi32<const IMM1: i32>(a: __m256i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM1, 1);
@@ -25176,7 +25176,7 @@ pub fn _mm256_extracti32x4_epi32(a: __m256i) -> __m128i {
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextracti32x4, IMM1 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_mask_extracti32x4_epi32<const IMM1: i32>(
     src: __m128i,
     k: __mmask8,
@@ -25196,7 +25196,7 @@ pub fn _mm256_mask_extracti32x4_epi32(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vextracti32x4, IMM1 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_maskz_extracti32x4_epi32<const IMM1: i32>(k: __mmask8, a: __m256i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM1, 1);
@@ -25510,7 +25510,7 @@ pub fn _mm_maskz_movedup_pd(k: __mmask8, a: __m128d) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] //should be vinserti32x4
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_inserti32x4<const IMM8: i32>(a: __m512i, b: __m128i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 2);
@@ -25553,7 +25553,7 @@ pub fn _mm512_inserti32x4(a: __m512i, b: __m128i) -> __m512i {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 2))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_inserti32x4<const IMM8: i32>(
     src: __m512i,
     k: __mmask16,
@@ -25574,7 +25574,7 @@ pub fn _mm512_mask_inserti32x4(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 2))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_inserti32x4<const IMM8: i32>(k: __mmask16, a: __m512i, b: __m128i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 2);
@@ -25593,7 +25593,7 @@ pub fn _mm512_maskz_inserti32x4(k: __mmask16, a: __m512i, b: __
     test,
     assert_instr(vinsert, IMM8 = 1) //should be vinserti32x4
 )]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_inserti32x4<const IMM8: i32>(a: __m256i, b: __m128i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -25614,7 +25614,7 @@ pub fn _mm256_inserti32x4(a: __m256i, b: __m128i) -> __m256i {
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_inserti32x4<const IMM8: i32>(
     src: __m256i,
     k: __mmask8,
@@ -25635,7 +25635,7 @@ pub fn _mm256_mask_inserti32x4(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_inserti32x4<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m128i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
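Same calling pattern for the extracts: the 128-bit-lane index is the const generic (IMM2 is two bits for the 512-bit form, so 0..=3). Hypothetical usage of ours, not from the diff:

    #[cfg(target_arch = "x86_64")]
    fn demo_extract() {
        use core::arch::x86_64::*;
        if std::arch::is_x86_feature_detected!("avx512f") {
            // SAFETY: avx512f availability was verified at runtime.
            unsafe {
                let v = _mm512_set1_epi32(7);
                // Pull out the highest 128-bit lane.
                let lane: __m128i = _mm512_extracti32x4_epi32::<3>(v);
                let _ = lane;
            }
        }
    }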
@@ -25651,7 +25651,7 @@ pub fn _mm256_maskz_inserti32x4(k: __mmask8, a: __m256i, b: __m
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] //should be vinserti64x4
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_inserti64x4<const IMM8: i32>(a: __m512i, b: __m256i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -25670,7 +25670,7 @@ pub fn _mm512_inserti64x4(a: __m512i, b: __m256i) -> __m512i {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinserti64x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_inserti64x4<const IMM8: i32>(
     src: __m512i,
     k: __mmask8,
@@ -25691,7 +25691,7 @@ pub fn _mm512_mask_inserti64x4(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinserti64x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_inserti64x4<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m256i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -25707,7 +25707,7 @@ pub fn _mm512_maskz_inserti64x4(k: __mmask8, a: __m512i, b: __m
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_insertf32x4<const IMM8: i32>(a: __m512, b: __m128) -> __m512 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 2);
@@ -25748,7 +25748,7 @@ pub fn _mm512_insertf32x4(a: __m512, b: __m128) -> __m512 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_insertf32x4<const IMM8: i32>(
     src: __m512,
     k: __mmask16,
@@ -25769,7 +25769,7 @@ pub fn _mm512_mask_insertf32x4(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_insertf32x4<const IMM8: i32>(k: __mmask16, a: __m512, b: __m128) -> __m512 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 2);
@@ -25788,7 +25788,7 @@ pub fn _mm512_maskz_insertf32x4(k: __mmask16, a: __m512, b: __m
     test,
     assert_instr(vinsert, IMM8 = 1) //should be vinsertf32x4
 )]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_insertf32x4<const IMM8: i32>(a: __m256, b: __m128) -> __m256 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -25807,7 +25807,7 @@ pub fn _mm256_insertf32x4(a: __m256, b: __m128) -> __m256 {
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_insertf32x4<const IMM8: i32>(
     src: __m256,
     k: __mmask8,
@@ -25828,7 +25828,7 @@ pub fn _mm256_mask_insertf32x4(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_insertf32x4<const IMM8: i32>(k: __mmask8, a: __m256, b: __m128) -> __m256 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -25844,7 +25844,7 @@ pub fn _mm256_maskz_insertf32x4(k: __mmask8, a: __m256, b: __m1
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_insertf64x4<const IMM8: i32>(a: __m512d, b: __m256d) -> __m512d {
     unsafe {
         static_assert_uimm_bits!(IMM8, 1);
@@ -25863,7 +25863,7 @@ pub fn _mm512_insertf64x4(a: __m512d, b: __m256d) -> __m512d {
 #[target_feature(enable = "avx512f")]
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_insertf64x4( src: __m512d, k: __mmask8, @@ -25884,7 +25884,7 @@ pub fn _mm512_mask_insertf64x4( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_insertf64x4(k: __mmask8, a: __m512d, b: __m256d) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 1); @@ -27710,7 +27710,7 @@ pub fn _mm_mask_blend_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -27791,7 +27791,7 @@ pub fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] -#[rustc_legacy_const_generics(4)] + pub fn _mm512_mask_alignr_epi32( src: __m512i, k: __mmask16, @@ -27812,7 +27812,7 @@ pub fn _mm512_mask_alignr_epi32( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm512_maskz_alignr_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -27830,7 +27830,7 @@ pub fn _mm512_maskz_alignr_epi32(k: __mmask16, a: __m512i, b: _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] -#[rustc_legacy_const_generics(2)] + pub fn _mm256_alignr_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -27859,7 +27859,7 @@ pub fn _mm256_alignr_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] -#[rustc_legacy_const_generics(4)] + pub fn _mm256_mask_alignr_epi32( src: __m256i, k: __mmask8, @@ -27880,7 +27880,7 @@ pub fn _mm256_mask_alignr_epi32( #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] -#[rustc_legacy_const_generics(3)] + pub fn _mm256_maskz_alignr_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -27898,7 +27898,7 @@ pub fn _mm256_maskz_alignr_epi32(k: __mmask8, a: __m256i, b: __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignd -#[rustc_legacy_const_generics(2)] + pub fn _mm_alignr_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); @@ -27923,7 +27923,7 @@ pub fn _mm_alignr_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] -#[rustc_legacy_const_generics(4)] + pub fn _mm_mask_alignr_epi32( src: __m128i, k: __mmask8, @@ -27944,7 +27944,7 @@ pub fn 
@@ -27944,7 +27944,7 @@ pub fn _mm_mask_alignr_epi32(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_alignr_epi32<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -27962,7 +27962,7 @@ pub fn _mm_maskz_alignr_epi32(k: __mmask8, a: __m128i, b: __m12
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_alignr_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -27989,7 +27989,7 @@ pub fn _mm512_alignr_epi64(a: __m512i, b: __m512i) -> __m512i {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_alignr_epi64<const IMM8: i32>(
     src: __m512i,
     k: __mmask8,
@@ -28010,7 +28010,7 @@ pub fn _mm512_mask_alignr_epi64(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_alignr_epi64<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -28028,7 +28028,7 @@ pub fn _mm512_maskz_alignr_epi64(k: __mmask8, a: __m512i, b: __
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_alignr_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -28051,7 +28051,7 @@ pub fn _mm256_alignr_epi64(a: __m256i, b: __m256i) -> __m256i {
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_alignr_epi64<const IMM8: i32>(
     src: __m256i,
     k: __mmask8,
@@ -28072,7 +28072,7 @@ pub fn _mm256_mask_alignr_epi64(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_alignr_epi64<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -28090,7 +28090,7 @@ pub fn _mm256_maskz_alignr_epi64(k: __mmask8, a: __m256i, b: __
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignq
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_alignr_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -28111,7 +28111,7 @@ pub fn _mm_alignr_epi64(a: __m128i, b: __m128i) -> __m128i {
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_alignr_epi64<const IMM8: i32>(
     src: __m128i,
     k: __mmask8,
@@ -28132,7 +28132,7 @@ pub fn _mm_mask_alignr_epi64(
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_alignr_epi64<const IMM8: i32>(k:
__mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -29231,7 +29231,7 @@ pub fn _kortestz_mask16_u8(a: __mmask16, b: __mmask16) -> u8 {
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kshiftli_mask16)
 #[inline]
 #[target_feature(enable = "avx512f")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _kshiftli_mask16<const COUNT: u32>(a: __mmask16) -> __mmask16 {
     a << COUNT
@@ -29242,7 +29242,7 @@ pub fn _kshiftli_mask16(a: __mmask16) -> __mmask16 {
 /// [Intel's Documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kshiftri_mask16)
 #[inline]
 #[target_feature(enable = "avx512f")]
-#[rustc_legacy_const_generics(1)]
+
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 pub fn _kshiftri_mask16<const COUNT: u32>(a: __mmask16) -> __mmask16 {
     a >> COUNT
@@ -30221,7 +30221,7 @@ pub fn _mm512_mask_cmpneq_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmas
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm512_cmp_ps_mask<const IMM8: i32>(a: __m512, b: __m512) -> __mmask16 {
     unsafe {
@@ -30240,7 +30240,7 @@ pub fn _mm512_cmp_ps_mask(a: __m512, b: __m512) -> __mmask16 {
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm512_mask_cmp_ps_mask<const IMM8: i32>(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 {
     unsafe {
@@ -30258,7 +30258,7 @@ pub fn _mm512_mask_cmp_ps_mask(k1: __mmask16, a: __m512, b: __m
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm256_cmp_ps_mask<const IMM8: i32>(a: __m256, b: __m256) -> __mmask8 {
     unsafe {
@@ -30277,7 +30277,7 @@ pub fn _mm256_cmp_ps_mask(a: __m256, b: __m256) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm256_mask_cmp_ps_mask<const IMM8: i32>(k1: __mmask8, a: __m256, b: __m256) -> __mmask8 {
     unsafe {
@@ -30295,7 +30295,7 @@ pub fn _mm256_mask_cmp_ps_mask(k1: __mmask8, a: __m256, b: __m2
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm_cmp_ps_mask<const IMM8: i32>(a: __m128, b: __m128) -> __mmask8 {
     unsafe {
@@ -30314,7 +30314,7 @@ pub fn _mm_cmp_ps_mask(a: __m128, b: __m128) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm_mask_cmp_ps_mask<const IMM8: i32>(k1: __mmask8, a: __m128, b: __m128) -> __mmask8 {
     unsafe {
@@ -30334,7 +30334,7 @@ pub fn _mm_mask_cmp_ps_mask(k1: __mmask8, a: __m128, b: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
-#[rustc_legacy_const_generics(2, 3)]
+
 pub fn _mm512_cmp_round_ps_mask<const IMM5: i32, const SAE: i32>(
     a: __m512,
     b: __m512,
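_kshiftli_mask16/_kshiftri_mask16 are plain shifts on the 16-bit mask value, as the `a << COUNT` body above shows. An illustration of ours (assuming COUNT is the shift amount in bits):

    #[cfg(target_arch = "x86_64")]
    fn demo_kshift() {
        use core::arch::x86_64::*;
        if std::arch::is_x86_feature_detected!("avx512f") {
            // SAFETY: avx512f availability was verified at runtime.
            unsafe {
                let k: __mmask16 = 0b0000_0000_0000_0111;
                let hi = _kshiftli_mask16::<4>(k); // 0b0000_0000_0111_0000
                let lo = _kshiftri_mask16::<2>(k); // 0b0000_0000_0000_0001
                let _ = (hi, lo);
            }
        }
    }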
@@ -30358,7 +30358,7 @@
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
-#[rustc_legacy_const_generics(3, 4)]
+
 pub fn _mm512_mask_cmp_round_ps_mask<const IMM5: i32, const SAE: i32>(
     m: __mmask16,
     a: __m512,
@@ -30556,7 +30556,7 @@ pub fn _mm512_mask_cmpneq_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __mma
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm512_cmp_pd_mask<const IMM8: i32>(a: __m512d, b: __m512d) -> __mmask8 {
     unsafe {
@@ -30575,7 +30575,7 @@ pub fn _mm512_cmp_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm512_mask_cmp_pd_mask<const IMM8: i32>(k1: __mmask8, a: __m512d, b: __m512d) -> __mmask8 {
     unsafe {
@@ -30593,7 +30593,7 @@ pub fn _mm512_mask_cmp_pd_mask(k1: __mmask8, a: __m512d, b: __m
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm256_cmp_pd_mask<const IMM8: i32>(a: __m256d, b: __m256d) -> __mmask8 {
     unsafe {
@@ -30612,7 +30612,7 @@ pub fn _mm256_cmp_pd_mask(a: __m256d, b: __m256d) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm256_mask_cmp_pd_mask<const IMM8: i32>(k1: __mmask8, a: __m256d, b: __m256d) -> __mmask8 {
     unsafe {
@@ -30630,7 +30630,7 @@ pub fn _mm256_mask_cmp_pd_mask(k1: __mmask8, a: __m256d, b: __m
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm_cmp_pd_mask<const IMM8: i32>(a: __m128d, b: __m128d) -> __mmask8 {
     unsafe {
@@ -30649,7 +30649,7 @@ pub fn _mm_cmp_pd_mask(a: __m128d, b: __m128d) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm_mask_cmp_pd_mask<const IMM8: i32>(k1: __mmask8, a: __m128d, b: __m128d) -> __mmask8 {
     unsafe {
@@ -30669,7 +30669,7 @@ pub fn _mm_mask_cmp_pd_mask(k1: __mmask8, a: __m128d, b: __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
-#[rustc_legacy_const_generics(2, 3)]
+
 pub fn _mm512_cmp_round_pd_mask<const IMM5: i32, const SAE: i32>(
     a: __m512d,
     b: __m512d,
@@ -30693,7 +30693,7 @@ pub fn _mm512_cmp_round_pd_mask(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
-#[rustc_legacy_const_generics(3, 4)]
+
 pub fn _mm512_mask_cmp_round_pd_mask<const IMM5: i32, const SAE: i32>(
     k1: __mmask8,
     a: __m512d,
@@ -30759,7 +30759,7 @@ pub fn _mm512_mask_cmpunord_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __m
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm_cmp_ss_mask<const IMM8: i32>(a: __m128, b: __m128) -> __mmask8 {
     unsafe {
@@ -30776,7 +30776,7 @@ pub fn _mm_cmp_ss_mask(a: __m128, b: __m128) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm_mask_cmp_ss_mask<const IMM8: i32>(k1: __mmask8, a: __m128, b: __m128) -> __mmask8 {
     unsafe {
@@ -30794,7 +30794,7 @@ pub fn _mm_mask_cmp_ss_mask(k1: __mmask8, a: __m128, b: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
-#[rustc_legacy_const_generics(2, 3)]
+
 pub fn _mm_cmp_round_ss_mask<const IMM5: i32, const SAE: i32>(a: __m128, b: __m128) -> __mmask8 {
     unsafe {
         static_assert_uimm_bits!(IMM5, 5);
@@ -30813,7 +30813,7 @@ pub fn _mm_cmp_round_ss_mask(a: __m128, b: __m1
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
-#[rustc_legacy_const_generics(3, 4)]
+
 pub fn _mm_mask_cmp_round_ss_mask<const IMM5: i32, const SAE: i32>(
     k1: __mmask8,
     a: __m128,
@@ -30833,7 +30833,7 @@ pub fn _mm_mask_cmp_round_ss_mask(
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm_cmp_sd_mask<const IMM8: i32>(a: __m128d, b: __m128d) -> __mmask8 {
     unsafe {
@@ -30850,7 +30850,7 @@ pub fn _mm_cmp_sd_mask(a: __m128d, b: __m128d) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
 pub fn _mm_mask_cmp_sd_mask<const IMM8: i32>(k1: __mmask8, a: __m128d, b: __m128d) -> __mmask8 {
     unsafe {
@@ -30868,7 +30868,7 @@ pub fn _mm_mask_cmp_sd_mask(k1: __mmask8, a: __m128d, b: __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
-#[rustc_legacy_const_generics(2, 3)]
+
 pub fn _mm_cmp_round_sd_mask<const IMM5: i32, const SAE: i32>(a: __m128d, b: __m128d) -> __mmask8 {
     unsafe {
         static_assert_uimm_bits!(IMM5, 5);
@@ -30887,7 +30887,7 @@ pub fn _mm_cmp_round_sd_mask(a: __m128d, b: __m
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
-#[rustc_legacy_const_generics(3, 4)]
+
 pub fn _mm_mask_cmp_round_sd_mask<const IMM5: i32, const SAE: i32>(
     k1: __mmask8,
     a: __m128d,
@@ -31303,7 +31303,7 @@ pub fn _mm_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm512_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m512i, b: __m512i) -> __mmask16 {
     unsafe {
@@ -31330,7 +31330,7 @@ pub fn _mm512_cmp_epu32_mask(a: __m512i, b: __m512i
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm512_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask16,
@@ -31362,7 +31362,7 @@ pub fn _mm512_mask_cmp_epu32_mask(
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm256_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m256i, b: __m256i) -> __mmask8 {
     unsafe {
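The *_cmp_round_* masks take two immediates, a 5-bit comparison predicate and an SAE (suppress-all-exceptions) control; with the legacy positional form gone, both now ride in the turbofish. A sketch using the predefined predicate constants (ours, not from the diff):

    #[cfg(target_arch = "x86_64")]
    fn demo_cmp_round() {
        use core::arch::x86_64::*;
        if std::arch::is_x86_feature_detected!("avx512f") {
            // SAFETY: avx512f availability was verified at runtime.
            unsafe {
                let a = _mm_set1_ps(1.0);
                let b = _mm_set1_ps(2.0);
                // Ordered signaling less-than, with exceptions suppressed.
                let m = _mm_cmp_round_ss_mask::<_CMP_LT_OS, _MM_FROUND_NO_EXC>(a, b);
                let _ = m;
            }
        }
    }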
@@ -31389,7 +31389,7 @@ pub fn _mm256_cmp_epu32_mask(a: __m256i, b: __m256i
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm256_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -31421,7 +31421,7 @@ pub fn _mm256_mask_cmp_epu32_mask(
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m128i) -> __mmask8 {
     unsafe {
@@ -31448,7 +31448,7 @@ pub fn _mm_cmp_epu32_mask(a: __m128i, b: __m128i) -
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -31876,7 +31876,7 @@ pub fn _mm_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm512_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m512i, b: __m512i) -> __mmask16 {
     unsafe {
@@ -31903,7 +31903,7 @@ pub fn _mm512_cmp_epi32_mask(a: __m512i, b: __m512i
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm512_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask16,
@@ -31935,7 +31935,7 @@ pub fn _mm512_mask_cmp_epi32_mask(
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm256_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m256i, b: __m256i) -> __mmask8 {
     unsafe {
@@ -31962,7 +31962,7 @@ pub fn _mm256_cmp_epi32_mask(a: __m256i, b: __m256i
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm256_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -31994,7 +31994,7 @@ pub fn _mm256_mask_cmp_epi32_mask(
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m128i) -> __mmask8 {
     unsafe {
@@ -32021,7 +32021,7 @@ pub fn _mm_cmp_epi32_mask(a: __m128i, b: __m128i) -
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -32449,7 +32449,7 @@ pub fn _mm_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm512_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m512i, b: __m512i) -> __mmask8 {
     unsafe {
@@ -32476,7 +32476,7 @@ pub fn _mm512_cmp_epu64_mask(a: __m512i, b: __m512i
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm512_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -32508,7 +32508,7 @@ pub fn _mm512_mask_cmp_epu64_mask(
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm256_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m256i, b: __m256i) -> __mmask8 {
     unsafe {
@@ -32535,7 +32535,7 @@ pub fn _mm256_cmp_epu64_mask(a: __m256i, b: __m256i
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm256_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -32567,7 +32567,7 @@ pub fn _mm256_mask_cmp_epu64_mask(
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m128i) -> __mmask8 {
     unsafe {
@@ -32594,7 +32594,7 @@ pub fn _mm_cmp_epu64_mask(a: __m128i, b: __m128i) -
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -33022,7 +33022,7 @@ pub fn _mm_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm512_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m512i, b: __m512i) -> __mmask8 {
     unsafe {
@@ -33049,7 +33049,7 @@ pub fn _mm512_cmp_epi64_mask(a: __m512i, b: __m512i
 #[inline]
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm512_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -33081,7 +33081,7 @@ pub fn _mm512_mask_cmp_epi64_mask(
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm256_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m256i, b: __m256i) -> __mmask8 {
     unsafe {
@@ -33108,7 +33108,7 @@ pub fn _mm256_cmp_epi64_mask(a: __m256i, b: __m256i
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm256_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
@@ -33140,7 +33140,7 @@ pub fn _mm256_mask_cmp_epi64_mask(
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(2)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m128i) -> __mmask8 {
     unsafe {
@@ -33167,7 +33167,7 @@ pub fn _mm_cmp_epi64_mask(a: __m128i, b: __m128i) -
 #[inline]
 #[target_feature(enable = "avx512f,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
-#[rustc_legacy_const_generics(3)]
+
 #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
 pub fn _mm_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
     k1: __mmask8,
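The integer compares above take a 3-bit operator immediate, for which stdarch ships the _MM_CMPINT_* constants. A usage sketch of ours:

    #[cfg(target_arch = "x86_64")]
    fn demo_cmp_epu32() {
        use core::arch::x86_64::*;
        if std::arch::is_x86_feature_detected!("avx512f") {
            // SAFETY: avx512f availability was verified at runtime.
            unsafe {
                let a = _mm512_set1_epi32(1);
                let b = _mm512_set1_epi32(2);
                // Unsigned a <= b, one result bit per 32-bit element.
                let m: __mmask16 = _mm512_cmp_epu32_mask::<_MM_CMPINT_LE>(a, b);
                let _ = m;
            }
        }
    }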
@@ -36992,7 +36992,7 @@ pub fn _mm_maskz_getexp_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0))]
-#[rustc_legacy_const_generics(2, 3)]
+
 pub fn _mm_getmant_ss<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM>(
     a: __m128,
     b: __m128,
@@ -37031,7 +37031,7 @@ pub fn _mm_getmant_ss(
     a: __m128d,
     b: __m128d,
@@ -37151,7 +37151,7 @@ pub fn _mm_getmant_sd(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -37259,7 +37259,7 @@ pub fn _mm_roundscale_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_roundscale_ss<const IMM8: i32>(
     src: __m128,
     k: __mmask8,
@@ -37289,7 +37289,7 @@ pub fn _mm_mask_roundscale_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_roundscale_ss<const IMM8: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -37313,7 +37313,7 @@ pub fn _mm_maskz_roundscale_ss(k: __mmask8, a: __m128, b: __m12
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 255))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_roundscale_sd<const IMM8: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -37344,7 +37344,7 @@ pub fn _mm_roundscale_sd(a: __m128d, b: __m128d) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_roundscale_sd<const IMM8: i32>(
     src: __m128d,
     k: __mmask8,
@@ -37374,7 +37374,7 @@ pub fn _mm_mask_roundscale_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_roundscale_sd<const IMM8: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -38000,7 +38000,7 @@ pub fn _mm_mask3_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_add_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38025,7 +38025,7 @@ pub fn _mm_add_round_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_add_round_ss<const ROUNDING: i32>(
     src: __m128,
     k: __mmask8,
@@ -38056,7 +38056,7 @@ pub fn _mm_mask_add_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_add_round_ss<const ROUNDING: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38081,7 +38081,7 @@ pub fn _mm_maskz_add_round_ss(k: __mmask8, a: __m128, b: __
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_add_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38106,7 +38106,7 @@ pub fn _mm_add_round_sd(a: __m128d, b: __m128d) -> __m128d
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_add_round_sd<const ROUNDING: i32>(
     src: __m128d,
     k: __mmask8,
@@ -38137,7 +38137,7 @@ pub fn _mm_mask_add_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_add_round_sd<const ROUNDING: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38162,7 +38162,7 @@ pub fn _mm_maskz_add_round_sd(k: __mmask8, a: __m128d, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_sub_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38187,7 +38187,7 @@ pub fn _mm_sub_round_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_sub_round_ss<const ROUNDING: i32>(
     src: __m128,
     k: __mmask8,
@@ -38218,7 +38218,7 @@ pub fn _mm_mask_sub_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_sub_round_ss<const ROUNDING: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38243,7 +38243,7 @@ pub fn _mm_maskz_sub_round_ss(k: __mmask8, a: __m128, b: __
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_sub_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38268,7 +38268,7 @@ pub fn _mm_sub_round_sd(a: __m128d, b: __m128d) -> __m128d
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_sub_round_sd<const ROUNDING: i32>(
     src: __m128d,
     k: __mmask8,
@@ -38299,7 +38299,7 @@ pub fn _mm_mask_sub_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_sub_round_sd<const ROUNDING: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38324,7 +38324,7 @@ pub fn _mm_maskz_sub_round_sd(k: __mmask8, a: __m128d, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_mul_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38349,7 +38349,7 @@ pub fn _mm_mul_round_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_mul_round_ss<const ROUNDING: i32>(
     src: __m128,
     k: __mmask8,
@@ -38380,7 +38380,7 @@ pub fn _mm_mask_mul_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_mul_round_ss<const ROUNDING: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38405,7 +38405,7 @@ pub fn _mm_maskz_mul_round_ss(k: __mmask8, a: __m128, b: __
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_mul_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38430,7 +38430,7 @@ pub fn _mm_mul_round_sd(a: __m128d, b: __m128d) -> __m128d
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_mul_round_sd<const ROUNDING: i32>(
     src: __m128d,
     k: __mmask8,
@@ -38461,7 +38461,7 @@ pub fn _mm_mask_mul_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_mul_round_sd<const ROUNDING: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38486,7 +38486,7 @@ pub fn _mm_maskz_mul_round_sd(k: __mmask8, a: __m128d, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_div_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38511,7 +38511,7 @@ pub fn _mm_div_round_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_div_round_ss<const ROUNDING: i32>(
     src: __m128,
     k: __mmask8,
@@ -38542,7 +38542,7 @@ pub fn _mm_mask_div_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_div_round_ss<const ROUNDING: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38567,7 +38567,7 @@ pub fn _mm_maskz_div_round_ss(k: __mmask8, a: __m128, b: __
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_div_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38592,7 +38592,7 @@ pub fn _mm_div_round_sd(a: __m128d, b: __m128d) -> __m128d
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_div_round_sd<const ROUNDING: i32>(
     src: __m128d,
     k: __mmask8,
@@ -38623,7 +38623,7 @@ pub fn _mm_mask_div_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_div_round_sd<const ROUNDING: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38642,7 +38642,7 @@ pub fn _mm_maskz_div_round_sd(k: __mmask8, a: __m128d, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmaxss, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_max_round_ss<const SAE: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_sae!(SAE);
@@ -38661,7 +38661,7 @@ pub fn _mm_max_round_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmaxss, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_max_round_ss<const SAE: i32>(
     src: __m128,
     k: __mmask8,
@@ -38686,7 +38686,7 @@ pub fn _mm_mask_max_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmaxss, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_max_round_ss<const SAE: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_sae!(SAE);
@@ -38705,7 +38705,7 @@ pub fn _mm_maskz_max_round_ss(k: __mmask8, a: __m128, b: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_max_round_sd<const SAE: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_sae!(SAE);
@@ -38724,7 +38724,7 @@ pub fn _mm_max_round_sd(a: __m128d, b: __m128d) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_max_round_sd<const SAE: i32>(
     src: __m128d,
     k: __mmask8,
@@ -38749,7 +38749,7 @@ pub fn _mm_mask_max_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_max_round_sd<const SAE: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_sae!(SAE);
@@ -38768,7 +38768,7 @@ pub fn _mm_maskz_max_round_sd(k: __mmask8, a: __m128d, b: __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vminss, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_min_round_ss<const SAE: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_sae!(SAE);
@@ -38787,7 +38787,7 @@ pub fn _mm_min_round_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vminss, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_min_round_ss<const SAE: i32>(
     src: __m128,
     k: __mmask8,
@@ -38812,7 +38812,7 @@ pub fn _mm_mask_min_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vminss, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_min_round_ss<const SAE: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_sae!(SAE);
@@ -38831,7 +38831,7 @@ pub fn _mm_maskz_min_round_ss(k: __mmask8, a: __m128, b: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vminsd, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_min_round_sd<const SAE: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_sae!(SAE);
@@ -38850,7 +38850,7 @@ pub fn _mm_min_round_sd(a: __m128d, b: __m128d) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vminsd, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_min_round_sd<const SAE: i32>(
     src: __m128d,
     k: __mmask8,
@@ -38875,7 +38875,7 @@ pub fn _mm_mask_min_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vminsd, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_min_round_sd<const SAE: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_sae!(SAE);
@@ -38900,7 +38900,7 @@ pub fn _mm_maskz_min_round_sd(k: __mmask8, a: __m128d, b: __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_sqrt_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38922,7 +38922,7 @@ pub fn _mm_sqrt_round_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_sqrt_round_ss<const ROUNDING: i32>(
     src: __m128,
     k: __mmask8,
@@ -38949,7 +38949,7 @@ pub fn _mm_mask_sqrt_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_sqrt_round_ss<const ROUNDING: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38971,7 +38971,7 @@ pub fn _mm_maskz_sqrt_round_ss(k: __mmask8, a: __m128, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_sqrt_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -38993,7 +38993,7 @@ pub fn _mm_sqrt_round_sd(a: __m128d, b: __m128d) -> __m128d
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_sqrt_round_sd<const ROUNDING: i32>(
     src: __m128d,
     k: __mmask8,
@@ -39020,7 +39020,7 @@ pub fn _mm_mask_sqrt_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_sqrt_round_sd<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128d,
@@ -39040,7 +39040,7 @@ pub fn _mm_maskz_sqrt_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_getexp_round_ss<const SAE: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_sae!(SAE);
@@ -39059,7 +39059,7 @@ pub fn _mm_getexp_round_ss(a: __m128, b: __m128) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_getexp_round_ss<const SAE: i32>(
     src: __m128,
     k: __mmask8,
@@ -39084,7 +39084,7 @@ pub fn _mm_mask_getexp_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_getexp_round_ss<const SAE: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_sae!(SAE);
@@ -39103,7 +39103,7 @@ pub fn _mm_maskz_getexp_round_ss(k: __mmask8, a: __m128, b: __m1
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_getexp_round_sd<const SAE: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_sae!(SAE);
@@ -39122,7 +39122,7 @@ pub fn _mm_getexp_round_sd(a: __m128d, b: __m128d) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_getexp_round_sd<const SAE: i32>(
     src: __m128d,
     k: __mmask8,
@@ -39147,7 +39147,7 @@ pub fn _mm_mask_getexp_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_getexp_round_sd<const SAE: i32>(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_sae!(SAE);
@@ -39175,7 +39175,7 @@ pub fn _mm_maskz_getexp_round_sd(k: __mmask8, a: __m128d, b: __m
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))]
-#[rustc_legacy_const_generics(2, 3, 4)]
+
 pub fn _mm_getmant_round_ss<
     const NORM: _MM_MANTISSA_NORM_ENUM,
     const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -39212,7 +39212,7 @@ pub fn _mm_getmant_round_ss<
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))]
-#[rustc_legacy_const_generics(4, 5, 6)]
+
 pub fn _mm_mask_getmant_round_ss<
     const NORM: _MM_MANTISSA_NORM_ENUM,
     const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -39252,7 +39252,7 @@ pub fn _mm_mask_getmant_round_ss<
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))]
-#[rustc_legacy_const_generics(3, 4, 5)]
+
 pub fn _mm_maskz_getmant_round_ss<
     const NORM: _MM_MANTISSA_NORM_ENUM,
     const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -39290,7 +39290,7 @@ pub fn _mm_maskz_getmant_round_ss<
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))]
-#[rustc_legacy_const_generics(2, 3, 4)]
+
 pub fn _mm_getmant_round_sd<
     const NORM: _MM_MANTISSA_NORM_ENUM,
     const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -39327,7 +39327,7 @@ pub fn _mm_getmant_round_sd<
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))]
-#[rustc_legacy_const_generics(4, 5, 6)]
+
 pub fn _mm_mask_getmant_round_sd<
     const NORM: _MM_MANTISSA_NORM_ENUM,
     const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -39367,7 +39367,7 @@ pub fn _mm_mask_getmant_round_sd<
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))]
-#[rustc_legacy_const_generics(3, 4, 5)]
+
 pub fn _mm_maskz_getmant_round_sd<
     const NORM: _MM_MANTISSA_NORM_ENUM,
     const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -39402,7 +39402,7 @@ pub fn _mm_maskz_getmant_round_sd<
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(2, 3)]
+
 pub fn _mm_roundscale_round_ss<const IMM8: i32, const SAE: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -39428,7 +39428,7 @@ pub fn _mm_roundscale_round_ss(a: __m128, b: __
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(4, 5)]
+
 pub fn _mm_mask_roundscale_round_ss<const IMM8: i32, const SAE: i32>(
     src: __m128,
     k: __mmask8,
@@ -39460,7 +39460,7 @@ pub fn _mm_mask_roundscale_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(3, 4)]
+
 pub fn _mm_maskz_roundscale_round_ss<const IMM8: i32, const SAE: i32>(
     k: __mmask8,
     a: __m128,
@@ -39490,7 +39490,7 @@ pub fn _mm_maskz_roundscale_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(2, 3)]
+
 pub fn _mm_roundscale_round_sd<const IMM8: i32, const SAE: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -39516,7 +39516,7 @@ pub fn _mm_roundscale_round_sd(a: __m128d, b: _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(4, 5)]
+
 pub fn _mm_mask_roundscale_round_sd<const IMM8: i32, const SAE: i32>(
     src: __m128d,
     k: __mmask8,
@@ -39548,7 +39548,7 @@ pub fn _mm_mask_roundscale_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(3, 4)]
+
 pub fn _mm_maskz_roundscale_round_sd<const IMM8: i32, const SAE: i32>(
     k: __mmask8,
     a: __m128d,
@@ -39578,7 +39578,7 @@ pub fn _mm_maskz_roundscale_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_scalef_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -39603,7 +39603,7 @@ pub fn _mm_scalef_round_ss(a: __m128, b: __m128) -> __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_scalef_round_ss<const ROUNDING: i32>(
     src: __m128,
     k: __mmask8,
@@ -39634,7 +39634,7 @@ pub fn _mm_mask_scalef_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_scalef_round_ss<const ROUNDING: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -39659,7 +39659,7 @@ pub fn _mm_maskz_scalef_round_ss(k: __mmask8, a: __m128, b:
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_scalef_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -39684,7 +39684,7 @@ pub fn _mm_scalef_round_sd(a: __m128d, b: __m128d) -> __m12
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_scalef_round_sd<const ROUNDING: i32>(
     src: __m128d,
     k: __mmask8,
@@ -39714,7 +39714,7 @@ pub fn _mm_mask_scalef_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_scalef_round_sd<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128d,
@@ -39743,7 +39743,7 @@ pub fn _mm_maskz_scalef_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fmadd_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -39769,7 +39769,7 @@ pub fn _mm_fmadd_round_ss(a: __m128, b: __m128, c: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fmadd_round_ss<const ROUNDING: i32>(
     a: __m128,
     k: __mmask8,
@@ -39802,7 +39802,7 @@ pub fn _mm_mask_fmadd_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fmadd_round_ss<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128,
@@ -39836,7 +39836,7 @@ pub fn _mm_maskz_fmadd_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask3_fmadd_round_ss<const ROUNDING: i32>(
     a: __m128,
     b: __m128,
@@ -39869,7 +39869,7 @@ pub fn _mm_mask3_fmadd_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fmadd_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -39895,7 +39895,7 @@ pub fn _mm_fmadd_round_sd(a: __m128d, b: __m128d, c: __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fmadd_round_sd<const ROUNDING: i32>(
     a: __m128d,
     k: __mmask8,
@@ -39928,7 +39928,7 @@ pub fn _mm_mask_fmadd_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fmadd_round_sd<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128d,
@@ -39962,7 +39962,7 @@ pub fn _mm_maskz_fmadd_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask3_fmadd_round_sd<const ROUNDING: i32>(
     a: __m128d,
     b: __m128d,
@@ -39995,7 +39995,7 @@ pub fn _mm_mask3_fmadd_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fmsub_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -40022,7 +40022,7 @@ pub fn _mm_fmsub_round_ss(a: __m128, b: __m128, c: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fmsub_round_ss<const ROUNDING: i32>(
     a: __m128,
     k: __mmask8,
@@ -40056,7 +40056,7 @@ pub fn _mm_mask_fmsub_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fmsub_round_ss<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128,
@@ -40091,7 +40091,7 @@ pub fn _mm_maskz_fmsub_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask3_fmsub_round_ss<const ROUNDING: i32>(
     a: __m128,
     b: __m128,
@@ -40125,7 +40125,7 @@ pub fn _mm_mask3_fmsub_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fmsub_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -40152,7 +40152,7 @@ pub fn _mm_fmsub_round_sd(a: __m128d, b: __m128d, c: __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fmsub_round_sd<const ROUNDING: i32>(
     a: __m128d,
     k: __mmask8,
@@ -40186,7 +40186,7 @@ pub fn _mm_mask_fmsub_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fmsub_round_sd<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128d,
@@ -40221,7 +40221,7 @@ pub fn _mm_maskz_fmsub_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask3_fmsub_round_sd<const ROUNDING: i32>(
     a: __m128d,
     b: __m128d,
@@ -40255,7 +40255,7 @@ pub fn _mm_mask3_fmsub_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fnmadd_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -40282,7 +40282,7 @@ pub fn _mm_fnmadd_round_ss(a: __m128, b: __m128, c: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fnmadd_round_ss<const ROUNDING: i32>(
     a: __m128,
     k: __mmask8,
@@ -40316,7 +40316,7 @@ pub fn _mm_mask_fnmadd_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fnmadd_round_ss<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128,
@@ -40351,7 +40351,7 @@ pub fn _mm_maskz_fnmadd_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask3_fnmadd_round_ss<const ROUNDING: i32>(
     a: __m128,
     b: __m128,
@@ -40385,7 +40385,7 @@ pub fn _mm_mask3_fnmadd_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fnmadd_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -40412,7 +40412,7 @@ pub fn _mm_fnmadd_round_sd(a: __m128d, b: __m128d, c: __m12
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fnmadd_round_sd<const ROUNDING: i32>(
     a: __m128d,
     k: __mmask8,
@@ -40446,7 +40446,7 @@ pub fn _mm_mask_fnmadd_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fnmadd_round_sd<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128d,
@@ -40481,7 +40481,7 @@ pub fn _mm_maskz_fnmadd_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask3_fnmadd_round_sd<const ROUNDING: i32>(
     a: __m128d,
     b: __m128d,
@@ -40515,7 +40515,7 @@ pub fn _mm_mask3_fnmadd_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fnmsub_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c: __m128) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -40543,7 +40543,7 @@ pub fn _mm_fnmsub_round_ss(a: __m128, b: __m128, c: __m128)
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fnmsub_round_ss<const ROUNDING: i32>(
     a: __m128,
     k: __mmask8,
@@ -40578,7 +40578,7 @@ pub fn _mm_mask_fnmsub_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fnmsub_round_ss<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128,
@@ -40614,7 +40614,7 @@ pub fn _mm_maskz_fnmsub_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask3_fnmsub_round_ss<const ROUNDING: i32>(
     a: __m128,
     b: __m128,
@@ -40649,7 +40649,7 @@ pub fn _mm_mask3_fnmsub_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fnmsub_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -40677,7 +40677,7 @@ pub fn _mm_fnmsub_round_sd(a: __m128d, b: __m128d, c: __m12
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fnmsub_round_sd<const ROUNDING: i32>(
     a: __m128d,
     k: __mmask8,
@@ -40712,7 +40712,7 @@ pub fn _mm_mask_fnmsub_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fnmsub_round_sd<const ROUNDING: i32>(
     k: __mmask8,
     a: __m128d,
@@ -40748,7 +40748,7 @@ pub fn _mm_maskz_fnmsub_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask3_fnmsub_round_sd<const ROUNDING: i32>(
     a: __m128d,
     b: __m128d,
@@ -40776,7 +40776,7 @@ pub fn _mm_mask3_fnmsub_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fixupimm_ss<const IMM8: i32>(a: __m128, b: __m128, c: __m128i) -> __m128 {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -40797,7 +40797,7 @@ pub fn _mm_fixupimm_ss(a: __m128, b: __m128, c: __m128i) -> __m
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fixupimm_ss<const IMM8: i32>(
     a: __m128,
     k: __mmask8,
@@ -40823,7 +40823,7 @@ pub fn _mm_mask_fixupimm_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fixupimm_ss<const IMM8: i32>(
     k: __mmask8,
     a: __m128,
@@ -40849,7 +40849,7 @@ pub fn _mm_maskz_fixupimm_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_fixupimm_sd<const IMM8: i32>(a: __m128d, b: __m128d, c: __m128i) -> __m128d {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -40870,7 +40870,7 @@ pub fn _mm_fixupimm_sd(a: __m128d, b: __m128d, c: __m128i) -> _
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_fixupimm_sd<const IMM8: i32>(
     a: __m128d,
     k: __mmask8,
@@ -40896,7 +40896,7 @@ pub fn _mm_mask_fixupimm_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_maskz_fixupimm_sd<const IMM8: i32>(
     k: __mmask8,
     a: __m128d,
@@ -40923,7 +40923,7 @@ pub fn _mm_maskz_fixupimm_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(3, 4)]
+
 pub fn _mm_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
     a: __m128,
     b: __m128,
@@ -40950,7 +40950,7 @@ pub fn _mm_fixupimm_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(4, 5)]
+
 pub fn _mm_mask_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
     a: __m128,
     k: __mmask8,
@@ -40978,7 +40978,7 @@ pub fn _mm_mask_fixupimm_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(4, 5)]
+
 pub fn _mm_maskz_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
     k: __mmask8,
     a: __m128,
@@ -41006,7 +41006,7 @@ pub fn _mm_maskz_fixupimm_round_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(3, 4)]
+
 pub fn _mm_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
     a: __m128d,
     b: __m128d,
@@ -41033,7 +41033,7 @@ pub fn _mm_fixupimm_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(4, 5)]
+
 pub fn _mm_mask_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
     a: __m128d,
     k: __mmask8,
@@ -41061,7 +41061,7 @@ pub fn _mm_mask_fixupimm_round_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(4, 5)]
+
 pub fn _mm_maskz_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
     k: __mmask8,
     a: __m128d,
@@ -41165,7 +41165,7 @@ pub fn _mm_maskz_cvtsd_ss(k: __mmask8, a: __m128, b: __m128d) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_cvt_roundss_sd<const SAE: i32>(a: __m128d, b: __m128) -> __m128d {
     unsafe {
         static_assert_sae!(SAE);
@@ -41184,7 +41184,7 @@ pub fn _mm_cvt_roundss_sd(a: __m128d, b: __m128) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_cvt_roundss_sd<const SAE: i32>(
     src: __m128d,
     k: __mmask8,
@@ -41209,7 +41209,7 @@ pub fn _mm_mask_cvt_roundss_sd(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_cvt_roundss_sd<const SAE: i32>(k: __mmask8, a: __m128d, b: __m128) -> __m128d {
     unsafe {
         static_assert_sae!(SAE);
@@ -41233,7 +41233,7 @@ pub fn _mm_maskz_cvt_roundss_sd(k: __mmask8, a: __m128d, b: __m1
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_cvt_roundsd_ss<const ROUNDING: i32>(a: __m128, b: __m128d) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -41257,7 +41257,7 @@ pub fn _mm_cvt_roundsd_ss(a: __m128, b: __m128d) -> __m128
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_cvt_roundsd_ss<const ROUNDING: i32>(
     src: __m128,
     k: __mmask8,
@@ -41287,7 +41287,7 @@ pub fn _mm_mask_cvt_roundsd_ss(
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_cvt_roundsd_ss<const ROUNDING: i32>(k: __mmask8, a: __m128, b: __m128d) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -41311,7 +41311,7 @@ pub fn _mm_maskz_cvt_roundsd_ss(k: __mmask8, a: __m128, b:
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm_cvt_roundss_si32<const ROUNDING: i32>(a: __m128) -> i32 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -41333,7 +41333,7 @@ pub fn _mm_cvt_roundss_si32(a: __m128) -> i32 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 pub fn _mm_cvt_roundss_i32<const ROUNDING: i32>(a: __m128) -> i32 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -41355,7 +41355,7 @@ pub fn _mm_cvt_roundss_i32(a: __m128) -> i32 {
 #[target_feature(enable = "avx512f")]
"avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtss2usi, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvt_roundss_u32(a: __m128) -> u32 { unsafe { static_assert_rounding!(ROUNDING); @@ -41399,7 +41399,7 @@ pub fn _mm_cvtss_u32(a: __m128) -> u32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvt_roundsd_si32(a: __m128d) -> i32 { unsafe { static_assert_rounding!(ROUNDING); @@ -41421,7 +41421,7 @@ pub fn _mm_cvt_roundsd_si32(a: __m128d) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvt_roundsd_i32(a: __m128d) -> i32 { unsafe { static_assert_rounding!(ROUNDING); @@ -41443,7 +41443,7 @@ pub fn _mm_cvt_roundsd_i32(a: __m128d) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsd2usi, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvt_roundsd_u32(a: __m128d) -> u32 { unsafe { static_assert_rounding!(ROUNDING); @@ -41488,7 +41488,7 @@ pub fn _mm_cvtsd_u32(a: __m128d) -> u32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_cvt_roundi32_ss(a: __m128, b: i32) -> __m128 { unsafe { static_assert_rounding!(ROUNDING); @@ -41512,7 +41512,7 @@ pub fn _mm_cvt_roundi32_ss(a: __m128, b: i32) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_cvt_roundsi32_ss(a: __m128, b: i32) -> __m128 { unsafe { static_assert_rounding!(ROUNDING); @@ -41535,7 +41535,7 @@ pub fn _mm_cvt_roundsi32_ss(a: __m128, b: i32) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtusi2ss, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + pub fn _mm_cvt_roundu32_ss(a: __m128, b: u32) -> __m128 { unsafe { static_assert_rounding!(ROUNDING); @@ -41581,7 +41581,7 @@ pub fn _mm_cvti32_sd(a: __m128d, b: i32) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttss2si, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvtt_roundss_si32(a: __m128) -> i32 { unsafe { static_assert_sae!(SAE); @@ -41598,7 +41598,7 @@ pub fn _mm_cvtt_roundss_si32(a: __m128) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttss2si, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvtt_roundss_i32(a: __m128) -> i32 { unsafe { static_assert_sae!(SAE); @@ -41615,7 +41615,7 @@ pub fn _mm_cvtt_roundss_i32(a: __m128) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttss2usi, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvtt_roundss_u32(a: __m128) -> u32 { unsafe { static_assert_sae!(SAE); @@ -41654,7 +41654,7 @@ pub fn _mm_cvttss_u32(a: __m128) -> u32 { #[target_feature(enable = 
"avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttsd2si, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvtt_roundsd_si32(a: __m128d) -> i32 { unsafe { static_assert_sae!(SAE); @@ -41671,7 +41671,7 @@ pub fn _mm_cvtt_roundsd_si32(a: __m128d) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttsd2si, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvtt_roundsd_i32(a: __m128d) -> i32 { unsafe { static_assert_sae!(SAE); @@ -41688,7 +41688,7 @@ pub fn _mm_cvtt_roundsd_i32(a: __m128d) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttsd2usi, SAE = 8))] -#[rustc_legacy_const_generics(1)] + pub fn _mm_cvtt_roundsd_u32(a: __m128d) -> u32 { unsafe { static_assert_sae!(SAE); @@ -41755,7 +41755,7 @@ pub fn _mm_cvtu32_sd(a: __m128d, b: u32) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 5, SAE = 4))] //should be vcomiss -#[rustc_legacy_const_generics(2, 3)] + pub fn _mm_comi_round_ss(a: __m128, b: __m128) -> i32 { unsafe { static_assert_uimm_bits!(IMM5, 5); @@ -41774,7 +41774,7 @@ pub fn _mm_comi_round_ss(a: __m128, b: __m128) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 5, SAE = 4))] //should be vcomisd -#[rustc_legacy_const_generics(2, 3)] + pub fn _mm_comi_round_sd(a: __m128d, b: __m128d) -> i32 { unsafe { static_assert_uimm_bits!(IMM5, 5); diff --git a/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs b/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs index 8c914803c665d..22be00870d588 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs @@ -686,7 +686,7 @@ macro_rules! 
cmp_asm { // FIXME: use LLVM intrinsics /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ph_mask) #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_cmp_ph_mask(a: __m128h, b: __m128h) -> __mmask8 { unsafe { @@ -702,7 +702,7 @@ pub fn _mm_cmp_ph_mask(a: __m128h, b: __m128h) -> __mmask8 { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ph_mask) #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_cmp_ph_mask(k1: __mmask8, a: __m128h, b: __m128h) -> __mmask8 { unsafe { @@ -717,7 +717,7 @@ pub fn _mm_mask_cmp_ph_mask(k1: __mmask8, a: __m128h, b: __m128 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ph_mask) #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm256_cmp_ph_mask(a: __m256h, b: __m256h) -> __mmask16 { unsafe { @@ -733,7 +733,7 @@ pub fn _mm256_cmp_ph_mask(a: __m256h, b: __m256h) -> __mmask16 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_ph_mask) #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm256_mask_cmp_ph_mask( k1: __mmask16, @@ -752,7 +752,7 @@ pub fn _mm256_mask_cmp_ph_mask( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_ph_mask) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_cmp_ph_mask(a: __m512h, b: __m512h) -> __mmask32 { unsafe { @@ -768,7 +768,7 @@ pub fn _mm512_cmp_ph_mask(a: __m512h, b: __m512h) -> __mmask32 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_ph_mask) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_cmp_ph_mask( k1: __mmask32, @@ -789,7 +789,7 @@ pub fn _mm512_mask_cmp_ph_mask( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(2, 3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_cmp_round_ph_mask( a: __m512h, @@ -824,7 +824,7 @@ pub fn _mm512_cmp_round_ph_mask( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(3, 4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_cmp_round_ph_mask( k1: __mmask32, @@ -859,7 +859,7 @@ pub fn _mm512_mask_cmp_round_ph_mask( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_round_sh_mask) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(2, 3)] + #[unstable(feature = 
"stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_cmp_round_sh_mask(a: __m128h, b: __m128h) -> __mmask8 { static_assert_uimm_bits!(IMM5, 5); @@ -874,7 +874,7 @@ pub fn _mm_cmp_round_sh_mask(a: __m128h, b: __m /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_round_sh_mask) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(3, 4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_cmp_round_sh_mask( k1: __mmask8, @@ -894,7 +894,7 @@ pub fn _mm_mask_cmp_round_sh_mask( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_sh_mask) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_cmp_sh_mask(a: __m128h, b: __m128h) -> __mmask8 { static_assert_uimm_bits!(IMM5, 5); @@ -907,7 +907,7 @@ pub fn _mm_cmp_sh_mask(a: __m128h, b: __m128h) -> __mmask8 { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_sh_mask) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_cmp_sh_mask(k1: __mmask8, a: __m128h, b: __m128h) -> __mmask8 { static_assert_uimm_bits!(IMM5, 5); @@ -921,7 +921,7 @@ pub fn _mm_mask_cmp_sh_mask(k1: __mmask8, a: __m128h, b: __m128 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comi_round_sh) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(2, 3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_comi_round_sh(a: __m128h, b: __m128h) -> i32 { unsafe { @@ -937,7 +937,7 @@ pub fn _mm_comi_round_sh(a: __m128h, b: __m128h /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comi_sh) #[inline] #[target_feature(enable = "avx512fp16")] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_comi_sh(a: __m128h, b: __m128h) -> i32 { static_assert_uimm_bits!(IMM5, 5); @@ -1469,7 +1469,7 @@ pub fn _mm512_maskz_add_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_add_round_ph(a: __m512h, b: __m512h) -> __m512h { unsafe { @@ -1492,7 +1492,7 @@ pub fn _mm512_add_round_ph(a: __m512h, b: __m512h) -> __m51 #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_add_round_ph( src: __m512h, @@ -1520,7 +1520,7 @@ pub fn _mm512_mask_add_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_add_round_ph( k: __mmask32, @@ -1548,7 +1548,7 @@ pub fn _mm512_maskz_add_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = 
"127213")] pub fn _mm_add_round_sh(a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -1570,7 +1570,7 @@ pub fn _mm_add_round_sh(a: __m128h, b: __m128h) -> __m128h #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_add_round_sh( src: __m128h, @@ -1599,7 +1599,7 @@ pub fn _mm_mask_add_round_sh( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_add_round_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -1780,7 +1780,7 @@ pub fn _mm512_maskz_sub_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_sub_round_ph(a: __m512h, b: __m512h) -> __m512h { unsafe { @@ -1803,7 +1803,7 @@ pub fn _mm512_sub_round_ph(a: __m512h, b: __m512h) -> __m51 #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_sub_round_ph( src: __m512h, @@ -1832,7 +1832,7 @@ pub fn _mm512_mask_sub_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_sub_round_ph( k: __mmask32, @@ -1860,7 +1860,7 @@ pub fn _mm512_maskz_sub_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_sub_round_sh(a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -1882,7 +1882,7 @@ pub fn _mm_sub_round_sh(a: __m128h, b: __m128h) -> __m128h #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_sub_round_sh( src: __m128h, @@ -1911,7 +1911,7 @@ pub fn _mm_mask_sub_round_sh( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_sub_round_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -2092,7 +2092,7 @@ pub fn _mm512_maskz_mul_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mul_round_ph(a: __m512h, b: __m512h) -> __m512h { unsafe { @@ -2115,7 +2115,7 @@ pub fn _mm512_mul_round_ph(a: __m512h, b: __m512h) -> __m51 #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = 
"stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_mul_round_ph( src: __m512h, @@ -2144,7 +2144,7 @@ pub fn _mm512_mask_mul_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_mul_round_ph( k: __mmask32, @@ -2172,7 +2172,7 @@ pub fn _mm512_maskz_mul_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mul_round_sh(a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -2194,7 +2194,7 @@ pub fn _mm_mul_round_sh(a: __m128h, b: __m128h) -> __m128h #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_mul_round_sh( src: __m128h, @@ -2223,7 +2223,7 @@ pub fn _mm_mask_mul_round_sh( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_mul_round_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -2404,7 +2404,7 @@ pub fn _mm512_maskz_div_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_div_round_ph(a: __m512h, b: __m512h) -> __m512h { unsafe { @@ -2427,7 +2427,7 @@ pub fn _mm512_div_round_ph(a: __m512h, b: __m512h) -> __m51 #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_div_round_ph( src: __m512h, @@ -2456,7 +2456,7 @@ pub fn _mm512_mask_div_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_div_round_ph( k: __mmask32, @@ -2484,7 +2484,7 @@ pub fn _mm512_maskz_div_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_div_round_sh(a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -2506,7 +2506,7 @@ pub fn _mm_div_round_sh(a: __m128h, b: __m128h) -> __m128h #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_div_round_sh( src: __m128h, @@ -2535,7 +2535,7 @@ pub fn _mm_mask_div_round_sh( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_div_round_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { 
static_assert_rounding!(ROUNDING); @@ -2713,7 +2713,7 @@ pub fn _mm512_maskz_mul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mul_round_pch(a: __m512h, b: __m512h) -> __m512h { static_assert_rounding!(ROUNDING); @@ -2736,7 +2736,7 @@ pub fn _mm512_mul_round_pch(a: __m512h, b: __m512h) -> __m5 #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_mul_round_pch( src: __m512h, @@ -2772,7 +2772,7 @@ pub fn _mm512_mask_mul_round_pch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_mul_round_pch( k: __mmask16, @@ -2842,7 +2842,7 @@ pub fn _mm_maskz_mul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mul_round_sch(a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -2866,7 +2866,7 @@ pub fn _mm_mul_round_sch(a: __m128h, b: __m128h) -> __m128h #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_mul_round_sch( src: __m128h, @@ -2903,7 +2903,7 @@ pub fn _mm_mask_mul_round_sch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_mul_round_sch( k: __mmask8, @@ -3044,7 +3044,7 @@ pub fn _mm512_maskz_fmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_fmul_round_pch(a: __m512h, b: __m512h) -> __m512h { static_assert_rounding!(ROUNDING); @@ -3066,7 +3066,7 @@ pub fn _mm512_fmul_round_pch(a: __m512h, b: __m512h) -> __m #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_fmul_round_pch( src: __m512h, @@ -3093,7 +3093,7 @@ pub fn _mm512_mask_fmul_round_pch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_fmul_round_pch( k: __mmask16, @@ -3158,7 +3158,7 @@ pub fn _mm_maskz_fmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn 
     static_assert_rounding!(ROUNDING);
@@ -3181,7 +3181,7 @@ pub fn _mm_fmul_round_sch(a: __m128h, b: __m128h) -> __m128
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_fmul_round_sch<const ROUNDING: i32>(
     src: __m128h,
@@ -3209,7 +3209,7 @@ pub fn _mm_mask_fmul_round_sch(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_fmul_round_sch<const ROUNDING: i32>(
     k: __mmask8,
@@ -3363,7 +3363,7 @@ pub fn _mm512_maskz_cmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
     static_assert_rounding!(ROUNDING);
@@ -3387,7 +3387,7 @@ pub fn _mm512_cmul_round_pch(a: __m512h, b: __m512h) -> __m
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cmul_round_pch<const ROUNDING: i32>(
     src: __m512h,
@@ -3424,7 +3424,7 @@ pub fn _mm512_mask_cmul_round_pch(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cmul_round_pch<const ROUNDING: i32>(
     k: __mmask16,
@@ -3492,7 +3492,7 @@ pub fn _mm_maskz_cmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_rounding!(ROUNDING);
@@ -3516,7 +3516,7 @@ pub fn _mm_cmul_round_sch(a: __m128h, b: __m128h) -> __m128
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_cmul_round_sch<const ROUNDING: i32>(
     src: __m128h,
@@ -3553,7 +3553,7 @@ pub fn _mm_mask_cmul_round_sch(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_cmul_round_sch<const ROUNDING: i32>(
     k: __mmask8,
@@ -3706,7 +3706,7 @@ pub fn _mm512_maskz_fcmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_fcmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
     static_assert_rounding!(ROUNDING);
@@ -3730,7 +3730,7 @@ pub fn _mm512_fcmul_round_pch(a: __m512h, b: __m512h) -> __
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
"stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_fcmul_round_pch( src: __m512h, @@ -3759,7 +3759,7 @@ pub fn _mm512_mask_fcmul_round_pch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_fcmul_round_pch( k: __mmask16, @@ -3828,7 +3828,7 @@ pub fn _mm_maskz_fcmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_fcmul_round_sch(a: __m128h, b: __m128h) -> __m128h { static_assert_rounding!(ROUNDING); @@ -3852,7 +3852,7 @@ pub fn _mm_fcmul_round_sch(a: __m128h, b: __m128h) -> __m12 #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_fcmul_round_sch( src: __m128h, @@ -3881,7 +3881,7 @@ pub fn _mm_mask_fcmul_round_sch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_fcmul_round_sch( k: __mmask8, @@ -4264,7 +4264,7 @@ pub fn _mm512_maskz_fmadd_pch(k: __mmask16, a: __m512h, b: __m512h, c: __m512h) #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_fmadd_round_pch(a: __m512h, b: __m512h, c: __m512h) -> __m512h { static_assert_rounding!(ROUNDING); @@ -4288,7 +4288,7 @@ pub fn _mm512_fmadd_round_pch(a: __m512h, b: __m512h, c: __ #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_fmadd_round_pch( a: __m512h, @@ -4320,7 +4320,7 @@ pub fn _mm512_mask_fmadd_round_pch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask3_fmadd_round_pch( a: __m512h, @@ -4357,7 +4357,7 @@ pub fn _mm512_mask3_fmadd_round_pch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_fmadd_round_pch( k: __mmask16, @@ -4452,7 +4452,7 @@ pub fn _mm_maskz_fmadd_sch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> _ #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_fmadd_round_sch(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { @@ -4485,7 +4485,7 @@ pub fn _mm_fmadd_round_sch(a: __m128h, b: __m128h, c: __m12 #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = 
"stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_fmadd_round_sch( a: __m128h, @@ -4519,7 +4519,7 @@ pub fn _mm_mask_fmadd_round_sch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask3_fmadd_round_sch( a: __m128h, @@ -4553,7 +4553,7 @@ pub fn _mm_mask3_fmadd_round_sch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_fmadd_round_sch( k: __mmask8, @@ -4801,7 +4801,7 @@ pub fn _mm512_maskz_fcmadd_pch(k: __mmask16, a: __m512h, b: __m512h, c: __m512h) #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_fcmadd_round_pch(a: __m512h, b: __m512h, c: __m512h) -> __m512h { static_assert_rounding!(ROUNDING); @@ -4826,7 +4826,7 @@ pub fn _mm512_fcmadd_round_pch(a: __m512h, b: __m512h, c: _ #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_fcmadd_round_pch( a: __m512h, @@ -4859,7 +4859,7 @@ pub fn _mm512_mask_fcmadd_round_pch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask3_fcmadd_round_pch( a: __m512h, @@ -4897,7 +4897,7 @@ pub fn _mm512_mask3_fcmadd_round_pch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_fcmadd_round_pch( k: __mmask16, @@ -4998,7 +4998,7 @@ pub fn _mm_maskz_fcmadd_sch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_fcmadd_round_sch(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { @@ -5032,7 +5032,7 @@ pub fn _mm_fcmadd_round_sch(a: __m128h, b: __m128h, c: __m1 #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_fcmadd_round_sch( a: __m128h, @@ -5067,7 +5067,7 @@ pub fn _mm_mask_fcmadd_round_sch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask3_fcmadd_round_sch( a: __m128h, @@ -5102,7 +5102,7 @@ pub fn _mm_mask3_fcmadd_round_sch( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_fcmadd_round_sch( k: __mmask8, @@ -5290,7 +5290,7 @@ pub fn 
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_fmadd_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
     unsafe {
@@ -5315,7 +5315,7 @@ pub fn _mm512_fmadd_round_ph(a: __m512h, b: __m512h, c: __m
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_fmadd_round_ph<const ROUNDING: i32>(
     a: __m512h,
@@ -5345,7 +5345,7 @@ pub fn _mm512_mask_fmadd_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask3_fmadd_round_ph<const ROUNDING: i32>(
     a: __m512h,
@@ -5375,7 +5375,7 @@ pub fn _mm512_mask3_fmadd_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_fmadd_round_ph<const ROUNDING: i32>(
     k: __mmask32,
@@ -5495,7 +5495,7 @@ pub fn _mm_maskz_fmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_fmadd_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
     unsafe {
@@ -5525,7 +5525,7 @@ pub fn _mm_fmadd_round_sh(a: __m128h, b: __m128h, c: __m128
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_fmadd_round_sh<const ROUNDING: i32>(
     a: __m128h,
@@ -5562,7 +5562,7 @@ pub fn _mm_mask_fmadd_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask3_fmadd_round_sh<const ROUNDING: i32>(
     a: __m128h,
@@ -5599,7 +5599,7 @@ pub fn _mm_mask3_fmadd_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_fmadd_round_sh<const ROUNDING: i32>(
     k: __mmask8,
@@ -5789,7 +5789,7 @@ pub fn _mm512_maskz_fmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_fmsub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
     unsafe {
@@ -5814,7 +5814,7 @@ pub fn _mm512_fmsub_round_ph(a: __m512h, b: __m512h, c: __m
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_fmsub_round_ph<const ROUNDING: i32>(
     a: __m512h,
@@ -5844,7 +5844,7 @@ pub fn _mm512_mask_fmsub_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask3_fmsub_round_ph<const ROUNDING: i32>(
     a: __m512h,
@@ -5874,7 +5874,7 @@ pub fn _mm512_mask3_fmsub_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_fmsub_round_ph<const ROUNDING: i32>(
     k: __mmask32,
@@ -5994,7 +5994,7 @@ pub fn _mm_maskz_fmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_fmsub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
     unsafe {
@@ -6024,7 +6024,7 @@ pub fn _mm_fmsub_round_sh(a: __m128h, b: __m128h, c: __m128
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_fmsub_round_sh<const ROUNDING: i32>(
     a: __m128h,
@@ -6061,7 +6061,7 @@ pub fn _mm_mask_fmsub_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask3_fmsub_round_sh<const ROUNDING: i32>(
     a: __m128h,
@@ -6090,7 +6090,7 @@ pub fn _mm_mask3_fmsub_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_fmsub_round_sh<const ROUNDING: i32>(
     k: __mmask8,
@@ -6279,7 +6279,7 @@ pub fn _mm512_maskz_fnmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h)
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_fnmadd_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
     unsafe {
@@ -6304,7 +6304,7 @@ pub fn _mm512_fnmadd_round_ph(a: __m512h, b: __m512h, c: __
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_fnmadd_round_ph<const ROUNDING: i32>(
     a: __m512h,
@@ -6334,7 +6334,7 @@ pub fn _mm512_mask_fnmadd_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask3_fnmadd_round_ph<const ROUNDING: i32>(
     a: __m512h,
@@ -6364,7 +6364,7 @@ pub fn _mm512_mask3_fnmadd_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_fnmadd_round_ph<const ROUNDING: i32>(
     k: __mmask32,
@@ -6484,7 +6484,7 @@ pub fn _mm_maskz_fnmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> _
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_fnmadd_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
     unsafe {
@@ -6514,7 +6514,7 @@ pub fn _mm_fnmadd_round_sh(a: __m128h, b: __m128h, c: __m12
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_fnmadd_round_sh<const ROUNDING: i32>(
     a: __m128h,
@@ -6551,7 +6551,7 @@ pub fn _mm_mask_fnmadd_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask3_fnmadd_round_sh<const ROUNDING: i32>(
     a: __m128h,
@@ -6588,7 +6588,7 @@ pub fn _mm_mask3_fnmadd_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_fnmadd_round_sh<const ROUNDING: i32>(
     k: __mmask8,
@@ -6777,7 +6777,7 @@ pub fn _mm512_maskz_fnmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h)
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_fnmsub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
     unsafe {
@@ -6802,7 +6802,7 @@ pub fn _mm512_fnmsub_round_ph(a: __m512h, b: __m512h, c: __
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_fnmsub_round_ph<const ROUNDING: i32>(
     a: __m512h,
@@ -6832,7 +6832,7 @@ pub fn _mm512_mask_fnmsub_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask3_fnmsub_round_ph<const ROUNDING: i32>(
     a: __m512h,
@@ -6862,7 +6862,7 @@ pub fn _mm512_mask3_fnmsub_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_fnmsub_round_ph<const ROUNDING: i32>(
     k: __mmask32,
@@ -6982,7 +6982,7 @@ pub fn _mm_maskz_fnmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> _
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_fnmsub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
     unsafe {
@@ -7012,7 +7012,7 @@ pub fn _mm_fnmsub_round_sh(a: __m128h, b: __m128h, c: __m12
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_fnmsub_round_sh<const ROUNDING: i32>(
     a: __m128h,
@@ -7049,7 +7049,7 @@ pub fn _mm_mask_fnmsub_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask3_fnmsub_round_sh<const ROUNDING: i32>(
     a: __m128h,
@@ -7086,7 +7086,7 @@ pub fn _mm_mask3_fnmsub_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
"avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_fnmsub_round_sh( k: __mmask8, @@ -7275,7 +7275,7 @@ pub fn _mm512_maskz_fmaddsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_fmaddsub_round_ph( a: __m512h, @@ -7304,7 +7304,7 @@ pub fn _mm512_fmaddsub_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_fmaddsub_round_ph( a: __m512h, @@ -7334,7 +7334,7 @@ pub fn _mm512_mask_fmaddsub_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask3_fmaddsub_round_ph( a: __m512h, @@ -7364,7 +7364,7 @@ pub fn _mm512_mask3_fmaddsub_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_fmaddsub_round_ph( k: __mmask32, @@ -7550,7 +7550,7 @@ pub fn _mm512_maskz_fmsubadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_fmsubadd_round_ph( a: __m512h, @@ -7579,7 +7579,7 @@ pub fn _mm512_fmsubadd_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_fmsubadd_round_ph( a: __m512h, @@ -7609,7 +7609,7 @@ pub fn _mm512_mask_fmsubadd_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask3_fmsubadd_round_ph( a: __m512h, @@ -7639,7 +7639,7 @@ pub fn _mm512_mask3_fmsubadd_round_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_fmsubadd_round_ph( k: __mmask32, @@ -8100,7 +8100,7 @@ pub fn _mm512_maskz_sqrt_ph(k: __mmask32, a: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_sqrt_round_ph(a: __m512h) -> __m512h { unsafe { @@ -8123,7 +8123,7 @@ pub fn _mm512_sqrt_round_ph(a: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_sqrt_round_ph( src: 
     src: __m512h,
@@ -8150,7 +8150,7 @@ pub fn _mm512_mask_sqrt_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_sqrt_round_ph<const ROUNDING: i32>(k: __mmask32, a: __m512h) -> __m512h {
     unsafe {
@@ -8213,7 +8213,7 @@ pub fn _mm_maskz_sqrt_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_sqrt_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_rounding!(ROUNDING);
@@ -8235,7 +8235,7 @@ pub fn _mm_sqrt_round_sh(a: __m128h, b: __m128h) -> __m128h
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_sqrt_round_sh<const ROUNDING: i32>(
     src: __m128h,
@@ -8264,7 +8264,7 @@ pub fn _mm_mask_sqrt_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_sqrt_round_sh<const ROUNDING: i32>(
     k: __mmask8,
@@ -8407,7 +8407,7 @@ pub fn _mm512_maskz_max_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vmaxph, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_max_round_ph<const SAE: i32>(a: __m512h, b: __m512h) -> __m512h {
     unsafe {
@@ -8425,7 +8425,7 @@ pub fn _mm512_max_round_ph(a: __m512h, b: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vmaxph, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_max_round_ph<const SAE: i32>(
     src: __m512h,
@@ -8448,7 +8448,7 @@ pub fn _mm512_mask_max_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vmaxph, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_max_round_ph<const SAE: i32>(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
     unsafe {
@@ -8508,7 +8508,7 @@ pub fn _mm_maskz_max_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_max_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_sae!(SAE);
@@ -8525,7 +8525,7 @@ pub fn _mm_max_round_sh(a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_max_round_sh<const SAE: i32>(
     src: __m128h,
@@ -8549,7 +8549,7 @@ pub fn _mm_mask_max_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_max_round_sh<const SAE: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
     static_assert_sae!(SAE);
@@ -8687,7 +8687,7 @@ pub fn _mm512_maskz_min_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vminph, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_min_round_ph<const SAE: i32>(a: __m512h, b: __m512h) -> __m512h {
     unsafe {
@@ -8705,7 +8705,7 @@ pub fn _mm512_min_round_ph(a: __m512h, b: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vminph, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_min_round_ph<const SAE: i32>(
     src: __m512h,
@@ -8728,7 +8728,7 @@ pub fn _mm512_mask_min_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vminph, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_min_round_ph<const SAE: i32>(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
     unsafe {
@@ -8788,7 +8788,7 @@ pub fn _mm_maskz_min_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_min_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_sae!(SAE);
@@ -8805,7 +8805,7 @@ pub fn _mm_min_round_sh(a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_min_round_sh<const SAE: i32>(
     src: __m128h,
@@ -8829,7 +8829,7 @@ pub fn _mm_mask_min_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_min_round_sh<const SAE: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
     static_assert_sae!(SAE);
@@ -8968,7 +8968,7 @@ pub fn _mm512_maskz_getexp_ph(k: __mmask32, a: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_getexp_round_ph<const SAE: i32>(a: __m512h) -> __m512h {
     static_assert_sae!(SAE);
@@ -8984,7 +8984,7 @@ pub fn _mm512_getexp_round_ph(a: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_getexp_round_ph<const SAE: i32>(
     src: __m512h,
@@ -9006,7 +9006,7 @@ pub fn _mm512_mask_getexp_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_getexp_round_ph<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512h {
     static_assert_sae!(SAE);
@@ -9067,7 +9067,7 @@ pub fn _mm_maskz_getexp_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
issue = "127213")] pub fn _mm_getexp_round_sh(a: __m128h, b: __m128h) -> __m128h { static_assert_sae!(SAE); @@ -9084,7 +9084,7 @@ pub fn _mm_getexp_round_sh(a: __m128h, b: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))] -#[rustc_legacy_const_generics(4)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_getexp_round_sh( src: __m128h, @@ -9108,7 +9108,7 @@ pub fn _mm_mask_getexp_round_sh( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_getexp_round_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { static_assert_sae!(SAE); @@ -9136,7 +9136,7 @@ pub fn _mm_maskz_getexp_round_sh(k: __mmask8, a: __m128h, b: __m #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))] -#[rustc_legacy_const_generics(1, 2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_getmant_ph( a: __m128h, @@ -9168,7 +9168,7 @@ pub fn _mm_getmant_ph( a: __m256h, @@ -9274,7 +9274,7 @@ pub fn _mm256_getmant_ph( a: __m512h, @@ -9380,7 +9380,7 @@ pub fn _mm512_getmant_ph( a: __m128h, @@ -9611,7 +9611,7 @@ pub fn _mm_getmant_sh(a: __m128h) -> __m128h { static_assert_uimm_bits!(IMM8, 8); @@ -9831,7 +9831,7 @@ pub fn _mm_roundscale_ph(a: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_mask_roundscale_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h { unsafe { @@ -9856,7 +9856,7 @@ pub fn _mm_mask_roundscale_ph(src: __m128h, k: __mmask8, a: __m #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_maskz_roundscale_ph(k: __mmask8, a: __m128h) -> __m128h { static_assert_uimm_bits!(IMM8, 8); @@ -9878,7 +9878,7 @@ pub fn _mm_maskz_roundscale_ph(k: __mmask8, a: __m128h) -> __m1 #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))] -#[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm256_roundscale_ph(a: __m256h) -> __m256h { static_assert_uimm_bits!(IMM8, 8); @@ -9901,7 +9901,7 @@ pub fn _mm256_roundscale_ph(a: __m256h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm256_mask_roundscale_ph( src: __m256h, @@ -9930,7 +9930,7 @@ pub fn _mm256_mask_roundscale_ph( #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm256_maskz_roundscale_ph(k: __mmask16, a: __m256h) -> __m256h { static_assert_uimm_bits!(IMM8, 8); @@ -9952,7 +9952,7 @@ pub fn _mm256_maskz_roundscale_ph(k: __mmask16, a: __m256h) -> #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))] 
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_roundscale_ph<const IMM8: i32>(a: __m512h) -> __m512h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -9975,7 +9975,7 @@ pub fn _mm512_roundscale_ph(a: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_roundscale_ph<const IMM8: i32>(
     src: __m512h,
@@ -10002,7 +10002,7 @@ pub fn _mm512_mask_roundscale_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_roundscale_ph<const IMM8: i32>(k: __mmask32, a: __m512h) -> __m512h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10025,7 +10025,7 @@ pub fn _mm512_maskz_roundscale_ph(k: __mmask32, a: __m512h) ->
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(1, 2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_roundscale_round_ph<const IMM8: i32, const SAE: i32>(a: __m512h) -> __m512h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10050,7 +10050,7 @@ pub fn _mm512_roundscale_round_ph(a: __m512h) -
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(3, 4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_roundscale_round_ph<const IMM8: i32, const SAE: i32>(
     src: __m512h,
@@ -10080,7 +10080,7 @@ pub fn _mm512_mask_roundscale_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(2, 3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_roundscale_round_ph<const IMM8: i32, const SAE: i32>(
     k: __mmask32,
@@ -10107,7 +10107,7 @@ pub fn _mm512_maskz_roundscale_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_roundscale_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10130,7 +10130,7 @@ pub fn _mm_roundscale_sh(a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_roundscale_sh<const IMM8: i32>(
     src: __m128h,
@@ -10158,7 +10158,7 @@ pub fn _mm_mask_roundscale_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_roundscale_sh<const IMM8: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10183,7 +10183,7 @@ pub fn _mm_maskz_roundscale_sh(k: __mmask8, a: __m128h, b: __m1
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(2, 3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_roundscale_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10209,7 +10209,7 @@ pub fn _mm_roundscale_round_sh(a: __m128h, b: _
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(4, 5)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
     src: __m128h,
@@ -10242,7 +10242,7 @@ pub fn _mm_mask_roundscale_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(3, 4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
     k: __mmask8,
@@ -10377,7 +10377,7 @@ pub fn _mm512_maskz_scalef_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_scalef_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
     static_assert_rounding!(ROUNDING);
@@ -10399,7 +10399,7 @@ pub fn _mm512_scalef_round_ph(a: __m512h, b: __m512h) -> __
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_scalef_round_ph<const ROUNDING: i32>(
     src: __m512h,
@@ -10428,7 +10428,7 @@ pub fn _mm512_mask_scalef_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_scalef_round_ph<const ROUNDING: i32>(
     k: __mmask32,
@@ -10494,7 +10494,7 @@ pub fn _mm_maskz_scalef_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_scalef_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_rounding!(ROUNDING);
@@ -10517,7 +10517,7 @@ pub fn _mm_scalef_round_sh(a: __m128h, b: __m128h) -> __m12
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_scalef_round_sh<const ROUNDING: i32>(
     src: __m128h,
@@ -10547,7 +10547,7 @@ pub fn _mm_mask_scalef_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_scalef_round_sh<const ROUNDING: i32>(
     k: __mmask8,
@@ -10573,7 +10573,7 @@ pub fn _mm_maskz_scalef_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_reduce_ph<const IMM8: i32>(a: __m128h) -> __m128h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10596,7 +10596,7 @@ pub fn _mm_reduce_ph(a: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_reduce_ph<const IMM8: i32>(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
     unsafe {
@@ -10621,7 +10621,7 @@ pub fn _mm_mask_reduce_ph(src: __m128h, k: __mmask8, a: __m128h
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_reduce_ph<const IMM8: i32>(k: __mmask8, a: __m128h) -> __m128h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10643,7 +10643,7 @@ pub fn _mm_maskz_reduce_ph(k: __mmask8, a: __m128h) -> __m128h
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm256_reduce_ph<const IMM8: i32>(a: __m256h) -> __m256h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10666,7 +10666,7 @@ pub fn _mm256_reduce_ph(a: __m256h) -> __m256h {
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm256_mask_reduce_ph<const IMM8: i32>(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
     unsafe {
@@ -10691,7 +10691,7 @@ pub fn _mm256_mask_reduce_ph(src: __m256h, k: __mmask16, a: __m
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm256_maskz_reduce_ph<const IMM8: i32>(k: __mmask16, a: __m256h) -> __m256h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10713,7 +10713,7 @@ pub fn _mm256_maskz_reduce_ph(k: __mmask16, a: __m256h) -> __m2
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_reduce_ph<const IMM8: i32>(a: __m512h) -> __m512h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10736,7 +10736,7 @@ pub fn _mm512_reduce_ph(a: __m512h) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_reduce_ph<const IMM8: i32>(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10759,7 +10759,7 @@ pub fn _mm512_mask_reduce_ph(src: __m512h, k: __mmask32, a: __m
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_reduce_ph<const IMM8: i32>(k: __mmask32, a: __m512h) -> __m512h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10783,7 +10783,7 @@ pub fn _mm512_maskz_reduce_ph(k: __mmask32, a: __m512h) -> __m5
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(1, 2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_reduce_round_ph<const IMM8: i32, const SAE: i32>(a: __m512h) -> __m512h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10809,7 +10809,7 @@ pub fn _mm512_reduce_round_ph(a: __m512h) -> __
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(3, 4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_reduce_round_ph<const IMM8: i32, const SAE: i32>(
     src: __m512h,
@@ -10841,7 +10841,7 @@ pub fn _mm512_mask_reduce_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(2, 3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_reduce_round_ph<const IMM8: i32, const SAE: i32>(
     k: __mmask32,
@@ -10868,7 +10868,7 @@ pub fn _mm512_maskz_reduce_round_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_reduce_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10892,7 +10892,7 @@ pub fn _mm_reduce_sh(a: __m128h, b: __m128h) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_reduce_sh<const IMM8: i32>(
     src: __m128h,
@@ -10921,7 +10921,7 @@ pub fn _mm_mask_reduce_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_reduce_sh<const IMM8: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10946,7 +10946,7 @@ pub fn _mm_maskz_reduce_sh(k: __mmask8, a: __m128h, b: __m128h)
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(2, 3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_reduce_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
     static_assert_uimm_bits!(IMM8, 8);
@@ -10973,7 +10973,7 @@ pub fn _mm_reduce_round_sh(a: __m128h, b: __m12
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(4, 5)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_reduce_round_sh<const IMM8: i32, const SAE: i32>(
     src: __m128h,
@@ -11007,7 +11007,7 @@ pub fn _mm_mask_reduce_round_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
-#[rustc_legacy_const_generics(3, 4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_reduce_round_sh<const IMM8: i32, const SAE: i32>(
     k: __mmask8,
@@ -11276,7 +11276,7 @@ macro_rules! fpclass_asm { // FIXME: use LLVM intrinsics
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_fpclass_ph_mask<const IMM8: i32>(a: __m128h) -> __mmask8 {
     unsafe {
@@ -11303,7 +11303,7 @@ pub fn _mm_fpclass_ph_mask(a: __m128h) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask8, a: __m128h) -> __mmask8 {
     unsafe {
@@ -11329,7 +11329,7 @@ pub fn _mm_mask_fpclass_ph_mask(k1: __mmask8, a: __m128h) -> __
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm256_fpclass_ph_mask<const IMM8: i32>(a: __m256h) -> __mmask16 {
     unsafe {
@@ -11356,7 +11356,7 @@ pub fn _mm256_fpclass_ph_mask(a: __m256h) -> __mmask16 {
 #[inline]
 #[target_feature(enable = "avx512fp16,avx512vl")]
 #[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm256_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask16, a: __m256h) -> __mmask16 {
     unsafe {
@@ -11382,7 +11382,7 @@ pub fn _mm256_mask_fpclass_ph_mask(k1: __mmask16, a: __m256h) -
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_fpclass_ph_mask<const IMM8: i32>(a: __m512h) -> __mmask32 {
     unsafe {
@@ -11409,7 +11409,7 @@ pub fn _mm512_fpclass_ph_mask(a: __m512h) -> __mmask32 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask32, a: __m512h) -> __mmask32 {
     unsafe {
@@ -11435,7 +11435,7 @@ pub fn _mm512_mask_fpclass_ph_mask(k1: __mmask32, a: __m512h) -
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfpclasssh, IMM8 = 0))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_fpclass_sh_mask<const IMM8: i32>(a: __m128h) -> __mmask8 {
     _mm_mask_fpclass_sh_mask::<IMM8>(0xff, a)
@@ -11459,7 +11459,7 @@ pub fn _mm_fpclass_sh_mask(a: __m128h) -> __mmask8 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vfpclasssh, IMM8 = 0))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_fpclass_sh_mask<const IMM8: i32>(k1: __mmask8, a: __m128h) -> __mmask8 {
     unsafe {
@@ -11705,7 +11705,7 @@ pub fn _mm512_maskz_cvtepi16_ph(k: __mmask32, a: __m512i) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundepi16_ph<const ROUNDING: i32>(a: __m512i) -> __m512h {
     unsafe {
@@ -11730,7 +11730,7 @@ pub fn _mm512_cvt_roundepi16_ph(a: __m512i) -> __m512h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_cvt_roundepi16_ph( src: __m512h, @@ -11758,7 +11758,7 @@ pub fn _mm512_mask_cvt_roundepi16_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_cvt_roundepi16_ph(k: __mmask32, a: __m512i) -> __m512h { static_assert_rounding!(ROUNDING); @@ -11891,7 +11891,7 @@ pub fn _mm512_maskz_cvtepu16_ph(k: __mmask32, a: __m512i) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_cvt_roundepu16_ph(a: __m512i) -> __m512h { unsafe { @@ -11916,7 +11916,7 @@ pub fn _mm512_cvt_roundepu16_ph(a: __m512i) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_cvt_roundepu16_ph( src: __m512h, @@ -11944,7 +11944,7 @@ pub fn _mm512_mask_cvt_roundepu16_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_cvt_roundepu16_ph(k: __mmask32, a: __m512i) -> __m512h { static_assert_rounding!(ROUNDING); @@ -12078,7 +12078,7 @@ pub fn _mm512_maskz_cvtepi32_ph(k: __mmask16, a: __m512i) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_cvt_roundepi32_ph(a: __m512i) -> __m256h { unsafe { @@ -12103,7 +12103,7 @@ pub fn _mm512_cvt_roundepi32_ph(a: __m512i) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_cvt_roundepi32_ph( src: __m256h, @@ -12131,7 +12131,7 @@ pub fn _mm512_mask_cvt_roundepi32_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_cvt_roundepi32_ph(k: __mmask16, a: __m512i) -> __m256h { static_assert_rounding!(ROUNDING); @@ -12167,7 +12167,7 @@ pub fn _mm_cvti32_sh(a: __m128h, b: i32) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtsi2sh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_cvt_roundi32_sh(a: __m128h, b: i32) -> __m128h { unsafe { @@ -12303,7 +12303,7 @@ pub fn _mm512_maskz_cvtepu32_ph(k: __mmask16, a: __m512i) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_cvt_roundepu32_ph(a: __m512i) -> __m256h { unsafe { @@ -12328,7 +12328,7 @@ pub fn _mm512_cvt_roundepu32_ph(a: __m512i) -> __m256h { #[inline] #[target_feature(enable = 
"avx512fp16")] #[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_cvt_roundepu32_ph( src: __m256h, @@ -12356,7 +12356,7 @@ pub fn _mm512_mask_cvt_roundepu32_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_cvt_roundepu32_ph(k: __mmask16, a: __m512i) -> __m256h { static_assert_rounding!(ROUNDING); @@ -12392,7 +12392,7 @@ pub fn _mm_cvtu32_sh(a: __m128h, b: u32) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtusi2sh, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_cvt_roundu32_sh(a: __m128h, b: u32) -> __m128h { unsafe { @@ -12529,7 +12529,7 @@ pub fn _mm512_maskz_cvtepi64_ph(k: __mmask8, a: __m512i) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_cvt_roundepi64_ph(a: __m512i) -> __m128h { unsafe { @@ -12554,7 +12554,7 @@ pub fn _mm512_cvt_roundepi64_ph(a: __m512i) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_cvt_roundepi64_ph( src: __m128h, @@ -12582,7 +12582,7 @@ pub fn _mm512_mask_cvt_roundepi64_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_cvt_roundepi64_ph(k: __mmask8, a: __m512i) -> __m128h { static_assert_rounding!(ROUNDING); @@ -12717,7 +12717,7 @@ pub fn _mm512_maskz_cvtepu64_ph(k: __mmask8, a: __m512i) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_cvt_roundepu64_ph(a: __m512i) -> __m128h { unsafe { @@ -12742,7 +12742,7 @@ pub fn _mm512_cvt_roundepu64_ph(a: __m512i) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(3)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_mask_cvt_roundepu64_ph( src: __m128h, @@ -12770,7 +12770,7 @@ pub fn _mm512_mask_cvt_roundepu64_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))] -#[rustc_legacy_const_generics(2)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_maskz_cvt_roundepu64_ph(k: __mmask8, a: __m512i) -> __m128h { static_assert_rounding!(ROUNDING); @@ -12906,7 +12906,7 @@ pub fn _mm512_maskz_cvtxps_ph(k: __mmask16, a: __m512) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))] -#[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm512_cvtx_roundps_ph(a: __m512) -> __m256h { 
     static_assert_rounding!(ROUNDING);
@@ -12929,7 +12929,7 @@ pub fn _mm512_cvtx_roundps_ph(a: __m512) -> __m256h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvtx_roundps_ph<const ROUNDING: i32>(
     src: __m256h,
@@ -12958,7 +12958,7 @@ pub fn _mm512_mask_cvtx_roundps_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvtx_roundps_ph<const ROUNDING: i32>(k: __mmask16, a: __m512) -> __m256h {
     static_assert_rounding!(ROUNDING);
@@ -13022,7 +13022,7 @@ pub fn _mm_maskz_cvtss_sh(k: __mmask8, a: __m128h, b: __m128) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundss_sh<const ROUNDING: i32>(a: __m128h, b: __m128) -> __m128h {
     static_assert_rounding!(ROUNDING);
@@ -13046,7 +13046,7 @@ pub fn _mm_cvt_roundss_sh(a: __m128h, b: __m128) -> __m128h
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_cvt_roundss_sh<const ROUNDING: i32>(
     src: __m128h,
@@ -13077,7 +13077,7 @@ pub fn _mm_mask_cvt_roundss_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_cvt_roundss_sh<const ROUNDING: i32>(
     k: __mmask8,
@@ -13217,7 +13217,7 @@ pub fn _mm512_maskz_cvtpd_ph(k: __mmask8, a: __m512d) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundpd_ph<const ROUNDING: i32>(a: __m512d) -> __m128h {
     static_assert_rounding!(ROUNDING);
@@ -13240,7 +13240,7 @@ pub fn _mm512_cvt_roundpd_ph(a: __m512d) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvt_roundpd_ph<const ROUNDING: i32>(
     src: __m128h,
@@ -13269,7 +13269,7 @@ pub fn _mm512_mask_cvt_roundpd_ph(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvt_roundpd_ph<const ROUNDING: i32>(k: __mmask8, a: __m512d) -> __m128h {
     static_assert_rounding!(ROUNDING);
@@ -13333,7 +13333,7 @@ pub fn _mm_maskz_cvtsd_sh(k: __mmask8, a: __m128h, b: __m128d) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundsd_sh<const ROUNDING: i32>(a: __m128h, b: __m128d) -> __m128h {
     static_assert_rounding!(ROUNDING);
@@ -13357,7 +13357,7 @@ pub fn _mm_cvt_roundsd_sh(a: __m128h, b: __m128d) -> __m128
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_cvt_roundsd_sh<const ROUNDING: i32>(
     src: __m128h,
@@ -13388,7 +13388,7 @@ pub fn _mm_mask_cvt_roundsd_sh(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_cvt_roundsd_sh<const ROUNDING: i32>(
     k: __mmask8,
@@ -13532,7 +13532,7 @@ pub fn _mm512_maskz_cvtph_epi16(k: __mmask32, a: __m512h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundph_epi16<const ROUNDING: i32>(a: __m512h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -13555,7 +13555,7 @@ pub fn _mm512_cvt_roundph_epi16(a: __m512h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvt_roundph_epi16<const ROUNDING: i32>(
     src: __m512i,
@@ -13583,7 +13583,7 @@ pub fn _mm512_mask_cvt_roundph_epi16(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvt_roundph_epi16<const ROUNDING: i32>(k: __mmask32, a: __m512h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -13717,7 +13717,7 @@ pub fn _mm512_maskz_cvtph_epu16(k: __mmask32, a: __m512h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2uw, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundph_epu16<const SAE: i32>(a: __m512h) -> __m512i {
     static_assert_sae!(SAE);
@@ -13734,7 +13734,7 @@ pub fn _mm512_cvt_roundph_epu16(a: __m512h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2uw, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvt_roundph_epu16<const SAE: i32>(
     src: __m512i,
@@ -13756,7 +13756,7 @@ pub fn _mm512_mask_cvt_roundph_epu16(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2uw, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvt_roundph_epu16<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512i {
     static_assert_sae!(SAE);
@@ -13893,7 +13893,7 @@ pub fn _mm512_maskz_cvttph_epi16(k: __mmask32, a: __m512h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvtt_roundph_epi16<const SAE: i32>(a: __m512h) -> __m512i {
     static_assert_sae!(SAE);
@@ -13910,7 +13910,7 @@ pub fn _mm512_cvtt_roundph_epi16(a: __m512h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvtt_roundph_epi16<const SAE: i32>(
     src: __m512i,
@@ -13933,7 +13933,7 @@ pub fn _mm512_mask_cvtt_roundph_epi16(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvtt_roundph_epi16<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512i {
     static_assert_sae!(SAE);
@@ -14070,7 +14070,7 @@ pub fn _mm512_maskz_cvttph_epu16(k: __mmask32, a: __m512h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvtt_roundph_epu16<const SAE: i32>(a: __m512h) -> __m512i {
     static_assert_sae!(SAE);
@@ -14087,7 +14087,7 @@ pub fn _mm512_cvtt_roundph_epu16(a: __m512h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvtt_roundph_epu16<const SAE: i32>(
     src: __m512i,
@@ -14110,7 +14110,7 @@ pub fn _mm512_mask_cvtt_roundph_epu16(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvtt_roundph_epu16<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512i {
     static_assert_sae!(SAE);
@@ -14247,7 +14247,7 @@ pub fn _mm512_maskz_cvtph_epi32(k: __mmask16, a: __m256h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundph_epi32<const ROUNDING: i32>(a: __m256h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -14269,7 +14269,7 @@ pub fn _mm512_cvt_roundph_epi32(a: __m256h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvt_roundph_epi32<const ROUNDING: i32>(
     src: __m512i,
@@ -14297,7 +14297,7 @@ pub fn _mm512_mask_cvt_roundph_epi32(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvt_roundph_epi32<const ROUNDING: i32>(k: __mmask16, a: __m256h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -14331,7 +14331,7 @@ pub fn _mm_cvtsh_i32(a: __m128h) -> i32 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2si, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundsh_i32<const ROUNDING: i32>(a: __m128h) -> i32 {
     unsafe {
@@ -14470,7 +14470,7 @@ pub fn _mm512_maskz_cvtph_epu32(k: __mmask16, a: __m256h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundph_epu32<const ROUNDING: i32>(a: __m256h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -14492,7 +14492,7 @@ pub fn _mm512_cvt_roundph_epu32(a: __m256h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvt_roundph_epu32<const ROUNDING: i32>(
     src: __m512i,
@@ -14520,7 +14520,7 @@ pub fn _mm512_mask_cvt_roundph_epu32(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvt_roundph_epu32<const ROUNDING: i32>(k: __mmask16, a: __m256h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -14548,7 +14548,7 @@ pub fn _mm_cvtsh_u32(a: __m128h) -> u32 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2usi, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundsh_u32<const SAE: i32>(a: __m128h) -> u32 {
     unsafe {
@@ -14681,7 +14681,7 @@ pub fn _mm512_maskz_cvttph_epi32(k: __mmask16, a: __m256h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvtt_roundph_epi32<const SAE: i32>(a: __m256h) -> __m512i {
     static_assert_sae!(SAE);
@@ -14697,7 +14697,7 @@ pub fn _mm512_cvtt_roundph_epi32(a: __m256h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvtt_roundph_epi32<const SAE: i32>(
     src: __m512i,
@@ -14719,7 +14719,7 @@ pub fn _mm512_mask_cvtt_roundph_epi32(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvtt_roundph_epi32<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512i {
     static_assert_sae!(SAE);
@@ -14747,7 +14747,7 @@ pub fn _mm_cvttsh_i32(a: __m128h) -> i32 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttsh2si, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvtt_roundsh_i32<const SAE: i32>(a: __m128h) -> i32 {
     unsafe {
@@ -14880,7 +14880,7 @@ pub fn _mm512_maskz_cvttph_epu32(k: __mmask16, a: __m256h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvtt_roundph_epu32<const SAE: i32>(a: __m256h) -> __m512i {
     static_assert_sae!(SAE);
@@ -14896,7 +14896,7 @@ pub fn _mm512_cvtt_roundph_epu32(a: __m256h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvtt_roundph_epu32<const SAE: i32>(
     src: __m512i,
@@ -14918,7 +14918,7 @@ pub fn _mm512_mask_cvtt_roundph_epu32(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvtt_roundph_epu32<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512i {
     static_assert_sae!(SAE);
@@ -14946,7 +14946,7 @@ pub fn _mm_cvttsh_u32(a: __m128h) -> u32 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttsh2usi, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvtt_roundsh_u32<const SAE: i32>(a: __m128h) -> u32 {
     unsafe {
@@ -15085,7 +15085,7 @@ pub fn _mm512_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundph_epi64<const ROUNDING: i32>(a: __m128h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -15107,7 +15107,7 @@ pub fn _mm512_cvt_roundph_epi64(a: __m128h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvt_roundph_epi64<const ROUNDING: i32>(
     src: __m512i,
@@ -15135,7 +15135,7 @@ pub fn _mm512_mask_cvt_roundph_epi64(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvt_roundph_epi64<const ROUNDING: i32>(k: __mmask8, a: __m128h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -15272,7 +15272,7 @@ pub fn _mm512_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundph_epu64<const ROUNDING: i32>(a: __m128h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -15294,7 +15294,7 @@ pub fn _mm512_cvt_roundph_epu64(a: __m128h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvt_roundph_epu64<const ROUNDING: i32>(
     src: __m512i,
@@ -15322,7 +15322,7 @@ pub fn _mm512_mask_cvt_roundph_epu64(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvt_roundph_epu64<const ROUNDING: i32>(k: __mmask8, a: __m128h) -> __m512i {
     static_assert_rounding!(ROUNDING);
@@ -15453,7 +15453,7 @@ pub fn _mm512_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvtt_roundph_epi64<const SAE: i32>(a: __m128h) -> __m512i {
     static_assert_sae!(SAE);
@@ -15469,7 +15469,7 @@ pub fn _mm512_cvtt_roundph_epi64(a: __m128h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvtt_roundph_epi64<const SAE: i32>(
     src: __m512i,
@@ -15491,7 +15491,7 @@ pub fn _mm512_mask_cvtt_roundph_epi64(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvtt_roundph_epi64<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512i {
     static_assert_sae!(SAE);
@@ -15622,7 +15622,7 @@ pub fn _mm512_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvtt_roundph_epu64<const SAE: i32>(a: __m128h) -> __m512i {
     static_assert_sae!(SAE);
@@ -15638,7 +15638,7 @@ pub fn _mm512_cvtt_roundph_epu64(a: __m128h) -> __m512i {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvtt_roundph_epu64<const SAE: i32>(
     src: __m512i,
@@ -15660,7 +15660,7 @@ pub fn _mm512_mask_cvtt_roundph_epu64(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvtt_roundph_epu64<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512i {
     static_assert_sae!(SAE);
@@ -15790,7 +15790,7 @@ pub fn _mm512_maskz_cvtxph_ps(k: __mmask16, a: __m256h) -> __m512 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvtx_roundph_ps<const SAE: i32>(a: __m256h) -> __m512 {
     static_assert_sae!(SAE);
@@ -15807,7 +15807,7 @@ pub fn _mm512_cvtx_roundph_ps(a: __m256h) -> __m512 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvtx_roundph_ps<const SAE: i32>(
     src: __m512,
@@ -15830,7 +15830,7 @@ pub fn _mm512_mask_cvtx_roundph_ps(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvtx_roundph_ps<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512 {
     static_assert_sae!(SAE);
@@ -15888,7 +15888,7 @@ pub fn _mm_maskz_cvtsh_ss(k: __mmask8, a: __m128, b: __m128h) -> __m128 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundsh_ss<const SAE: i32>(a: __m128, b: __m128h) -> __m128 {
     static_assert_sae!(SAE);
@@ -15906,7 +15906,7 @@ pub fn _mm_cvt_roundsh_ss(a: __m128, b: __m128h) -> __m128 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_cvt_roundsh_ss<const SAE: i32>(
     src: __m128,
@@ -15931,7 +15931,7 @@ pub fn _mm_mask_cvt_roundsh_ss(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_cvt_roundsh_ss<const SAE: i32>(k: __mmask8, a: __m128, b: __m128h) -> __m128 {
     static_assert_sae!(SAE);
@@ -16061,7 +16061,7 @@ pub fn _mm512_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m512d {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_cvt_roundph_pd<const SAE: i32>(a: __m128h) -> __m512d {
     static_assert_sae!(SAE);
@@ -16078,7 +16078,7 @@ pub fn _mm512_cvt_roundph_pd(a: __m128h) -> __m512d {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_mask_cvt_roundph_pd<const SAE: i32>(
     src: __m512d,
@@ -16101,7 +16101,7 @@ pub fn _mm512_mask_cvt_roundph_pd(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm512_maskz_cvt_roundph_pd<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512d {
     static_assert_sae!(SAE);
@@ -16158,7 +16158,7 @@ pub fn _mm_maskz_cvtsh_sd(k: __mmask8, a: __m128d, b: __m128h) -> __m128d {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
-#[rustc_legacy_const_generics(2)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundsh_sd<const SAE: i32>(a: __m128d, b: __m128h) -> __m128d {
     static_assert_sae!(SAE);
@@ -16176,7 +16176,7 @@ pub fn _mm_cvt_roundsh_sd(a: __m128d, b: __m128h) -> __m128d {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
-#[rustc_legacy_const_generics(4)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_mask_cvt_roundsh_sd<const SAE: i32>(
     src: __m128d,
@@ -16200,7 +16200,7 @@ pub fn _mm_mask_cvt_roundsh_sd(
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
-#[rustc_legacy_const_generics(3)]
+
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_maskz_cvt_roundsh_sd<const SAE: i32>(k: __mmask8, a: __m128d, b: __m128h) -> __m128d {
     static_assert_sae!(SAE);
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs b/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs
index 09a90e29bf088..371b63482ed3c 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs
@@ -1201,7 +1201,7 @@ pub fn _mm_maskz_shrdv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) ->
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shldi_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm512_shldv_epi64(a, b, _mm512_set1_epi64(IMM8 as i64))
@@ -1214,7 +1214,7 @@ pub fn _mm512_shldi_epi64(a: __m512i, b: __m512i) -> __m512i {
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shldi_epi64<const IMM8: i32>(
     src: __m512i,
     k: __mmask8,
@@ -1235,7 +1235,7 @@ pub fn _mm512_mask_shldi_epi64(
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shldi_epi64<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1251,7 +1251,7 @@ pub fn _mm512_maskz_shldi_epi64(k: __mmask8, a: __m512i, b: __m
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shldi_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm256_shldv_epi64(a, b, _mm256_set1_epi64x(IMM8 as i64))
@@ -1264,7 +1264,7 @@ pub fn _mm256_shldi_epi64(a: __m256i, b: __m256i) -> __m256i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shldi_epi64<const IMM8: i32>(
     src: __m256i,
     k: __mmask8,
@@ -1285,7 +1285,7 @@ pub fn _mm256_mask_shldi_epi64(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shldi_epi64<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1301,7 +1301,7 @@ pub fn _mm256_maskz_shldi_epi64(k: __mmask8, a: __m256i, b: __m
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_shldi_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm_shldv_epi64(a, b, _mm_set1_epi64x(IMM8 as i64))
@@ -1314,7 +1314,7 @@ pub fn _mm_shldi_epi64(a: __m128i, b: __m128i) -> __m128i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_shldi_epi64<const IMM8: i32>(
     src: __m128i,
     k: __mmask8,
@@ -1335,7 +1335,7 @@ pub fn _mm_mask_shldi_epi64(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_shldi_epi64<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1351,7 +1351,7 @@ pub fn _mm_maskz_shldi_epi64(k: __mmask8, a: __m128
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shldi_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm512_shldv_epi32(a, b, _mm512_set1_epi32(IMM8))
@@ -1364,7 +1364,7 @@ pub fn _mm512_shldi_epi32(a: __m512i, b: __m512i) -> __m512i {
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shldi_epi32<const IMM8: i32>(
     src: __m512i,
     k: __mmask16,
@@ -1385,7 +1385,7 @@ pub fn _mm512_mask_shldi_epi32(
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shldi_epi32<const IMM8: i32>(k: __mmask16, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1401,7 +1401,7 @@ pub fn _mm512_maskz_shldi_epi32(k: __mmask16, a: __m512i, b: __
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shldi_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm256_shldv_epi32(a, b, _mm256_set1_epi32(IMM8))
@@ -1414,7 +1414,7 @@ pub fn _mm256_shldi_epi32(a: __m256i, b: __m256i) -> __m256i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shldi_epi32<const IMM8: i32>(
     src: __m256i,
     k: __mmask8,
@@ -1435,7 +1435,7 @@ pub fn _mm256_mask_shldi_epi32(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shldi_epi32<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1451,7 +1451,7 @@ pub fn _mm256_maskz_shldi_epi32(k: __mmask8, a: __m256i, b: __m
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_shldi_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm_shldv_epi32(a, b, _mm_set1_epi32(IMM8))
@@ -1464,7 +1464,7 @@ pub fn _mm_shldi_epi32(a: __m128i, b: __m128i) -> __m128i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_shldi_epi32<const IMM8: i32>(
     src: __m128i,
     k: __mmask8,
@@ -1485,7 +1485,7 @@ pub fn _mm_mask_shldi_epi32(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_shldi_epi32<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1501,7 +1501,7 @@ pub fn _mm_maskz_shldi_epi32(k: __mmask8, a: __m128
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shldi_epi16<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm512_shldv_epi16(a, b, _mm512_set1_epi16(IMM8 as i16))
@@ -1514,7 +1514,7 @@ pub fn _mm512_shldi_epi16(a: __m512i, b: __m512i) -> __m512i {
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shldi_epi16<const IMM8: i32>(
     src: __m512i,
     k: __mmask32,
@@ -1535,7 +1535,7 @@ pub fn _mm512_mask_shldi_epi16(
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shldi_epi16<const IMM8: i32>(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1551,7 +1551,7 @@ pub fn _mm512_maskz_shldi_epi16(k: __mmask32, a: __m512i, b: __
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shldi_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm256_shldv_epi16(a, b, _mm256_set1_epi16(IMM8 as i16))
@@ -1564,7 +1564,7 @@ pub fn _mm256_shldi_epi16(a: __m256i, b: __m256i) -> __m256i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shldi_epi16<const IMM8: i32>(
     src: __m256i,
     k: __mmask16,
@@ -1585,7 +1585,7 @@ pub fn _mm256_mask_shldi_epi16(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shldi_epi16<const IMM8: i32>(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1601,7 +1601,7 @@ pub fn _mm256_maskz_shldi_epi16(k: __mmask16, a: __m256i, b: __
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_shldi_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm_shldv_epi16(a, b, _mm_set1_epi16(IMM8 as i16))
@@ -1614,7 +1614,7 @@ pub fn _mm_shldi_epi16(a: __m128i, b: __m128i) -> __m128i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_shldi_epi16<const IMM8: i32>(
     src: __m128i,
     k: __mmask8,
@@ -1635,7 +1635,7 @@ pub fn _mm_mask_shldi_epi16(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_shldi_epi16<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1651,7 +1651,7 @@ pub fn _mm_maskz_shldi_epi16(k: __mmask8, a: __m128
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shrdi_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm512_shrdv_epi64(a, b, _mm512_set1_epi64(IMM8 as i64))
@@ -1664,7 +1664,7 @@ pub fn _mm512_shrdi_epi64(a: __m512i, b: __m512i) -> __m512i {
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shrdi_epi64<const IMM8: i32>(
     src: __m512i,
     k: __mmask8,
@@ -1685,7 +1685,7 @@ pub fn _mm512_mask_shrdi_epi64(
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 255))] //should be vpshrdq
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shrdi_epi64<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1701,7 +1701,7 @@ pub fn _mm512_maskz_shrdi_epi64(k: __mmask8, a: __m512i, b: __m
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shrdi_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm256_shrdv_epi64(a, b, _mm256_set1_epi64x(IMM8 as i64))
@@ -1714,7 +1714,7 @@ pub fn _mm256_shrdi_epi64(a: __m256i, b: __m256i) -> __m256i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shrdi_epi64<const IMM8: i32>(
     src: __m256i,
     k: __mmask8,
@@ -1735,7 +1735,7 @@ pub fn _mm256_mask_shrdi_epi64(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shrdi_epi64<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1751,7 +1751,7 @@ pub fn _mm256_maskz_shrdi_epi64(k: __mmask8, a: __m256i, b: __m
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_shrdi_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm_shrdv_epi64(a, b, _mm_set1_epi64x(IMM8 as i64))
@@ -1764,7 +1764,7 @@ pub fn _mm_shrdi_epi64(a: __m128i, b: __m128i) -> __m128i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_shrdi_epi64<const IMM8: i32>(
     src: __m128i,
     k: __mmask8,
@@ -1785,7 +1785,7 @@ pub fn _mm_mask_shrdi_epi64(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_shrdi_epi64<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1801,7 +1801,7 @@ pub fn _mm_maskz_shrdi_epi64(k: __mmask8, a: __m128
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shrdi_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm512_shrdv_epi32(a, b, _mm512_set1_epi32(IMM8))
@@ -1814,7 +1814,7 @@ pub fn _mm512_shrdi_epi32(a: __m512i, b: __m512i) -> __m512i {
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shrdi_epi32<const IMM8: i32>(
     src: __m512i,
     k: __mmask16,
@@ -1835,7 +1835,7 @@ pub fn _mm512_mask_shrdi_epi32(
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shrdi_epi32<const IMM8: i32>(k: __mmask16, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1851,7 +1851,7 @@ pub fn _mm512_maskz_shrdi_epi32(k: __mmask16, a: __m512i, b: __
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shrdi_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm256_shrdv_epi32(a, b, _mm256_set1_epi32(IMM8))
@@ -1864,7 +1864,7 @@ pub fn _mm256_shrdi_epi32(a: __m256i, b: __m256i) -> __m256i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shrdi_epi32<const IMM8: i32>(
     src: __m256i,
     k: __mmask8,
@@ -1885,7 +1885,7 @@ pub fn _mm256_mask_shrdi_epi32(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shrdi_epi32<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1901,7 +1901,7 @@ pub fn _mm256_maskz_shrdi_epi32(k: __mmask8, a: __m256i, b: __m
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_shrdi_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm_shrdv_epi32(a, b, _mm_set1_epi32(IMM8))
@@ -1914,7 +1914,7 @@ pub fn _mm_shrdi_epi32(a: __m128i, b: __m128i) -> __m128i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_shrdi_epi32<const IMM8: i32>(
     src: __m128i,
     k: __mmask8,
@@ -1935,7 +1935,7 @@ pub fn _mm_mask_shrdi_epi32(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_shrdi_epi32<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -1951,7 +1951,7 @@ pub fn _mm_maskz_shrdi_epi32(k: __mmask8, a: __m128
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm512_shrdi_epi16<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm512_shrdv_epi16(a, b, _mm512_set1_epi16(IMM8 as i16))
@@ -1964,7 +1964,7 @@ pub fn _mm512_shrdi_epi16(a: __m512i, b: __m512i) -> __m512i {
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm512_mask_shrdi_epi16<const IMM8: i32>(
     src: __m512i,
     k: __mmask32,
@@ -1985,7 +1985,7 @@ pub fn _mm512_mask_shrdi_epi16(
 #[target_feature(enable = "avx512vbmi2")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm512_maskz_shrdi_epi16<const IMM8: i32>(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -2001,7 +2001,7 @@ pub fn _mm512_maskz_shrdi_epi16(k: __mmask32, a: __m512i, b: __
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm256_shrdi_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm256_shrdv_epi16(a, b, _mm256_set1_epi16(IMM8 as i16))
@@ -2014,7 +2014,7 @@ pub fn _mm256_shrdi_epi16(a: __m256i, b: __m256i) -> __m256i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm256_mask_shrdi_epi16<const IMM8: i32>(
     src: __m256i,
     k: __mmask16,
@@ -2035,7 +2035,7 @@ pub fn _mm256_mask_shrdi_epi16(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm256_maskz_shrdi_epi16<const IMM8: i32>(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
@@ -2051,7 +2051,7 @@ pub fn _mm256_maskz_shrdi_epi16(k: __mmask16, a: __m256i, b: __
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(2)]
+
 pub fn _mm_shrdi_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
     static_assert_uimm_bits!(IMM8, 8);
     _mm_shrdv_epi16(a, b, _mm_set1_epi16(IMM8 as i16))
@@ -2064,7 +2064,7 @@ pub fn _mm_shrdi_epi16(a: __m128i, b: __m128i) -> __m128i {
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(4)]
+
 pub fn _mm_mask_shrdi_epi16<const IMM8: i32>(
     src: __m128i,
     k: __mmask8,
@@ -2085,7 +2085,7 @@ pub fn _mm_mask_shrdi_epi16(
 #[target_feature(enable = "avx512vbmi2,avx512vl")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
-#[rustc_legacy_const_generics(3)]
+
 pub fn _mm_maskz_shrdi_epi16<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
     unsafe {
         static_assert_uimm_bits!(IMM8, 8);
diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs b/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
index 934c9e2812c42..8e86a683b9ee7 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
@@ -163,7 +163,6 @@ pub fn _mm_cvttss_u64(a: __m128) -> u64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsi2sd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
 pub fn _mm_cvt_roundi64_sd<const ROUNDING: i32>(a: __m128d, b: i64) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -186,7 +185,6 @@ pub fn _mm_cvt_roundi64_sd(a: __m128d, b: i64) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsi2sd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
 pub fn _mm_cvt_roundsi64_sd<const ROUNDING: i32>(a: __m128d, b: i64) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -209,7 +207,6 @@ pub fn _mm_cvt_roundsi64_sd(a: __m128d, b: i64) -> __m128d
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
 pub fn _mm_cvt_roundi64_ss<const ROUNDING: i32>(a: __m128, b: i64) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -232,7 +229,6 @@ pub fn _mm_cvt_roundi64_ss(a: __m128, b: i64) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtusi2sd, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
 pub fn _mm_cvt_roundu64_sd<const ROUNDING: i32>(a: __m128d, b: u64) -> __m128d {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -255,7 +251,6 @@ pub fn _mm_cvt_roundu64_sd(a: __m128d, b: u64) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
 pub fn _mm_cvt_roundsi64_ss<const ROUNDING: i32>(a: __m128, b: i64) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -278,7 +273,6 @@ pub fn _mm_cvt_roundsi64_ss(a: __m128, b: i64) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtusi2ss, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
 pub fn _mm_cvt_roundu64_ss<const ROUNDING: i32>(a: __m128, b: u64) -> __m128 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -301,7 +295,6 @@ pub fn _mm_cvt_roundu64_ss(a: __m128, b: u64) -> __m128 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvt_roundsd_si64<const ROUNDING: i32>(a: __m128d) -> i64 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -323,7 +316,6 @@ pub fn _mm_cvt_roundsd_si64(a: __m128d) -> i64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvt_roundsd_i64<const ROUNDING: i32>(a: __m128d) -> i64 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -345,7 +337,6 @@ pub fn _mm_cvt_roundsd_i64(a: __m128d) -> i64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtsd2usi, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvt_roundsd_u64<const ROUNDING: i32>(a: __m128d) -> u64 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -367,7 +358,6 @@ pub fn _mm_cvt_roundsd_u64(a: __m128d) -> u64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvt_roundss_si64<const ROUNDING: i32>(a: __m128) -> i64 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -389,7 +379,6 @@ pub fn _mm_cvt_roundss_si64(a: __m128) -> i64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvt_roundss_i64<const ROUNDING: i32>(a: __m128) -> i64 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -411,7 +400,6 @@ pub fn _mm_cvt_roundss_i64(a: __m128) -> i64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvtss2usi, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvt_roundss_u64<const ROUNDING: i32>(a: __m128) -> u64 {
     unsafe {
         static_assert_rounding!(ROUNDING);
@@ -428,7 +416,6 @@ pub fn _mm_cvt_roundss_u64(a: __m128) -> u64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvttsd2si, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvtt_roundsd_si64<const SAE: i32>(a: __m128d) -> i64 {
     unsafe {
         static_assert_sae!(SAE);
@@ -445,7 +432,6 @@ pub fn _mm_cvtt_roundsd_si64(a: __m128d) -> i64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvttsd2si, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvtt_roundsd_i64<const SAE: i32>(a: __m128d) -> i64 {
     unsafe {
         static_assert_sae!(SAE);
@@ -462,7 +448,6 @@ pub fn _mm_cvtt_roundsd_i64(a: __m128d) -> i64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvttsd2usi, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvtt_roundsd_u64<const SAE: i32>(a: __m128d) -> u64 {
     unsafe {
         static_assert_sae!(SAE);
@@ -479,7 +464,6 @@ pub fn _mm_cvtt_roundsd_u64(a: __m128d) -> u64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvttss2si, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvtt_roundss_i64<const SAE: i32>(a: __m128) -> i64 {
     unsafe {
         static_assert_sae!(SAE);
@@ -496,7 +480,6 @@ pub fn _mm_cvtt_roundss_i64(a: __m128) -> i64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvttss2si, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvtt_roundss_si64<const SAE: i32>(a: __m128) -> i64 {
     unsafe {
         static_assert_sae!(SAE);
@@ -513,7 +496,6 @@ pub fn _mm_cvtt_roundss_si64(a: __m128) -> i64 {
 #[target_feature(enable = "avx512f")]
 #[stable(feature = "stdarch_x86_avx512", since = "1.89")]
 #[cfg_attr(test, assert_instr(vcvttss2usi, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
 pub fn _mm_cvtt_roundss_u64<const SAE: i32>(a: __m128) -> u64 {
     unsafe {
         static_assert_sae!(SAE);
diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx512fp16.rs b/library/stdarch/crates/core_arch/src/x86_64/avx512fp16.rs
index 955c6ccc7526b..b3827651a1b95 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/avx512fp16.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/avx512fp16.rs
@@ -69,7 +69,6 @@ pub fn _mm_cvtu64_sh(a: __m128h, b: u64) -> __m128h {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtusi2sh, ROUNDING = 8))]
-#[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundu64_sh<const ROUNDING: i32>(a: __m128h, b: u64) -> __m128h {
     unsafe {
@@ -105,7 +104,6 @@ pub fn _mm_cvtsh_i64(a: __m128h) -> i64 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2si, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundsh_i64<const ROUNDING: i32>(a: __m128h) -> i64 {
     unsafe {
@@ -141,7 +139,6 @@ pub fn _mm_cvtsh_u64(a: __m128h) -> u64 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvtsh2usi, ROUNDING = 8))]
-#[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvt_roundsh_u64<const ROUNDING: i32>(a: __m128h) -> u64 {
     unsafe {
@@ -171,7 +168,6 @@ pub fn _mm_cvttsh_i64(a: __m128h) -> i64 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttsh2si, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvtt_roundsh_i64<const SAE: i32>(a: __m128h) -> i64 {
     unsafe {
@@ -201,7 +197,6 @@ pub fn _mm_cvttsh_u64(a: __m128h) -> u64 {
 #[inline]
 #[target_feature(enable = "avx512fp16")]
 #[cfg_attr(test, assert_instr(vcvttsh2usi, SAE = 8))]
-#[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
 pub fn _mm_cvtt_roundsh_u64<const SAE: i32>(a: __m128h) -> u64 {
     unsafe {