// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512vlvbmi2 -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion
// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512vlvbmi2 -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion
// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s
// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512vlvbmi2 -emit-llvm -o %t.ogcg.ll -Wall -Werror -Wsign-conversion
// RUN: FileCheck --check-prefixes=OGCG --input-file=%t.ogcg.ll %s

// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512vlvbmi2 -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion
// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512vlvbmi2 -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion
// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512vlvbmi2 -emit-llvm -o %t.ogcg.ll -Wall -Werror -Wsign-conversion
// RUN: FileCheck --check-prefixes=OGCG --input-file=%t.ogcg.ll %s

#include <immintrin.h>

// Lowering of _mm_mask_compress_epi16 (vpcompressw, merge-masking form).
__m128i test_mm_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D) {
  // CIR-LABEL: test_mm_mask_compress_epi16
  // CIR: %[[MASK8:.+]] = cir.cast bitcast %{{.+}} : !u8i -> !cir.vector<8 x !cir.int<u, 1>>
  // CIR: %[[RES:.+]] = cir.call_llvm_intrinsic "x86.avx512.mask.compress" %{{.+}}, %{{.+}}, %[[MASK8]]: (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>, !cir.vector<8 x !cir.int<u, 1>>) -> !cir.vector<8 x !s16i>
  // CIR: %[[CAST:.+]] = cir.cast bitcast %[[RES]] : !cir.vector<8 x !s16i> -> !cir.vector<2 x !s64i>

  // LLVM-LABEL: test_mm_mask_compress_epi16
  // LLVM: %[[MASK8:.+]] = bitcast i8 %{{.+}} to <8 x i1>
  // LLVM: %[[RES:.+]] = call <8 x i16> @llvm.x86.avx512.mask.compress.v8i16(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i1> %[[MASK8]])
  // LLVM: %[[CAST:.+]] = bitcast <8 x i16> %[[RES]] to <2 x i64>

  // OGCG-LABEL: test_mm_mask_compress_epi16
  // OGCG: %[[MASK8:.+]] = bitcast i8 %{{.+}} to <8 x i1>
  // OGCG: %[[RES:.+]] = call <8 x i16> @llvm.x86.avx512.mask.compress.v8i16(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i1> %[[MASK8]])
  // OGCG: %[[CAST:.+]] = bitcast <8 x i16> %[[RES]] to <2 x i64>

  return _mm_mask_compress_epi16(__S, __U, __D);
}

| 33 | + |
// Lowering of _mm_maskz_compress_epi16 (vpcompressw, zero-masking form).
__m128i test_mm_maskz_compress_epi16(__mmask8 __U, __m128i __D) {
  // CIR-LABEL: test_mm_maskz_compress_epi16
  // CIR: %[[MASK8:.+]] = cir.cast bitcast %{{.+}} : !u8i -> !cir.vector<8 x !cir.int<u, 1>>
  // CIR: %[[RES:.+]] = cir.call_llvm_intrinsic "x86.avx512.mask.compress" %{{.+}}, %{{.+}}, %[[MASK8]]: (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>, !cir.vector<8 x !cir.int<u, 1>>) -> !cir.vector<8 x !s16i>
  // CIR: %[[CAST:.+]] = cir.cast bitcast %[[RES]] : !cir.vector<8 x !s16i> -> !cir.vector<2 x !s64i>

  // LLVM-LABEL: test_mm_maskz_compress_epi16
  // LLVM: %[[MASK8:.+]] = bitcast i8 %{{.+}} to <8 x i1>
  // LLVM: %[[RES:.+]] = call <8 x i16> @llvm.x86.avx512.mask.compress.v8i16(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i1> %[[MASK8]])
  // LLVM: %[[CAST:.+]] = bitcast <8 x i16> %[[RES]] to <2 x i64>

  // OGCG-LABEL: test_mm_maskz_compress_epi16
  // OGCG: %[[MASK8:.+]] = bitcast i8 %{{.+}} to <8 x i1>
  // OGCG: %[[RES:.+]] = call <8 x i16> @llvm.x86.avx512.mask.compress.v8i16(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i1> %[[MASK8]])
  // OGCG: %[[CAST:.+]] = bitcast <8 x i16> %[[RES]] to <2 x i64>

  return _mm_maskz_compress_epi16(__U, __D);
}

| 52 | + |
// Lowering of _mm_mask_compress_epi8 (vpcompressb, merge-masking form).
__m128i test_mm_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D) {
  // CIR-LABEL: test_mm_mask_compress_epi8
  // CIR: %[[MASK16:.+]] = cir.cast bitcast %{{.+}} : !u16i -> !cir.vector<16 x !cir.int<u, 1>>
  // CIR: %[[RES:.+]] = cir.call_llvm_intrinsic "x86.avx512.mask.compress" %{{.+}}, %{{.+}}, %[[MASK16]]: (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>, !cir.vector<16 x !cir.int<u, 1>>) -> !cir.vector<16 x !s8i>
  // CIR: %[[CAST:.+]] = cir.cast bitcast %[[RES]] : !cir.vector<16 x !s8i> -> !cir.vector<2 x !s64i>

  // LLVM-LABEL: test_mm_mask_compress_epi8
  // LLVM: %[[MASK16:.+]] = bitcast i16 %{{.+}} to <16 x i1>
  // LLVM: %[[RES:.+]] = call <16 x i8> @llvm.x86.avx512.mask.compress.v16i8(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i1> %[[MASK16]])
  // LLVM: %[[CAST:.+]] = bitcast <16 x i8> %[[RES]] to <2 x i64>

  // OGCG-LABEL: test_mm_mask_compress_epi8
  // OGCG: %[[MASK16:.+]] = bitcast i16 %{{.+}} to <16 x i1>
  // OGCG: %[[RES:.+]] = call <16 x i8> @llvm.x86.avx512.mask.compress.v16i8(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i1> %[[MASK16]])
  // OGCG: %[[CAST:.+]] = bitcast <16 x i8> %[[RES]] to <2 x i64>

  return _mm_mask_compress_epi8(__S, __U, __D);
}

| 71 | + |
// Lowering of _mm_maskz_compress_epi8 (vpcompressb, zero-masking form).
// Also checks that the zero source operand comes from _mm_setzero_si128.
__m128i test_mm_maskz_compress_epi8(__mmask16 __U, __m128i __D) {
  // CIR-LABEL: test_mm_maskz_compress_epi8
  // CIR: %[[ZERO:.+]] = cir.call @_mm_setzero_si128() : () -> !cir.vector<2 x !s64i>
  // CIR: %[[CAST1:.+]] = cir.cast bitcast %[[ZERO]] : !cir.vector<2 x !s64i> -> !cir.vector<16 x !s8i>
  // CIR: %[[MASK16:.+]] = cir.cast bitcast %{{.+}} : !u16i -> !cir.vector<16 x !cir.int<u, 1>>
  // CIR: %[[RES:.+]] = cir.call_llvm_intrinsic "x86.avx512.mask.compress" %{{.+}}, %[[CAST1]], %[[MASK16]]: (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>, !cir.vector<16 x !cir.int<u, 1>>) -> !cir.vector<16 x !s8i>
  // CIR: %[[CAST2:.+]] = cir.cast bitcast %[[RES]] : !cir.vector<16 x !s8i> -> !cir.vector<2 x !s64i>

  // LLVM-LABEL: test_mm_maskz_compress_epi8
  // LLVM: store <2 x i64> zeroinitializer, ptr %{{.+}}, align 16
  // LLVM: %[[CAST1:.+]] = bitcast <2 x i64> %{{.+}} to <16 x i8>
  // LLVM: %[[MASK16:.+]] = bitcast i16 %{{.+}} to <16 x i1>
  // LLVM: %[[RES:.+]] = call <16 x i8> @llvm.x86.avx512.mask.compress.v16i8(<16 x i8> %{{.+}}, <16 x i8> %[[CAST1]], <16 x i1> %[[MASK16]])
  // LLVM: %[[CAST2:.+]] = bitcast <16 x i8> %[[RES]] to <2 x i64>

  // OGCG-LABEL: test_mm_maskz_compress_epi8
  // OGCG: store <2 x i64> zeroinitializer, ptr %{{.+}}, align 16
  // OGCG: %[[CAST1:.+]] = bitcast <2 x i64> %{{.+}} to <16 x i8>
  // OGCG: %[[MASK16:.+]] = bitcast i16 %{{.+}} to <16 x i1>
  // OGCG: %[[RES:.+]] = call <16 x i8> @llvm.x86.avx512.mask.compress.v16i8(<16 x i8> %{{.+}}, <16 x i8> %[[CAST1]], <16 x i1> %[[MASK16]])
  // OGCG: %[[CAST2:.+]] = bitcast <16 x i8> %[[RES]] to <2 x i64>

  return _mm_maskz_compress_epi8(__U, __D);
}

| 96 | + |
// Lowering of _mm_mask_expand_epi16 (vpexpandw, merge-masking form).
__m128i test_mm_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D) {
  // CIR-LABEL: test_mm_mask_expand_epi16
  // CIR: %[[MASK8:.+]] = cir.cast bitcast %{{.+}} : !u8i -> !cir.vector<8 x !cir.int<u, 1>>
  // CIR: %[[RES:.+]] = cir.call_llvm_intrinsic "x86.avx512.mask.expand" %{{.+}}, %{{.+}}, %[[MASK8]]: (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>, !cir.vector<8 x !cir.int<u, 1>>) -> !cir.vector<8 x !s16i>
  // CIR: %[[CAST:.+]] = cir.cast bitcast %[[RES]] : !cir.vector<8 x !s16i> -> !cir.vector<2 x !s64i>

  // LLVM-LABEL: test_mm_mask_expand_epi16
  // LLVM: %[[MASK8:.+]] = bitcast i8 %{{.+}} to <8 x i1>
  // LLVM: %[[RES:.+]] = call <8 x i16> @llvm.x86.avx512.mask.expand.v8i16(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i1> %[[MASK8]])
  // LLVM: %[[CAST:.+]] = bitcast <8 x i16> %[[RES]] to <2 x i64>

  // OGCG-LABEL: test_mm_mask_expand_epi16
  // OGCG: %[[MASK8:.+]] = bitcast i8 %{{.+}} to <8 x i1>
  // OGCG: %[[RES:.+]] = call <8 x i16> @llvm.x86.avx512.mask.expand.v8i16(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i1> %[[MASK8]])
  // OGCG: %[[CAST:.+]] = bitcast <8 x i16> %[[RES]] to <2 x i64>

  return _mm_mask_expand_epi16(__S, __U, __D);
}

| 115 | + |
// Lowering of _mm_maskz_expand_epi16 (vpexpandw, zero-masking form).
__m128i test_mm_maskz_expand_epi16(__mmask8 __U, __m128i __D) {
  // CIR-LABEL: test_mm_maskz_expand_epi16
  // CIR: %[[MASK:.+]] = cir.cast bitcast %{{.+}} : !u8i -> !cir.vector<8 x !cir.int<u, 1>>
  // CIR: %[[RES:.+]] = cir.call_llvm_intrinsic "x86.avx512.mask.expand" %{{.+}}, %{{.+}}, %[[MASK]]: (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>, !cir.vector<8 x !cir.int<u, 1>>) -> !cir.vector<8 x !s16i>
  // CIR: %[[CAST:.+]] = cir.cast bitcast %[[RES]] : !cir.vector<8 x !s16i> -> !cir.vector<2 x !s64i>

  // LLVM-LABEL: test_mm_maskz_expand_epi16
  // LLVM: %[[MASK:.+]] = bitcast i8 %{{.+}} to <8 x i1>
  // LLVM: %[[RES:.+]] = call <8 x i16> @llvm.x86.avx512.mask.expand.v8i16(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i1> %[[MASK]])
  // LLVM: %[[CAST:.+]] = bitcast <8 x i16> %[[RES]] to <2 x i64>

  // OGCG-LABEL: test_mm_maskz_expand_epi16
  // OGCG: %[[MASK:.+]] = bitcast i8 %{{.+}} to <8 x i1>
  // OGCG: %[[RES:.+]] = call <8 x i16> @llvm.x86.avx512.mask.expand.v8i16(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i1> %[[MASK]])
  // OGCG: %[[CAST:.+]] = bitcast <8 x i16> %[[RES]] to <2 x i64>

  return _mm_maskz_expand_epi16(__U, __D);
}

| 134 | + |
// Lowering of _mm_mask_expand_epi8 (vpexpandb, merge-masking form).
__m128i test_mm_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D) {
  // CIR-LABEL: test_mm_mask_expand_epi8
  // CIR: %[[MASK:.+]] = cir.cast bitcast %{{.+}} : !u16i -> !cir.vector<16 x !cir.int<u, 1>>
  // CIR: %[[RES:.+]] = cir.call_llvm_intrinsic "x86.avx512.mask.expand" %{{.+}}, %{{.+}}, %[[MASK]]: (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>, !cir.vector<16 x !cir.int<u, 1>>) -> !cir.vector<16 x !s8i>
  // CIR: %[[CAST:.+]] = cir.cast bitcast %[[RES]] : !cir.vector<16 x !s8i> -> !cir.vector<2 x !s64i>

  // LLVM-LABEL: test_mm_mask_expand_epi8
  // LLVM: %[[MASK:.+]] = bitcast i16 %{{.+}} to <16 x i1>
  // LLVM: %[[RES:.+]] = call <16 x i8> @llvm.x86.avx512.mask.expand.v16i8(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i1> %[[MASK]])
  // LLVM: %[[CAST:.+]] = bitcast <16 x i8> %[[RES]] to <2 x i64>

  // OGCG-LABEL: test_mm_mask_expand_epi8
  // OGCG: %[[MASK:.+]] = bitcast i16 %{{.+}} to <16 x i1>
  // OGCG: %[[RES:.+]] = call <16 x i8> @llvm.x86.avx512.mask.expand.v16i8(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i1> %[[MASK]])
  // OGCG: %[[CAST:.+]] = bitcast <16 x i8> %[[RES]] to <2 x i64>

  return _mm_mask_expand_epi8(__S, __U, __D);
}

| 153 | + |
// Lowering of _mm_maskz_expand_epi8 (vpexpandb, zero-masking form).
__m128i test_mm_maskz_expand_epi8(__mmask16 __U, __m128i __D) {
  // CIR-LABEL: test_mm_maskz_expand_epi8
  // CIR: %[[MASK:.+]] = cir.cast bitcast %{{.+}} : !u16i -> !cir.vector<16 x !cir.int<u, 1>>
  // CIR: %[[RES:.+]] = cir.call_llvm_intrinsic "x86.avx512.mask.expand" %{{.+}}, %{{.+}}, %[[MASK]]: (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>, !cir.vector<16 x !cir.int<u, 1>>) -> !cir.vector<16 x !s8i>
  // CIR: %[[CAST:.+]] = cir.cast bitcast %[[RES]] : !cir.vector<16 x !s8i> -> !cir.vector<2 x !s64i>

  // LLVM-LABEL: test_mm_maskz_expand_epi8
  // LLVM: %[[MASK:.+]] = bitcast i16 %{{.+}} to <16 x i1>
  // LLVM: %[[RES:.+]] = call <16 x i8> @llvm.x86.avx512.mask.expand.v16i8(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i1> %[[MASK]])
  // LLVM: %[[CAST:.+]] = bitcast <16 x i8> %[[RES]] to <2 x i64>

  // OGCG-LABEL: test_mm_maskz_expand_epi8
  // OGCG: %[[MASK:.+]] = bitcast i16 %{{.+}} to <16 x i1>
  // OGCG: %[[RES:.+]] = call <16 x i8> @llvm.x86.avx512.mask.expand.v16i8(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i1> %[[MASK]])
  // OGCG: %[[CAST:.+]] = bitcast <16 x i8> %[[RES]] to <2 x i64>

  return _mm_maskz_expand_epi8(__U, __D);
}