diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv.td b/clang/include/clang/Basic/riscv_vector_xtheadv.td
index e3d5305c898410..f5737b34db0245 100644
--- a/clang/include/clang/Basic/riscv_vector_xtheadv.td
+++ b/clang/include/clang/Basic/riscv_vector_xtheadv.td
@@ -1151,6 +1151,9 @@ let ManualCodegen = [{
   // 13.2. Vector Single-Width Averaging Add and Subtract
   defm th_vaadd : RVVSignedBinBuiltinSetRoundingMode;
   defm th_vasub : RVVSignedBinBuiltinSetRoundingMode;
+  // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation Operations
+  defm th_vsmul : RVVSignedBinBuiltinSetRoundingMode;
 }
+
 include "riscv_vector_xtheadv_wrappers.td"
diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td
index 06dda98c023c43..eab73a0caf1992 100644
--- a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td
+++ b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td
@@ -1843,3 +1843,77 @@ let HeaderCode =
 }] in
 def th_single_width_averaging_add_and_subtract_wrapper_macros: RVVHeader;
+
+// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+
+let HeaderCode =
+[{
+
+#define __riscv_vsmul_vv_i8m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8(op1, op2, rm, vl)
+
+#define __riscv_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl)
+
+}] in
+def th_single_width_fractional_multiply_with_rounding_and_saturation_wrapper_macros: RVVHeader;
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/thead/vsmul.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/thead/vsmul.c
new file mode 100644
index 00000000000000..115f988f418469
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/thead/vsmul.c
@@ -0,0 +1,664 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:   opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return __riscv_th_vsmul_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+  return __riscv_th_vsmul_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return __riscv_th_vsmul_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
+  return __riscv_th_vsmul_vx_i8m1_m(mask, op1, op2, 
__RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, 
vint8m4_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i16.i16.i64( poison, [[OP1]], i16 
[[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return 
__riscv_th_vsmul_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i32.i32.i64( poison, 
[[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8 +// CHECK-RV64-SAME: ( 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return 
__riscv_th_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_th_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_th_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/wrappers/vsmul.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/wrappers/vsmul.c
new file mode 100644
index 00000000000000..75e3b32307ad3b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/wrappers/vsmul.c
@@ -0,0 +1,664 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:   opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+}
+
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.th.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8 +// CHECK-RV64-SAME: ( 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t 
test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return 
__riscv_vsmul_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], 
i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +} + diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td b/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td index 836ae5a29c4ff0..011f40836730e3 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td @@ -816,6 +816,6 @@ let TargetPrefix = "riscv" in { } // 13.3. 
Vector Single-Width Fractional Multiply with Rounding and Saturation Instructions - defm th_vsmul : XVBinaryABX; + defm th_vsmul : XVBinaryAAXRoundingMode; } // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td index 93aa61cf2b3257..b5c10f7b926efd 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td @@ -2168,7 +2168,7 @@ multiclass XVPseudoVAALU_VV_VX_RM { } } -multiclass XVPseudoVSMUL_VV_VX { +multiclass XVPseudoVSMUL_VV_VX_RM { foreach m = MxListXTHeadV in { defvar mx = m.MX; defvar WriteVSIMulV_MX = !cast("WriteVSMulV_" # mx); @@ -2176,9 +2176,9 @@ multiclass XVPseudoVSMUL_VV_VX { defvar ReadVSIMulV_MX = !cast("ReadVSMulV_" # mx); defvar ReadVSIMulX_MX = !cast("ReadVSMulX_" # mx); - defm "" : XVPseudoBinaryV_VV, + defm "" : XVPseudoBinaryV_VV_RM, Sched<[WriteVSIMulV_MX, ReadVSIMulV_MX, ReadVSIMulV_MX, ReadVMask]>; - defm "" : XVPseudoBinaryV_VX, + defm "" : XVPseudoBinaryV_VX_RM, Sched<[WriteVSIMulX_MX, ReadVSIMulV_MX, ReadVSIMulX_MX, ReadVMask]>; } } @@ -3229,12 +3229,11 @@ let Predicates = [HasVendorXTHeadV] in { //===----------------------------------------------------------------------===// let Predicates = [HasVendorXTHeadV] in { - defm PseudoTH_VSMUL : XVPseudoVSMUL_VV_VX; + defm PseudoTH_VSMUL : XVPseudoVSMUL_VV_VX_RM; } // Predicates = [HasVendorXTHeadV] let Predicates = [HasVendorXTHeadV] in { - defm : XVPatBinaryV_VV_VX<"int_riscv_th_vsmul", "PseudoTH_VSMUL", AllIntegerXVectors>; - // defm : XVPatBinaryV_VV_VX<"int_riscv_th_vsmul", "PseudoTH_VSMUL", AllIntegerXVectors, isSEWAware=1>; + defm : XVPatBinaryV_VV_VX_RM<"int_riscv_th_vsmul", "PseudoTH_VSMUL", AllIntegerXVectors>; } // Predicates = [HasVendorXTHeadV] include "RISCVInstrInfoXTHeadVVLPatterns.td" diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vsmul.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vsmul.ll index be7ea0afefd13b..76a6142fa2eec7 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vsmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vsmul.ll @@ -1,13 +1,13 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 - declare @llvm.riscv.th.vsmul.nxv8i8.nxv8i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8: @@ -21,6 +21,7 @@ define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -38,7 +39,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv8i8.nxv8i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -64,6 +65,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv8i8.i8( - , - , - i8, - iXLen); - -define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; 
CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv8i8.i8( - undef, - %0, - i8 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 -; CHECK-NEXT: th.vsmul.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -147,7 +83,7 @@ declare @llvm.riscv.th.vsmul.nxv16i8.nxv16i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8: @@ -161,6 +97,7 @@ define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -178,7 +115,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv16i8.nxv16i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -204,6 +141,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv16i8.i8( - , - , - i8, - iXLen); - -define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv16i8.i8( - undef, - %0, - i8 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 -; CHECK-NEXT: th.vsmul.vx v8, v10, 
a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -287,7 +159,7 @@ declare @llvm.riscv.th.vsmul.nxv32i8.nxv32i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8: @@ -301,6 +173,7 @@ define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -318,7 +191,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv32i8.nxv32i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -344,6 +217,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv32i8.i8( - , - , - i8, - iXLen); - -define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv32i8.i8( - undef, - %0, - i8 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 -; CHECK-NEXT: th.vsmul.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -427,7 +235,7 @@ declare @llvm.riscv.th.vsmul.nxv64i8.nxv64i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8: @@ -441,6 +249,7 @@ define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -458,7 +267,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv64i8.nxv64i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: @@ -485,6 +294,7 @@ define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv64i8.i8( - , - , - i8, - iXLen); - -define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv64i8.i8( - undef, - %0, - i8 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv64i8.i8( - , - , - i8, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 -; CHECK-NEXT: th.vsmul.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -568,7 +312,7 @@ declare @llvm.riscv.th.vsmul.nxv4i16.nxv4i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16: @@ -582,6 +326,7 @@ define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -599,7 +344,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv4i16.nxv4i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -625,6 +370,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv4i16.i16( - , - , - i16, - iXLen); - -define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv4i16.i16( - undef, - %0, - i16 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; 
CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 -; CHECK-NEXT: th.vsmul.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -708,7 +388,7 @@ declare @llvm.riscv.th.vsmul.nxv8i16.nxv8i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16: @@ -722,6 +402,7 @@ define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -739,7 +420,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv8i16.nxv8i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -765,6 +446,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv8i16.i16( - , - , - i16, - iXLen); - -define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv8i16.i16( - undef, - %0, - i16 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 -; CHECK-NEXT: th.vsmul.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -848,7 +464,7 @@ declare @llvm.riscv.th.vsmul.nxv16i16.nxv16i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16: @@ -862,6 +478,7 @@ define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -879,7 +496,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv16i16.nxv16i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -905,6 +522,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(< ; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 ; CHECK-NEXT: th.vsetvl zero, a1, a2 ; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, 
d1 +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: th.vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -913,73 +531,7 @@ entry: %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv16i16.i16( - , - , - i16, - iXLen); - -define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv16i16.i16( - undef, - %0, - i16 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 -; CHECK-NEXT: th.vsmul.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -988,7 +540,7 @@ declare @llvm.riscv.th.vsmul.nxv32i16.nxv32i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16: @@ -1002,6 +554,7 @@ define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1019,7 +572,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv32i16.nxv32i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: @@ -1046,6 +599,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(< ; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 ; CHECK-NEXT: th.vsetvl zero, a0, a2 ; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: th.vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1054,73 +608,7 @@ entry: %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv32i16.i16( - , - , - i16, - iXLen); - -define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv32i16.i16( - undef, - %0, - i16 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv32i16.i16( - , - , - i16, - , - 
iXLen); - -define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 -; CHECK-NEXT: th.vsmul.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -1129,7 +617,7 @@ declare @llvm.riscv.th.vsmul.nxv2i32.nxv2i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32: @@ -1143,6 +631,7 @@ define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1160,7 +649,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv2i32.nxv2i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -1186,6 +675,7 @@ define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %1, %2, %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } -declare @llvm.riscv.th.vsmul.nxv2i32.i32( - , - , - i32, - iXLen); +declare @llvm.riscv.th.vsmul.nxv4i32.nxv4i32( + , + , + , + iXLen, iXLen); -define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv2i32.i32( - undef, - %0, - i32 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 -; CHECK-NEXT: th.vsmul.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - iXLen %4) - - ret %a -} - -declare 
@llvm.riscv.th.vsmul.nxv4i32.nxv4i32( - , - , - , - iXLen); - -define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32: +define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a1, vl ; CHECK-NEXT: csrr a2, vtype @@ -1283,6 +707,7 @@ define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1300,7 +725,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv4i32.nxv4i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -1326,6 +751,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv4i32.i32( - , - , - i32, - iXLen); - -define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv4i32.i32( - undef, - %0, - i32 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 -; CHECK-NEXT: th.vsmul.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -1409,7 +769,7 @@ declare @llvm.riscv.th.vsmul.nxv8i32.nxv8i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32: @@ -1423,6 +783,7 @@ define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1440,7 +801,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv8i32.nxv8i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -1466,6 +827,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv8i32.i32( - , - , - i32, - iXLen); - -define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { -; CHECK-LABEL: 
intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv8i32.i32( - undef, - %0, - i32 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 -; CHECK-NEXT: th.vsmul.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -1549,7 +845,7 @@ declare @llvm.riscv.th.vsmul.nxv16i32.nxv16i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32: @@ -1563,6 +859,7 @@ define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1580,7 +877,7 @@ declare @llvm.riscv.th.vsmul.mask.nxv16i32.nxv16i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: @@ -1607,6 +904,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(< ; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 ; CHECK-NEXT: th.vsetvl zero, a0, a2 ; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: th.vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1615,73 +913,7 @@ entry: %1, %2, %3, - iXLen %4) - - ret %a -} - -declare @llvm.riscv.th.vsmul.nxv16i32.i32( - , - , - i32, - iXLen); - -define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 -; CHECK-NEXT: th.vsmul.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.nxv16i32.i32( - undef, - %0, - i32 %1, - iXLen %2) - - ret %a -} - -declare @llvm.riscv.th.vsmul.mask.nxv16i32.i32( - , - , - i32, - , - iXLen); - -define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { -; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; 
CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: csrr a2, vl -; CHECK-NEXT: csrr a3, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a2, a3 -; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 -; CHECK-NEXT: th.vsmul.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vsmul.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - iXLen %4) + iXLen 0, iXLen %4) ret %a } @@ -1690,7 +922,7 @@ declare @llvm.riscv.th.vsmul.nxv1i64.nxv1i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64: @@ -1704,6 +936,7 @@ define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1721,50 +954,1096 @@ declare @llvm.riscv.th.vsmul.mask.nxv1i64.nxv1i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a1, vl -; CHECK-NEXT: csrr a2, vtype -; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 -; CHECK-NEXT: th.vsetvl zero, a1, a2 -; CHECK-NEXT: csrr a1, vl -; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsmul.nxv2i64.nxv2i64( + , + , + , + iXLen, iXLen); + +define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsmul.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsmul.nxv4i64.nxv4i64( + , + , + , + iXLen, iXLen); + +define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsmul.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsmul.nxv8i64.nxv8i64( + , + , + , + iXLen, iXLen); + +define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, 
m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsmul.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e64, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsmul.nxv8i8.i8( + , + , + i8, + iXLen, iXLen); + +define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsmul.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: th.vsmul.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsmul.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen 0, iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsmul.nxv16i8.i8( + , + , + i8, + iXLen, iXLen); + +define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; 
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m2, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.th.vsmul.nxv16i8.i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.th.vsmul.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m2, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.th.vsmul.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.th.vsmul.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m4, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.th.vsmul.nxv32i8.i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.th.vsmul.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m4, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.th.vsmul.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.th.vsmul.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  iXLen, iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m8, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.th.vsmul.nxv64i8.i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.th.vsmul.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m8, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.th.vsmul.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.th.vsmul.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  iXLen, iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m1, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.th.vsmul.nxv4i16.i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.th.vsmul.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m1, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.th.vsmul.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.th.vsmul.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  iXLen, iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m2, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.th.vsmul.nxv8i16.i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.th.vsmul.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m2, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.th.vsmul.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.th.vsmul.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  iXLen, iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m4, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.th.vsmul.nxv16i16.i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.th.vsmul.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m4, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.th.vsmul.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.th.vsmul.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  iXLen, iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m8, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.th.vsmul.nxv32i16.i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.th.vsmul.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m8, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.th.vsmul.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vsmul.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m1, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vsmul.nxv2i32.i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vsmul.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m1, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vsmul.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vsmul.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m2, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vsmul.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vsmul.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m2, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vsmul.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vsmul.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m4, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vsmul.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vsmul.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m4, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vsmul.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.th.vsmul.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m8, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.th.vsmul.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.th.vsmul.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
 ; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
 ; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
 ; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
 ; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    th.vsetvli zero, a0, e64, m1, d1
-; CHECK-NEXT:    th.vsmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m8, d1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    th.vsmul.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.th.vsmul.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    iXLen %4)
+  %a = call <vscale x 16 x i32> @llvm.riscv.th.vsmul.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4)
 
-  ret <vscale x 1 x i64> %a
+  ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.th.vsmul.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
 ; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
@@ -1779,6 +2058,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1807,7 +2088,7 @@ declare <vscale x 1 x i64> @llvm.riscv.th.vsmul.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
@@ -1834,6 +2115,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen 0, iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.th.vsmul.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  iXLen);
-
-define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    th.vsetvli zero, a0, e64, m2, d1
-; CHECK-NEXT:    th.vsmul.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.th.vsmul.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    iXLen %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.th.vsmul.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  iXLen);
-
-define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    th.vsetvli zero, a0, e64, m2, d1
-; CHECK-NEXT:    th.vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.th.vsmul.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    iXLen %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
 declare <vscale x 2 x i64> @llvm.riscv.th.vsmul.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
 ; RV32-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
@@ -1967,6 +2176,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1995,7 +2206,7 @@ declare <vscale x 2 x i64> @llvm.riscv.th.vsmul.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
@@ -2022,6 +2233,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen 0, iXLen %4)
 
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.th.vsmul.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  iXLen);
-
-define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    th.vsetvli zero, a0, e64, m4, d1
-; CHECK-NEXT:    th.vsmul.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.th.vsmul.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    iXLen %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.th.vsmul.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  iXLen);
-
-define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    th.vsetvli zero, a0, e64, m4, d1
-; CHECK-NEXT:    th.vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.th.vsmul.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    iXLen %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
 declare <vscale x 4 x i64> @llvm.riscv.th.vsmul.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
 ; RV32-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
@@ -2155,6 +2294,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2183,7 +2324,7 @@ declare <vscale x 4 x i64> @llvm.riscv.th.vsmul.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
@@ -2210,6 +2351,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen 0, iXLen %4)
 
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.th.vsmul.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  iXLen);
-
-define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    csrr a1, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a1, a2
-; CHECK-NEXT:    th.vsetvli zero, a0, e64, m8, d1
-; CHECK-NEXT:    th.vsmul.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.th.vsmul.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    iXLen %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.th.vsmul.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  iXLen);
-
-define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a2, vl
-; CHECK-NEXT:    csrr a3, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a2, a3
-; CHECK-NEXT:    csrr a2, vl
-; CHECK-NEXT:    csrr a3, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a2, a3
-; CHECK-NEXT:    csrr a2, vl
-; CHECK-NEXT:    csrr a3, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a2, a3
-; CHECK-NEXT:    csrr a2, vl
-; CHECK-NEXT:    csrr a3, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e64, m8, d1
-; CHECK-NEXT:    th.vle.v v24, (a0)
-; CHECK-NEXT:    th.vsetvl zero, a2, a3
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    csrr a2, vtype
-; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
-; CHECK-NEXT:    th.vsetvl zero, a0, a2
-; CHECK-NEXT:    th.vsetvli zero, a1, e64, m8, d1
-; CHECK-NEXT:    th.vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.th.vsmul.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    iXLen %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
 declare <vscale x 8 x i64> @llvm.riscv.th.vsmul.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
 ; RV32-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
@@ -2344,6 +2412,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2372,7 +2442,7 @@ declare <vscale x 8 x i64> @llvm.riscv.th.vsmul.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
@@ -2399,6 +2469,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen 0, iXLen %4)
 
   ret <vscale x 8 x i64> %a
 }
\ No newline at end of file
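
For reference, a minimal sketch of a standalone use of the updated intrinsic signature exercised by the tests above. The element type, the iXLen placeholder convention, and the literal rounding-mode value 0 (vxrm round-to-nearest-up) are taken from the tests; the function name @sample_vsmul and its parameter names are only illustrative and are not part of the patch:

declare <vscale x 2 x i64> @llvm.riscv.th.vsmul.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen, iXLen);

define <vscale x 2 x i64> @sample_vsmul(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, iXLen %vl) nounwind {
entry:
  ; Operand order after this change: passthru, op1, op2, rounding mode (vxrm), vl.
  %r = call <vscale x 2 x i64> @llvm.riscv.th.vsmul.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %op1,
    <vscale x 2 x i64> %op2,
    iXLen 0, iXLen %vl)
  ret <vscale x 2 x i64> %r
}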