
Commit 8c8d655

zhanyi22333 authored and imkiva committed
[LLVM+Clang][XTHeadVector] Implement intrinsics for vsmul (llvm#82)
* [LLVM][XTHeadVector] Change vsmul test cases
  - Make the tests more closely match RVV vsmul
  - Change the intrinsic interface
  - Add a csrwi vxrm instruction
* [LLVM][XTHeadVector] Redefine vsmul
* [Clang][XTHeadVector] Add test cases for vsmul
* [Clang][XTHeadVector] Define vsmul
1 parent 5976867 commit 8c8d655
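With the rounding mode now an explicit intrinsic operand, the expected instruction sequence writes the vxrm CSR before the multiply, which is what the updated tests check for per the commit message. A rough sketch (the exact mnemonic spelling and register choices are illustrative assumptions, not copied from this commit's tests):

    csrwi vxrm, 0            # 0 = round-to-nearest-up (rnu)
    th.vsmul.vv v8, v8, v9   # fractional multiply with rounding and saturation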

File tree

7 files changed (+2597, -1121 lines)


Diff for: clang/include/clang/Basic/riscv_vector_xtheadv.td (+3 lines)

@@ -1151,6 +1151,9 @@ let ManualCodegen = [{
   // 13.2. Vector Single-Width Averaging Add and Subtract
   defm th_vaadd : RVVSignedBinBuiltinSetRoundingMode;
   defm th_vasub : RVVSignedBinBuiltinSetRoundingMode;
+
+  // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation Operations
+  defm th_vsmul : RVVSignedBinBuiltinSetRoundingMode;
 }
 
+
 include "riscv_vector_xtheadv_wrappers.td"
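RVVSignedBinBuiltinSetRoundingMode is the same multiclass already used for th_vaadd and th_vasub above: it instantiates the signed .vv and .vx builtin variants with an explicit rounding-mode operand. For context, vsmul computes roughly (op1 * op2) >> (SEW-1), rounding the shifted-out bits per vxrm and saturating the one overflow case. A minimal usage sketch, assuming the standard riscv_vector.h types and __RISCV_VXRM_* rounding-mode constants are available in this toolchain:

    #include <riscv_vector.h>

    // Hypothetical example: Q31 fixed-point multiply of two i32 vectors.
    // th_vsmul takes the rounding mode (vxrm) as an explicit operand.
    vint32m1_t q31_mul(vint32m1_t a, vint32m1_t b, size_t vl) {
      // __RISCV_VXRM_RNU = round-to-nearest-up; the sole overflow case
      // (INT32_MIN * INT32_MIN) saturates to INT32_MAX
      return __riscv_th_vsmul_vv_i32m1(a, b, __RISCV_VXRM_RNU, vl);
    }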

Diff for: clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td (+74 lines)

@@ -1843,3 +1843,77 @@ let HeaderCode =
 
 }] in
 def th_single_width_averaging_add_and_subtract_wrapper_macros: RVVHeader;
+
+// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+
+let HeaderCode =
+[{
+
+#define __riscv_vsmul_vv_i8m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4(op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8(op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8(op1, op2, rm, vl)
+
+#define __riscv_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl)
+#define __riscv_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl)
+
+}] in
+def th_single_width_fractional_multiply_with_rounding_and_saturation_wrapper_macros: RVVHeader;
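These macros simply forward the standard __riscv_vsmul_* intrinsic names to the th_-prefixed XTHeadVector builtins, so code written against the standard RVV intrinsics API compiles unchanged. A sketch of the masked form through the wrapper, under the same assumptions as the earlier example (vbool32_t being the mask type that pairs with i32m1):

    #include <riscv_vector.h>

    // Hypothetical example: masked Q31 multiply via the standard-named wrapper.
    // The macro expands to __riscv_th_vsmul_vv_i32m1_m(m, a, b, rm, vl);
    // inactive elements follow the builtin's masked-off policy.
    vint32m1_t q31_mul_masked(vbool32_t m, vint32m1_t a, vint32m1_t b, size_t vl) {
      return __riscv_vsmul_vv_i32m1_m(m, a, b, __RISCV_VXRM_RNU, vl);
    }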
