@@ -1843,3 +1843,77 @@ let HeaderCode =
}] in
def th_single_width_averaging_add_and_subtract_wrapper_macros: RVVHeader;
+
+ // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+
+ let HeaderCode =
+ [{
+
+ #define __riscv_vsmul_vv_i8m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i8m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i8m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i8m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i8m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i8m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i8m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i8m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i16m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i16m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i16m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i16m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i16m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i16m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i16m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i16m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i32m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i32m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i32m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i32m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i32m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i32m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i32m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i32m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i64m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i64m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i64m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i64m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i64m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i64m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4(op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8(op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8(op1, op2, rm, vl)
+
+ #define __riscv_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl)
+ #define __riscv_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl)
+
+ }] in
+ def th_single_width_fractional_multiply_with_rounding_and_saturation_wrapper_macros: RVVHeader;
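For context, the effect of these wrappers is that code written against the standard RVV fixed-point intrinsics compiles unchanged for an XTheadVector target: the preprocessor rewrites each `__riscv_vsmul_*` call into the corresponding `__riscv_th_vsmul_*` intrinsic with the same arguments. A minimal usage sketch follows; the function names are illustrative, and the `<riscv_vector.h>` include and the `__RISCV_VXRM_RNU` rounding-mode constant are assumed to come from the standard RVV intrinsic headers rather than from this patch.

```c
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>   /* assumption: pulls in the generated wrapper macros */

/* Unmasked vector-vector form: with the wrapper active, the call expands to
   __riscv_th_vsmul_vv_i32m1(a, b, __RISCV_VXRM_RNU, vl). */
vint32m1_t fractional_mul(vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vsmul_vv_i32m1(a, b, __RISCV_VXRM_RNU, vl);
}

/* Masked vector-scalar form: expands to
   __riscv_th_vsmul_vx_i32m1_m(mask, a, s, __RISCV_VXRM_RNU, vl). */
vint32m1_t fractional_mul_masked(vbool32_t mask, vint32m1_t a, int32_t s, size_t vl) {
  return __riscv_vsmul_vx_i32m1_m(mask, a, s, __RISCV_VXRM_RNU, vl);
}
```

Because the wrappers are function-like macros that forward their arguments unchanged, they add no call overhead and keep the standard intrinsic spelling as the single source-level API.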