diff --git a/BUILD.bazel b/BUILD.bazel index 51ed10a9eaa..d157d8121a1 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1378,6 +1378,14 @@ WASMSIMD_UKERNELS = [ "src/qs8-gavgpool/gen/7x-minmax-wasmsimd-c8-acc2.c", "src/qs8-gavgpool/gen/7x-minmax-wasmsimd-c16-acc2.c", "src/qs8-gavgpool/gen/7x-minmax-wasmsimd-c24-acc2.c", + "src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c", + "src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c", + "src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c", + "src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c", + "src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c", + "src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c", + "src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c", + "src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c", "src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-ld64.c", "src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-ld128.c", "src/qs8-gemm/gen/1x4c8-xw-minmax-wasmsimd.c", @@ -1387,6 +1395,14 @@ WASMSIMD_UKERNELS = [ "src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-ld64.c", "src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-ld128.c", "src/qs8-gemm/gen/3x4c8-xw-minmax-wasmsimd.c", + "src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c", + "src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c", + "src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c", + "src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c", + "src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c", + "src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c", + "src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c", + "src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c", "src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-ld64.c", "src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-ld128.c", "src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-ld64.c", diff --git a/scripts/generate-qs8-gemm.sh b/scripts/generate-qs8-gemm.sh index f6cf0bec85c..0514667054d 100755 --- a/scripts/generate-qs8-gemm.sh +++ b/scripts/generate-qs8-gemm.sh @@ -23,6 +23,18 @@ tools/xngen src/qs8-gemm/MRx4c8-wasmsimd.c.in -D MR=1 -D VARIANT=EXTENDED -o src tools/xngen src/qs8-gemm/MRx4c8-wasmsimd.c.in -D MR=2 -D VARIANT=EXTENDED -o src/qs8-gemm/gen/2x4c8-xw-minmax-wasmsimd.c tools/xngen src/qs8-gemm/MRx4c8-wasmsimd.c.in -D MR=3 -D VARIANT=EXTENDED -o src/qs8-gemm/gen/3x4c8-xw-minmax-wasmsimd.c +### C8 ExtMul+Widen micro-kernels +tools/xngen src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in -D MR=1 -o src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c +tools/xngen src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in -D MR=2 -o src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c +tools/xngen src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in -D MR=3 -o src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c +tools/xngen src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in -D MR=4 -o src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c + +### C8 ExtMul+ExtAddPair micro-kernels +tools/xngen src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in -D MR=1 -o src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c +tools/xngen src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in -D MR=2 -o src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c +tools/xngen src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in -D MR=3 -o src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c +tools/xngen src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in -D MR=4 -o src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c + ################################### ARM NEON ################################## tools/xngen 
src/qs8-gemm/neon-mlal-lane.c.in -D MR=1 -D NR=8 -o src/qs8-gemm/gen/1x8-minmax-neon-mlal-lane.c tools/xngen src/qs8-gemm/neon-mlal-lane.c.in -D MR=2 -D NR=8 -o src/qs8-gemm/gen/2x8-minmax-neon-mlal-lane.c diff --git a/scripts/generate-qs8-igemm.sh b/scripts/generate-qs8-igemm.sh index 859d0f7d37c..6f0bdca38f8 100755 --- a/scripts/generate-qs8-igemm.sh +++ b/scripts/generate-qs8-igemm.sh @@ -14,6 +14,18 @@ tools/xngen src/qs8-igemm/MRx4c8-wasmsimd.c.in -D MR=1 -D VARIANT=LD128 -o src/q tools/xngen src/qs8-igemm/MRx4c8-wasmsimd.c.in -D MR=2 -D VARIANT=LD128 -o src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-ld128.c tools/xngen src/qs8-igemm/MRx4c8-wasmsimd.c.in -D MR=3 -D VARIANT=LD128 -o src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-ld128.c +### C8 ExtMul+Widen micro-kernels +tools/xngen src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in -D MR=1 -o src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c +tools/xngen src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in -D MR=2 -o src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c +tools/xngen src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in -D MR=3 -o src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c +tools/xngen src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in -D MR=4 -o src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c + +### C8 ExtMul+ExtAddPair micro-kernels +tools/xngen src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in -D MR=1 -o src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c +tools/xngen src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in -D MR=2 -o src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c +tools/xngen src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in -D MR=3 -o src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c +tools/xngen src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in -D MR=4 -o src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c + ################################### ARM NEON ################################## tools/xngen src/qs8-igemm/neon-mlal-lane.c.in -D MR=1 -D NR=8 -o src/qs8-igemm/gen/1x8-minmax-neon-mlal-lane.c tools/xngen src/qs8-igemm/neon-mlal-lane.c.in -D MR=2 -D NR=8 -o src/qs8-igemm/gen/2x8-minmax-neon-mlal-lane.c diff --git a/src/init.c b/src/init.c index aea948c80a2..d91c31c394a 100644 --- a/src/init.c +++ b/src/init.c @@ -2097,11 +2097,11 @@ static void init(void) { #ifndef XNN_NO_QS8_OPERATORS init_flags |= XNN_INIT_FLAG_QS8; - xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_ld64); - xnn_params.qs8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_ld64); - xnn_params.qs8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_ld64); - xnn_params.qs8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_ld64); - xnn_params.qs8.gemm.mr = 3; + xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + xnn_params.qs8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + xnn_params.qs8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + xnn_params.qs8.gemm.minmax.igemm1 = 
xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + xnn_params.qs8.gemm.mr = 4; xnn_params.qs8.gemm.nr = 4; xnn_params.qs8.gemm.log2_kr = 3; diff --git a/src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in b/src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in new file mode 100644 index 00000000000..851117e2f54 --- /dev/null +++ b/src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in @@ -0,0 +1,160 @@ +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +$assert MR <= 4 +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_${MR}x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= ${MR}); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + $for M in range(1, MR): + const int8_t* a${M} = (const int8_t*) ((uintptr_t) a${M-1} + a_stride); + int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride); + $if M % 2 == 0: + if XNN_UNPREDICTABLE(mr <= ${M}) { + a${M} = a${M-1}; + c${M} = c${M-1}; + } + $elif M + 1 == MR: + if XNN_UNPREDICTABLE(mr != ${M+1}) { + a${M} = a${M-1}; + c${M} = c${M-1}; + } + $else: + if XNN_UNPREDICTABLE(mr < ${M+1}) { + a${M} = a${M-1}; + c${M} = c${M-1}; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + $for N in range(4): + v128_t vacc0x${N} = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[${N}]); + $for M in range(1, MR): + $for N in range(4): + v128_t vacc${M}x${N} = vacc0x${N}; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + $for M in range(MR): + const v128_t vxa${M} = __builtin_wasm_load64_zero((long long*) a${M}); + a${M} += 8; + + $for N in range(4): + $if N == 0: + const v128_t vxb${N} = __builtin_wasm_load64_zero((long long*) w); + $else: + const v128_t vxb${N} = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + ${N * 8} * sizeof(int8_t))); + + $for M in range(MR): + const v128_t vprod${M}x${N} = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa${M}, vxb${N}); + vacc${M}x${N} = wasm_i32x4_add(vacc${M}x${N}, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod${M}x${N})); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + $for M in range(MR): + const v128_t vacc${M}x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x0, vacc${M}x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x0, vacc${M}x2, 2, 6, 3, 7)); + const v128_t vacc${M}x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x1, vacc${M}x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x1, vacc${M}x3, 2, 6, 3, 7)); + + $for M in range(MR): + v128_t vacc${M}x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x02, vacc${M}x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x02, vacc${M}x13, 2, 6, 3, 7)); + + $for M in range(MR): + const v128_t vsign${M}x0123 = wasm_i32x4_lt(vacc${M}x0123, vzero); + + $for M in range(MR): + const v128_t vacc${M}x01 = wasm_v32x4_shuffle(vacc${M}x0123, vsign${M}x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = 
wasm_v128_load(params->wasmsimd.rounding); + $for M in range(MR): + const v128_t vprod${M}x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc${M}x01, vmultiplier), vrounding); + const v128_t vacc${M}x23 = wasm_v32x4_shuffle(vacc${M}x0123, vsign${M}x0123, 2, 6, 3, 7); + + $for M in range(MR): + const v128_t vprod${M}x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc${M}x23, vmultiplier), vrounding); + + $for M in range(MR): + const v128_t vq31prod${M}x0123 = wasm_v32x4_shuffle(vprod${M}x01, vprod${M}x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + $for M in range(MR): + const v128_t vrem${M}x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod${M}x0123, vremainder_mask), wasm_i32x4_lt(vq31prod${M}x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + $for M in range(MR): + vacc${M}x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod${M}x0123, vshift), wasm_i32x4_gt(vrem${M}x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + $for M in range(0, MR, 2): + v128_t vacc${M}${min(M+1, MR-1)}x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point); + + $if MR > 2: + v128_t vout = wasm_i8x16_narrow_i16x8(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123); + $else: + v128_t vout = wasm_i8x16_narrow_i16x8(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + $for M in range(MR): + *((float*) c${M}) = (float) wasm_f32x4_extract_lane(vout, ${M}); + + $for M in range(MR): + a${M} = (const int8_t*) ((uintptr_t) a${M} - k); + + $for M in range(MR): + c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + $for M in range(MR): + *((uint16_t*) c${M}) = (uint16_t) wasm_i16x8_extract_lane(vout, ${M * 2}); + c${M} += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + $for M in range(MR): + *c${M} = (int8_t) wasm_i8x16_extract_lane(vout, ${M * 4}); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in b/src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in new file mode 100644 index 00000000000..1112c2997f5 --- /dev/null +++ b/src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in @@ -0,0 +1,161 @@ +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
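The two new templates differ only in how the 16-bit products of the extended multiply are folded into the 32-bit accumulators. A minimal sketch of the two accumulation steps follows, using the same builtins and intrinsics the templates themselves rely on (this assumes a Clang/LLVM WebAssembly toolchain that exposes these then-experimental builtins); it is illustrative only, not part of the patch.

#include <wasm_simd128.h>

// Sketch: fold the i16x8 product of two int8x8 inputs into an i32x4 accumulator.
// "Widen" variant: widen the low and high halves of the product separately and add twice.
static v128_t acc_extmul_widen(v128_t vacc, v128_t vxa, v128_t vxb) {
  const v128_t vprod = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa, vxb);
  vacc = wasm_i32x4_add(vacc, wasm_i32x4_widen_low_i16x8(vprod));
  return wasm_i32x4_add(vacc, wasm_i32x4_widen_high_i16x8(vprod));
}

// "ExtAddPair" variant: a single pairwise extending add folds all eight 16-bit
// products into four 32-bit lanes.
static v128_t acc_extmul_extaddpair(v128_t vacc, v128_t vxa, v128_t vxb) {
  const v128_t vprod = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa, vxb);
  return wasm_i32x4_add(vacc, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod));
}

The two variants group partial sums into lanes differently, but both accumulate all eight products of the 8-element dot-product block, so the horizontal reduction after the k loop is identical in both templates.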
+ +$assert MR <= 4 +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_${MR}x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= ${MR}); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + $for M in range(1, MR): + const int8_t* a${M} = (const int8_t*) ((uintptr_t) a${M-1} + a_stride); + int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride); + $if M % 2 == 0: + if XNN_UNPREDICTABLE(mr <= ${M}) { + a${M} = a${M-1}; + c${M} = c${M-1}; + } + $elif M + 1 == MR: + if XNN_UNPREDICTABLE(mr != ${M+1}) { + a${M} = a${M-1}; + c${M} = c${M-1}; + } + $else: + if XNN_UNPREDICTABLE(mr < ${M+1}) { + a${M} = a${M-1}; + c${M} = c${M-1}; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + $for N in range(4): + v128_t vacc0x${N} = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[${N}]); + $for M in range(1, MR): + $for N in range(4): + v128_t vacc${M}x${N} = vacc0x${N}; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + $for M in range(MR): + const v128_t vxa${M} = __builtin_wasm_load64_zero((long long*) a${M}); + a${M} += 8; + + $for N in range(4): + $if N == 0: + const v128_t vxb${N} = __builtin_wasm_load64_zero((long long*) w); + $else: + const v128_t vxb${N} = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + ${N * 8} * sizeof(int8_t))); + + $for M in range(MR): + const v128_t vprod${M}x${N} = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa${M}, vxb${N}); + vacc${M}x${N} = wasm_i32x4_add(vacc${M}x${N}, wasm_i32x4_widen_low_i16x8(vprod${M}x${N})); + vacc${M}x${N} = wasm_i32x4_add(vacc${M}x${N}, wasm_i32x4_widen_high_i16x8(vprod${M}x${N})); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + $for M in range(MR): + const v128_t vacc${M}x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x0, vacc${M}x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x0, vacc${M}x2, 2, 6, 3, 7)); + const v128_t vacc${M}x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x1, vacc${M}x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x1, vacc${M}x3, 2, 6, 3, 7)); + + $for M in range(MR): + v128_t vacc${M}x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x02, vacc${M}x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x02, vacc${M}x13, 2, 6, 3, 7)); + + $for M in range(MR): + const v128_t vsign${M}x0123 = wasm_i32x4_lt(vacc${M}x0123, vzero); + + $for M in range(MR): + const v128_t vacc${M}x01 = wasm_v32x4_shuffle(vacc${M}x0123, vsign${M}x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + $for M in range(MR): + const v128_t vprod${M}x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc${M}x01, vmultiplier), vrounding); + const v128_t vacc${M}x23 = wasm_v32x4_shuffle(vacc${M}x0123, vsign${M}x0123, 2, 6, 3, 7); + + $for M in range(MR): + const v128_t vprod${M}x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc${M}x23, vmultiplier), vrounding); + + $for M in range(MR): + const v128_t vq31prod${M}x0123 = wasm_v32x4_shuffle(vprod${M}x01, vprod${M}x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = 
wasm_v128_load(params->wasmsimd.remainder_mask); + $for M in range(MR): + const v128_t vrem${M}x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod${M}x0123, vremainder_mask), wasm_i32x4_lt(vq31prod${M}x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + $for M in range(MR): + vacc${M}x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod${M}x0123, vshift), wasm_i32x4_gt(vrem${M}x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + $for M in range(0, MR, 2): + v128_t vacc${M}${min(M+1, MR-1)}x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point); + + $if MR > 2: + v128_t vout = wasm_i8x16_narrow_i16x8(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123); + $else: + v128_t vout = wasm_i8x16_narrow_i16x8(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + $for M in range(MR): + *((float*) c${M}) = (float) wasm_f32x4_extract_lane(vout, ${M}); + + $for M in range(MR): + a${M} = (const int8_t*) ((uintptr_t) a${M} - k); + + $for M in range(MR): + c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + $for M in range(MR): + *((uint16_t*) c${M}) = (uint16_t) wasm_i16x8_extract_lane(vout, ${M * 2}); + c${M} += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + $for M in range(MR): + *c${M} = (int8_t) wasm_i8x16_extract_lane(vout, ${M * 4}); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c b/src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c new file mode 100644 index 00000000000..f11df375220 --- /dev/null +++ b/src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c @@ -0,0 +1,132 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
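For reference, a plain-C model (illustrative only, names are not from the patch) of what one pass of the generated inner loop below accumulates: 8 int8 activations from the row are multiplied against the 8 int8 weights of each of the 4 output columns, with the 32 weight bytes packed column group after column group.

#include <stdint.h>

// Scalar reference for one k-step of the 4c8 layout: w points at 4 consecutive
// groups of 8 int8 weights (one group per output column), a at 8 int8 activations.
static void qs8_gemm_4c8_kstep_ref(int32_t acc[4], const int8_t a[8], const int8_t w[32]) {
  for (int n = 0; n < 4; n++) {
    for (int k = 0; k < 8; k++) {
      acc[n] += (int32_t) a[k] * (int32_t) w[n * 8 + k];
    }
  }
}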
+ +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 1); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), 
wasm_i32x4_lt(vq31prod0x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc00x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + a0 = (const int8_t*) ((uintptr_t) a0 - k); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c b/src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c new file mode 100644 index 00000000000..2bda21c38ba --- /dev/null +++ b/src/qs8-gemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c @@ -0,0 +1,136 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
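After the k loop, each output column's sum is still spread across the four lanes of its own accumulator; the shuffle/add sequence in these kernels is a transpose-and-add that leaves the complete sum for column n in lane n of vaccMx0123. A scalar equivalent (illustrative only, not part of the patch):

#include <stdint.h>

// Scalar equivalent of the shuffle-based reduction: acc[n] holds the four
// partial lanes of output column n; out[n] is the finished 32-bit sum.
static void reduce_columns_ref(int32_t out[4], const int32_t acc[4][4]) {
  for (int n = 0; n < 4; n++) {
    out[n] = acc[n][0] + acc[n][1] + acc[n][2] + acc[n][3];
  }
}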
+ +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 1); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0)); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1)); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_high_i16x8(vprod0x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2)); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3)); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = 
wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc00x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + a0 = (const int8_t*) ((uintptr_t) a0 - k); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c b/src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c new file mode 100644 index 00000000000..6e91223d02d --- /dev/null +++ b/src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c @@ -0,0 +1,169 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
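The requantization in these kernels follows the Q31 scheme used by the existing wasmsimd QS8 kernels: a 64-bit multiply by the quantization multiplier with round-to-nearest, followed by a rounding arithmetic right shift implemented with the remainder-mask/threshold trick. A scalar model is sketched below; it is illustrative only, and the SIMD code keeps the multiplier and rounding constant in a layout suited to the 64-bit lane multiply rather than in this plain form.

#include <stdint.h>

// Scalar model of the Q31 requantization of one 32-bit accumulator.
// multiplier is the Q31 fixed-point scale and shift the right shift, conceptually
// the same quantization parameters the kernel reads from params.
// Assumes arithmetic >> on signed values, as produced by the targeted compilers.
static int32_t requantize_q31_ref(int32_t acc, int32_t multiplier, uint32_t shift) {
  // (acc * multiplier + 2^30) >> 31, computed in 64 bits to avoid overflow.
  const int64_t product = (int64_t) acc * (int64_t) multiplier;
  const int32_t q31product = (int32_t) ((product + (INT64_C(1) << 30)) >> 31);
  // Rounding arithmetic shift right by 'shift', matching the
  // remainder-mask/threshold sequence in the SIMD code.
  const int32_t remainder_mask = (int32_t) ((UINT32_C(1) << shift) - 1);
  const int32_t remainder = (q31product & remainder_mask) - (int32_t) (q31product < 0);
  const int32_t threshold = remainder_mask >> 1;
  return (q31product >> shift) + (int32_t) (remainder > threshold);
}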
+ +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 2); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr != 2) { + a1 = a0; + c1 = c0; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = 
wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc01x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + + a0 = (const int8_t*) ((uintptr_t) a0 - k); + a1 = (const int8_t*) ((uintptr_t) a1 - k); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + *((uint16_t*) c1) = (uint16_t) 
wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c b/src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c new file mode 100644 index 00000000000..132233bbae7 --- /dev/null +++ b/src/qs8-gemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c @@ -0,0 +1,177 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 2); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr != 2) { + a1 = a0; + c1 = c0; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0)); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_low_i16x8(vprod1x0)); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_high_i16x8(vprod1x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1)); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_high_i16x8(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_low_i16x8(vprod1x1)); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_high_i16x8(vprod1x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = 
__builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2)); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_low_i16x8(vprod1x2)); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_high_i16x8(vprod1x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3)); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_low_i16x8(vprod1x3)); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_high_i16x8(vprod1x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = 
params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc01x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + + a0 = (const int8_t*) ((uintptr_t) a0 - k); + a1 = (const int8_t*) ((uintptr_t) a1 - k); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c b/src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c new file mode 100644 index 00000000000..2bd998e358d --- /dev/null +++ b/src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c @@ -0,0 +1,207 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
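In the store sequence at the end of each kernel, the per-row results have already been narrowed to int8 and packed so that row M occupies byte lanes 4*M..4*M+3 of vout; the float lane extract in the nc >= 4 path is just a 4-byte move of one row's packed outputs, and the nc & 2 / nc & 1 tails peel off 2 and then 1 byte of the same lane (vout is shifted right by 16 bits between the two tail steps). A scalar picture, illustrative only:

#include <stdint.h>
#include <string.h>

// Scalar picture of the store path for one row: 'packed' is the 4 int8 outputs
// of that row, i.e. one 32-bit lane of vout; nc is the number of remaining columns.
static void store_row_ref(int8_t* c, const int8_t* packed, size_t nc) {
  if (nc >= 4) {
    memcpy(c, packed, 4);          // same effect as the float-lane store of vout
  } else {
    if (nc & 2) { memcpy(c, packed, 2); c += 2; packed += 2; }
    if (nc & 1) { *c = packed[0]; }
  }
}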
+ +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 3); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr < 2) { + a1 = a0; + c1 = c0; + } + const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); + int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 2) { + a2 = a1; + c2 = c1; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + v128_t vacc2x0 = vacc0x0; + v128_t vacc2x1 = vacc0x1; + v128_t vacc2x2 = vacc0x2; + v128_t vacc2x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + const v128_t vxa2 = __builtin_wasm_load64_zero((long long*) a2); + a2 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x0)); + const v128_t vprod2x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb0); + vacc2x0 = wasm_i32x4_add(vacc2x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x1)); + const v128_t vprod2x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb1); + vacc2x1 = wasm_i32x4_add(vacc2x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x2)); + const v128_t vprod2x2 = 
__builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb2); + vacc2x2 = wasm_i32x4_add(vacc2x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x3)); + const v128_t vprod2x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb3); + vacc2x3 = wasm_i32x4_add(vacc2x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + const v128_t vacc2x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x0, vacc2x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x0, vacc2x2, 2, 6, 3, 7)); + const v128_t vacc2x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x1, vacc2x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x1, vacc2x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + v128_t vacc2x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x02, vacc2x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x02, vacc2x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + const v128_t vsign2x0123 = wasm_i32x4_lt(vacc2x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + const v128_t vacc2x01 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + const v128_t vprod2x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x01, vmultiplier), vrounding); + const v128_t vacc2x23 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + const v128_t vprod2x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x23, vmultiplier), vrounding); + + const v128_t 
vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + const v128_t vq31prod2x0123 = wasm_v32x4_shuffle(vprod2x01, vprod2x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + const v128_t vrem2x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod2x0123, vremainder_mask), wasm_i32x4_lt(vq31prod2x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + vacc2x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod2x0123, vshift), wasm_i32x4_gt(vrem2x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + v128_t vacc22x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2); + + a0 = (const int8_t*) ((uintptr_t) a0 - k); + a1 = (const int8_t*) ((uintptr_t) a1 - k); + a2 = (const int8_t*) ((uintptr_t) a2 - k); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c2) = (uint16_t) wasm_i16x8_extract_lane(vout, 4); + c2 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c2 = (int8_t) wasm_i8x16_extract_lane(vout, 8); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c b/src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c new file mode 100644 index 00000000000..7bb56dfe06e --- /dev/null +++ b/src/qs8-gemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c @@ -0,0 +1,219 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
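The accumulator setup at the top of the main loop reads the four int32 column biases that lead each packed weight block and drops each one into lane 0 of an otherwise zero accumulator; the float load plus wasm_f32x4_replace_lane is simply a bit-for-bit way to move the int32 bias into a vector lane. Because the final reduction sums every lane of every accumulator, carrying the bias in a single lane is equivalent to starting that column's dot product at its bias. A scalar equivalent (illustrative only):

#include <stdint.h>

// Scalar equivalent of the accumulator initialization: the packed weights start
// with one int32 bias per output column; only one lane needs to carry it since
// all four lanes are summed during the final reduction.
static void init_acc_ref(int32_t acc[4][4], const int32_t bias[4]) {
  for (int n = 0; n < 4; n++) {
    acc[n][0] = bias[n];
    acc[n][1] = 0;
    acc[n][2] = 0;
    acc[n][3] = 0;
  }
}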
+ +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 3); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr < 2) { + a1 = a0; + c1 = c0; + } + const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); + int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 2) { + a2 = a1; + c2 = c1; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + v128_t vacc2x0 = vacc0x0; + v128_t vacc2x1 = vacc0x1; + v128_t vacc2x2 = vacc0x2; + v128_t vacc2x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + const v128_t vxa2 = __builtin_wasm_load64_zero((long long*) a2); + a2 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0)); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_low_i16x8(vprod1x0)); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_high_i16x8(vprod1x0)); + const v128_t vprod2x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb0); + vacc2x0 = wasm_i32x4_add(vacc2x0, wasm_i32x4_widen_low_i16x8(vprod2x0)); + vacc2x0 = wasm_i32x4_add(vacc2x0, wasm_i32x4_widen_high_i16x8(vprod2x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1)); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_high_i16x8(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_low_i16x8(vprod1x1)); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_high_i16x8(vprod1x1)); + const v128_t vprod2x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb1); + vacc2x1 = wasm_i32x4_add(vacc2x1, wasm_i32x4_widen_low_i16x8(vprod2x1)); + vacc2x1 = wasm_i32x4_add(vacc2x1, wasm_i32x4_widen_high_i16x8(vprod2x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = 
__builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2)); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_low_i16x8(vprod1x2)); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_high_i16x8(vprod1x2)); + const v128_t vprod2x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb2); + vacc2x2 = wasm_i32x4_add(vacc2x2, wasm_i32x4_widen_low_i16x8(vprod2x2)); + vacc2x2 = wasm_i32x4_add(vacc2x2, wasm_i32x4_widen_high_i16x8(vprod2x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3)); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_low_i16x8(vprod1x3)); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_high_i16x8(vprod1x3)); + const v128_t vprod2x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb3); + vacc2x3 = wasm_i32x4_add(vacc2x3, wasm_i32x4_widen_low_i16x8(vprod2x3)); + vacc2x3 = wasm_i32x4_add(vacc2x3, wasm_i32x4_widen_high_i16x8(vprod2x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + const v128_t vacc2x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x0, vacc2x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x0, vacc2x2, 2, 6, 3, 7)); + const v128_t vacc2x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x1, vacc2x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x1, vacc2x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + v128_t vacc2x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x02, vacc2x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x02, vacc2x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + const v128_t vsign2x0123 = wasm_i32x4_lt(vacc2x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + const v128_t vacc2x01 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = 
wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + const v128_t vprod2x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x01, vmultiplier), vrounding); + const v128_t vacc2x23 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + const v128_t vprod2x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + const v128_t vq31prod2x0123 = wasm_v32x4_shuffle(vprod2x01, vprod2x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + const v128_t vrem2x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod2x0123, vremainder_mask), wasm_i32x4_lt(vq31prod2x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + vacc2x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod2x0123, vshift), wasm_i32x4_gt(vrem2x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + v128_t vacc22x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2); + + a0 = (const int8_t*) ((uintptr_t) a0 - k); + a1 = (const int8_t*) ((uintptr_t) a1 - k); + a2 = (const int8_t*) ((uintptr_t) a2 - k); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c2) = (uint16_t) wasm_i16x8_extract_lane(vout, 4); + c2 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c2 = (int8_t) wasm_i8x16_extract_lane(vout, 8); + } + + nc = 0; + } + } while (nc != 0); +} diff --git 
a/src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c b/src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c new file mode 100644 index 00000000000..ef05ed60872 --- /dev/null +++ b/src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c @@ -0,0 +1,244 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-gemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 4); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr < 2) { + a1 = a0; + c1 = c0; + } + const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); + int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 2) { + a2 = a1; + c2 = c1; + } + const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); + int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); + if XNN_UNPREDICTABLE(mr != 4) { + a3 = a2; + c3 = c2; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + v128_t vacc2x0 = vacc0x0; + v128_t vacc2x1 = vacc0x1; + v128_t vacc2x2 = vacc0x2; + v128_t vacc2x3 = vacc0x3; + v128_t vacc3x0 = vacc0x0; + v128_t vacc3x1 = vacc0x1; + v128_t vacc3x2 = vacc0x2; + v128_t vacc3x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + const v128_t vxa2 = __builtin_wasm_load64_zero((long long*) a2); + a2 += 8; + const v128_t vxa3 = __builtin_wasm_load64_zero((long long*) a3); + a3 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x0)); + const v128_t vprod2x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb0); + vacc2x0 = wasm_i32x4_add(vacc2x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x0)); + const v128_t vprod3x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb0); + vacc3x0 = wasm_i32x4_add(vacc3x0, 
__builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod3x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x1)); + const v128_t vprod2x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb1); + vacc2x1 = wasm_i32x4_add(vacc2x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x1)); + const v128_t vprod3x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb1); + vacc3x1 = wasm_i32x4_add(vacc3x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod3x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x2)); + const v128_t vprod2x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb2); + vacc2x2 = wasm_i32x4_add(vacc2x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x2)); + const v128_t vprod3x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb2); + vacc3x2 = wasm_i32x4_add(vacc3x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod3x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x3)); + const v128_t vprod2x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb3); + vacc2x3 = wasm_i32x4_add(vacc2x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x3)); + const v128_t vprod3x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb3); + vacc3x3 = wasm_i32x4_add(vacc3x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod3x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + const v128_t vacc2x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x0, vacc2x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x0, vacc2x2, 2, 6, 3, 7)); + const v128_t vacc2x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x1, vacc2x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x1, vacc2x3, 2, 6, 3, 7)); + const v128_t vacc3x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x0, vacc3x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x0, vacc3x2, 2, 6, 3, 7)); + const v128_t vacc3x13 = 
wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x1, vacc3x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x1, vacc3x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + v128_t vacc2x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x02, vacc2x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x02, vacc2x13, 2, 6, 3, 7)); + v128_t vacc3x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x02, vacc3x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x02, vacc3x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + const v128_t vsign2x0123 = wasm_i32x4_lt(vacc2x0123, vzero); + const v128_t vsign3x0123 = wasm_i32x4_lt(vacc3x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + const v128_t vacc2x01 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 0, 4, 1, 5); + const v128_t vacc3x01 = wasm_v32x4_shuffle(vacc3x0123, vsign3x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + const v128_t vprod2x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x01, vmultiplier), vrounding); + const v128_t vacc2x23 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 2, 6, 3, 7); + const v128_t vprod3x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc3x01, vmultiplier), vrounding); + const v128_t vacc3x23 = wasm_v32x4_shuffle(vacc3x0123, vsign3x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + const v128_t vprod2x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x23, vmultiplier), vrounding); + const v128_t vprod3x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc3x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + const v128_t vq31prod2x0123 = wasm_v32x4_shuffle(vprod2x01, vprod2x23, 1, 3, 5, 7); + const v128_t vq31prod3x0123 = wasm_v32x4_shuffle(vprod3x01, vprod3x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + const v128_t vrem2x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod2x0123, vremainder_mask), wasm_i32x4_lt(vq31prod2x0123, vzero)); + const v128_t vrem3x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod3x0123, vremainder_mask), wasm_i32x4_lt(vq31prod3x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift 
= params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + vacc2x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod2x0123, vshift), wasm_i32x4_gt(vrem2x0123, vthreshold)); + vacc3x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod3x0123, vshift), wasm_i32x4_gt(vrem3x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + v128_t vacc23x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2); + *((float*) c3) = (float) wasm_f32x4_extract_lane(vout, 3); + + a0 = (const int8_t*) ((uintptr_t) a0 - k); + a1 = (const int8_t*) ((uintptr_t) a1 - k); + a2 = (const int8_t*) ((uintptr_t) a2 - k); + a3 = (const int8_t*) ((uintptr_t) a3 - k); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); + c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c2) = (uint16_t) wasm_i16x8_extract_lane(vout, 4); + c2 += 2; + *((uint16_t*) c3) = (uint16_t) wasm_i16x8_extract_lane(vout, 6); + c3 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c2 = (int8_t) wasm_i8x16_extract_lane(vout, 8); + *c3 = (int8_t) wasm_i8x16_extract_lane(vout, 12); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c b/src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c new file mode 100644 index 00000000000..04ec0f3d7bb --- /dev/null +++ b/src/qs8-gemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c @@ -0,0 +1,260 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-gemm/MRx4c8-wasmsimd-extmul-widen.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
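// --- Reference sketch (editor's addition, not part of the generated sources) ---------------
// The wasm_v32x4_shuffle / wasm_i32x4_add pairs that follow the K loop in these kernels are a
// 4x4 transpose-and-reduce: starting from the per-column accumulators vaccMx0..vaccMx3, lane n
// of the resulting vaccMx0123 holds the horizontal sum of vaccMxn, i.e. the finished int32 dot
// product for output column n of row M.  Scalar model:
#include <stdint.h>

static void qs8_reduce_4x4_reference(const int32_t acc[4][4] /* acc[n][lane] */, int32_t out[4]) {
  for (int n = 0; n < 4; n++) {
    out[n] = acc[n][0] + acc[n][1] + acc[n][2] + acc[n][3];  // lane n of vaccMx0123
  }
}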
+ +#include + +#include + +#include + + +void xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + const int8_t* restrict a, + size_t a_stride, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 4); + assert(nc != 0); + assert(kc != 0); + assert(kc % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + const int8_t* a0 = a; + int8_t* c0 = c; + const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr < 2) { + a1 = a0; + c1 = c0; + } + const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); + int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 2) { + a2 = a1; + c2 = c1; + } + const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); + int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); + if XNN_UNPREDICTABLE(mr != 4) { + a3 = a2; + c3 = c2; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + v128_t vacc2x0 = vacc0x0; + v128_t vacc2x1 = vacc0x1; + v128_t vacc2x2 = vacc0x2; + v128_t vacc2x3 = vacc0x3; + v128_t vacc3x0 = vacc0x0; + v128_t vacc3x1 = vacc0x1; + v128_t vacc3x2 = vacc0x2; + v128_t vacc3x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + const v128_t vxa2 = __builtin_wasm_load64_zero((long long*) a2); + a2 += 8; + const v128_t vxa3 = __builtin_wasm_load64_zero((long long*) a3); + a3 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0)); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_low_i16x8(vprod1x0)); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_high_i16x8(vprod1x0)); + const v128_t vprod2x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb0); + vacc2x0 = wasm_i32x4_add(vacc2x0, wasm_i32x4_widen_low_i16x8(vprod2x0)); + vacc2x0 = wasm_i32x4_add(vacc2x0, wasm_i32x4_widen_high_i16x8(vprod2x0)); + const v128_t vprod3x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb0); + vacc3x0 = wasm_i32x4_add(vacc3x0, wasm_i32x4_widen_low_i16x8(vprod3x0)); + vacc3x0 = wasm_i32x4_add(vacc3x0, wasm_i32x4_widen_high_i16x8(vprod3x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1)); + vacc0x1 = wasm_i32x4_add(vacc0x1, 
wasm_i32x4_widen_high_i16x8(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_low_i16x8(vprod1x1)); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_high_i16x8(vprod1x1)); + const v128_t vprod2x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb1); + vacc2x1 = wasm_i32x4_add(vacc2x1, wasm_i32x4_widen_low_i16x8(vprod2x1)); + vacc2x1 = wasm_i32x4_add(vacc2x1, wasm_i32x4_widen_high_i16x8(vprod2x1)); + const v128_t vprod3x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb1); + vacc3x1 = wasm_i32x4_add(vacc3x1, wasm_i32x4_widen_low_i16x8(vprod3x1)); + vacc3x1 = wasm_i32x4_add(vacc3x1, wasm_i32x4_widen_high_i16x8(vprod3x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2)); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_low_i16x8(vprod1x2)); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_high_i16x8(vprod1x2)); + const v128_t vprod2x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb2); + vacc2x2 = wasm_i32x4_add(vacc2x2, wasm_i32x4_widen_low_i16x8(vprod2x2)); + vacc2x2 = wasm_i32x4_add(vacc2x2, wasm_i32x4_widen_high_i16x8(vprod2x2)); + const v128_t vprod3x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb2); + vacc3x2 = wasm_i32x4_add(vacc3x2, wasm_i32x4_widen_low_i16x8(vprod3x2)); + vacc3x2 = wasm_i32x4_add(vacc3x2, wasm_i32x4_widen_high_i16x8(vprod3x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3)); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_low_i16x8(vprod1x3)); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_high_i16x8(vprod1x3)); + const v128_t vprod2x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb3); + vacc2x3 = wasm_i32x4_add(vacc2x3, wasm_i32x4_widen_low_i16x8(vprod2x3)); + vacc2x3 = wasm_i32x4_add(vacc2x3, wasm_i32x4_widen_high_i16x8(vprod2x3)); + const v128_t vprod3x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb3); + vacc3x3 = wasm_i32x4_add(vacc3x3, wasm_i32x4_widen_low_i16x8(vprod3x3)); + vacc3x3 = wasm_i32x4_add(vacc3x3, wasm_i32x4_widen_high_i16x8(vprod3x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + const v128_t vacc2x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x0, vacc2x2, 0, 4, 1, 5), 
wasm_v32x4_shuffle(vacc2x0, vacc2x2, 2, 6, 3, 7)); + const v128_t vacc2x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x1, vacc2x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x1, vacc2x3, 2, 6, 3, 7)); + const v128_t vacc3x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x0, vacc3x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x0, vacc3x2, 2, 6, 3, 7)); + const v128_t vacc3x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x1, vacc3x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x1, vacc3x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + v128_t vacc2x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x02, vacc2x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x02, vacc2x13, 2, 6, 3, 7)); + v128_t vacc3x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x02, vacc3x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x02, vacc3x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + const v128_t vsign2x0123 = wasm_i32x4_lt(vacc2x0123, vzero); + const v128_t vsign3x0123 = wasm_i32x4_lt(vacc3x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + const v128_t vacc2x01 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 0, 4, 1, 5); + const v128_t vacc3x01 = wasm_v32x4_shuffle(vacc3x0123, vsign3x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + const v128_t vprod2x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x01, vmultiplier), vrounding); + const v128_t vacc2x23 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 2, 6, 3, 7); + const v128_t vprod3x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc3x01, vmultiplier), vrounding); + const v128_t vacc3x23 = wasm_v32x4_shuffle(vacc3x0123, vsign3x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + const v128_t vprod2x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x23, vmultiplier), vrounding); + const v128_t vprod3x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc3x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + const v128_t vq31prod2x0123 = wasm_v32x4_shuffle(vprod2x01, vprod2x23, 1, 3, 5, 7); + const v128_t vq31prod3x0123 = wasm_v32x4_shuffle(vprod3x01, vprod3x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + 
const v128_t vrem2x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod2x0123, vremainder_mask), wasm_i32x4_lt(vq31prod2x0123, vzero)); + const v128_t vrem3x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod3x0123, vremainder_mask), wasm_i32x4_lt(vq31prod3x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + vacc2x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod2x0123, vshift), wasm_i32x4_gt(vrem2x0123, vthreshold)); + vacc3x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod3x0123, vshift), wasm_i32x4_gt(vrem3x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + v128_t vacc23x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2); + *((float*) c3) = (float) wasm_f32x4_extract_lane(vout, 3); + + a0 = (const int8_t*) ((uintptr_t) a0 - k); + a1 = (const int8_t*) ((uintptr_t) a1 - k); + a2 = (const int8_t*) ((uintptr_t) a2 - k); + a3 = (const int8_t*) ((uintptr_t) a3 - k); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); + c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c2) = (uint16_t) wasm_i16x8_extract_lane(vout, 4); + c2 += 2; + *((uint16_t*) c3) = (uint16_t) wasm_i16x8_extract_lane(vout, 6); + c3 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c2 = (int8_t) wasm_i8x16_extract_lane(vout, 8); + *c3 = (int8_t) wasm_i8x16_extract_lane(vout, 12); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in b/src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in new file mode 100644 index 00000000000..cf4fe7370e7 --- /dev/null +++ b/src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in @@ -0,0 +1,169 @@ +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
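// --- Reference sketch (editor's addition, not part of the generated sources) ---------------
// The remainder_mask / remainder_threshold arithmetic in the requantization above implements a
// rounding arithmetic right shift of the Q31 product: with the values XNNPACK places in params
// (mask = (1 << shift) - 1, threshold = mask >> 1), the result is rounded to nearest with ties
// away from zero.  Scalar model of the corresponding vector statements:
#include <stdint.h>

static int32_t qs8_rounding_shift_reference(
    int32_t q31prod, uint32_t shift, int32_t remainder_mask, int32_t remainder_threshold)
{
  // vrem = (q31prod & mask) + (q31prod < 0 ? -1 : 0)
  const int32_t remainder = (q31prod & remainder_mask) - (int32_t) (q31prod < 0);
  // vacc = (q31prod >> shift) + (vrem > threshold ? 1 : 0); assumes arithmetic >> on int32_t
  return (q31prod >> shift) + (int32_t) (remainder > remainder_threshold);
}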
+ +$assert MR <= 4 +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_${MR}x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= ${MR}); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (${MR} * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + $for M in range(1, MR): + int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride); + $if M % 2 == 0: + if XNN_UNPREDICTABLE(mr <= ${M}) { + c${M} = c${M-1}; + } + $elif M + 1 == MR: + if XNN_UNPREDICTABLE(mr != ${M+1}) { + c${M} = c${M-1}; + } + $else: + if XNN_UNPREDICTABLE(mr < ${M+1}) { + c${M} = c${M-1}; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + $for N in range(4): + v128_t vacc0x${N} = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[${N}]); + $for M in range(1, MR): + $for N in range(4): + v128_t vacc${M}x${N} = vacc0x${N}; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + $for M in range(MR): + const int8_t* restrict a${M} = a[${M}]; + if XNN_UNPREDICTABLE(a${M} != zero) { + a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset); + } + a += ${MR}; + + size_t k = 0; + while (k < kc) { + $for M in range(MR): + const v128_t vxa${M} = __builtin_wasm_load64_zero((long long*) a${M}); + a${M} += 8; + + $for N in range(4): + $if N == 0: + const v128_t vxb${N} = __builtin_wasm_load64_zero((long long*) w); + $else: + const v128_t vxb${N} = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + ${N * 8} * sizeof(int8_t))); + + $for M in range(MR): + const v128_t vprod${M}x${N} = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa${M}, vxb${N}); + vacc${M}x${N} = wasm_i32x4_add(vacc${M}x${N}, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod${M}x${N})); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= ${MR} * sizeof(void*); + } while (p != 0); + + $for M in range(MR): + const v128_t vacc${M}x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x0, vacc${M}x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x0, vacc${M}x2, 2, 6, 3, 7)); + const v128_t vacc${M}x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x1, vacc${M}x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x1, vacc${M}x3, 2, 6, 3, 7)); + + $for M in range(MR): + v128_t vacc${M}x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x02, vacc${M}x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x02, vacc${M}x13, 2, 6, 3, 7)); + + $for M in range(MR): + const v128_t vsign${M}x0123 = wasm_i32x4_lt(vacc${M}x0123, vzero); + + $for M in range(MR): + const v128_t vacc${M}x01 = wasm_v32x4_shuffle(vacc${M}x0123, vsign${M}x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + $for M in range(MR): + const v128_t vprod${M}x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc${M}x01, vmultiplier), vrounding); + const v128_t vacc${M}x23 = wasm_v32x4_shuffle(vacc${M}x0123, vsign${M}x0123, 2, 6, 3, 7); + + $for M in range(MR): + const v128_t vprod${M}x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc${M}x23, vmultiplier), vrounding); + + $for M in range(MR): + const v128_t vq31prod${M}x0123 = 
wasm_v32x4_shuffle(vprod${M}x01, vprod${M}x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + $for M in range(MR): + const v128_t vrem${M}x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod${M}x0123, vremainder_mask), wasm_i32x4_lt(vq31prod${M}x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + $for M in range(MR): + vacc${M}x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod${M}x0123, vshift), wasm_i32x4_gt(vrem${M}x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + $for M in range(0, MR, 2): + v128_t vacc${M}${min(M+1, MR-1)}x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point); + + $if MR > 2: + v128_t vout = wasm_i8x16_narrow_i16x8(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123); + $else: + v128_t vout = wasm_i8x16_narrow_i16x8(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + $for M in reversed(range(MR)): + *((float*) c${M}) = (float) wasm_f32x4_extract_lane(vout, ${M}); + + $for M in reversed(range(MR)): + c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + $for M in reversed(range(MR)): + *((uint16_t*) c${M}) = (uint16_t) wasm_i16x8_extract_lane(vout, ${M * 2}); + c${M} += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + $for M in reversed(range(MR)): + *c${M} = (int8_t) wasm_i8x16_extract_lane(vout, ${M * 4}); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in b/src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in new file mode 100644 index 00000000000..cd9bd7c5c92 --- /dev/null +++ b/src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in @@ -0,0 +1,170 @@ +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
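// --- Reference sketch (editor's addition, not part of the generated sources) ---------------
// Unlike the GEMM kernels, the IGEMM templates read A through an indirection buffer: each pass
// over ks consumes MR row pointers from a[], rebases every pointer except the shared zero
// buffer by a_offset, runs the kc loop, and repeats until p reaches zero.  Minimal sketch of
// that pointer walk (the kc loop body is elided):
#include <stddef.h>
#include <stdint.h>

static void qs8_igemm_pointer_walk_reference(
    size_t mr, size_t ks, size_t a_offset, const int8_t** a, const int8_t* zero)
{
  for (size_t p = ks; p != 0; p -= mr * sizeof(void*)) {
    for (size_t m = 0; m < mr; m++) {
      const int8_t* am = a[m];
      if (am != zero) {
        am = (const int8_t*) ((uintptr_t) am + a_offset);  // rebase into the packed input
      }
      (void) am;  // the kernel runs its kc loop over am here
    }
    a += mr;
  }
}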
+ +$assert MR <= 4 +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_${MR}x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= ${MR}); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (${MR} * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + $for M in range(1, MR): + int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride); + $if M % 2 == 0: + if XNN_UNPREDICTABLE(mr <= ${M}) { + c${M} = c${M-1}; + } + $elif M + 1 == MR: + if XNN_UNPREDICTABLE(mr != ${M+1}) { + c${M} = c${M-1}; + } + $else: + if XNN_UNPREDICTABLE(mr < ${M+1}) { + c${M} = c${M-1}; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + $for N in range(4): + v128_t vacc0x${N} = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[${N}]); + $for M in range(1, MR): + $for N in range(4): + v128_t vacc${M}x${N} = vacc0x${N}; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + $for M in range(MR): + const int8_t* restrict a${M} = a[${M}]; + if XNN_UNPREDICTABLE(a${M} != zero) { + a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset); + } + a += ${MR}; + + size_t k = 0; + while (k < kc) { + $for M in range(MR): + const v128_t vxa${M} = __builtin_wasm_load64_zero((long long*) a${M}); + a${M} += 8; + + $for N in range(4): + $if N == 0: + const v128_t vxb${N} = __builtin_wasm_load64_zero((long long*) w); + $else: + const v128_t vxb${N} = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + ${N * 8} * sizeof(int8_t))); + + $for M in range(MR): + const v128_t vprod${M}x${N} = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa${M}, vxb${N}); + vacc${M}x${N} = wasm_i32x4_add(vacc${M}x${N}, wasm_i32x4_widen_low_i16x8(vprod${M}x${N})); + vacc${M}x${N} = wasm_i32x4_add(vacc${M}x${N}, wasm_i32x4_widen_high_i16x8(vprod${M}x${N})); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= ${MR} * sizeof(void*); + } while (p != 0); + + $for M in range(MR): + const v128_t vacc${M}x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x0, vacc${M}x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x0, vacc${M}x2, 2, 6, 3, 7)); + const v128_t vacc${M}x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x1, vacc${M}x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x1, vacc${M}x3, 2, 6, 3, 7)); + + $for M in range(MR): + v128_t vacc${M}x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc${M}x02, vacc${M}x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc${M}x02, vacc${M}x13, 2, 6, 3, 7)); + + $for M in range(MR): + const v128_t vsign${M}x0123 = wasm_i32x4_lt(vacc${M}x0123, vzero); + + $for M in range(MR): + const v128_t vacc${M}x01 = wasm_v32x4_shuffle(vacc${M}x0123, vsign${M}x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + $for M in range(MR): + const v128_t vprod${M}x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc${M}x01, vmultiplier), vrounding); + const v128_t vacc${M}x23 = wasm_v32x4_shuffle(vacc${M}x0123, vsign${M}x0123, 2, 6, 3, 7); + + $for M in range(MR): + const v128_t vprod${M}x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc${M}x23, vmultiplier), 
vrounding); + + $for M in range(MR): + const v128_t vq31prod${M}x0123 = wasm_v32x4_shuffle(vprod${M}x01, vprod${M}x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + $for M in range(MR): + const v128_t vrem${M}x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod${M}x0123, vremainder_mask), wasm_i32x4_lt(vq31prod${M}x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + $for M in range(MR): + vacc${M}x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod${M}x0123, vshift), wasm_i32x4_gt(vrem${M}x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + $for M in range(0, MR, 2): + v128_t vacc${M}${min(M+1, MR-1)}x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point); + + $if MR > 2: + v128_t vout = wasm_i8x16_narrow_i16x8(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123); + $else: + v128_t vout = wasm_i8x16_narrow_i16x8(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + $for M in reversed(range(MR)): + *((float*) c${M}) = (float) wasm_f32x4_extract_lane(vout, ${M}); + + $for M in reversed(range(MR)): + c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + $for M in reversed(range(MR)): + *((uint16_t*) c${M}) = (uint16_t) wasm_i16x8_extract_lane(vout, ${M * 2}); + c${M} += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + $for M in reversed(range(MR)): + *c${M} = (int8_t) wasm_i8x16_extract_lane(vout, ${M * 4}); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c b/src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c new file mode 100644 index 00000000000..f01cd13ab43 --- /dev/null +++ b/src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-extaddpair.c @@ -0,0 +1,145 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
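// --- Reference sketch (editor's addition, not part of the generated sources) ---------------
// The accumulator initialization
//   v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]);
// reinterprets the int32 bias as float only to get a single 32-bit load into lane 0 with the
// remaining lanes zeroed; the value is never used as a float.  With a newer wasm_simd128.h the
// same load can be written directly (assuming the toolchain provides wasm_v128_load32_zero):
#include <wasm_simd128.h>

static inline v128_t qs8_load_bias_lane0(const void* w) {
  return wasm_v128_load32_zero(w);  // int32 bias in lane 0, lanes 1..3 zero
}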
+ +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 1); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (1 * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + const int8_t* restrict a0 = a[0]; + if XNN_UNPREDICTABLE(a0 != zero) { + a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); + } + a += 1; + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= 1 * sizeof(void*); + } while (p != 0); + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, 
vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc00x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c b/src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c new file mode 100644 index 00000000000..76904e22684 --- /dev/null +++ b/src/qs8-igemm/gen/1x4c8-minmax-wasmsimd-extmul-widen.c @@ -0,0 +1,149 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
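// --- Reference sketch (editor's addition, not part of the generated sources) ---------------
// The main-path store
//   *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0);
// writes four int8 outputs at once: the 32 bits of lane 0 are reinterpreted through a float
// lane and stored unchanged.  An equivalent that avoids the float detour (hypothetical helper,
// not an XNNPACK function):
#include <string.h>
#include <stdint.h>
#include <wasm_simd128.h>

static inline void qs8_store_row0_4xint8(int8_t* c, v128_t vout) {
  const int32_t bits = wasm_i32x4_extract_lane(vout, 0);  // bytes 0..3 of the i8x16 result
  memcpy(c, &bits, sizeof(bits));                         // unaligned-safe 4-byte store
}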
+ +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 1); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (1 * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + const int8_t* restrict a0 = a[0]; + if XNN_UNPREDICTABLE(a0 != zero) { + a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); + } + a += 1; + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0)); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1)); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_high_i16x8(vprod0x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2)); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3)); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= 1 * sizeof(void*); + } while (p != 0); + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t 
vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc00x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c b/src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c new file mode 100644 index 00000000000..ce9408a686b --- /dev/null +++ b/src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-extaddpair.c @@ -0,0 +1,183 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
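+//
+// Note: the "extmul-extaddpair" variant below forms the same 16-bit products with
+// i8x16 extmul_low, but accumulates them into 32-bit lanes with a single
+// i16x8 extadd_pairwise step per product instead of two widen-and-add steps.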
+ +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 2); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (2 * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr != 2) { + c1 = c0; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + const int8_t* restrict a0 = a[0]; + if XNN_UNPREDICTABLE(a0 != zero) { + a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); + } + const int8_t* restrict a1 = a[1]; + if XNN_UNPREDICTABLE(a1 != zero) { + a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); + } + a += 2; + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x3)); 
+ + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= 2 * sizeof(void*); + } while (p != 0); + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc01x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + 
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c b/src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c new file mode 100644 index 00000000000..918b4bce52e --- /dev/null +++ b/src/qs8-igemm/gen/2x4c8-minmax-wasmsimd-extmul-widen.c @@ -0,0 +1,191 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 2); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (2 * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr != 2) { + c1 = c0; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + const int8_t* restrict a0 = a[0]; + if XNN_UNPREDICTABLE(a0 != zero) { + a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); + } + const int8_t* restrict a1 = a[1]; + if XNN_UNPREDICTABLE(a1 != zero) { + a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); + } + a += 2; + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0)); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_low_i16x8(vprod1x0)); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_high_i16x8(vprod1x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = 
__builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1)); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_high_i16x8(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_low_i16x8(vprod1x1)); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_high_i16x8(vprod1x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2)); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_low_i16x8(vprod1x2)); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_high_i16x8(vprod1x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3)); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_low_i16x8(vprod1x3)); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_high_i16x8(vprod1x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= 2 * sizeof(void*); + } while (p != 0); + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + + const v128_t 
vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc01x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c b/src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c new file mode 100644 index 00000000000..856836519f7 --- /dev/null +++ b/src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-extaddpair.c @@ -0,0 +1,222 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
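+//
+// Note: multi-row kernels alias the extra output pointers (c1, c2, ...) onto the
+// previous row when mr is smaller, and rows are stored from highest to lowest, so
+// the valid row's result is written last and the redundant stores are harmless.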
+ +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 3); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (3 * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr < 2) { + c1 = c0; + } + int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 2) { + c2 = c1; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + v128_t vacc2x0 = vacc0x0; + v128_t vacc2x1 = vacc0x1; + v128_t vacc2x2 = vacc0x2; + v128_t vacc2x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + const int8_t* restrict a0 = a[0]; + if XNN_UNPREDICTABLE(a0 != zero) { + a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); + } + const int8_t* restrict a1 = a[1]; + if XNN_UNPREDICTABLE(a1 != zero) { + a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); + } + const int8_t* restrict a2 = a[2]; + if XNN_UNPREDICTABLE(a2 != zero) { + a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); + } + a += 3; + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + const v128_t vxa2 = __builtin_wasm_load64_zero((long long*) a2); + a2 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x0)); + const v128_t vprod2x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb0); + vacc2x0 = wasm_i32x4_add(vacc2x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x1)); + const v128_t vprod2x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb1); + vacc2x1 = wasm_i32x4_add(vacc2x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = 
__builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x2)); + const v128_t vprod2x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb2); + vacc2x2 = wasm_i32x4_add(vacc2x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x3)); + const v128_t vprod2x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb3); + vacc2x3 = wasm_i32x4_add(vacc2x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= 3 * sizeof(void*); + } while (p != 0); + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + const v128_t vacc2x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x0, vacc2x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x0, vacc2x2, 2, 6, 3, 7)); + const v128_t vacc2x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x1, vacc2x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x1, vacc2x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + v128_t vacc2x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x02, vacc2x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x02, vacc2x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + const v128_t vsign2x0123 = wasm_i32x4_lt(vacc2x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + const v128_t vacc2x01 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + const v128_t vprod2x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x01, vmultiplier), 
vrounding); + const v128_t vacc2x23 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + const v128_t vprod2x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + const v128_t vq31prod2x0123 = wasm_v32x4_shuffle(vprod2x01, vprod2x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + const v128_t vrem2x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod2x0123, vremainder_mask), wasm_i32x4_lt(vq31prod2x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + vacc2x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod2x0123, vshift), wasm_i32x4_gt(vrem2x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + v128_t vacc22x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c2) = (uint16_t) wasm_i16x8_extract_lane(vout, 4); + c2 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c2 = (int8_t) wasm_i8x16_extract_lane(vout, 8); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c b/src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c new file mode 100644 index 00000000000..b5a1f81eb51 --- /dev/null +++ b/src/qs8-igemm/gen/3x4c8-minmax-wasmsimd-extmul-widen.c @@ -0,0 +1,234 @@ +// Auto-generated file. Do not edit! 
+// Template: src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 3); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (3 * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr < 2) { + c1 = c0; + } + int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 2) { + c2 = c1; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + v128_t vacc2x0 = vacc0x0; + v128_t vacc2x1 = vacc0x1; + v128_t vacc2x2 = vacc0x2; + v128_t vacc2x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + const int8_t* restrict a0 = a[0]; + if XNN_UNPREDICTABLE(a0 != zero) { + a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); + } + const int8_t* restrict a1 = a[1]; + if XNN_UNPREDICTABLE(a1 != zero) { + a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); + } + const int8_t* restrict a2 = a[2]; + if XNN_UNPREDICTABLE(a2 != zero) { + a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); + } + a += 3; + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + const v128_t vxa2 = __builtin_wasm_load64_zero((long long*) a2); + a2 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0)); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_low_i16x8(vprod1x0)); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_high_i16x8(vprod1x0)); + const v128_t vprod2x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb0); + vacc2x0 = wasm_i32x4_add(vacc2x0, wasm_i32x4_widen_low_i16x8(vprod2x0)); + vacc2x0 = wasm_i32x4_add(vacc2x0, wasm_i32x4_widen_high_i16x8(vprod2x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1)); + vacc0x1 = wasm_i32x4_add(vacc0x1, 
wasm_i32x4_widen_high_i16x8(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_low_i16x8(vprod1x1)); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_high_i16x8(vprod1x1)); + const v128_t vprod2x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb1); + vacc2x1 = wasm_i32x4_add(vacc2x1, wasm_i32x4_widen_low_i16x8(vprod2x1)); + vacc2x1 = wasm_i32x4_add(vacc2x1, wasm_i32x4_widen_high_i16x8(vprod2x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2)); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_low_i16x8(vprod1x2)); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_high_i16x8(vprod1x2)); + const v128_t vprod2x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb2); + vacc2x2 = wasm_i32x4_add(vacc2x2, wasm_i32x4_widen_low_i16x8(vprod2x2)); + vacc2x2 = wasm_i32x4_add(vacc2x2, wasm_i32x4_widen_high_i16x8(vprod2x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3)); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_low_i16x8(vprod1x3)); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_high_i16x8(vprod1x3)); + const v128_t vprod2x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb3); + vacc2x3 = wasm_i32x4_add(vacc2x3, wasm_i32x4_widen_low_i16x8(vprod2x3)); + vacc2x3 = wasm_i32x4_add(vacc2x3, wasm_i32x4_widen_high_i16x8(vprod2x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= 3 * sizeof(void*); + } while (p != 0); + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + const v128_t vacc2x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x0, vacc2x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x0, vacc2x2, 2, 6, 3, 7)); + const v128_t vacc2x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x1, vacc2x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x1, vacc2x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + v128_t vacc2x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x02, vacc2x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x02, vacc2x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = 
wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + const v128_t vsign2x0123 = wasm_i32x4_lt(vacc2x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + const v128_t vacc2x01 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + const v128_t vprod2x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x01, vmultiplier), vrounding); + const v128_t vacc2x23 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + const v128_t vprod2x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + const v128_t vq31prod2x0123 = wasm_v32x4_shuffle(vprod2x01, vprod2x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + const v128_t vrem2x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod2x0123, vremainder_mask), wasm_i32x4_lt(vq31prod2x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + vacc2x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod2x0123, vshift), wasm_i32x4_gt(vrem2x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + v128_t vacc22x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + a = 
(const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c2) = (uint16_t) wasm_i16x8_extract_lane(vout, 4); + c2 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c2 = (int8_t) wasm_i8x16_extract_lane(vout, 8); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c b/src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c new file mode 100644 index 00000000000..3d4687d7b09 --- /dev/null +++ b/src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-extaddpair.c @@ -0,0 +1,260 @@ +// Auto-generated file. Do not edit! +// Template: src/qs8-igemm/MRx4c8-wasmsimd-extmul-extaddpair.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 4); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (4 * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr < 2) { + c1 = c0; + } + int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 2) { + c2 = c1; + } + int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); + if XNN_UNPREDICTABLE(mr != 4) { + c3 = c2; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + v128_t vacc2x0 = vacc0x0; + v128_t vacc2x1 = vacc0x1; + v128_t vacc2x2 = vacc0x2; + v128_t vacc2x3 = vacc0x3; + v128_t vacc3x0 = vacc0x0; + v128_t vacc3x1 = vacc0x1; + v128_t vacc3x2 = vacc0x2; + v128_t vacc3x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + const int8_t* restrict a0 = a[0]; + if XNN_UNPREDICTABLE(a0 != zero) { + a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); + } + const int8_t* restrict a1 = a[1]; + if XNN_UNPREDICTABLE(a1 != zero) { + a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); + } + const int8_t* restrict a2 = a[2]; + if XNN_UNPREDICTABLE(a2 != zero) { + a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); + } + const int8_t* restrict a3 = a[3]; + if XNN_UNPREDICTABLE(a3 != zero) { + a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); + } + a += 4; + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) 
a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + const v128_t vxa2 = __builtin_wasm_load64_zero((long long*) a2); + a2 += 8; + const v128_t vxa3 = __builtin_wasm_load64_zero((long long*) a3); + a3 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x0)); + const v128_t vprod2x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb0); + vacc2x0 = wasm_i32x4_add(vacc2x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x0)); + const v128_t vprod3x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb0); + vacc3x0 = wasm_i32x4_add(vacc3x0, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod3x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x1)); + const v128_t vprod2x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb1); + vacc2x1 = wasm_i32x4_add(vacc2x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x1)); + const v128_t vprod3x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb1); + vacc3x1 = wasm_i32x4_add(vacc3x1, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod3x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x2)); + const v128_t vprod2x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb2); + vacc2x2 = wasm_i32x4_add(vacc2x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x2)); + const v128_t vprod3x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb2); + vacc3x2 = wasm_i32x4_add(vacc3x2, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod3x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod1x3)); + const v128_t vprod2x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb3); + vacc2x3 = wasm_i32x4_add(vacc2x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod2x3)); + const v128_t vprod3x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb3); + vacc3x3 = wasm_i32x4_add(vacc3x3, __builtin_wasm_extadd_pairwise_i16x8_s_i32x4(vprod3x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= 4 * sizeof(void*); + } while (p != 0); + + const v128_t vacc0x02 = 
wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + const v128_t vacc2x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x0, vacc2x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x0, vacc2x2, 2, 6, 3, 7)); + const v128_t vacc2x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x1, vacc2x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x1, vacc2x3, 2, 6, 3, 7)); + const v128_t vacc3x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x0, vacc3x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x0, vacc3x2, 2, 6, 3, 7)); + const v128_t vacc3x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x1, vacc3x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x1, vacc3x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + v128_t vacc2x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x02, vacc2x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x02, vacc2x13, 2, 6, 3, 7)); + v128_t vacc3x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x02, vacc3x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x02, vacc3x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + const v128_t vsign2x0123 = wasm_i32x4_lt(vacc2x0123, vzero); + const v128_t vsign3x0123 = wasm_i32x4_lt(vacc3x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + const v128_t vacc2x01 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 0, 4, 1, 5); + const v128_t vacc3x01 = wasm_v32x4_shuffle(vacc3x0123, vsign3x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + const v128_t vprod2x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x01, vmultiplier), vrounding); + const v128_t vacc2x23 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 2, 6, 3, 7); + const v128_t vprod3x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc3x01, vmultiplier), vrounding); + const v128_t vacc3x23 = wasm_v32x4_shuffle(vacc3x0123, vsign3x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + const v128_t vprod2x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x23, vmultiplier), vrounding); + const v128_t vprod3x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc3x23, vmultiplier), vrounding); + + const v128_t vq31prod0x0123 = 
wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + const v128_t vq31prod2x0123 = wasm_v32x4_shuffle(vprod2x01, vprod2x23, 1, 3, 5, 7); + const v128_t vq31prod3x0123 = wasm_v32x4_shuffle(vprod3x01, vprod3x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + const v128_t vrem2x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod2x0123, vremainder_mask), wasm_i32x4_lt(vq31prod2x0123, vzero)); + const v128_t vrem3x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod3x0123, vremainder_mask), wasm_i32x4_lt(vq31prod3x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + vacc2x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod2x0123, vshift), wasm_i32x4_gt(vrem2x0123, vthreshold)); + vacc3x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod3x0123, vshift), wasm_i32x4_gt(vrem3x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + v128_t vacc23x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c3) = (float) wasm_f32x4_extract_lane(vout, 3); + *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); + c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c3) = (uint16_t) wasm_i16x8_extract_lane(vout, 6); + c3 += 2; + *((uint16_t*) c2) = (uint16_t) wasm_i16x8_extract_lane(vout, 4); + c2 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c3 = (int8_t) wasm_i8x16_extract_lane(vout, 12); + *c2 = (int8_t) wasm_i8x16_extract_lane(vout, 8); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c b/src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c new file mode 100644 index 00000000000..7f75ffc4438 --- /dev/null +++ b/src/qs8-igemm/gen/4x4c8-minmax-wasmsimd-extmul-widen.c @@ -0,0 +1,276 
@@ +// Auto-generated file. Do not edit! +// Template: src/qs8-igemm/MRx4c8-wasmsimd-extmul-widen.c.in +// Generator: tools/xngen +// +// Copyright 2020 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#include + +#include + +#include + + +void xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen( + size_t mr, + size_t nc, + size_t kc, + size_t ks, + const int8_t** restrict a, + const void* restrict w, + int8_t* restrict c, + size_t cm_stride, + size_t cn_stride, + size_t a_offset, + const int8_t* zero, + const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN +{ + assert(mr != 0); + assert(mr <= 4); + assert(nc != 0); + assert(kc != 0); + assert(ks != 0); + assert(ks % (4 * sizeof(void*)) == 0); + assert(a_offset % sizeof(int8_t) == 0); + assert(a != NULL); + assert(w != NULL); + assert(c != NULL); + + int8_t* c0 = c; + int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); + if XNN_UNPREDICTABLE(mr < 2) { + c1 = c0; + } + int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 2) { + c2 = c1; + } + int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); + if XNN_UNPREDICTABLE(mr != 4) { + c3 = c2; + } + + const v128_t vzero = wasm_f64x2_splat(0.0); + do { + v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]); + v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]); + v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]); + v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]); + v128_t vacc1x0 = vacc0x0; + v128_t vacc1x1 = vacc0x1; + v128_t vacc1x2 = vacc0x2; + v128_t vacc1x3 = vacc0x3; + v128_t vacc2x0 = vacc0x0; + v128_t vacc2x1 = vacc0x1; + v128_t vacc2x2 = vacc0x2; + v128_t vacc2x3 = vacc0x3; + v128_t vacc3x0 = vacc0x0; + v128_t vacc3x1 = vacc0x1; + v128_t vacc3x2 = vacc0x2; + v128_t vacc3x3 = vacc0x3; + w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); + + size_t p = ks; + do { + const int8_t* restrict a0 = a[0]; + if XNN_UNPREDICTABLE(a0 != zero) { + a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); + } + const int8_t* restrict a1 = a[1]; + if XNN_UNPREDICTABLE(a1 != zero) { + a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); + } + const int8_t* restrict a2 = a[2]; + if XNN_UNPREDICTABLE(a2 != zero) { + a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); + } + const int8_t* restrict a3 = a[3]; + if XNN_UNPREDICTABLE(a3 != zero) { + a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); + } + a += 4; + + size_t k = 0; + while (k < kc) { + const v128_t vxa0 = __builtin_wasm_load64_zero((long long*) a0); + a0 += 8; + const v128_t vxa1 = __builtin_wasm_load64_zero((long long*) a1); + a1 += 8; + const v128_t vxa2 = __builtin_wasm_load64_zero((long long*) a2); + a2 += 8; + const v128_t vxa3 = __builtin_wasm_load64_zero((long long*) a3); + a3 += 8; + + const v128_t vxb0 = __builtin_wasm_load64_zero((long long*) w); + + const v128_t vprod0x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb0); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0)); + vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0)); + const v128_t vprod1x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb0); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_low_i16x8(vprod1x0)); + vacc1x0 = wasm_i32x4_add(vacc1x0, wasm_i32x4_widen_high_i16x8(vprod1x0)); + const v128_t vprod2x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb0); + 
vacc2x0 = wasm_i32x4_add(vacc2x0, wasm_i32x4_widen_low_i16x8(vprod2x0)); + vacc2x0 = wasm_i32x4_add(vacc2x0, wasm_i32x4_widen_high_i16x8(vprod2x0)); + const v128_t vprod3x0 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb0); + vacc3x0 = wasm_i32x4_add(vacc3x0, wasm_i32x4_widen_low_i16x8(vprod3x0)); + vacc3x0 = wasm_i32x4_add(vacc3x0, wasm_i32x4_widen_high_i16x8(vprod3x0)); + const v128_t vxb1 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 8 * sizeof(int8_t))); + + const v128_t vprod0x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb1); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1)); + vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_high_i16x8(vprod0x1)); + const v128_t vprod1x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb1); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_low_i16x8(vprod1x1)); + vacc1x1 = wasm_i32x4_add(vacc1x1, wasm_i32x4_widen_high_i16x8(vprod1x1)); + const v128_t vprod2x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb1); + vacc2x1 = wasm_i32x4_add(vacc2x1, wasm_i32x4_widen_low_i16x8(vprod2x1)); + vacc2x1 = wasm_i32x4_add(vacc2x1, wasm_i32x4_widen_high_i16x8(vprod2x1)); + const v128_t vprod3x1 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb1); + vacc3x1 = wasm_i32x4_add(vacc3x1, wasm_i32x4_widen_low_i16x8(vprod3x1)); + vacc3x1 = wasm_i32x4_add(vacc3x1, wasm_i32x4_widen_high_i16x8(vprod3x1)); + const v128_t vxb2 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 16 * sizeof(int8_t))); + + const v128_t vprod0x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb2); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2)); + vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2)); + const v128_t vprod1x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb2); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_low_i16x8(vprod1x2)); + vacc1x2 = wasm_i32x4_add(vacc1x2, wasm_i32x4_widen_high_i16x8(vprod1x2)); + const v128_t vprod2x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb2); + vacc2x2 = wasm_i32x4_add(vacc2x2, wasm_i32x4_widen_low_i16x8(vprod2x2)); + vacc2x2 = wasm_i32x4_add(vacc2x2, wasm_i32x4_widen_high_i16x8(vprod2x2)); + const v128_t vprod3x2 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb2); + vacc3x2 = wasm_i32x4_add(vacc3x2, wasm_i32x4_widen_low_i16x8(vprod3x2)); + vacc3x2 = wasm_i32x4_add(vacc3x2, wasm_i32x4_widen_high_i16x8(vprod3x2)); + const v128_t vxb3 = __builtin_wasm_load64_zero((long long*) ((uintptr_t) w + 24 * sizeof(int8_t))); + + const v128_t vprod0x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa0, vxb3); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3)); + vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3)); + const v128_t vprod1x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa1, vxb3); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_low_i16x8(vprod1x3)); + vacc1x3 = wasm_i32x4_add(vacc1x3, wasm_i32x4_widen_high_i16x8(vprod1x3)); + const v128_t vprod2x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa2, vxb3); + vacc2x3 = wasm_i32x4_add(vacc2x3, wasm_i32x4_widen_low_i16x8(vprod2x3)); + vacc2x3 = wasm_i32x4_add(vacc2x3, wasm_i32x4_widen_high_i16x8(vprod2x3)); + const v128_t vprod3x3 = __builtin_wasm_extmul_low_i8x16_s_i16x8(vxa3, vxb3); + vacc3x3 = wasm_i32x4_add(vacc3x3, wasm_i32x4_widen_low_i16x8(vprod3x3)); + vacc3x3 = wasm_i32x4_add(vacc3x3, wasm_i32x4_widen_high_i16x8(vprod3x3)); + + w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t)); + k += 8 * sizeof(int8_t); + } + p -= 4 * 
sizeof(void*); + } while (p != 0); + + const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7)); + const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7)); + const v128_t vacc1x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x0, vacc1x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x0, vacc1x2, 2, 6, 3, 7)); + const v128_t vacc1x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x1, vacc1x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x1, vacc1x3, 2, 6, 3, 7)); + const v128_t vacc2x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x0, vacc2x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x0, vacc2x2, 2, 6, 3, 7)); + const v128_t vacc2x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x1, vacc2x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x1, vacc2x3, 2, 6, 3, 7)); + const v128_t vacc3x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x0, vacc3x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x0, vacc3x2, 2, 6, 3, 7)); + const v128_t vacc3x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x1, vacc3x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x1, vacc3x3, 2, 6, 3, 7)); + + v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7)); + v128_t vacc1x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc1x02, vacc1x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc1x02, vacc1x13, 2, 6, 3, 7)); + v128_t vacc2x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc2x02, vacc2x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc2x02, vacc2x13, 2, 6, 3, 7)); + v128_t vacc3x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc3x02, vacc3x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc3x02, vacc3x13, 2, 6, 3, 7)); + + const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero); + const v128_t vsign1x0123 = wasm_i32x4_lt(vacc1x0123, vzero); + const v128_t vsign2x0123 = wasm_i32x4_lt(vacc2x0123, vzero); + const v128_t vsign3x0123 = wasm_i32x4_lt(vacc3x0123, vzero); + + const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5); + const v128_t vacc1x01 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 0, 4, 1, 5); + const v128_t vacc2x01 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 0, 4, 1, 5); + const v128_t vacc3x01 = wasm_v32x4_shuffle(vacc3x0123, vsign3x0123, 0, 4, 1, 5); + + const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier); + const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding); + const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding); + const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7); + const v128_t vprod1x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x01, vmultiplier), vrounding); + const v128_t vacc1x23 = wasm_v32x4_shuffle(vacc1x0123, vsign1x0123, 2, 6, 3, 7); + const v128_t vprod2x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x01, vmultiplier), vrounding); + const v128_t vacc2x23 = wasm_v32x4_shuffle(vacc2x0123, vsign2x0123, 2, 6, 3, 7); + const v128_t vprod3x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc3x01, vmultiplier), vrounding); + const v128_t vacc3x23 = wasm_v32x4_shuffle(vacc3x0123, vsign3x0123, 2, 6, 3, 7); + + const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding); + const v128_t vprod1x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc1x23, vmultiplier), vrounding); + const v128_t vprod2x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc2x23, vmultiplier), vrounding); + const v128_t vprod3x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc3x23, vmultiplier), vrounding); 
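Aside on the requantization block here: the 64-bit multiplier/rounding products above, together with the odd-lane shuffles and the shift/remainder steps that follow, implement XNNPACK's Q31 fixed-point requantization. The scalar sketch below shows the per-element computation they vectorize; requantize_q31 and its parameter names are illustrative only (not symbols from this diff), and it assumes the usual Q31 conventions (multiplier in [2^30, 2^31), shift in [0, 31], arithmetic right shift on negative values).

#include <stdint.h>

static inline int8_t requantize_q31(
    int32_t acc, int32_t multiplier, uint32_t shift,
    int32_t zero_point, int32_t qmin, int32_t qmax)
{
  /* Rounding high multiply: round-to-nearest of acc * multiplier / 2^31. */
  const int64_t product = (int64_t) acc * (int64_t) multiplier;
  const int32_t q31product = (int32_t) ((product + (INT64_C(1) << 30)) >> 31);

  /* Rounding right shift by 'shift', rounding ties away from zero; this is what
     the remainder_mask / remainder_threshold vectors express. */
  const int32_t remainder_mask = (int32_t) ((UINT32_C(1) << shift) - UINT32_C(1));
  const int32_t remainder_threshold = remainder_mask >> 1;
  const int32_t remainder = (q31product & remainder_mask) - (int32_t) (q31product < 0);
  int32_t out = (q31product >> shift) + (int32_t) (remainder > remainder_threshold);

  /* Zero point and clamping, matching the narrow/add/min/max tail of the kernel. */
  out += zero_point;
  if (out < qmin) out = qmin;
  if (out > qmax) out = qmax;
  return (int8_t) out;
}
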
+ + const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7); + const v128_t vq31prod1x0123 = wasm_v32x4_shuffle(vprod1x01, vprod1x23, 1, 3, 5, 7); + const v128_t vq31prod2x0123 = wasm_v32x4_shuffle(vprod2x01, vprod2x23, 1, 3, 5, 7); + const v128_t vq31prod3x0123 = wasm_v32x4_shuffle(vprod3x01, vprod3x23, 1, 3, 5, 7); + + const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask); + const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero)); + const v128_t vrem1x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod1x0123, vremainder_mask), wasm_i32x4_lt(vq31prod1x0123, vzero)); + const v128_t vrem2x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod2x0123, vremainder_mask), wasm_i32x4_lt(vq31prod2x0123, vzero)); + const v128_t vrem3x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod3x0123, vremainder_mask), wasm_i32x4_lt(vq31prod3x0123, vzero)); + + const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold); + const int32_t vshift = params->wasmsimd.shift; + vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold)); + vacc1x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod1x0123, vshift), wasm_i32x4_gt(vrem1x0123, vthreshold)); + vacc2x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod2x0123, vshift), wasm_i32x4_gt(vrem2x0123, vthreshold)); + vacc3x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod3x0123, vshift), wasm_i32x4_gt(vrem3x0123, vthreshold)); + + const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point); + v128_t vacc01x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123), voutput_zero_point); + v128_t vacc23x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123), voutput_zero_point); + + v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123); + + const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min); + vout = wasm_i8x16_max(vout, voutput_min); + + const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max); + vout = wasm_i8x16_min(vout, voutput_max); + + if (nc >= 4) { + *((float*) c3) = (float) wasm_f32x4_extract_lane(vout, 3); + *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2); + *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1); + *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0); + + c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); + c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); + c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); + c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); + + a = (const int8_t**restrict) ((uintptr_t) a - ks); + + nc -= 4; + } else { + if (nc & 2) { + *((uint16_t*) c3) = (uint16_t) wasm_i16x8_extract_lane(vout, 6); + c3 += 2; + *((uint16_t*) c2) = (uint16_t) wasm_i16x8_extract_lane(vout, 4); + c2 += 2; + *((uint16_t*) c1) = (uint16_t) wasm_i16x8_extract_lane(vout, 2); + c1 += 2; + *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0); + c0 += 2; + vout = wasm_u32x4_shr(vout, 16); + } + if (nc & 1) { + *c3 = (int8_t) wasm_i8x16_extract_lane(vout, 12); + *c2 = (int8_t) wasm_i8x16_extract_lane(vout, 8); + *c1 = (int8_t) wasm_i8x16_extract_lane(vout, 4); + *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0); + } + + nc = 0; + } + } while (nc != 0); +} diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h index b39f7ad8e18..e37b4fd1172 100644 --- a/src/xnnpack/gemm.h +++ b/src/xnnpack/gemm.h @@ -627,6 +627,16 @@ 
DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_2x16c8__avx DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_3x16c8__avx512skx) DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_4x16c8__avx512skx) +DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen) +DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen) +DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen) +DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen) + +DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair) +DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair) +DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair) +DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair) + DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_ld64) DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_ld64) DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_ld64) diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h index 455596fc003..dfaf696d6ac 100644 --- a/src/xnnpack/igemm.h +++ b/src/xnnpack/igemm.h @@ -413,6 +413,16 @@ DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_2x16c8__a DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_3x16c8__avx512skx) DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_4x16c8__avx512skx) +DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen) +DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen) +DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen) +DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen) + +DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair) +DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair) +DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair) +DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair) + DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_ld64) DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_ld64) DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_ld64) diff --git a/test/qs8-gemm-minmax.cc b/test/qs8-gemm-minmax.cc index c621242f9a8..cc85cbbf6e1 100644 --- a/test/qs8-gemm-minmax.cc +++ b/test/qs8-gemm-minmax.cc @@ -44066,6 +44066,3438 @@ #endif // XNN_ARCH_X86 || XNN_ARCH_X86_64 +#if XNN_ARCH_WASMSIMD + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cn) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .cn_stride(7) + 
.Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_strided_a) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_strided_a) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_strided_a) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .a_stride(19) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_strided_a) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + 
.a_stride(83) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_a) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_a) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + 
.Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, qmin) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, qmax) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cn) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_strided_a) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_strided_a) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + 
.m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_strided_a) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .a_stride(19) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_strided_a) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .a_stride(83) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_a) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + 
.nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_a) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, qmin) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, qmax) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cn) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_strided_a) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(8) + 
.iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_strided_a) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_strided_a) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .a_stride(19) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_strided_a) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .a_stride(83) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + 
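The EXTMUL_WIDEN kernels driven by these tests keep one v128_t accumulator per (row, output column) pair; every k-step loads 8 signed bytes of A and 8 signed bytes of the packed B column, multiplies them into 8 int16 products (i8x16 extmul low), and folds the products into 4 int32 lanes with the widen-low/widen-high adds; the shuffle/add reduction at the end of the kernel then sums those lanes. Per output element the net effect is the plain dot product sketched below; dot_c8 is an illustrative name, and the padding remark is an assumption about the packing (the packed weights are presumably zero-padded when kc is not a multiple of 8).

#include <stddef.h>
#include <stdint.h>

/* Scalar model of what one accumulator of a 4c8 EXTMUL_WIDEN kernel ends up
   holding for a single (m, n) output: bias plus the int8 dot product over kc. */
static int32_t dot_c8(const int8_t* a, const int8_t* b, size_t kc, int32_t bias)
{
  int32_t acc = bias;
  for (size_t k = 0; k < kc; k++) {
    acc += (int32_t) a[k] * (int32_t) b[k];  /* i16 product, widened to i32 */
  }
  return acc;
}
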
TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_a) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_a) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, qmin) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, qmax) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cn) { + GemmMicrokernelTester() + 
.mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_strided_a) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_strided_a) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_strided_a) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .a_stride(19) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_strided_a) { + for (size_t k = 16; k <= 80; k += 8) { + 
GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .a_stride(83) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_a) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_a) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + 
.cm_stride(7) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, qmin) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, qmax) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cn) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_strided_a) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_strided_a) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + 
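The EXTMUL_EXTADDPAIR variant covered from here on differs from EXTMUL_WIDEN only in how the 8 int16 products of each k-step are folded into the int32 accumulator; judging by the kernel names and the widen kernels shown earlier, it presumably uses a single pairwise-widening add instead of two widen+add steps. The sketch below contrasts the two using the current wasm_simd128.h intrinsic spellings (extend / extadd_pairwise); the generated files themselves use the older widen names and clang builtins, and acc_widen / acc_extaddpair are illustrative helpers, not symbols from this diff. The two folds place partial sums in different lanes, but the lane sum, which is all the final shuffle/add reduction keeps, is identical.

#include <wasm_simd128.h>

/* EXTMUL_WIDEN: fold an i16x8 product vector with two widening adds. */
static inline v128_t acc_widen(v128_t vacc, v128_t vprod) {
  vacc = wasm_i32x4_add(vacc, wasm_i32x4_extend_low_i16x8(vprod));
  vacc = wasm_i32x4_add(vacc, wasm_i32x4_extend_high_i16x8(vprod));
  return vacc;
}

/* EXTMUL_EXTADDPAIR: fold the same products with one pairwise-widening add
   (i16 lane pairs {0,1}, {2,3}, {4,5}, {6,7} summed into the four i32 lanes). */
static inline v128_t acc_extaddpair(v128_t vacc, v128_t vprod) {
  return wasm_i32x4_add(vacc, wasm_i32x4_extadd_pairwise_i16x8(vprod));
}
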
TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_strided_a) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .a_stride(19) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_strided_a) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .a_stride(83) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_a) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + 
.Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_a) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmin) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmax) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cn) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_strided_a) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_m) { + for (uint32_t m = 1; 
m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_strided_a) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_strided_a) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .a_stride(19) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_strided_a) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .a_stride(83) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + 
.nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_a) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_a) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmin) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmax) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + 
TEST(QS8_GEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cn) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_strided_a) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_strided_a) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_strided_a) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .a_stride(19) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + 
TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_strided_a) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .a_stride(83) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_a) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_a) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k 
<= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmin) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmax) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cn) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_strided_a) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8) { + for 
(size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_strided_a) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .a_stride(11) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_strided_a) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .a_stride(19) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_strided_a) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .a_stride(83) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_a) { + for (uint32_t n = 
5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_a) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(k) + .a_stride(43) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmin) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmax) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_GEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } +#endif // XNN_ARCH_WASMSIMD + + #if XNN_ARCH_WASMSIMD TEST(QS8_GEMM_MINMAX_1X4C8__WASMSIMD_LD64, k_eq_8) { GemmMicrokernelTester() diff --git a/test/qs8-gemm-minmax.yaml b/test/qs8-gemm-minmax.yaml index 271a4e30391..5a08fdeaa92 100644 --- a/test/qs8-gemm-minmax.yaml +++ b/test/qs8-gemm-minmax.yaml @@ -198,6 +198,22 @@ k-block: 8 - name: 
xnn_qs8_gemm_minmax_ukernel_4x16c8__avx512skx k-block: 8 +- name: xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen + k-block: 8 +- name: xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen + k-block: 8 +- name: xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen + k-block: 8 +- name: xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen + k-block: 8 +- name: xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair + k-block: 8 +- name: xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair + k-block: 8 +- name: xnn_qs8_gemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair + k-block: 8 +- name: xnn_qs8_gemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair + k-block: 8 - name: xnn_qs8_gemm_minmax_ukernel_1x4c8__wasmsimd_ld64 k-block: 8 - name: xnn_qs8_gemm_minmax_ukernel_2x4c8__wasmsimd_ld64 diff --git a/test/qs8-igemm-minmax.cc b/test/qs8-igemm-minmax.cc index d9df0a6b993..c48eb95e09e 100644 --- a/test/qs8-igemm-minmax.cc +++ b/test/qs8-igemm-minmax.cc @@ -29974,6 +29974,3534 @@ #endif // XNN_ARCH_X86 || XNN_ARCH_X86_64 +#if XNN_ARCH_WASMSIMD + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cn) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; 
n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, small_kernel) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, small_kernel_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .ks(3) + .iterations(1) + 
.Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_small_kernel) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_small_kernel) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, a_offset) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .a_offset(43) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, zero) { + for (uint32_t mz = 0; mz < 1; mz++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .a_offset(43) + .zero_index(mz) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, qmin) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, qmax) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cn) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + 
.n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + 
.m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, small_kernel) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, small_kernel_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .ks(3) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_small_kernel) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_small_kernel) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, a_offset) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .a_offset(83) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, zero) { + for (uint32_t mz = 0; mz < 2; mz++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .a_offset(83) + .zero_index(mz) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, qmin) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, qmax) { + GemmMicrokernelTester() + .mr(2) + 
.nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cn) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + 
GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, small_kernel) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, small_kernel_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .ks(3) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_small_kernel) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_small_kernel) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + 
.Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, a_offset) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .a_offset(127) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, zero) { + for (uint32_t mz = 0; mz < 3; mz++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .a_offset(127) + .zero_index(mz) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, qmin) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, qmax) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cn) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + 
TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + 
.mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, small_kernel) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, small_kernel_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .ks(3) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_gt_4_small_kernel) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, n_div_4_small_kernel) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, a_offset) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .a_offset(163) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, zero) { + for (uint32_t mz = 0; mz < 4; mz++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .a_offset(163) + .zero_index(mz) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, qmin) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, qmax) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_WIDEN, strided_cm) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + 
TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cn) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + 
TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, small_kernel) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, small_kernel_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .ks(3) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_small_kernel) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_small_kernel) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 1; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + 
.Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, a_offset) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .a_offset(43) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, zero) { + for (uint32_t mz = 0; mz < 1; mz++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(k) + .ks(3) + .a_offset(43) + .zero_index(mz) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmin) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmax) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm) { + GemmMicrokernelTester() + .mr(1) + .nr(4) + .kr(8) + .sr(1) + .m(1) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cn) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + 
.iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + 
TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, small_kernel) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, small_kernel_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .ks(3) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_small_kernel) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_small_kernel) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 2; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, a_offset) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .a_offset(83) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, zero) { + for (uint32_t mz = 0; mz < 2; mz++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(k) + .ks(3) + .a_offset(83) + .zero_index(mz) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmin) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmax) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_2X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm) { + GemmMicrokernelTester() + .mr(2) + .nr(4) + .kr(8) + .sr(1) + .m(2) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, 
strided_cn) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8) { + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; 
n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, small_kernel) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, small_kernel_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .ks(3) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_small_kernel) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_small_kernel) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 3; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, a_offset) { + 
for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .a_offset(127) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, zero) { + for (uint32_t mz = 0; mz < 3; mz++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(k) + .ks(3) + .a_offset(127) + .zero_index(mz) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmin) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmax) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_3X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm) { + GemmMicrokernelTester() + .mr(3) + .nr(4) + .kr(8) + .sr(1) + .m(3) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair); + } +#endif // XNN_ARCH_WASMSIMD + + +#if XNN_ARCH_WASMSIMD + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cn) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_m) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(4) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_eq_8_subtile_n) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(8) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8) { + for (size_t k = 1; k < 8; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_lt_8_subtile) { + for (size_t k = 1; k < 8; k++) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8) 
{ + for (size_t k = 9; k < 16; k++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_gt_8_subtile) { + for (size_t k = 9; k < 16; k++) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8) { + for (size_t k = 16; k <= 80; k += 8) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, k_div_8_subtile) { + for (size_t k = 16; k <= 80; k += 8) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_strided_cn) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_subtile) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_strided_cn) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(n) + .k(k) + .cn_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_subtile) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, small_kernel) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + 
.sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, small_kernel_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .ks(3) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_gt_4_small_kernel) { + for (uint32_t n = 5; n < 8; n++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, n_div_4_small_kernel) { + for (uint32_t n = 8; n <= 12; n += 4) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm_subtile) { + for (size_t k = 1; k <= 40; k += 9) { + for (uint32_t m = 1; m <= 4; m++) { + for (uint32_t n = 1; n <= 4; n++) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(m) + .n(n) + .k(k) + .cm_stride(7) + .iterations(1) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, a_offset) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .a_offset(163) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, zero) { + for (uint32_t mz = 0; mz < 4; mz++) { + for (size_t k = 1; k <= 40; k += 9) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(k) + .ks(3) + .a_offset(163) + .zero_index(mz) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + } + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmin) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .qmin(128) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, qmax) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .qmax(128) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } + + TEST(QS8_IGEMM_MINMAX_4X4C8__WASMSIMD_EXTMUL_EXTADDPAIR, strided_cm) { + GemmMicrokernelTester() + .mr(4) + .nr(4) + .kr(8) + .sr(1) + .m(4) + .n(4) + .k(8) + .cm_stride(7) + .Test(xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair); + } +#endif // XNN_ARCH_WASMSIMD + + #if XNN_ARCH_WASMSIMD TEST(QS8_IGEMM_MINMAX_1X4C8__WASMSIMD_LD64, k_eq_8) { GemmMicrokernelTester() diff --git a/test/qs8-igemm-minmax.yaml b/test/qs8-igemm-minmax.yaml index 26571236a7d..612cad91eeb 100644 --- a/test/qs8-igemm-minmax.yaml +++ b/test/qs8-igemm-minmax.yaml @@ -130,6 +130,22 @@ k-block: 8 - name: xnn_qs8_igemm_minmax_ukernel_4x16c8__avx512skx k-block: 8 +- name: xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_widen + k-block: 8 +- name: 
xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_widen
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_widen
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_widen
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_extmul_extaddpair
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_extmul_extaddpair
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_ukernel_3x4c8__wasmsimd_extmul_extaddpair
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_ukernel_4x4c8__wasmsimd_extmul_extaddpair
+  k-block: 8
 - name: xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_ld64
   k-block: 8
 - name: xnn_qs8_igemm_minmax_ukernel_2x4c8__wasmsimd_ld64