From 7fd0ae588b3eb45f5c6b18573d71375e4ab24f96 Mon Sep 17 00:00:00 2001
From: slaren
Date: Wed, 5 Feb 2025 01:34:35 +0100
Subject: [PATCH 1/7] ggml-cpu : add chunking support to mul_mat_id

---
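Notes (review commentary, not part of the commit message): this patch replaces
the static per-thread split of each expert's rows with dynamically scheduled
chunks, reusing the threadpool->current_chunk counter that
ggml_compute_forward_mul_mat already uses. Each thread implicitly owns the
chunk equal to its own index first; after that it claims chunks with a relaxed
atomic fetch-add. Below is a minimal standalone sketch of that scheduling
loop, assuming a plain pthreads harness — NTH, NCHUNKS and the printf are
made-up stand-ins for the real thread count, chunk grid and per-chunk matmul,
and the real code additionally breaks out early when nth >= nchunk0 * nchunk1
so the counter is never touched in that case.

    /* sketch: dynamic chunk scheduling with an atomic counter (build: cc -pthread demo.c) */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NTH     4   // number of threads (hypothetical)
    #define NCHUNKS 32  // nchunk0 * nchunk1 (hypothetical)

    static atomic_int current_chunk;

    struct worker { int ith; };

    static void * worker_fn(void * arg) {
        const int ith = ((struct worker *) arg)->ith;
        // every thread implicitly owns chunk `ith` first, so no atomic
        // operation is needed to acquire the first chunk
        int chunk = ith;
        while (chunk < NCHUNKS) {
            printf("thread %d: chunk %d\n", ith, chunk); // stand-in for the per-chunk matmul
            chunk = atomic_fetch_add_explicit(&current_chunk, 1, memory_order_relaxed);
        }
        return NULL;
    }

    int main(void) {
        // the counter starts at NTH because chunks 0..NTH-1 are already owned
        atomic_store_explicit(&current_chunk, NTH, memory_order_relaxed);
        pthread_t th[NTH];
        struct worker w[NTH];
        for (int i = 0; i < NTH; ++i) {
            w[i].ith = i;
            pthread_create(&th[i], NULL, worker_fn, &w[i]);
        }
        for (int i = 0; i < NTH; ++i) {
            pthread_join(th[i], NULL);
        }
        return 0;
    }

Since the counter starts at nth rather than 0, no thread needs an atomic
operation for its first chunk; that is what the "saves a bit of coordination"
comment in the hunks below refers to. Because the counter is shared across the
per-expert iterations, the loop over experts has to reset it behind a pair of
barriers at the end of each iteration — overhead that patch 2 later removes.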
 ggml/src/ggml-cpu/ggml-cpu.c | 192 ++++++++++++++++++++++-------------
 tests/test-backend-ops.cpp  |  17 +++-
 2 files changed, 137 insertions(+), 72 deletions(-)

diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index e809f05d217a9..01f92aac92638 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -7599,7 +7599,6 @@ UseGgmlGemm2:;
     if ((nr0 % 2 != 0) || (ne11 % 2 != 0) || ((ir0_end - ir0_start) % 2 != 0) || ((ir1_end - ir1_start) % 2 != 0)) {
         num_rows_per_vec_dot = 1;
     }
-
     ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);
 
     if (nth >= nchunk0 * nchunk1) {
@@ -7612,6 +7611,75 @@ UseGgmlGemm2:;
 
 // ggml_compute_forward_mul_mat_id
 
+#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne12 + (i1)]
+
+struct mmid_row_mapping {
+    int32_t i1;
+    int32_t i2;
+};
+
+static void ggml_compute_forward_mul_mat_id_one_chunk(
+          struct ggml_tensor * dst,
+    const struct ggml_tensor * src0,
+    const struct ggml_tensor * src1,
+    const int64_t cur_a,
+    const int64_t ir0_start,
+    const int64_t ir0_end,
+    const int64_t ir1_start,
+    const int64_t ir1_end,
+    const char * src0_cur,
+    const struct mmid_row_mapping * matrix_rows,
+    const size_t row_size,
+    const bool src1_cont,
+    const void * wdata) {
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const enum ggml_type type = src0->type;
+
+    ggml_vec_dot_t const vec_dot      = type_traits_cpu[type].vec_dot;
+    enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
+
+    const int64_t blck_0 = 16;
+    const int64_t blck_1 = 16;
+
+    float tmp[16];
+
+    for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
+        for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
+            for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ++ir1) {
+                const int64_t _i12 = ir1; // logical row index for this expert
+
+                struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12);
+                const int id = row_mapping.i1; // selected expert index
+
+                const int64_t i11 = id % ne11;
+                const int64_t i12 = row_mapping.i2; // row index in src1
+
+                const int64_t i1 = id;  // selected expert index
+                const int64_t i2 = i12; // row
+
+                // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
+                //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
+                //       the original src1 data pointer, so we should index using the indices directly
+                // TODO: this is a bit of a hack, we should probably have a better way to handle this
+                const char * src1_col = (const char *) wdata +
+                    (src1_cont || src1->type != vec_dot_type
+                        ? (i11      + i12*ne11)*row_size
+                        : (i11*nb11 + i12*nb12));
+
+                float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2));
+
+                for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) {
+                    vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1);
+                }
+
+                memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir0_end) - iir0)*sizeof(float));
+            }
+        }
+    }
+}
+
 static void ggml_compute_forward_mul_mat_id(
         const struct ggml_compute_params * params,
               struct ggml_tensor * dst) {
@@ -7629,7 +7697,6 @@ static void ggml_compute_forward_mul_mat_id(
 
     const bool src1_cont = ggml_is_contiguous(src1);
 
-    ggml_vec_dot_t const vec_dot      = type_traits_cpu[type].vec_dot;
     enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
     ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float;
 
@@ -7651,11 +7718,6 @@ static void ggml_compute_forward_mul_mat_id(
         (char *) params->wdata :
         (char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
 
-    struct mmid_row_mapping {
-        int32_t i1;
-        int32_t i2;
-    };
-
     int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
     struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *)(matrix_row_counts + n_as); // [n_as][ne11]
 
@@ -7670,8 +7732,8 @@ static void ggml_compute_forward_mul_mat_id(
         GGML_ASSERT(src1->type == GGML_TYPE_F32);
 
         for (int64_t i13 = 0; i13 < ne13; ++i13) {
-            for (int64_t i12 = 0; i12 < ne12; ++i12) {
-                for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
+            for (int64_t i12 = ith; i12 < ne12; i12 += nth) {
+                for (int64_t i11 = 0; i11 < ne11; ++i11) {
                     from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
                                (void *)               (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
                                ne10);
@@ -7680,9 +7742,10 @@ static void ggml_compute_forward_mul_mat_id(
         }
     }
 
-#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne12 + (i1)]
-
     if (ith == 0) {
+        // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
+        atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed);
+
         // initialize matrix_row_counts
         memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
 
@@ -7701,7 +7764,9 @@ static void ggml_compute_forward_mul_mat_id(
 
     ggml_barrier(params->threadpool);
 
-    // compute each matrix multiplication in sequence
+    const int64_t rows_total = ggml_nelements(ids);
+    int64_t rows_processed = 0;
+
     for (int cur_a = 0; cur_a < n_as; ++cur_a) {
         const int64_t cne1 = matrix_row_counts[cur_a];
@@ -7709,84 +7774,69 @@ static void ggml_compute_forward_mul_mat_id(
             continue;
         }
 
-        const char * src0_cur = (const char *) src0->data + cur_a*nb02;
+        rows_processed += cne1;
 
-        const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
+        const char * src0_cur = (const char *) src0->data + cur_a * nb02;
+        const void * wdata    = (src1->type == vec_dot_type) ? src1->data : params->wdata;
         const size_t row_size = ggml_row_size(vec_dot_type, ne10);
 
-        const int64_t nr0 = ne01; // src0 rows
-        const int64_t nr1 = cne1; // src1 rows
-
-        // distribute the thread work across the inner or outer loop based on which one is larger
-
-        const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
-        const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
-
-        const int64_t ith0 = ith % nth0;
-        const int64_t ith1 = ith / nth0;
-
-        const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
-        const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
+        const int64_t nr0 = ne01;
+        const int64_t nr1 = cne1;
 
-        const int64_t ir010 = dr0*ith0;
-        const int64_t ir011 = MIN(ir010 + dr0, nr0);
+        int chunk_size = 16;
+        if (nr0 == 1 || nr1 == 1) {
+            chunk_size = 64;
+        }
 
-        const int64_t ir110 = dr1*ith1;
-        const int64_t ir111 = MIN(ir110 + dr1, nr1);
+        int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
+        int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
 
-        // threads with no work simply yield (not sure if it helps)
-        //if (ir010 >= ir011 || ir110 >= ir111) {
-        //    sched_yield();
-        //    continue;
-        //}
+        if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) {
+            nchunk0 = nr0 > nr1 ? nth : 1;
+            nchunk1 = nr0 > nr1 ? 1 : nth;
+        }
 
-        // block-tiling attempt
-        const int64_t blck_0 = 16;
-        const int64_t blck_1 = 16;
+        const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
+        const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
 
-        // attempt to reduce false-sharing (does not seem to make a difference)
-        float tmp[16];
+        int current_chunk = ith;
 
-        for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
-            for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
-                for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
-                    const int64_t _i12 = ir1; // logical row index for this expert
+        while (current_chunk < nchunk0 * nchunk1) {
+            const int64_t ith0 = current_chunk % nchunk0;
+            const int64_t ith1 = current_chunk / nchunk0;
 
-                    struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12);
-                    const int id = row_mapping.i1; // selected expert index
+            const int64_t ir0_start = dr0 * ith0;
+            const int64_t ir0_end   = MIN(ir0_start + dr0, nr0);
 
-                    const int64_t i11 = id % ne11;
-                    const int64_t i12 = row_mapping.i2; // row index in src1
+            const int64_t ir1_start = dr1 * ith1;
+            const int64_t ir1_end   = MIN(ir1_start + dr1, nr1);
 
-                    const int64_t i1 = id;  // selected expert index
-                    const int64_t i2 = i12; // row
+            ggml_compute_forward_mul_mat_id_one_chunk(
+                dst, src0, src1, cur_a,
+                ir0_start, ir0_end, ir1_start, ir1_end,
+                src0_cur, matrix_rows, row_size, src1_cont, wdata
+            );
 
-                    // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
-                    //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
-                    //       the original src1 data pointer, so we should index using the indices directly
-                    // TODO: this is a bit of a hack, we should probably have a better way to handle this
-                    const char * src1_col = (const char *) wdata +
-                        (src1_cont || src1->type != vec_dot_type
-                            ? (i11      + i12*ne11)*row_size
-                            : (i11*nb11 + i12*nb12));
+            if (nth >= nchunk0 * nchunk1) {
+                break;
+            }
 
-                    float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2));
+            current_chunk = atomic_fetch_add_explicit(&params->threadpool->current_chunk, 1, memory_order_relaxed);
+        }
 
-                    //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
-                    //    vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
-                    //}
+        if (rows_processed == rows_total) {
+            break;
+        }
 
-                    for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
-                        vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1);
-                    }
+        ggml_barrier(params->threadpool);
 
-                    memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
-                }
-            }
+        if (ith == 0) {
+            // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
+            atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed);
         }
-    }
 
-#undef MMID_MATRIX_ROW
+        ggml_barrier(params->threadpool);
+    }
 }
 
 // ggml_compute_forward_out_prod
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index 4c5c4dd9cfc64..fc5d2a6d9c286 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -4329,6 +4329,21 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() {
         }
     }
 
+#if 0
+    for (int bs : {1, 64}) {
+        for (ggml_type type_a : {GGML_TYPE_Q4_0}) {
+            for (ggml_type type_b : {GGML_TYPE_F32}) {
+                int n_experts = 256;
+                int n_used = 8;
+                int n_embd = 7168;
+                int n_ff = 2048;
+                test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_experts, n_used, true, n_embd, bs, n_ff));
+                //test_cases.emplace_back(new test_mul_mat(type_a, type_b, n_embd, bs, n_ff, {1, 1}, {1, 1}));
+            }
+        }
+    }
+#endif
+
     for (int K : {3, 5}) {
         for (int IC : {256, 2560}) {
             for (int IW_IH : {32, 64, 256}) {
@@ -4462,7 +4477,7 @@ int main(int argc, char ** argv) {
             auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
             if (ggml_backend_set_n_threads_fn) {
                 // TODO: better value for n_threads
-                ggml_backend_set_n_threads_fn(backend, std::thread::hardware_concurrency());
+                ggml_backend_set_n_threads_fn(backend, std::thread::hardware_concurrency() / 2);
             }
 
             printf("  Device description: %s\n", ggml_backend_dev_description(dev));

From 0f0d8c3ae72f6cca1e72eae406001e0f64dc4c44 Mon Sep 17 00:00:00 2001
From: slaren
Date: Wed, 5 Feb 2025 16:17:33 +0100
Subject: [PATCH 2/7] allocate chunk counter in wdata

parallelize src1 quantization by column to allow parallelization even
when there is only one row
---
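Notes (review commentary, not part of the commit message): two separate
changes here. First, the chunk counter moves from the shared threadpool into
wdata, with one cache-line-sized atomic_int slot per expert matrix, so the
expert loop no longer needs the barrier/reset/barrier sequence at the end of
every iteration — each matrix gets its own counter, initialized once up front.
incr_ptr_aligned carves these regions out of params->wdata one after another.
A self-contained sketch of that layout scheme (arena size, expert count and
the row-mapping payload are hypothetical stand-ins; GGML_PAD and
CACHE_LINE_SIZE are re-defined locally so the demo compiles on its own):

    /* sketch: bump-allocating aligned sub-regions from a scratch arena */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CACHE_LINE_SIZE 64
    #define PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

    static void * incr_ptr_aligned(void ** p, size_t size, size_t align) {
        void * ptr = *p;
        ptr = (void *) PAD((uintptr_t) ptr, align); // align the cursor first
        *p  = (void *) ((char *) ptr + size);       // then reserve `size` bytes
        return ptr;
    }

    int main(void) {
        const int    n_as  = 8;       // hypothetical number of experts
        const size_t wsize = 1 << 16; // hypothetical wdata size
        void * wdata = malloc(wsize);
        void * cur   = wdata;

        // same order as the patch: row counts, row mappings, then one
        // cache-line-sized atomic counter slot per expert matrix
        int64_t * row_counts = incr_ptr_aligned(&cur, n_as * sizeof(int64_t), sizeof(int64_t));
        int32_t * row_maps   = incr_ptr_aligned(&cur, n_as * 64 * 2 * sizeof(int32_t), sizeof(int64_t)); // 64 rows/expert, 2 ids each (made up)
        char (*counters)[CACHE_LINE_SIZE] = incr_ptr_aligned(&cur, (size_t) CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE);

        assert((uintptr_t) counters % CACHE_LINE_SIZE == 0);
        assert((size_t) ((char *) cur - (char *) wdata) <= wsize); // mirrors the GGML_ASSERT on wsize

        printf("used %zu of %zu bytes\n", (size_t) ((char *) cur - (char *) wdata), wsize);
        (void) row_counts; (void) row_maps;
        free(wdata);
        return 0;
    }

Keeping each counter in its own cache line avoids false sharing between
threads advancing counters of different matrices. Second, src1 quantization is
now split across threads by column blocks instead of by row, so all nth
threads contribute even when src1 has a single row — the batch-size-1 case
that dominates token generation. A sketch of the index arithmetic, assuming
ne10 is a multiple of the quantization block size (which ggml requires for
quantized types):

    /* sketch: per-thread column-block ranges for src1 quantization */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const int64_t ne10 = 7168; // row width in floats (hypothetical)
        const int64_t bs   = 32;   // block size of the quantized type (e.g. Q8_0)
        const int     nth  = 6;    // thread count (hypothetical)

        for (int ith = 0; ith < nth; ++ith) {
            // integer arithmetic guarantees the slices tile [0, ne10/bs)
            // exactly: thread i ends where thread i+1 begins
            const int64_t block_start = (ith * (ne10/bs)) / nth;
            const int64_t block_end   = ((ith + 1) * (ne10/bs)) / nth;
            printf("thread %d: blocks [%lld, %lld)\n", ith, (long long) block_start, (long long) block_end);
        }
        return 0;
    }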
 ggml/src/ggml-cpu/ggml-cpu.c | 116 ++++++++++++++++++++++++-----------
 1 file changed, 80 insertions(+), 36 deletions(-)

diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index 01f92aac92638..bee1772f45cea 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -7,10 +7,8 @@
 #include "ggml-cpu-impl.h"
 #include "ggml-cpu.h"
 #include "ggml-impl.h"
-#include "ggml-quants.h"
 #include "ggml-cpu-quants.h"
 #include "ggml-threading.h"
-#include "amx/amx.h"
 #include "ggml.h"
 
 #if defined(_MSC_VER) || defined(__MINGW32__)
@@ -1297,7 +1295,7 @@ struct ggml_threadpool {
     atomic_int n_graph;       // incremented when there is work to be done (i.e each graph)
     atomic_int GGML_CACHE_ALIGN n_barrier;
     atomic_int GGML_CACHE_ALIGN n_barrier_passed;
-    atomic_int current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.
+    atomic_int GGML_CACHE_ALIGN current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.
 
     // these are atomic as an annotation for thread-sanitizer
     atomic_bool stop;         // Used for stopping the threadpool altogether
@@ -7496,6 +7494,7 @@ UseGgmlGemm1:;
     if (src1->type != vec_dot_type) {
         char * wdata = params->wdata;
 
+        const size_t nbw0 = ggml_type_size(vec_dot_type);
         const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
         const size_t nbw2 = nbw1*ne11;
         const size_t nbw3 = nbw2*ne12;
@@ -7503,6 +7502,7 @@ UseGgmlGemm1:;
         assert(params->wsize >= ne13*nbw3);
         GGML_ASSERT(src1->type == GGML_TYPE_F32);
 
+        #if 0
         for (int64_t i13 = 0; i13 < ne13; ++i13) {
             for (int64_t i12 = 0; i12 < ne12; ++i12) {
                 for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
@@ -7512,6 +7512,20 @@ UseGgmlGemm1:;
                 }
             }
         }
+        #else
+        for (int64_t i13 = 0; i13 < ne13; ++i13) {
+            for (int64_t i12 = 0; i12 < ne12; ++i12) {
+                for (int64_t i11 = 0; i11 < ne11; ++i11) {
+                    size_t bs = ggml_blck_size(vec_dot_type);
+                    int64_t ne10_block_start = (ith * ne10/bs) / nth;
+                    int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
+                    from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
+                               (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
+                               (ne10_block_end - ne10_block_start) * bs);
+                }
+            }
+        }
+        #endif
     }
 
     if (ith == 0) {
@@ -7611,7 +7625,7 @@ UseGgmlGemm2:;
 
 // ggml_compute_forward_mul_mat_id
 
-#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne12 + (i1)]
+#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ids->ne[0]*ids->ne[1] + (i1)]
 
 struct mmid_row_mapping {
     int32_t i1;
@@ -7622,6 +7636,7 @@ static void ggml_compute_forward_mul_mat_id_one_chunk(
           struct ggml_tensor * dst,
     const struct ggml_tensor * src0,
     const struct ggml_tensor * src1,
+    const struct ggml_tensor * ids,
     const int64_t cur_a,
     const int64_t ir0_start,
     const int64_t ir0_end,
@@ -7680,6 +7695,14 @@ static void ggml_compute_forward_mul_mat_id_one_chunk(
     }
 }
 
+static void * incr_ptr_aligned(void ** p, size_t size, size_t align) {
+
+    void * ptr = *p;
+    ptr = (void *) GGML_PAD((uintptr_t) ptr, align);
+    *p = (void *) ((char *) ptr + size);
+    return ptr;
+}
+
 static void ggml_compute_forward_mul_mat_id(
         const struct ggml_compute_params * params,
               struct ggml_tensor * dst) {
@@ -7714,16 +7737,27 @@ static void ggml_compute_forward_mul_mat_id(
     const int n_ids = ids->ne[0]; // n_expert_used
     const int n_as  = ne02;       // n_expert
 
-    char * wdata_src1_end = (src1->type == vec_dot_type) ?
-            (char *) params->wdata :
-            (char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
+    void * wdata_cur = params->wdata;
 
-    int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
-    struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *)(matrix_row_counts + n_as); // [n_as][ne11]
+    if (src1->type != vec_dot_type) {
+        incr_ptr_aligned(&wdata_cur, ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
+    }
+
+    int64_t * matrix_row_counts = // [n_as]
+        incr_ptr_aligned(&wdata_cur, n_as*sizeof(int64_t), sizeof(int64_t));
+
+    struct mmid_row_mapping * matrix_rows = // [n_as][ids->ne[0]*ids->ne[1]]
+        incr_ptr_aligned(&wdata_cur, n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping), sizeof(int64_t));
+
+    char (*atomic_current_chunk)[CACHE_LINE_SIZE] = // [n_as]
+        incr_ptr_aligned(&wdata_cur, CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE);
+
+    GGML_ASSERT(params->wsize >= (size_t)((char *) wdata_cur - (char *) params->wdata));
 
     if (src1->type != vec_dot_type) {
         char * wdata = params->wdata;
 
+        const size_t nbw0 = ggml_type_size(vec_dot_type);
         const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
         const size_t nbw2 = nbw1*ne11;
         const size_t nbw3 = nbw2*ne12;
@@ -7731,6 +7765,7 @@ static void ggml_compute_forward_mul_mat_id(
         assert(params->wsize >= ne13*nbw3);
         GGML_ASSERT(src1->type == GGML_TYPE_F32);
 
+#if 0
         for (int64_t i13 = 0; i13 < ne13; ++i13) {
             for (int64_t i12 = ith; i12 < ne12; i12 += nth) {
                 for (int64_t i11 = 0; i11 < ne11; ++i11) {
@@ -7740,12 +7775,23 @@ static void ggml_compute_forward_mul_mat_id(
                 }
             }
         }
+#else
+        for (int64_t i13 = 0; i13 < ne13; ++i13) {
+            for (int64_t i12 = 0; i12 < ne12; ++i12) {
+                for (int64_t i11 = 0; i11 < ne11; ++i11) {
+                    size_t bs = ggml_blck_size(vec_dot_type);
+                    int64_t ne10_block_start = (ith * ne10/bs) / nth;
+                    int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
+                    from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
+                               (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
+                               (ne10_block_end - ne10_block_start) * bs);
+                }
+            }
+        }
+#endif
     }
 
     if (ith == 0) {
-        // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
-        atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed);
-
         // initialize matrix_row_counts
         memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
@@ -7760,13 +7806,16 @@ static void ggml_compute_forward_mul_mat_id(
             matrix_row_counts[i02] += 1;
         }
     }
+    } else {
+        // reset current_chunk
+        for (int cur_a = ith - 1; cur_a < n_as; cur_a += (nth - 1)) {
+            atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
+            *current_chunk_ctr = nth;
+        }
     }
 
     ggml_barrier(params->threadpool);
 
-    const int64_t rows_total = ggml_nelements(ids);
-    int64_t rows_processed = 0;
-
     for (int cur_a = 0; cur_a < n_as; ++cur_a) {
         const int64_t cne1 = matrix_row_counts[cur_a];
@@ -7774,7 +7823,7 @@ static void ggml_compute_forward_mul_mat_id(
             continue;
         }
 
-        rows_processed += cne1;
+        //rows_processed += cne1;
 
         const char * src0_cur = (const char *) src0->data + cur_a * nb02;
         const void * wdata    = (src1->type == vec_dot_type) ? src1->data : params->wdata;
         const size_t row_size = ggml_row_size(vec_dot_type, ne10);
@@ -7783,6 +7832,7 @@ static void ggml_compute_forward_mul_mat_id(
         const int64_t nr0 = ne01;
         const int64_t nr1 = cne1;
 
+        //int chunk_size = (nr0 + nr1) / nth;
         int chunk_size = 16;
         if (nr0 == 1 || nr1 == 1) {
             chunk_size = 64;
@@ -7801,6 +7851,8 @@ static void ggml_compute_forward_mul_mat_id(
 
         int current_chunk = ith;
 
+        atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
+
         while (current_chunk < nchunk0 * nchunk1) {
             const int64_t ith0 = current_chunk % nchunk0;
             const int64_t ith1 = current_chunk / nchunk0;
@@ -7812,7 +7864,7 @@ static void ggml_compute_forward_mul_mat_id(
             const int64_t ir1_end = MIN(ir1_start + dr1, nr1);
 
             ggml_compute_forward_mul_mat_id_one_chunk(
-                dst, src0, src1, cur_a,
+                dst, src0, src1, ids, cur_a,
                 ir0_start, ir0_end, ir1_start, ir1_end,
                 src0_cur, matrix_rows, row_size, src1_cont, wdata
             );
@@ -7821,21 +7873,8 @@ static void ggml_compute_forward_mul_mat_id(
                 break;
             }
 
-            current_chunk = atomic_fetch_add_explicit(&params->threadpool->current_chunk, 1, memory_order_relaxed);
-        }
-
-        if (rows_processed == rows_total) {
-            break;
+            current_chunk = atomic_fetch_add_explicit(current_chunk_ctr, 1, memory_order_relaxed);
         }
-
-        ggml_barrier(params->threadpool);
-
-        if (ith == 0) {
-            // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
-            atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed);
-        }
-
-        ggml_barrier(params->threadpool);
     }
 }
 
@@ -13773,14 +13812,19 @@ struct ggml_cplan ggml_graph_plan(
                     cur = 0;
                     const struct ggml_tensor * src0 = node->src[0];
                     const struct ggml_tensor * src1 = node->src[1];
+                    const struct ggml_tensor * ids = node->src[2];
                     const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
+                    const int n_as = src0->ne[2];
+
+                    // src1
                     if (src1->type != vec_dot_type) {
-                        cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
+                        cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)) + sizeof(int64_t);
                     }
-                    const int n_as = src0->ne[2];
-                    cur += GGML_PAD(cur, sizeof(int64_t)); // align
-                    cur += n_as * sizeof(int64_t);         // matrix_row_counts
-                    cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
+                    // matrix_row_counts
+                    cur += n_as * sizeof(int64_t) + sizeof(int64_t);
+                    // matrix_rows
+                    cur += n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping) + sizeof(int64_t);
+                    // atomic_current_chunk
+                    cur += CACHE_LINE_SIZE*n_as + CACHE_LINE_SIZE;
                 } break;
             case GGML_OP_OUT_PROD:
                 {

From 1b90527d78eb4f688077b39a94b03a0b739d2d03 Mon Sep 17 00:00:00 2001
From: slaren
Date: Sun, 9 Feb 2025 16:22:56 +0100
Subject: [PATCH 3/7] disable for arm

---
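Notes (review commentary, not part of the commit message): the commit message
only says "disable for arm", presumably because the fine-grained chunking
regressed there. Rather than adding a separate code path, the patch picks
chunk_size = (nr0 + nr1) / nth on aarch64, which produces so few chunks that
the existing nchunk0 * nchunk1 < nth * 4 test collapses the grid back to one
slice per thread — effectively the pre-chunking schedule. A worked example of
that fallback arithmetic (sizes hypothetical):

    /* sketch: how the ARM chunk_size choice collapses the chunk grid */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const int64_t nr0 = 2048, nr1 = 8; // src0 rows, src1 rows for one expert
        const int     nth = 8;

        // generic path: fixed 16-row chunks -> many chunks, dynamic scheduling
        int chunk_size = 16;
        int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
        int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
        printf("chunked: %lld x %lld chunks\n", (long long) nchunk0, (long long) nchunk1);

        // ARM path: one huge chunk per thread -> the "< nth * 4" test fires
        chunk_size = (int) ((nr0 + nr1) / nth);
        nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
        nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
        if (nchunk0 * nchunk1 < nth * 4) {
            nchunk0 = nr0 > nr1 ? nth : 1;
            nchunk1 = nr0 > nr1 ? 1 : nth;
        }
        printf("arm fallback: %lld x %lld chunks\n", (long long) nchunk0, (long long) nchunk1);
        return 0;
    }

With these sizes the generic path yields a 128 x 1 chunk grid (kept, since 128
>= nth * 4), while the ARM path ends up at 8 x 1 — one slice per thread.
Patch 5 below reworks this into an explicit disable_chunking flag.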
 ggml/src/ggml-cpu/ggml-cpu.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index bee1772f45cea..d3bd0a3e735f8 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -7832,11 +7832,16 @@ static void ggml_compute_forward_mul_mat_id(
 
         const int64_t nr0 = ne01;
         const int64_t nr1 = cne1;
 
-        //int chunk_size = (nr0 + nr1) / nth;
+#if defined(__aarch64__)
+        // disable for ARM
+        int chunk_size = (nr0 + nr1) / nth;
+#else
         int chunk_size = 16;
         if (nr0 == 1 || nr1 == 1) {
             chunk_size = 64;
         }
+#endif // defined(__aarch64__)
 
         int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
         int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;

From b26af62e7e2d828f659a9c0845a3c121bebda36c Mon Sep 17 00:00:00 2001
From: slaren
Date: Sun, 9 Feb 2025 16:27:19 +0100
Subject: [PATCH 4/7] cleanup

---
 ggml/src/ggml-cpu/ggml-cpu.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index d3bd0a3e735f8..baa6485626ea1 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -7823,8 +7823,6 @@ static void ggml_compute_forward_mul_mat_id(
             continue;
         }
 
-        //rows_processed += cne1;
-
         const char * src0_cur = (const char *) src0->data + cur_a * nb02;
         const void * wdata    = (src1->type == vec_dot_type) ? src1->data : params->wdata;
         const size_t row_size = ggml_row_size(vec_dot_type, ne10);

From d4bdfc631440429c29cc8d5d069cbea27c90e128 Mon Sep 17 00:00:00 2001
From: slaren
Date: Sun, 9 Feb 2025 17:20:09 +0100
Subject: [PATCH 5/7] better way to disable for arm

---
 ggml/src/ggml-cpu/ggml-cpu.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index 7906423e3a7eb..2bdf843d14f14 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -7824,21 +7824,23 @@ static void ggml_compute_forward_mul_mat_id(
 
         const int64_t nr0 = ne01;
         const int64_t nr1 = cne1;
-
-#if defined(__aarch64__)
-        // disable for ARM
-        int chunk_size = (nr0 + nr1) / nth;
-#else
         int chunk_size = 16;
         if (nr0 == 1 || nr1 == 1) {
             chunk_size = 64;
         }
+
+#if defined(__aarch64__)
+        // disable for ARM
+        const bool disable_chunking = true;
+#else
+        // disable for NUMA
+        const bool disable_chunking = ggml_is_numa();
 #endif // defined(__aarch64__)
 
         int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
         int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
 
-        if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) {
+        if (nchunk0 * nchunk1 < nth * 4 || disable_chunking) {
             nchunk0 = nr0 > nr1 ? nth : 1;
             nchunk1 = nr0 > nr1 ? 1 : nth;
         }

From 3ceb97d341db562d8bb2dc5bf6ab137e2acb4876 Mon Sep 17 00:00:00 2001
From: slaren
Date: Wed, 12 Feb 2025 00:33:23 +0100
Subject: [PATCH 6/7] fix uninitialized counter when using 1 thread only

---
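Notes (review commentary, not part of the commit message): in patch 2 the
per-matrix counters were reset only by threads with ith != 0 (the else branch
of if (ith == 0)), using a stride of nth - 1. With a single thread, ith is
always 0, so no counter was ever initialized. The fix has every thread,
including thread 0, participate in a plain strided reset. A toy illustration
of which counters each scheme touches:

    /* sketch: counter-reset coverage, old vs. new scheme */
    #include <stdio.h>

    int main(void) {
        const int n_as = 4; // number of expert matrices (hypothetical)
        for (int nth = 1; nth <= 2; ++nth) {
            printf("old, nth=%d:", nth);
            for (int ith = 1; ith < nth; ++ith) {        // old: thread 0 never resets
                for (int cur_a = ith - 1; cur_a < n_as; cur_a += (nth - 1)) {
                    printf(" ctr[%d]", cur_a);
                }
            }
            printf("\n");                                // empty line for nth == 1: nothing reset

            printf("new, nth=%d:", nth);
            for (int ith = 0; ith < nth; ++ith) {        // new: every thread resets its stride
                for (int cur_a = ith; cur_a < n_as; cur_a += nth) {
                    printf(" ctr[%d]", cur_a);
                }
            }
            printf("\n");                                // always covers all n_as counters
        }
        return 0;
    }

With nth == 1 the old scheme touches no counter at all, while the new scheme
always covers all n_as counters exactly once.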
 ggml/src/ggml-cpu/ggml-cpu.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index 2bdf843d14f14..5272b1c984b58 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -7800,12 +7800,12 @@ static void ggml_compute_forward_mul_mat_id(
             matrix_row_counts[i02] += 1;
         }
     }
-    } else {
-        // reset current_chunk
-        for (int cur_a = ith - 1; cur_a < n_as; cur_a += (nth - 1)) {
-            atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
-            *current_chunk_ctr = nth;
-        }
+    }
+
+    // reset current_chunk
+    for (int cur_a = ith; cur_a < n_as; cur_a += nth) {
+        atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
+        *current_chunk_ctr = nth;
     }
 
     ggml_barrier(params->threadpool);

From 946ececd26b21caae2532f347ea8b317fb587496 Mon Sep 17 00:00:00 2001
From: slaren
Date: Thu, 13 Feb 2025 00:57:53 +0100
Subject: [PATCH 7/7] revert test-backend-ops changes

---
 tests/test-backend-ops.cpp | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)

diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index 54ec515c71b8c..1bfd41254aa99 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -4345,21 +4345,6 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() {
         }
     }
 
-#if 0
-    for (int bs : {1, 64}) {
-        for (ggml_type type_a : {GGML_TYPE_Q4_0}) {
-            for (ggml_type type_b : {GGML_TYPE_F32}) {
-                int n_experts = 256;
-                int n_used = 8;
-                int n_embd = 7168;
-                int n_ff = 2048;
-                test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_experts, n_used, true, n_embd, bs, n_ff));
-                //test_cases.emplace_back(new test_mul_mat(type_a, type_b, n_embd, bs, n_ff, {1, 1}, {1, 1}));
-            }
-        }
-    }
-#endif
-
     for (int K : {3, 5}) {
         for (int IC : {256, 2560}) {
             for (int IW_IH : {32, 64, 256}) {
@@ -4493,7 +4478,7 @@ int main(int argc, char ** argv) {
             auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
             if (ggml_backend_set_n_threads_fn) {
                 // TODO: better value for n_threads
-                ggml_backend_set_n_threads_fn(backend, std::thread::hardware_concurrency() / 2);
+                ggml_backend_set_n_threads_fn(backend, std::thread::hardware_concurrency());
            }
 
             printf("  Device description: %s\n", ggml_backend_dev_description(dev));
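Notes (review commentary, not part of the commit message): the #if 0 perf case
removed here (256 experts, 8 used, n_embd 7168, n_ff 2048 — a
DeepSeek-V3-shaped MUL_MAT_ID) and the hardware_concurrency() / 2 thread count
were local benchmarking aids, reverted so the series lands with no net test
changes. To reproduce the measurements, something like

    test-backend-ops perf -o MUL_MAT_ID

should exercise this path (flag spelling from memory — check the tool's
--help), optionally re-enabling the removed #if 0 block for the large MoE
shape.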