Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ggml : testing GPU FP precision via quantized CPY #4698

Closed
wants to merge 1 commit into the base branch from the author's branch (branch names not captured in this page extract)
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions ggml-metal.m
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,10 @@ static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
options = [MTLCompileOptions new];
options.preprocessorMacros = @{ @"QK_K" : @(64) };
#endif
// disable fast math
// NOTE: this seems to have no effect whatsoever
//[options setFastMathEnabled:false];

ctx->library = [ctx->device newLibraryWithSource:src options:options error:&error];
}

Expand Down
5 changes: 5 additions & 0 deletions ggml-quants.c
Original file line number Diff line number Diff line change
Expand Up @@ -1103,13 +1103,18 @@ void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int
}
}

#include <stdio.h>

void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
static const int qk = QK4_1;

assert(k % qk == 0);

const int nb = k / qk;

printf("d = %9f\n", GGML_FP16_TO_FP32(x[0].d));
printf("m = %9f\n", GGML_FP16_TO_FP32(x[0].m));

for (int i = 0; i < nb; i++) {
const float d = GGML_FP16_TO_FP32(x[i].d);
const float m = GGML_FP16_TO_FP32(x[i].m);
Expand Down
21 changes: 14 additions & 7 deletions tests/test-backend-ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -437,11 +437,12 @@ struct test_case {
double err = nmse(f1.data(), f2.data(), f1.size());
if (err > ud->max_err) {
printf("[%s] NMSE = %f ", ggml_op_desc(t1), err);
//for (int i = 0; i < f1.size(); i++) {
// printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
//}
//printf("\n");
//exit(1);
printf("\n");
for (int i = 0; i < f1.size(); i++) {
printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
}
printf("\n");
exit(1);
ud->ok = false;
}
return true;
Expand Down Expand Up @@ -1459,8 +1460,14 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op

test_cases.emplace_back(new test_dup());

for (ggml_type type : all_types) {
test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, type, {256, 10, 10, 1}));
//for (ggml_type type : all_types) {
// test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, type, {256, 10, 10, 1}));
//}

for (ggml_type type : { GGML_TYPE_Q4_1} ) {
for (int i = 0; i < 2048; ++i) {
test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, type, {32, 1, 1, 1}));
}
}

test_cases.emplace_back(new test_cont());
Expand Down
Loading