Skip to content

Commit f3c1d85

Browse files
committed
Remove Q4/Q5 bit shuffling without breaking compatibility
1 parent fe60904 commit f3c1d85

File tree

10 files changed

+150
-663
lines changed

10 files changed

+150
-663
lines changed

README.md

+12-12
Original file line numberDiff line numberDiff line change
@@ -330,18 +330,18 @@ As the models are currently fully loaded into memory, you will need adequate dis
330330
331331
Several quantization methods are supported. They differ in the resulting model disk size and inference speed.
332332
333-
| Model | Measure | F16 | Q4_0 | Q4_1 | Q4_2 | Q5_0 | Q5_1 | Q8_0 |
334-
|------:|--------------|-------:|-------:|-------:|-------:|-------:|-------:|-------:|
335-
| 7B | perplexity | 5.9066 | 6.1620 | 6.0910 | 6.1466 | 5.9862 | 5.9481 | 5.9069 |
336-
| 7B | file size | 13.0G | 4.0G | 4.8G | 4.0G | 4.4G | 4.8G | 7.1G |
337-
| 7B | ms/tok @ 4th | 128 | 56 | 61 | 84 | 91 | 95 | 75 |
338-
| 7B | ms/tok @ 8th | 128 | 47 | 55 | 48 | 53 | 59 | 75 |
339-
| 7B | bits/weight | 16.0 | 5.0 | 6.0 | 5.0 | 5.5 | 6.0 | 9.0 |
340-
| 13B | perplexity | 5.2543 | 5.3863 | 5.3607 | 5.3513 | 5.2856 | 5.2706 | 5.2548 |
341-
| 13B | file size | 25.0G | 7.6G | 9.1G | 7.6G | 8.4G | 9.1G | 14G |
342-
| 13B | ms/tok @ 4th | 239 | 104 | 113 | 160 | 176 | 185 | 141 |
343-
| 13B | ms/tok @ 8th | 240 | 85 | 99 | 97 | 108 | 117 | 147 |
344-
| 13B | bits/weight | 16.0 | 5.0 | 6.0 | 5.0 | 5.5 | 6.0 | 9.0 |
333+
| Model | Measure | F16 | Q4_0 | Q4_1 | Q5_0 | Q5_1 | Q8_0 |
334+
|------:|--------------|-------:|-------:|-------:|-------:|-------:|-------:|
335+
| 7B | perplexity | 5.9066 | 6.1620 | 6.0910 | 5.9862 | 5.9481 | 5.9069 |
336+
| 7B | file size | 13.0G | 4.0G | 4.8G | 4.4G | 4.8G | 7.1G |
337+
| 7B | ms/tok @ 4th | 128 | 56 | 61 | 91 | 95 | 75 |
338+
| 7B | ms/tok @ 8th | 128 | 47 | 55 | 53 | 59 | 75 |
339+
| 7B | bits/weight | 16.0 | 5.0 | 6.0 | 5.5 | 6.0 | 9.0 |
340+
| 13B | perplexity | 5.2543 | 5.3863 | 5.3607 | 5.2856 | 5.2706 | 5.2548 |
341+
| 13B | file size | 25.0G | 7.6G | 9.1G | 8.4G | 9.1G | 14G |
342+
| 13B | ms/tok @ 4th | 239 | 104 | 113 | 176 | 185 | 141 |
343+
| 13B | ms/tok @ 8th | 240 | 85 | 99 | 108 | 117 | 147 |
344+
| 13B | bits/weight | 16.0 | 5.0 | 6.0 | 5.5 | 6.0 | 9.0 |
345345
346346
### Perplexity (measuring model quality)
347347

SHA256SUMS

-4
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,12 @@
22
666a4bb533b303bdaf89e1b6a3b6f93535d868de31d903afdc20983dc526c847 models/7B/ggml-model-f16.bin
33
99aeb35f26b577fa2732716cca4d8b5ada39a78ea9b2dca2651fc632b5d101b6 models/7B/ggml-model-q4_0.bin
44
cc061458339a3eb8bcecbf0a825e9924fb7d1a8150f63cd5d091caa99215aafe models/7B/ggml-model-q4_1.bin
5-
25b050337a87344da687a7f2adddc03bd99b7f6c140450e836649f3585fb6496 models/7B/ggml-model-q4_2.bin
65
7e89e242ddc0dd6f060b43ca219ce8b3e8f08959a72cb3c0855df8bb04d46265 models/7B/params.json
76
745bf4e29a4dd6f411e72976d92b452da1b49168a4f41c951cfcc8051823cf08 models/13B/consolidated.00.pth
87
d5ccbcc465c71c0de439a5aeffebe8344c68a519bce70bc7f9f92654ee567085 models/13B/consolidated.01.pth
98
2b206e9b21fb1076f11cafc624e2af97c9e48ea09312a0962153acc20d45f808 models/13B/ggml-model-f16.bin
109
eecb575d325d935157761172e2bf05984dad216eb2b06777b73463cf9b818bab models/13B/ggml-model-q4_0.bin
1110
d9581b5b88e5622532fe897c9f9b0e67a317d22dd27a6f90fa4ab8c6d23ccdbb models/13B/ggml-model-q4_1.bin
12-
75a218a47df03f5f96354656329864613abcb67779412b9bc2282b28c1c3cbaa models/13B/ggml-model-q4_2.bin
1311
4ab77bec4d4405ccb66a97b282574c89a94417e3c32e5f68f37e2876fc21322f models/13B/params.json
1412
e23294a58552d8cdec5b7e8abb87993b97ea6eced4178ff2697c02472539d067 models/30B/consolidated.00.pth
1513
4e077b7136c7ae2302e954860cf64930458d3076fcde9443f4d0e939e95903ff models/30B/consolidated.01.pth
@@ -18,7 +16,6 @@ e23294a58552d8cdec5b7e8abb87993b97ea6eced4178ff2697c02472539d067 models/30B/con
1816
7e1b524061a9f4b27c22a12d6d2a5bf13b8ebbea73e99f218809351ed9cf7d37 models/30B/ggml-model-f16.bin
1917
517b9e525742c42b5478a6280a4b41ec66f46298c57aba7f0453d491682fe42d models/30B/ggml-model-q4_0.bin
2018
7b75ac615fa369ee593493a7e6ef87542bf0350255db928b22c5a24f6d598bcd models/30B/ggml-model-q4_1.bin
21-
aadbc9cf806313a55be570f62884eed289d30c313fac3b7838717e01bd553204 models/30B/ggml-model-q4_2.bin
2219
2c07118ea98d69dbe7810d88520e30288fa994751b337f8fca02b171955f44cb models/30B/params.json
2320
135c563f6b3938114458183afb01adc9a63bef3d8ff7cccc3977e5d3664ecafe models/65B/consolidated.00.pth
2421
9a600b37b19d38c7e43809485f70d17d1dc12206c07efa83bc72bb498a568bde models/65B/consolidated.01.pth
@@ -31,6 +28,5 @@ d27f5b0677d7ff129ceacd73fd461c4d06910ad7787cf217b249948c3f3bc638 models/65B/con
3128
60758f2384d74e423dffddfd020ffed9d3bb186ebc54506f9c4a787d0f5367b0 models/65B/ggml-model-f16.bin
3229
01672072136f8be6ca9d7cebe5f86ed316e8b85851b9fe3de951809233cea4f2 models/65B/ggml-model-q4_0.bin
3330
4743a28aac3e5f32a6e838a815f51d3779de44fbbe251d745251e66c23c5950f models/65B/ggml-model-q4_1.bin
34-
1b6f6588d0e2ecfe6c4d849088e48e5e3083466b962daa32e3261363e21fc5e9 models/65B/ggml-model-q4_2.bin
3531
999ed1659b469ccc2a941714c0a9656fa571d17c9f7c8c7589817ca90edef51b models/65B/params.json
3632
9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 models/tokenizer.model

examples/quantize/quantize.cpp

+5-6
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,11 @@
77
#include <string>
88

99
static const std::map<std::string, llama_ftype> LLAMA_FTYPE_MAP = {
10-
{"q4_0", LLAMA_FTYPE_MOSTLY_Q4_0},
11-
{"q4_1", LLAMA_FTYPE_MOSTLY_Q4_1},
12-
{"q4_2", LLAMA_FTYPE_MOSTLY_Q4_2},
13-
{"q5_0", LLAMA_FTYPE_MOSTLY_Q5_0},
14-
{"q5_1", LLAMA_FTYPE_MOSTLY_Q5_1},
15-
{"q8_0", LLAMA_FTYPE_MOSTLY_Q8_0},
10+
{"q4_0", LLAMA_FTYPE_MOSTLY_Q4_0},
11+
{"q4_1", LLAMA_FTYPE_MOSTLY_Q4_1},
12+
{"q5_0", LLAMA_FTYPE_MOSTLY_Q5_0},
13+
{"q5_1", LLAMA_FTYPE_MOSTLY_Q5_1},
14+
{"q8_0", LLAMA_FTYPE_MOSTLY_Q8_0},
1615
};
1716

1817
bool try_parse_ftype(const std::string & ftype_str, llama_ftype & ftype, std::string & ftype_str_out) {

ggml-cuda.cu

-37
Original file line numberDiff line numberDiff line change
@@ -49,13 +49,6 @@ typedef struct {
4949
} block_q4_1;
5050
static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");
5151

52-
#define QK4_2 16
53-
typedef struct {
54-
half d; // delta
55-
uint8_t qs[QK4_2 / 2]; // nibbles / quants
56-
} block_q4_2;
57-
static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding");
58-
5952
#define QK5_0 32
6053
typedef struct {
6154
half d; // delta
@@ -127,29 +120,6 @@ static __global__ void dequantize_block_q4_1(const void * vx, float * y) {
127120
}
128121
}
129122

130-
static __global__ void dequantize_block_q4_2(const void * vx, float * y) {
131-
const block_q4_2 * x = (const block_q4_2 *) vx;
132-
133-
const int i = blockIdx.x;
134-
135-
const float d = x[i].d;
136-
137-
const uint8_t * pp = x[i].qs;
138-
139-
for (int l = 0; l < QK4_2; l += 2) {
140-
const uint8_t vi = pp[l/2];
141-
142-
const int8_t vi0 = vi & 0xf;
143-
const int8_t vi1 = vi >> 4;
144-
145-
const float v0 = (vi0 - 8)*d;
146-
const float v1 = (vi1 - 8)*d;
147-
148-
y[i*QK4_2 + l + 0] = v0;
149-
y[i*QK4_2 + l + 1] = v1;
150-
}
151-
}
152-
153123
static __global__ void dequantize_block_q5_0(const void * vx, float * y) {
154124
const block_q5_0 * x = (const block_q5_0 *) vx;
155125

@@ -235,11 +205,6 @@ static void dequantize_row_q4_1_cuda(const void * vx, float * y, int k, cudaStre
235205
dequantize_block_q4_1<<<nb, 1, 0, stream>>>(vx, y);
236206
}
237207

238-
static void dequantize_row_q4_2_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
239-
const int nb = k / QK4_2;
240-
dequantize_block_q4_2<<<nb, 1, 0, stream>>>(vx, y);
241-
}
242-
243208
static void dequantize_row_q5_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
244209
const int nb = k / QK5_0;
245210
dequantize_block_q5_0<<<nb, 1, 0, stream>>>(vx, y);
@@ -274,8 +239,6 @@ static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
274239
return dequantize_row_q4_0_cuda;
275240
case GGML_TYPE_Q4_1:
276241
return dequantize_row_q4_1_cuda;
277-
case GGML_TYPE_Q4_2:
278-
return dequantize_row_q4_2_cuda;
279242
case GGML_TYPE_Q5_0:
280243
return dequantize_row_q5_0_cuda;
281244
case GGML_TYPE_Q5_1:

ggml-opencl.c

+1-29
Original file line numberDiff line numberDiff line change
@@ -52,26 +52,6 @@ __kernel void dequantize_row_q4_1(__global struct block_q4_1* blocks, __global f
5252
result[index + 1] = (vi >> 4) * d + m;
5353
}
5454

55-
struct block_q4_2
56-
{
57-
ushort d;
58-
uchar qs[8];
59-
};
60-
61-
__kernel void dequantize_row_q4_2(__global struct block_q4_2* blocks, __global float* result) {
62-
const uint i = get_global_id(0) / 16;
63-
const uint l = get_local_id(0);
64-
65-
const float d = vload_half(0, (__global half*) &blocks[i].d);
66-
67-
const uchar vi = blocks[i].qs[l];
68-
69-
const uint index = i*16 + l*2;
70-
result[index + 0] = ((vi & 0xf) - 8)*d;
71-
result[index + 1] = ((vi >> 4) - 8)*d;
72-
}
73-
74-
7555
struct block_q5_0
7656
{
7757
float d;
@@ -167,7 +147,7 @@ static cl_device_id device;
167147
static cl_context context;
168148
static cl_command_queue queue;
169149
static cl_program program;
170-
static cl_kernel kernel_q4_0, kernel_q4_1, kernel_q4_2, kernel_q5_0, kernel_q5_1, kernel_q8_0;
150+
static cl_kernel kernel_q4_0, kernel_q4_1, kernel_q5_0, kernel_q5_1, kernel_q8_0;
171151
static cl_mem cl_buffer_a, cl_buffer_qb, cl_buffer_b, cl_buffer_c;
172152
static size_t cl_size_a = 0, cl_size_qb = 0, cl_size_b = 0, cl_size_c = 0;
173153

@@ -238,8 +218,6 @@ void ggml_cl_init(void) {
238218
CL_CHECK(err, "clCreateKernel");
239219
kernel_q4_1 = clCreateKernel(program, "dequantize_row_q4_1", &err);
240220
CL_CHECK(err, "clCreateKernel");
241-
kernel_q4_2 = clCreateKernel(program, "dequantize_row_q4_2", &err);
242-
CL_CHECK(err, "clCreateKernel");
243221
kernel_q5_0 = clCreateKernel(program, "dequantize_row_q5_0", &err);
244222
CL_CHECK(err, "clCreateKernel");
245223
kernel_q5_1 = clCreateKernel(program, "dequantize_row_q5_1", &err);
@@ -292,12 +270,6 @@ void ggml_cl_sgemm_wrapper(
292270
local = 16;
293271
size_qb = global * (sizeof(float) * 2 + local) / 32;
294272
break;
295-
case GGML_TYPE_Q4_2:
296-
dequant = true;
297-
kernel = kernel_q4_2;
298-
local = 8;
299-
size_qb = global * (sizeof(ggml_fp16_t) + local) / 16;
300-
break;
301273
case GGML_TYPE_Q5_0:
302274
dequant = true;
303275
kernel = kernel_q5_0;

0 commit comments

Comments (0)