Skip to content

Commit 38229d3

Browse files
committed
Fix specify tensors in quantize
1 parent b947b6e commit 38229d3

File tree

1 file changed

+68
-4
lines changed

1 file changed

+68
-4
lines changed

examples/quantize/quantize.cpp

Lines changed: 68 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -122,19 +122,35 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
122122
//
123123
[[noreturn]]
124124
static void usage(const char * executable) {
125-
printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
125+
printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
126126
printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
127127
printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
128128
printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
129129
printf(" --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
130130
printf(" --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
131131
printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
132-
printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
133-
printf(" --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
132+
printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
133+
printf(" --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n\n");
134+
printf("Additional specific tensor quantization types used in the custom quant scheme 'CQS' (default is Q2_K):\n");
135+
printf(" --attn-q-type ggml_type: use this ggml_type for the attn_q.weight tensor.\n");
136+
printf(" --attn-k-type ggml_type: use this ggml_type for the attn_k.weight tensor.\n");
137+
printf(" --attn-v-type ggml_type: use this ggml_type for the attn_v.weight tensor.\n");
138+
printf(" --attn-qkv-type ggml_type: use this ggml_type for the attn_qkv.weight tensor.\n");
139+
printf(" --attn-output-type ggml_type: use this ggml_type for the attn_output.weight tensor.\n");
140+
printf(" --ffn-gate-type ggml_type: use this ggml_type for the ffn_gate tensor.\n");
141+
printf(" --ffn-down-type ggml_type: use this ggml_type for the ffn_down tensor.\n");
142+
printf(" --ffn-up-type ggml_type: use this ggml_type for the ffn_up tensor.\n\n");
134143
printf(" --keep-split: will generate quantized model in the same shards as input\n");
135144
printf(" --override-kv KEY=TYPE:VALUE\n");
136-
printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
145+
printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n\n");
137146
printf("Note: --include-weights and --exclude-weights cannot be used together\n");
147+
printf("Note: The token embeddings tensor is loaded in system RAM, even in case of full GPU/VRAM offload.\n");
148+
printf("Note: The recommended type for the output tensor is q6_K for the ffn types > iq3_xxs and < q8_0.\n\n");
149+
printf("Note for the Custom Quant Scheme FTYPE:\n");
150+
printf(" Write the specific tensor legacy quants as qN_N, the K-Quants as qN_K, the IQ-Quants as iqN_xx.\n");
151+
printf(" Usually, attn-q-type can be one type below the chosen ffn type, and attn-v-type should be one type above.\n");
152+
printf(" attn-qkv-type replaces the types attn-q, attn-k and attn-v on some models.\n");
153+
//TODO: - eventually - harmonize the CAPS writing of the FTYPEs, and non CAPS writing of the GGML_TYPEs.
138154
printf("\nAllowed quantization types:\n");
139155
for (auto & it : QUANT_OPTIONS) {
140156
if (it.name != "COPY") {
@@ -294,6 +310,54 @@ int main(int argc, char ** argv) {
294310
} else {
295311
usage(argv[0]);
296312
}
313+
} else if (strcmp(argv[arg_idx], "--attn-q-type") == 0) {
314+
if (arg_idx < argc-1) {
315+
params.attn_q_type = parse_ggml_type(argv[++arg_idx]);
316+
} else {
317+
usage(argv[0]);
318+
}
319+
} else if (strcmp(argv[arg_idx], "--attn-k-type") == 0) {
320+
if (arg_idx < argc-1) {
321+
params.attn_k_type = parse_ggml_type(argv[++arg_idx]);
322+
} else {
323+
usage(argv[0]);
324+
}
325+
} else if (strcmp(argv[arg_idx], "--attn-v-type") == 0) {
326+
if (arg_idx < argc-1) {
327+
params.attn_v_type = parse_ggml_type(argv[++arg_idx]);
328+
} else {
329+
usage(argv[0]);
330+
}
331+
} else if (strcmp(argv[arg_idx], "--attn-qkv-type") == 0) {
332+
if (arg_idx < argc-1) {
333+
params.attn_qkv_type = parse_ggml_type(argv[++arg_idx]);
334+
} else {
335+
usage(argv[0]);
336+
}
337+
} else if (strcmp(argv[arg_idx], "--attn-output-type") == 0) {
338+
if (arg_idx < argc-1) {
339+
params.attn_output_type = parse_ggml_type(argv[++arg_idx]);
340+
} else {
341+
usage(argv[0]);
342+
}
343+
} else if (strcmp(argv[arg_idx], "--ffn-gate-type") == 0) {
344+
if (arg_idx < argc-1) {
345+
params.ffn_gate_type = parse_ggml_type(argv[++arg_idx]);
346+
} else {
347+
usage(argv[0]);
348+
}
349+
} else if (strcmp(argv[arg_idx], "--ffn-down-type") == 0) {
350+
if (arg_idx < argc-1) {
351+
params.ffn_down_type = parse_ggml_type(argv[++arg_idx]);
352+
} else {
353+
usage(argv[0]);
354+
}
355+
} else if (strcmp(argv[arg_idx], "--ffn-up-type") == 0) {
356+
if (arg_idx < argc-1) {
357+
params.ffn_up_type = parse_ggml_type(argv[++arg_idx]);
358+
} else {
359+
usage(argv[0]);
360+
}
297361
} else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
298362
if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
299363
usage(argv[0]);

0 commit comments

Comments
 (0)