Commit 17b7151

Update IQ3_M attn_k and IQ3_XL token_embd

committed · 1 parent e4c506d · commit 17b7151

File tree

1 file changed: +5 -4 lines changed

src/llama.cpp

Lines changed: 5 additions & 4 deletions
@@ -15899,11 +15899,12 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
                 new_type = GGML_TYPE_IQ2_S;
             }
             else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_XXS;
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XL) new_type = GGML_TYPE_IQ3_XXS;
             else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
             else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 || new_type == GGML_TYPE_Q4_0_8_8) {
                 new_type = GGML_TYPE_Q4_0;
             }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR) new_type = GGML_TYPE_IQ4_XS;
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XL) new_type = GGML_TYPE_IQ4_XS;
         }
     } else if (name.find("attn_v.weight") != std::string::npos) {
         if (qs.model.hparams.n_expert >= 4) {
@@ -16003,9 +16004,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && qs.model.hparams.n_gqa() < 2 && qs.model.hparams.n_expert < 2) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XL) &&
-            (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
-            new_type = GGML_TYPE_IQ4_XS;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XL) {
+            if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q5_K;
+            else if (qs.model.hparams.n_gqa() >= 2) new_type = GGML_TYPE_IQ4_XS;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
             new_type = GGML_TYPE_Q4_K;
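In plain terms: the first hunk gives token_embd.weight an IQ3_XXS upgrade for the IQ2_XL file type and moves the IQ4_XS upgrade from IQ4_XSR to IQ3_XL, while the second hunk reworks the attn_k.weight rule for IQ3_M and IQ3_XL so that models with strong grouped-query attention (n_gqa() >= 4) or several experts (n_expert >= 2) now get Q5_K, and moderate GQA (n_gqa() >= 2) keeps IQ4_XS. The sketch below restates that attn_k rule as a standalone C++ function; it is only an illustration under assumed names (QuantType, ModelShape, pick_attn_k_type are not llama.cpp identifiers), since the real decision is made inside llama_tensor_get_type() in src/llama.cpp.

// Minimal sketch of the new attn_k.weight rule for IQ3_M / IQ3_XL after this commit.
// The names below are hypothetical stand-ins, not llama.cpp API.
#include <cstdint>
#include <cstdio>

enum class QuantType { Default, IQ4_XS, Q5_K };  // stand-ins for the ggml_type values in the diff

struct ModelShape {
    uint32_t n_gqa;     // grouped-query-attention ratio, n_head / n_head_kv
    uint32_t n_expert;  // number of MoE experts (0 for dense models)
};

// Mirrors the added lines: high GQA or several experts promote attn_k to Q5_K,
// moderate GQA gets IQ4_XS, anything else keeps the quant mix's default type.
static QuantType pick_attn_k_type(const ModelShape & m) {
    if (m.n_gqa >= 4 || m.n_expert >= 2) return QuantType::Q5_K;
    if (m.n_gqa >= 2)                    return QuantType::IQ4_XS;
    return QuantType::Default;
}

int main() {
    const ModelShape gqa8  { 8, 0 };  // illustrative high-GQA dense model
    const ModelShape dense { 1, 0 };  // illustrative model without GQA or experts
    std::printf("gqa8  -> %d\n", static_cast<int>(pick_attn_k_type(gqa8)));   // prints 2 (Q5_K)
    std::printf("dense -> %d\n", static_cast<int>(pick_attn_k_type(dense)));  // prints 0 (Default)
}

A likely motivation, not stated in the commit message, is that attn_k.weight shrinks as GQA grows (its output dimension follows n_head_kv rather than n_head), so promoting it to Q5_K adds very little to the overall file size.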
