Commit a2500c1

Crack down fallback GGML_types
1 parent 75b8800 commit a2500c1

File tree: 1 file changed (+4, -4 lines)


src/llama.cpp

Lines changed: 4 additions & 4 deletions
@@ -19667,10 +19667,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             case GGML_TYPE_IQ1_M:
             case GGML_TYPE_Q2_K:
             case GGML_TYPE_Q3_K:
-            case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
-            case GGML_TYPE_Q4_K:   new_type = GGML_TYPE_Q5_0;   break;
-            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q5_1;   break;
-            case GGML_TYPE_Q6_K:   new_type = GGML_TYPE_Q8_0;   break;
+            case GGML_TYPE_IQ4_XS:
+            case GGML_TYPE_Q4_K:   new_type = GGML_TYPE_IQ4_NL; break;
+            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q5_0;   break;
+            case GGML_TYPE_Q6_K:   new_type = GGML_TYPE_Q5_1;   break;
             default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
         }
         if (tensor->ne[0] % ggml_blck_size(new_type) != 0) {
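
For context on what this switch does: the K-quants and GGML_TYPE_IQ4_XS pack weights in 256-element super-blocks, while the fallback types (GGML_TYPE_IQ4_NL, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1) use 32-element blocks, so a row whose length is not a multiple of 256 can often still be quantized with one of them; the trailing context line shows the final escape hatch when even the fallback's block size does not divide the row. Below is a minimal standalone C++ sketch of that selection pattern using the post-commit mapping. The QuantType enum, block_size(), and pick_fallback() are illustrative stand-ins, not ggml's actual API; only the 256/32 block sizes correspond to real ggml constants.

    // Standalone sketch (not llama.cpp code): pick a fallback quant type when a
    // row width is not a multiple of the desired type's block size, mirroring the
    // switch changed in this commit.
    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>

    enum class QuantType { IQ4_XS, Q4_K, Q5_K, Q6_K, IQ4_NL, Q5_0, Q5_1, F16 };

    static int64_t block_size(QuantType t) {
        switch (t) {
            case QuantType::IQ4_XS:
            case QuantType::Q4_K:
            case QuantType::Q5_K:
            case QuantType::Q6_K:   return 256;  // QK_K super-block
            case QuantType::IQ4_NL:
            case QuantType::Q5_0:
            case QuantType::Q5_1:   return 32;   // legacy / non-linear block
            case QuantType::F16:    return 1;    // unquantized, always fits
        }
        return 1;
    }

    // Mirrors the post-commit mapping: 256-wide types drop to a 32-wide type of
    // roughly similar bits-per-weight when ne0 is not divisible by 256.
    static QuantType pick_fallback(QuantType wanted, int64_t ne0) {
        if (ne0 % block_size(wanted) == 0) return wanted;       // fits as-is
        QuantType fallback;
        switch (wanted) {
            case QuantType::IQ4_XS:
            case QuantType::Q4_K: fallback = QuantType::IQ4_NL; break;
            case QuantType::Q5_K: fallback = QuantType::Q5_0;   break;
            case QuantType::Q6_K: fallback = QuantType::Q5_1;   break;
            default: throw std::runtime_error("unsupported tensor size");
        }
        // Last resort: if even the 32-wide block does not divide the row, use f16.
        if (ne0 % block_size(fallback) != 0) fallback = QuantType::F16;
        return fallback;
    }

    int main() {
        // 7168 is a multiple of 256, so Q4_K is kept;
        // 7200 is not (7200 = 28*256 + 32), so it drops to IQ4_NL.
        std::printf("7168 -> %s\n", pick_fallback(QuantType::Q4_K, 7168) == QuantType::Q4_K   ? "Q4_K"   : "fallback");
        std::printf("7200 -> %s\n", pick_fallback(QuantType::Q4_K, 7200) == QuantType::IQ4_NL ? "IQ4_NL" : "other");
    }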
