Skip to content

Commit 9339f91

Browse files
compiladearthw
authored and committed
ggml-quants : ternary packing for TriLMs and BitNet b1.58 (ggml-org#8151)
* ggml-quants : 1.625 bpw ternary packing for BitNet 1.58b * ggml-quants : faster 1.625 bpw AVX2 vec_dot Not using a lookup table anymore makes it match q4_0 speed. * gguf-py : fix formatting * llama : remove spaces on empty line * ggml-quants : subtract 1 when back in epi8 This makes the 1.625 bpw type go faster than q4_0. Still not the fastest. * ggml-quants : Q2_2 now faster than Q4_K on with AVX2 * ggml-quants : cleanup Q1_3 code formatting * ggml-quants : ARM NEON vec_dot for q2_2 and q1_3 * ggml-quants : use ceiling division when quantizing q1_3 * convert-hf : simplify BitNet pre-quantization This still results in the exact same tensor weights and scales, but it reveals some weirdness in the current algorithm. * convert-hf : allow converting the weird BitNet 1.3B Its FFN size is 5460 which is not convenient. The offending tensors are kept in F16, which makes the final model 5.01 bpw. * bitnet : replace 1.58b with b1.58, as in the paper * ggml-quants : fix build failure on Windows * ggml-quants : attempt to fix Arm 32-bit support * ggml : add some informative comments in q1_3 vec_dot * ggml : add TQ1_0 and TQ2_0 ternary quantization types * ggml : even faster TQ2_0 * ggml : also faster TQ1_0 Same optimization as for TQ2_0 by offsetting the sum instead of the weights. This makes TQ1_0 almost as fast as Q8_0 on AVX2. * ggml : fix build issues in certain environments * ggml : add NEON vec_dot implementation for TQ1_0 and TQ2_0 * ggml : avoid directly using vmlal_high_s8, for 32-bit ARM compat The compiler seems smart enough to use the same instruction even when using vget_high_s8 instead. * ggml : remove q1_3 and q2_2 No more 1.625 bpw and 2.000 bpw, now instead using 1.6875 bpw and 2.0625 bpw with TQ1_0 and TQ2_0, respectively. * llama : remove the separate scale tensors of BitNet b1.58 They won't be needed, since the remaining ternary quant types have built-in scales. 
* ggml-quants : rename fields of TQ1_0 and TQ2_0 structs for consistency * ggml-quants : allow using vdotq_s32 in TQ2_0 vec_dot Not yet tested on hardware which supports it, might not work or might not even compile. But also it might. It should make the performance better on recent ARM CPUs. * ggml-quants : remove comment about possible format change of TQ2_0 Making it slightly more convenient for AVX512 but less convenient for everything else is not worth the trouble. * gguf-py : Numpy (de)quantization for TQ1_0 and TQ2_0 * ggml-quants : use roundf instead of nearest_int for TQ1_0 and TQ2_0 This does not change anything for ternary models, since their values should never end up being in halfway cases anyway. * convert : allow direct conversion to TQ1_0 and TQ2_0 The token embeddings and output tensors are kept in F16 to allow quantizing them to Q4_K and Q6_K with llama-quantize. * llama : handle fallback for TQ1_0 and TQ2_0 with Q4_0 Q4_0 is not completely symmetric (so not lossless for ternary models), but it should be good enough. * ggml-quants : allow using ARM dot product instructions for TQ1_0 * ggml-quants : deduplicate TQ1_0 and TQ2_0 __ARM_FEATURE_DOTPROD support * ggml : remove unused ggml_mul special case It would otherwise conflict with the more general optimization coming with Mamba-2. * ggml : handle TQ1_0 and TQ2_0 in dequantization-based operators * test-backend-ops : add TQ1_0 and TQ2_0 comments for later Not yet adding uncommented, because some backends like SYCL and Metal do not properly handle unknown types in supports_op for GGML_OP_MUL_MAT. (and Metal also doesn't handle it with GGML_OP_GET_ROWS) Support for TQ1_0 and TQ2_0 for other backends than CPU will be added in follow-up pull requests.
1 parent 39a25ae commit 9339f91

15 files changed

+937
-35
lines changed

convert_hf_to_gguf.py

+33-14
Original file line numberDiff line numberDiff line change
@@ -308,6 +308,20 @@ def prepare_tensors(self):
308308
):
309309
data_qtype = gguf.GGMLQuantizationType.F32
310310

311+
if data_qtype is False and any(
312+
self.match_model_tensor_name(new_name, key, bid)
313+
for key in (
314+
gguf.MODEL_TENSOR.TOKEN_EMBD,
315+
gguf.MODEL_TENSOR.OUTPUT,
316+
)
317+
):
318+
if self.ftype in (
319+
gguf.LlamaFileType.MOSTLY_TQ1_0,
320+
gguf.LlamaFileType.MOSTLY_TQ2_0,
321+
):
322+
# TODO: use Q4_K and Q6_K
323+
data_qtype = gguf.GGMLQuantizationType.F16
324+
311325
# No override (data_qtype is False), or wants to be quantized (data_qtype is True)
312326
if isinstance(data_qtype, bool):
313327
if self.ftype == gguf.LlamaFileType.ALL_F32:
@@ -318,6 +332,10 @@ def prepare_tensors(self):
318332
data_qtype = gguf.GGMLQuantizationType.BF16
319333
elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
320334
data_qtype = gguf.GGMLQuantizationType.Q8_0
335+
elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0:
336+
data_qtype = gguf.GGMLQuantizationType.TQ1_0
337+
elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0:
338+
data_qtype = gguf.GGMLQuantizationType.TQ2_0
321339
else:
322340
raise ValueError(f"Unknown file type: {self.ftype.name}")
323341

@@ -1623,15 +1641,16 @@ def set_gguf_parameters(self):
16231641
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
16241642
self.gguf_writer.add_rope_scaling_factor(1.0)
16251643

1626-
def weight_quant(self, weight):
1644+
def weight_quant(self, weight: Tensor) -> Tensor:
16271645
dtype = weight.dtype
16281646
weight = weight.float()
1629-
s = 1 / weight.abs().mean().clamp(min=1e-5)
1630-
weight = (weight * s).round().clamp(-1, 1) / s
1631-
scale = weight.abs().max().unsqueeze(0)
1632-
weight = torch.where(weight.abs().less(1e-6), 0, weight).type(dtype)
1633-
weight = torch.sign(weight).type(dtype)
1634-
return weight.type(dtype), scale.type(torch.float32)
1647+
scale = weight.abs().mean().clamp(min=1e-5)
1648+
iscale = 1 / scale
1649+
# TODO: multiply by the scale directly instead of inverting it twice
1650+
# (this is also unnecessarily doubly inverted upstream)
1651+
# ref: https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/af89e318d78a70802061246bf037199d2fb97020/utils_quant.py#L10
1652+
result = (weight * iscale).round().clamp(-1, 1) / iscale
1653+
return result.type(dtype)
16351654

16361655
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
16371656
new_name = self.map_tensor_name(name)
@@ -1646,11 +1665,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
16461665
gguf.MODEL_TENSOR.FFN_GATE,
16471666
]):
16481667
# transform weight into 1/0/-1 (in fp32)
1649-
weight_torch, scale_torch = self.weight_quant(data_torch)
1650-
yield (new_name, weight_torch)
1651-
yield (new_name.removesuffix(".weight") + ".scale", scale_torch)
1652-
else:
1653-
yield (new_name, data_torch)
1668+
data_torch = self.weight_quant(data_torch)
1669+
1670+
yield (new_name, data_torch)
16541671

16551672

16561673
@Model.register("GrokForCausalLM")
@@ -4011,8 +4028,8 @@ def parse_args() -> argparse.Namespace:
40114028
help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
40124029
)
40134030
parser.add_argument(
4014-
"--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16",
4015-
help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
4031+
"--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16",
4032+
help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
40164033
)
40174034
parser.add_argument(
40184035
"--bigendian", action="store_true",
@@ -4099,6 +4116,8 @@ def main() -> None:
40994116
"f16": gguf.LlamaFileType.MOSTLY_F16,
41004117
"bf16": gguf.LlamaFileType.MOSTLY_BF16,
41014118
"q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
4119+
"tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0,
4120+
"tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0,
41024121
"auto": gguf.LlamaFileType.GUESSED,
41034122
}
41044123

examples/quantize/quantize.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,8 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
2626
{ "IQ2_M", LLAMA_FTYPE_MOSTLY_IQ2_M, " 2.7 bpw quantization", },
2727
{ "IQ1_S", LLAMA_FTYPE_MOSTLY_IQ1_S, " 1.56 bpw quantization", },
2828
{ "IQ1_M", LLAMA_FTYPE_MOSTLY_IQ1_M, " 1.75 bpw quantization", },
29+
{ "TQ1_0", LLAMA_FTYPE_MOSTLY_TQ1_0, " 1.69 bpw ternarization", },
30+
{ "TQ2_0", LLAMA_FTYPE_MOSTLY_TQ2_0, " 2.06 bpw ternarization", },
2931
{ "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.96G, +3.5199 ppl @ Llama-3-8B", },
3032
{ "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.96G, +3.1836 ppl @ Llama-3-8B", },
3133
{ "IQ3_XXS", LLAMA_FTYPE_MOSTLY_IQ3_XXS, " 3.06 bpw quantization", },

ggml/include/ggml.h

+2
Original file line numberDiff line numberDiff line change
@@ -395,6 +395,8 @@ extern "C" {
395395
GGML_TYPE_Q4_0_4_4 = 31,
396396
GGML_TYPE_Q4_0_4_8 = 32,
397397
GGML_TYPE_Q4_0_8_8 = 33,
398+
GGML_TYPE_TQ1_0 = 34,
399+
GGML_TYPE_TQ2_0 = 35,
398400
GGML_TYPE_COUNT,
399401
};
400402

ggml/src/ggml-common.h

+20
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,25 @@ typedef struct {
227227
} block_q8_0x8;
228228
static_assert(sizeof(block_q8_0x8) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong q8_0x8 block size/padding");
229229

230+
//
231+
// Ternary quantization
232+
//
233+
234+
// 1.6875 bpw
235+
typedef struct {
236+
uint8_t qs[(QK_K - 4 * QK_K / 64) / 5]; // 5 elements per byte (3^5 = 243 < 256)
237+
uint8_t qh[QK_K/64]; // 4 elements per byte
238+
ggml_half d;
239+
} block_tq1_0;
240+
static_assert(sizeof(block_tq1_0) == sizeof(ggml_half) + QK_K / 64 + (QK_K - 4 * QK_K / 64) / 5, "wrong tq1_0 block size/padding");
241+
242+
// 2.0625 bpw
243+
typedef struct {
244+
uint8_t qs[QK_K/4]; // 2 bits per element
245+
ggml_half d;
246+
} block_tq2_0;
247+
static_assert(sizeof(block_tq2_0) == sizeof(ggml_half) + QK_K / 4, "wrong tq2_0 block size/padding");
248+
230249
//
231250
// Super-block quantization structures
232251
//
@@ -361,6 +380,7 @@ typedef struct {
361380
} block_iq3_s;
362381
static_assert(sizeof(block_iq3_s) == sizeof(ggml_half) + 13*(QK_K/32) + IQ3S_N_SCALE, "wrong iq3_s block size/padding");
363382

383+
// 1.5625 bpw
364384
typedef struct {
365385
ggml_half d;
366386
uint8_t qs[QK_K/8];

ggml/src/ggml-impl.h

+4-7
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ typedef __fp16 ggml_fp16_internal_t;
175175

176176
// 32-bit ARM compatibility
177177

178-
// vaddvq_s16
178+
// vaddlvq_s16
179179
// vpaddq_s16
180180
// vpaddq_s32
181181
// vaddvq_s32
@@ -185,12 +185,9 @@ typedef __fp16 ggml_fp16_internal_t;
185185
// vzip1_u8
186186
// vzip2_u8
187187

188-
inline static int32_t vaddvq_s16(int16x8_t v) {
189-
return
190-
(int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
191-
(int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
192-
(int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
193-
(int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
188+
inline static int32_t vaddlvq_s16(int16x8_t v) {
189+
int32x4_t v0 = vreinterpretq_s32_s64(vpaddlq_s32(vpaddlq_s16(v)));
190+
return vgetq_lane_s32(v0, 0) + vgetq_lane_s32(v0, 2);
194191
}
195192

196193
inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {

0 commit comments

Comments
 (0)