LLM: add new qtype woq_int4 to temporarily support gemm int4. (#12706)
This PR adds a temporary qtype woq_int4 to avoid affecting other qtypes and models.

Co-authored-by: leonardozcm <leonardo1997zcm@gmail.com>
lalalapotter and leonardozcm authored Jan 15, 2025
1 parent 6d03d06 commit 9930351
Showing 3 changed files with 8 additions and 6 deletions.
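
As context for the diff below, here is a minimal, hypothetical usage sketch. It assumes the high-level `load_in_low_bit` argument of ipex_llm's `AutoModelForCausalLM.from_pretrained` accepts the new `"woq_int4"` string (this commit only registers the qtype code and its conversion/check paths, so loader-side wiring is an assumption) and uses a placeholder model id:

```python
# Hypothetical sketch: assumes load_in_low_bit accepts the new "woq_int4"
# string; this commit itself only registers the qtype and its code paths.
from ipex_llm.transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",   # placeholder model id
    load_in_low_bit="woq_int4",        # temporary qtype added by this commit
    trust_remote_code=True,
)
model = model.to("xpu")                # the touched code paths target XPU
```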
1 change: 1 addition & 0 deletions python/llm/src/ipex_llm/ggml/quantize.py
@@ -53,6 +53,7 @@
"sym_int4_rtn": 31,
"sym_int8_rtn": 32,
"asym_int4_rtn": 33,
"woq_int4": 34,
}

# mixed precision from llama.cpp
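
The only change in this file is the new mapping entry; a minimal sketch of the lookup it enables, using nothing beyond the dictionary shown above:

```python
from ipex_llm.ggml.quantize import ggml_tensor_qtype

# The new name resolves to the integer code 34 registered by this commit.
woq_int4_code = ggml_tensor_qtype["woq_int4"]
assert woq_int4_code == 34
```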
5 changes: 3 additions & 2 deletions python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -84,6 +84,7 @@
SYM_INT4_RTN = ggml_tensor_qtype["sym_int4_rtn"]
SYM_INT8_RTN = ggml_tensor_qtype["sym_int8_rtn"]
ASYM_INT4_RTN = ggml_tensor_qtype["asym_int4_rtn"]
+ WOQ_INT4 = ggml_tensor_qtype["woq_int4"]
RTN_DTYPE = {
SYM_INT4_RTN: torch.uint8,
ASYM_INT4_RTN: torch.uint8,
@@ -187,7 +188,7 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int
src = ctypes.c_void_p(tensor.data.data_ptr())

if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5,
-                 Q4_K, Q6_K, FP6_K]:
+                 Q4_K, Q6_K, FP6_K, WOQ_INT4]:
dst_tensor = torch.empty_like(tensor)
elif qtype == ggml_tensor_qtype["sym_int5"]:
QK = ggml.ggml_qk_size(qtype)
@@ -213,7 +214,7 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int
src = ctypes.c_void_p(tensor.data.data_ptr())

if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5,
-                 Q4_K, Q6_K, FP6_K]:
+                 Q4_K, Q6_K, FP6_K, WOQ_INT4]:
dst_tensor = torch.empty_like(tensor)
elif qtype == ggml_tensor_qtype["sym_int5"]:
QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
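
The two hunks above route woq_int4 tensors through the same CPU/XPU format-conversion branch as the other block formats, where the destination buffer simply mirrors the source tensor. A simplified sketch of that gating (not the full function; only sym_int4 and the new qtype are listed, and the remaining branches are collapsed):

```python
import torch
from ipex_llm.ggml.quantize import ggml_tensor_qtype

SYM_INT4 = ggml_tensor_qtype["sym_int4"]
WOQ_INT4 = ggml_tensor_qtype["woq_int4"]

def allocate_dst(tensor: torch.Tensor, qtype: int) -> torch.Tensor:
    # Simplified from ggml_q_format_convet_cpu2xpu/xpu2cpu: for these qtypes
    # the destination tensor has the same shape and dtype as the source.
    if qtype in [SYM_INT4, WOQ_INT4]:
        return torch.empty_like(tensor)
    raise NotImplementedError("other qtypes use format-specific sizing")
```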
8 changes: 4 additions & 4 deletions python/llm/src/ipex_llm/transformers/models/utils.py
@@ -21,7 +21,7 @@
from ipex_llm.ggml.quantize import ggml_tensor_qtype
from ipex_llm.transformers.utils import get_xpu_device_name
from ipex_llm.transformers.low_bit_linear import SYM_INT4, SYM_INT8, FP8E5, IQ2_XXS, FP4, FP8E4,\
-     FP6, ASYM_INT4
+     FP6, ASYM_INT4, WOQ_INT4

FP8_KV_ALLOC_LENGTH = 512
KV_CACHE_ALLOC_BLOCK_LENGTH = int(os.environ.get("KV_CACHE_ALLOC_BLOCK_LENGTH", 256))
@@ -33,7 +33,7 @@

def decoding_fast_path_qtype_check(proj):
qtype = getattr(proj, "qtype", None)
-     return qtype in [SYM_INT4, FP8E5, FP4]
+     return qtype in [SYM_INT4, FP8E5, FP4, WOQ_INT4]


def init_kv_cache(batch_size, num_heads, head_dim, current_length, max_length, dtype, device):
@@ -248,7 +248,7 @@ def mlp_fusion_check(x, qtype, training):
return False
if x.device.type != 'xpu':
return False
-     if qtype not in [SYM_INT4, FP8E5, FP4, IQ2_XXS, FP6]:
+     if qtype not in [SYM_INT4, FP8E5, FP4, IQ2_XXS, FP6, WOQ_INT4]:
return False
if training or x.requires_grad:
return False
@@ -263,7 +263,7 @@ def use_xmx(x: torch.Tensor, qtype: int):
device = get_xpu_device_name(x.device)
return (
device in ["arc", "pvc"]
-         and qtype in [SYM_INT4, SYM_INT8, FP8E4, FP8E5]
+         and qtype in [SYM_INT4, SYM_INT8, FP8E4, FP8E5, WOQ_INT4]
and (
(device == "pvc" and 1 < x.size(0) <= 16)
or
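
With the updated checks in this file, a projection quantized to the new qtype qualifies for the same decoding fast path, MLP fusion, and XMX conditions as sym_int4. A small illustrative check of the first of those, using a hypothetical stand-in object that only carries the `qtype` attribute the check reads:

```python
from types import SimpleNamespace

from ipex_llm.ggml.quantize import ggml_tensor_qtype
from ipex_llm.transformers.models.utils import decoding_fast_path_qtype_check

# Hypothetical stand-in for a low-bit linear projection; only the qtype
# attribute read by the check is populated.
fake_proj = SimpleNamespace(qtype=ggml_tensor_qtype["woq_int4"])
assert decoding_fast_path_qtype_check(fake_proj)  # woq_int4 now qualifies
```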
