diff --git a/vllm/model_executor/layers/quantization/gguf.py b/vllm/model_executor/layers/quantization/gguf.py
index 5d4c1c6ec893..c92bcbea540a 100644
--- a/vllm/model_executor/layers/quantization/gguf.py
+++ b/vllm/model_executor/layers/quantization/gguf.py
@@ -98,6 +98,13 @@ def get_quant_method(self, layer: torch.nn.Module,
 
 def _fuse_mul_mat(x: torch.Tensor, qweight: torch.Tensor,
                   qweight_type: int) -> torch.Tensor:
+    # HACK: during chunked prefill no output tokens are generated, so the
+    # input to the logits matmul is empty, an invalid parameter for the kernel
+    if x.shape[0] == 0:
+        return torch.empty(x.shape[0],
+                           qweight.shape[0],
+                           dtype=x.dtype,
+                           device=x.device)
     # there is no need to call any kernel for fp16/bf16
     if qweight_type in UNQUANTIZED_TYPES:
         return x @ qweight.T
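
For context, the guard short-circuits before any quantized kernel is invoked and returns the same `(0, out_features)` shape that the unquantized fallback `x @ qweight.T` would produce. Below is a minimal, self-contained sketch of the same guard in plain PyTorch; `fuse_mul_mat_guarded` and the dense stand-in weight are illustrative only, not vLLM's API, and the real `_fuse_mul_mat` dispatches to GGUF dequant/matmul kernels for quantized weight types:

```python
import torch

def fuse_mul_mat_guarded(x: torch.Tensor, qweight: torch.Tensor) -> torch.Tensor:
    # Short-circuit for empty input: during chunked prefill no output tokens
    # are produced, so the logits matmul sees x.shape[0] == 0 and a quantized
    # kernel would receive an invalid (zero-sized) launch parameter.
    if x.shape[0] == 0:
        # qweight rows are output features, matching the shape of x @ qweight.T
        return torch.empty(x.shape[0], qweight.shape[0],
                           dtype=x.dtype, device=x.device)
    # Unquantized path; the real code picks a GGUF kernel based on qweight_type.
    return x @ qweight.T

x = torch.empty(0, 4096, dtype=torch.float16)      # empty prefill chunk
w = torch.randn(32000, 4096, dtype=torch.float16)  # vocab-sized stand-in weight
out = fuse_mul_mat_guarded(x, w)
assert out.shape == (0, 32000)
```

Returning an empty tensor with the correct trailing dimension keeps downstream logits processing shape-consistent without ever touching the quantized kernels.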