forked from vllm-project/vllm
Commit
…ect#6524) Co-authored-by: mgoin <michael@neuralmagic.com>
1 parent c796fbe · commit 84fb4fb
Showing 13 changed files with 219 additions and 49 deletions.
.buildkite/lm-eval-harness/configs/Qwen2-1.5B-Instruct-FP8W8.yaml (11 additions, 0 deletions)

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Qwen2-1.5B-Instruct-FP8W8 -b auto -l 1000 -f 5 -t 1
model_name: "nm-testing/Qwen2-1.5B-Instruct-FP8W8"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.578
  - name: "exact_match,flexible-extract"
    value: 0.585
limit: 1000
num_fewshot: 5
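For illustration, here is a minimal sketch of how a baseline config like this could be checked against measured lm-eval numbers. The checker below is hypothetical, not the harness's own comparison logic (that runs via run-lm-eval-gsm-vllm-baseline.sh as noted in the comment above), and the shape of the `measured` dict is an assumption:

# Hypothetical checker, not the lm-eval harness's API: loads the YAML
# baseline above and asserts each measured metric falls within a relative
# tolerance of the recorded value.
import yaml

def check_baseline(config_path: str, measured: dict, rtol: float = 0.05):
    with open(config_path) as f:
        cfg = yaml.safe_load(f)
    for task in cfg["tasks"]:
        for metric in task["metrics"]:
            expected = metric["value"]
            got = measured[task["name"]][metric["name"]]
            assert abs(got - expected) <= rtol * expected, (
                f"{task['name']}/{metric['name']}: got {got}, "
                f"baseline {expected}")

For example, check_baseline(path, {"gsm8k": {"exact_match,strict-match": 0.58, "exact_match,flexible-extract": 0.59}}) would pass against the values recorded above.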
...l_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py (105 additions, 0 deletions)

@@ -0,0 +1,105 @@
from typing import Callable, List, Optional

import torch

from vllm.model_executor.layers.quantization.compressed_tensors.schemes import (
    CompressedTensorsScheme)
from vllm.model_executor.layers.quantization.compressed_tensors.utils import (
    QuantizationStrategy)
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp8 import (
    apply_fp8_marlin_linear, prepare_fp8_layer_for_marlin)
from vllm.model_executor.layers.quantization.utils.w8a8_utils import (
    convert_to_channelwise, create_per_channel_scale_param,
    create_per_tensor_scale_param)
from vllm.model_executor.utils import set_weight_attrs

__all__ = ["CompressedTensorsW8A16Fp8"]

SUPPORTED_STRATEGIES = [
    QuantizationStrategy.CHANNEL, QuantizationStrategy.TENSOR
]


class CompressedTensorsW8A16Fp8(CompressedTensorsScheme):

    def __init__(self, strategy: str, is_static_input_scheme: bool):
        self.strategy = strategy
        self.is_static_input_scheme = is_static_input_scheme

    @classmethod
    def get_min_capability(cls) -> int:
        # ampere and up
        return 80

    # W8A8-Fp8 kernels support only per-tensor and per-channel cases.
    # So if we have a fused module (QKV, MLP) with per tensor scales,
    # we expand each scale to its shard's channels.
    def process_weights_after_loading(self, layer) -> None:
        if self.strategy == QuantizationStrategy.TENSOR:
            ws_channelwise = convert_to_channelwise(layer.weight_scale,
                                                    layer.logical_widths)
            layer.weight_scale = torch.nn.Parameter(ws_channelwise,
                                                    requires_grad=False)

        # Weights must be transposed for marlin
        layer.weight = torch.nn.Parameter(layer.weight.t(),
                                          requires_grad=False)

        prepare_fp8_layer_for_marlin(layer, strategy="channel")

    def create_weights(self, layer: torch.nn.Module, input_size: int,
                       output_partition_sizes: List[int],
                       input_size_per_partition: int,
                       params_dtype: torch.dtype, weight_loader: Callable,
                       **kwargs):

        output_size_per_partition = sum(output_partition_sizes)
        layer.logical_widths = output_partition_sizes
        layer.input_size_per_partition = input_size_per_partition
        layer.output_size_per_partition = output_size_per_partition
        layer.orig_dtype = params_dtype

        # WEIGHT
        weight = torch.nn.Parameter(torch.empty(output_size_per_partition,
                                                input_size_per_partition,
                                                dtype=torch.float8_e4m3fn),
                                    requires_grad=False)
        layer.register_parameter("weight", weight)
        set_weight_attrs(weight, {
            "input_dim": 1,
            "output_dim": 0,
            "weight_loader": weight_loader,
        })

        # WEIGHT SCALE
        layer_kwargs = {"weight_loader": weight_loader}
        if self.strategy == QuantizationStrategy.CHANNEL:
            weight_scale = create_per_channel_scale_param(
                output_partition_sizes, **layer_kwargs)
        elif self.strategy == QuantizationStrategy.TENSOR:
            weight_scale = create_per_tensor_scale_param(
                output_partition_sizes, **layer_kwargs)
        else:
            raise ValueError(
                f"Unsupported weight strategy={self.strategy}, "
                f"supported strategies are {SUPPORTED_STRATEGIES}")
        layer.register_parameter("weight_scale", weight_scale)

        # INPUT SCALE (to deal with converted checkpoints)
        if self.is_static_input_scheme:
            input_scale = create_per_tensor_scale_param(
                output_partition_sizes, **layer_kwargs)
            layer.register_parameter("input_scale", input_scale)

    def apply_weights(self,
                      layer: torch.nn.Module,
                      x: torch.Tensor,
                      bias: Optional[torch.Tensor] = None) -> torch.Tensor:

        return apply_fp8_marlin_linear(input=x,
                                       weight=layer.weight,
                                       weight_scale=layer.weight_scale,
                                       workspace=layer.workspace,
                                       size_n=layer.output_size_per_partition,
                                       size_k=layer.input_size_per_partition,
                                       bias=bias)
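For intuition, here is a hedged sketch of the per-tensor-to-per-channel expansion that process_weights_after_loading relies on. It illustrates the behavior described in the comment on that method, not vLLM's actual convert_to_channelwise; the function name and return shape are illustrative:

import torch

def expand_scales_to_channels(weight_scale, logical_widths):
    # A fused layer (e.g. QKV) stores one scalar scale per shard; the Marlin
    # channelwise path wants one scale per output channel, so each shard's
    # scalar is repeated across that shard's rows.
    return torch.cat([
        weight_scale[i].repeat(width)
        for i, width in enumerate(logical_widths)
    ])

# Example: Q/K/V scales [0.1, 0.2, 0.3] with shard widths [4, 2, 2]
# -> [0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.3, 0.3]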
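And a hedged end-to-end sketch of how the scheme's entry points fit together, using only the signatures visible in the diff above. The dummy module and weight_loader are illustrative, and the commented-out steps additionally require a CUDA device plus the Marlin workspace that prepare_fp8_layer_for_marlin attaches:

import torch

scheme = CompressedTensorsW8A16Fp8(strategy="tensor",
                                   is_static_input_scheme=False)
layer = torch.nn.Module()  # stand-in for a vLLM linear layer
scheme.create_weights(layer,
                      input_size=4096,
                      output_partition_sizes=[4096, 1024, 1024],  # fused QKV
                      input_size_per_partition=4096,
                      params_dtype=torch.float16,
                      weight_loader=lambda param, w: param.data.copy_(w))
# After the checkpoint's fp8 weights and scales are loaded into `layer`:
#   scheme.process_weights_after_loading(layer)  # repack for Marlin (GPU)
#   y = scheme.apply_weights(layer, x)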