[ Misc ] Support Fp8 via llm-compressor (vllm-project#6110)
Co-authored-by: Robert Shaw <rshaw@neuralmagic>
Signed-off-by: Alvant <alvasian@yandex.ru>
Showing 17 changed files with 603 additions and 372 deletions.
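For orientation, this is roughly how an FP8 checkpoint produced via llm-compressor is loaded once this support lands: a minimal sketch using vLLM's offline LLM API, assuming the compressed-tensors quantization is picked up automatically from the checkpoint config (the model name is taken from the new eval config below).

```python
# Minimal sketch (assumption: vLLM detects the compressed-tensors FP8
# quantization from the checkpoint's config, so no extra flags are needed).
from vllm import LLM, SamplingParams

llm = LLM(model="nm-testing/Meta-Llama-3-8B-FP8-compressed-tensors-test")
params = SamplingParams(temperature=0.0, max_tokens=64)

outputs = llm.generate(["What is 7 * 8?"], params)
print(outputs[0].outputs[0].text)
```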
11 changes: 11 additions & 0 deletions
.buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct-FP8-compressed-tensors.yaml
@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Meta-Llama-3-8B-FP8-compressed-tensors-test -b 32 -l 250 -f 5 -t 1
model_name: "nm-testing/Meta-Llama-3-8B-FP8-compressed-tensors-test"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.752
  - name: "exact_match,flexible-extract"
    value: 0.752
limit: 250
num_fewshot: 5
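The comment at the top of this config records the bash baseline command. A rough Python equivalent is sketched below, assuming lm-eval-harness's `simple_evaluate` entry point with its vLLM backend; the exact `model_args` string is an assumption. The same pattern applies to the INT8 config further down.

```python
# Sketch only: reproduces the baseline run recorded in the config's comment.
# Assumes lm-eval-harness's vLLM backend accepts `pretrained` and
# `tensor_parallel_size` in model_args.
import lm_eval

results = lm_eval.simple_evaluate(
    model="vllm",
    model_args=("pretrained=nm-testing/Meta-Llama-3-8B-FP8-compressed-tensors-test,"
                "tensor_parallel_size=1"),
    tasks=["gsm8k"],
    num_fewshot=5,
    limit=250,
    batch_size=32,
)
print(results["results"]["gsm8k"])
```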
2 changes: 1 addition & 1 deletion
.buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct-FP8.yaml
11 changes: 11 additions & 0 deletions
.buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct-INT8-compressed-tensors.yaml
@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Meta-Llama-3-8B-Instruct-W8-Channel-A8-Dynamic-Per-Token-Test -b "auto" -l 250 -f 5 -t 1
model_name: "nm-testing/Meta-Llama-3-8B-Instruct-W8-Channel-A8-Dynamic-Per-Token-Test"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.728
  - name: "exact_match,flexible-extract"
    value: 0.728
limit: 250
num_fewshot: 5
@@ -1,2 +1,4 @@
Meta-Llama-3-8B-Instruct.yaml
Meta-Llama-3-8B-Instruct-FP8.yaml
Meta-Llama-3-8B-Instruct-FP8-compressed-tensors.yaml
Meta-Llama-3-8B-Instruct-INT8-compressed-tensors.yaml
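This hunk registers the two new configs with the eval suite's small-models list. A sketch of the kind of check such a config drives is below; the tolerance value and helper function are illustrative assumptions, not the repository's actual test harness.

```python
# Illustrative sketch: compare measured lm-eval scores against the expected
# values recorded in one of the YAML configs above. RTOL and check_config are
# assumptions, not code from this commit.
import yaml

RTOL = 0.05  # assumed relative tolerance

def check_config(config_path: str, measured: dict) -> None:
    with open(config_path) as f:
        config = yaml.safe_load(f)
    for task in config["tasks"]:
        for metric in task["metrics"]:
            expected = metric["value"]
            got = measured[task["name"]][metric["name"]]
            assert abs(got - expected) <= RTOL * expected, (
                f"{task['name']}/{metric['name']}: expected {expected}, got {got}")

# Example call, with measured values copied from the FP8 config for illustration.
check_config(
    ".buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct-FP8-compressed-tensors.yaml",
    {"gsm8k": {"exact_match,strict-match": 0.752,
               "exact_match,flexible-extract": 0.752}},
)
```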
27 changes: 19 additions & 8 deletions
vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py
@@ -1,8 +1,19 @@
-from .compressed_tensors_scheme import CompressedTensorsScheme  # noqa: F401
-from .compressed_tensors_unquantized import (  # noqa: F401
-    CompressedTensorsUnquantized)
-from .compressed_tensors_w4a16_24 import (  # noqa: F401
-    W4A16SPARSE24_SUPPORTED_BITS, CompressedTensorsW4A16Sparse24)
-from .compressed_tensors_w8a8 import CompressedTensorsW8A8  # noqa: F401
-from .compressed_tensors_wNa16 import WNA16_SUPPORTED_BITS  # noqa: F401
-from .compressed_tensors_wNa16 import CompressedTensorsWNA16  # noqa: F401
+from .compressed_tensors_scheme import CompressedTensorsScheme
+from .compressed_tensors_unquantized import CompressedTensorsUnquantized
+from .compressed_tensors_w4a16_24 import (W4A16SPARSE24_SUPPORTED_BITS,
+                                          CompressedTensorsW4A16Sparse24)
+from .compressed_tensors_w8a8_fp8 import CompressedTensorsW8A8Fp8
+from .compressed_tensors_w8a8_int8 import CompressedTensorsW8A8Int8
+from .compressed_tensors_wNa16 import (WNA16_SUPPORTED_BITS,
+                                       CompressedTensorsWNA16)
+
+__all__ = [
+    "CompressedTensorsScheme",
+    "CompressedTensorsUnquantized",
+    "CompressedTensorsWNA16",
+    "CompressedTensorsW4A16Sparse24",
+    "CompressedTensorsW8A8Int8",
+    "CompressedTensorsW8A8Fp8",
+    "WNA16_SUPPORTED_BITS",
+    "W4A16SPARSE24_SUPPORTED_BITS",
+]
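With this change, the FP8 and INT8 W8A8 schemes are exported as separate classes from the schemes package. A small sketch of the resulting import surface; the isinstance dispatch is illustrative only, not code from this commit.

```python
# Sketch of the import surface added by this commit; describe() is
# illustrative only.
from vllm.model_executor.layers.quantization.compressed_tensors.schemes import (
    CompressedTensorsW8A8Fp8, CompressedTensorsW8A8Int8)

def describe(scheme) -> str:
    if isinstance(scheme, CompressedTensorsW8A8Fp8):
        return "w8a8 fp8 scheme"
    if isinstance(scheme, CompressedTensorsW8A8Int8):
        return "w8a8 int8 scheme"
    return "other compressed-tensors scheme"
```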
109 changes: 0 additions & 109 deletions
.../model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8.py
This file was deleted.