Skip to content

Commit

Permalink
add configs
Browse files Browse the repository at this point in the history
  • Loading branch information
horheynm committed Dec 20, 2024
1 parent e38c749 commit f7f8459
Show file tree
Hide file tree
Showing 5 changed files with 44 additions and 0 deletions.
7 changes: 7 additions & 0 deletions tests/e2e/vLLM/configs/kv_cache_phi3.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# E2E vLLM test config: Phi-3-mini with the default FP8 KV-cache recipe.
cadence: "nightly"           # how often the test runs in CI
test_type: "regression"      # regression-style check (compare against known-good behavior)
model: microsoft/Phi-3-mini-4k-instruct
# Recipe applying the default KV-cache quantization scheme (see recipes/kv_cache/default.yaml).
recipe: tests/e2e/vLLM/recipes/kv_cache/default.yaml
dataset_id: HuggingFaceH4/ultrachat_200k   # calibration dataset
dataset_split: train_sft
# Label identifying this scheme/model combination — presumably used to key
# test artifacts/output; verify against the test harness.
scheme: kv_cache_default_phi3
7 changes: 7 additions & 0 deletions tests/e2e/vLLM/configs/kv_cache_tinyllama.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# E2E vLLM test config: TinyLlama with the default FP8 KV-cache recipe.
cadence: "nightly"           # how often the test runs in CI
test_type: "regression"      # regression-style check (compare against known-good behavior)
model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
# Recipe applying the default KV-cache quantization scheme (see recipes/kv_cache/default.yaml).
recipe: tests/e2e/vLLM/recipes/kv_cache/default.yaml
dataset_id: HuggingFaceH4/ultrachat_200k   # calibration dataset
dataset_split: train_sft
# Label identifying this scheme/model combination — presumably used to key
# test artifacts/output; verify against the test harness.
scheme: kv_cache_default_tinyllama
7 changes: 7 additions & 0 deletions tests/e2e/vLLM/configs/kv_cache_tinyllama_gptq.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# E2E vLLM test config: TinyLlama with FP8 KV-cache + GPTQ weight quantization.
cadence: "nightly"           # how often the test runs in CI
test_type: "regression"      # regression-style check (compare against known-good behavior)
model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
# Recipe combining KV-cache quantization with a GPTQ W4 config (see recipes/kv_cache/gptq.yaml).
recipe: tests/e2e/vLLM/recipes/kv_cache/gptq.yaml
dataset_id: HuggingFaceH4/ultrachat_200k   # calibration dataset
dataset_split: train_sft
# Fixed: was kv_cache_default_tinyllama, a copy-paste from the non-GPTQ config,
# which made two distinct tests share one scheme label. Renamed to follow the
# kv_cache_<recipe>_<model> pattern used by the sibling configs.
scheme: kv_cache_gptq_tinyllama
6 changes: 6 additions & 0 deletions tests/e2e/vLLM/recipes/kv_cache/default.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Default KV-cache quantization recipe: quantize the KV cache to FP8
# (8-bit float, symmetric, one scale per tensor). No weight quantization.
quant_stage:
  quant_modifiers:
    QuantizationModifier:
      kv_cache_scheme:
        num_bits: 8
        type: float
        symmetric: true
        strategy: tensor
17 changes: 17 additions & 0 deletions tests/e2e/vLLM/recipes/kv_cache/gptq.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# KV-cache + GPTQ recipe: FP8 KV cache (as in default.yaml) combined with
# GPTQ 4-bit symmetric per-channel weight quantization of Linear layers.
quant_stage:
  quant_modifiers:
    QuantizationModifier:
      kv_cache_scheme:
        num_bits: 8
        type: float
        symmetric: true
        strategy: tensor
    GPTQModifier:
      sequential_update: false
      ignore: ["lm_head"]   # leave the output head unquantized
      config_groups:
        group_0:
          weights:
            num_bits: 4
            type: "int"
            symmetric: true
            strategy: "channel"   # one scale per output channel
            actorder: false
          targets: ["Linear"]
0 comments on commit f7f8459

Please sign in to comment.