diff --git a/tests/e2e/vLLM/lm_eval_configs/fp8_dynamic_per_token.yaml b/tests/e2e/vLLM/lm_eval_configs/fp8_dynamic_per_token.yaml
index 461353770..fc610bae9 100644
--- a/tests/e2e/vLLM/lm_eval_configs/fp8_dynamic_per_token.yaml
+++ b/tests/e2e/vLLM/lm_eval_configs/fp8_dynamic_per_token.yaml
@@ -4,5 +4,5 @@ scheme: FP8_DYNAMIC
 num_fewshot: 5
 limit: 1000
 task: "gsm8k"
-exact_match,flexible-extract: 0.753
-exact_match,strict-match: 0.753
+exact_match,flexible-extract: 0.75
+exact_match,strict-match: 0.75
diff --git a/tests/e2e/vLLM/lm_eval_configs/fp8_static_per_tensor.yaml b/tests/e2e/vLLM/lm_eval_configs/fp8_static_per_tensor.yaml
new file mode 100644
index 000000000..0b6d42a46
--- /dev/null
+++ b/tests/e2e/vLLM/lm_eval_configs/fp8_static_per_tensor.yaml
@@ -0,0 +1,10 @@
+cadence: "weekly"
+model: meta-llama/Meta-Llama-3-8B-Instruct
+scheme: FP8
+num_fewshot: 5
+limit: 1000
+task: "gsm8k"
+dataset_id: HuggingFaceH4/ultrachat_200k
+dataset_split: train_sft
+exact_match,flexible-extract: 0.75
+exact_match,strict-match: 0.75
diff --git a/tests/e2e/vLLM/lm_eval_configs/int8_w8a8_dynamic_per_token.yaml b/tests/e2e/vLLM/lm_eval_configs/int8_w8a8_dynamic_per_token.yaml
index b16f5575a..446ca1e7f 100644
--- a/tests/e2e/vLLM/lm_eval_configs/int8_w8a8_dynamic_per_token.yaml
+++ b/tests/e2e/vLLM/lm_eval_configs/int8_w8a8_dynamic_per_token.yaml
@@ -1,8 +1,11 @@
 cadence: "weekly"
 model: meta-llama/Meta-Llama-3-8B-Instruct
-scheme: INT8
+scheme: INT8_dyn_per_token
+recipe: tests/e2e/vLLM/recipes/INT8/recipe_int8_channel_weight_dynamic_per_token.yaml
 num_fewshot: 5
-limit: 250
+limit: 1000
 task: "gsm8k"
-exact_match,flexible-extract: 0.728
-exact_match,strict-match: 0.728
+dataset_id: HuggingFaceH4/ultrachat_200k
+dataset_split: train_sft
+exact_match,flexible-extract: 0.77
+exact_match,strict-match: 0.76
diff --git a/tests/e2e/vLLM/lm_eval_configs/w4a16_actorder_weight.yaml b/tests/e2e/vLLM/lm_eval_configs/w4a16_actorder_weight.yaml
new file mode 100644
index 000000000..ca82bb44f
--- /dev/null
+++ b/tests/e2e/vLLM/lm_eval_configs/w4a16_actorder_weight.yaml
@@ -0,0 +1,11 @@
+cadence: "weekly"
+model: meta-llama/Meta-Llama-3-8B-Instruct
+recipe: tests/e2e/vLLM/recipes/actorder/recipe_w4a16_actorder_weight.yaml
+num_fewshot: 5
+limit: 1000
+task: "gsm8k"
+dataset_id: HuggingFaceH4/ultrachat_200k
+dataset_split: train_sft
+exact_match,flexible-extract: 0.72
+exact_match,strict-match: 0.72
+scheme: W4A16_actorder_group
\ No newline at end of file
diff --git a/tests/e2e/vLLM/lm_eval_configs/w4a16_grouped_quant.yaml b/tests/e2e/vLLM/lm_eval_configs/w4a16_grouped_quant.yaml
new file mode 100644
index 000000000..a4c7b6244
--- /dev/null
+++ b/tests/e2e/vLLM/lm_eval_configs/w4a16_grouped_quant.yaml
@@ -0,0 +1,11 @@
+cadence: "weekly"
+model: meta-llama/Meta-Llama-3-8B-Instruct
+num_fewshot: 5
+limit: 1000
+task: "gsm8k"
+exact_match,flexible-extract: 0.72
+exact_match,strict-match: 0.72
+scheme: W4A16
+dataset_id: HuggingFaceH4/ultrachat_200k
+dataset_split: train_sft
+quant_type: "GPTQ"
\ No newline at end of file
diff --git a/tests/e2e/vLLM/recipes/INT8/recipe_int8_channel_weight_dynamic_per_token.yaml b/tests/e2e/vLLM/recipes/INT8/recipe_int8_channel_weight_dynamic_per_token.yaml
new file mode 100644
index 000000000..367437e5a
--- /dev/null
+++ b/tests/e2e/vLLM/recipes/INT8/recipe_int8_channel_weight_dynamic_per_token.yaml
@@ -0,0 +1,11 @@
+quant_stage:
+  quant_modifiers:
+    SmoothQuantModifier:
+      smoothing_strength: 0.8
+    GPTQModifier:
+      ignore: [lm_head]
+      config_groups:
+        group_0:
+          weights: {num_bits: 8, type: int, symmetric: true, strategy: channel}
+          input_activations: {num_bits: 8, type: int, symmetric: true, strategy: token, dynamic: true}
+          targets: [Linear]
diff --git a/tests/e2e/vLLM/recipes/INT8/recipe_int8_channel_weight_static_per_tensor_act.yaml b/tests/e2e/vLLM/recipes/INT8/recipe_int8_channel_weight_static_per_tensor_act.yaml
index 2c0094f88..9703872bc 100644
--- a/tests/e2e/vLLM/recipes/INT8/recipe_int8_channel_weight_static_per_tensor_act.yaml
+++ b/tests/e2e/vLLM/recipes/INT8/recipe_int8_channel_weight_static_per_tensor_act.yaml
@@ -2,7 +2,7 @@ quant_stage:
   quant_modifiers:
     SmoothQuantModifier:
       smoothing_strength: 0.8
-    QuantizationModifier:
+    GPTQModifier:
       ignore: [lm_head]
       config_groups:
         group_0:
diff --git a/tests/e2e/vLLM/test_lmeval.py b/tests/e2e/vLLM/test_lmeval.py
index f77bda983..4e11123a5 100644
--- a/tests/e2e/vLLM/test_lmeval.py
+++ b/tests/e2e/vLLM/test_lmeval.py
@@ -68,7 +68,7 @@ def set_up(self):
         logger.info(self.scheme)
 
         self.device = "cuda:0"
-        self.num_calibration_samples = 256
+        self.num_calibration_samples = 512
         self.max_seq_length = 2048
 
     def test_lm_eval(self):
@@ -104,7 +104,7 @@ def test_lm_eval(self):
 
         logger.info("================= Running LM Eval ======================")
 
-        model_args = f"pretrained={self.save_dir}"
+        model_args = f"pretrained={self.save_dir},add_bos_token=True"
         results = lm_eval.simple_evaluate(
             model="hf",
             model_args=model_args,
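For context, a minimal sketch of how a config like fp8_static_per_tensor.yaml maps onto the lm-eval call that test_lmeval.py performs. lm_eval.simple_evaluate and the "hf" model loader are the real lm-evaluation-harness API; the standalone comparison loop and the 0.05 tolerance are illustrative assumptions (the actual logic lives in test_lmeval.py), and in the real test "pretrained" points at the quantized save_dir rather than the base model.

# Minimal sketch, assuming the config path and key names above;
# the tolerance and this standalone loop are illustrative, not the
# actual implementation in tests/e2e/vLLM/test_lmeval.py.
import yaml
import lm_eval

with open("tests/e2e/vLLM/lm_eval_configs/fp8_static_per_tensor.yaml") as f:
    cfg = yaml.safe_load(f)

# add_bos_token=True mirrors the model_args change in test_lmeval.py;
# the real test passes the quantized save_dir as "pretrained".
model_args = f"pretrained={cfg['model']},add_bos_token=True"

results = lm_eval.simple_evaluate(
    model="hf",
    model_args=model_args,
    tasks=[cfg["task"]],
    num_fewshot=cfg["num_fewshot"],
    limit=cfg["limit"],
    device="cuda:0",
)

# The metric keys in the YAML ("exact_match,flexible-extract", ...) match
# the keys lm-eval reports for gsm8k, so they compare directly.
metrics = results["results"][cfg["task"]]
for key in ("exact_match,flexible-extract", "exact_match,strict-match"):
    assert abs(metrics[key] - cfg[key]) < 0.05, (
        f"{key}: measured {metrics[key]}, expected {cfg[key]}"
    )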