forked from vllm-project/vllm
[Model] Adding support for MSFT Phi-3.5-MoE (vllm-project#7729)
Co-authored-by: Your Name <you@example.com>
Co-authored-by: Zeqi Lin <zelin@microsoft.com>
Co-authored-by: Zeqi Lin <Zeqi.Lin@microsoft.com>
1 parent 2684efc · commit 1248e85
Showing 13 changed files with 1,255 additions and 82 deletions.
111 changes: 111 additions & 0 deletions
tests/models/test_phimoe.py
@@ -0,0 +1,111 @@
"""Compare the outputs of HF and vLLM for moe models using greedy sampling.
Run `pytest tests/models/test_phimoe.py`.
"""
import pytest
import torch

from vllm.utils import is_cpu

from .utils import check_logprobs_close

MODELS = [
    "microsoft/Phi-3.5-MoE-instruct",
]


def test_phimoe_routing_function():
    from vllm.model_executor.models.phimoe import phimoe_routing_function
    test_case = {
        0: {
            "hidden_states":
            torch.tensor([1, 2, 3, 4, 5, 6, 7, 8],
                         dtype=torch.float32,
                         requires_grad=False).view(4, 2),
            "gating_output":
            torch.tensor([0.1, 0.2, 0.3, 0.4],
                         dtype=torch.float32,
                         requires_grad=False),
            "topk":
            2,
            "renormalize":
            False,
        },
        1: {
            "hidden_states":
            torch.tensor([1, 2, 3, 4, 5, 6, 7, 8],
                         dtype=torch.float32,
                         requires_grad=False).view(4, 2),
            "gating_output":
            torch.tensor([0.4, 0.2, 0.3, 0.4],
                         dtype=torch.float32,
                         requires_grad=False),
            "topk":
            2,
            "renormalize":
            False,
        }
    }

    ground_truth = {
        0: {
            "topk_weights":
            torch.tensor([1., 1.], dtype=torch.float32, requires_grad=False),
            "topk_ids":
            torch.tensor([3, 2], dtype=torch.long, requires_grad=False),
        },
        1: {
            "topk_weights":
            torch.tensor([0.5, 1.], dtype=torch.float32, requires_grad=False),
            "topk_ids":
            torch.tensor([0, 3], dtype=torch.long, requires_grad=False),
        }
    }

    for test_id in test_case:
        topk_weights, topk_ids = phimoe_routing_function(**test_case[test_id])
        assert torch.allclose(topk_weights,
                              ground_truth[test_id]["topk_weights"])
        assert torch.equal(topk_ids, ground_truth[test_id]["topk_ids"])


def get_gpu_memory():
    try:
        props = torch.cuda.get_device_properties(torch.cuda.current_device())
        gpu_memory = props.total_memory / (1024**3)
        return gpu_memory
    except Exception:
        return 0


@pytest.mark.skipif(condition=is_cpu(),
                    reason="This test takes a lot of time to run on CPU, "
                    "and vllm CI's disk space is not enough for this model.")
@pytest.mark.skipif(condition=get_gpu_memory() < 100,
                    reason="Skip this test if GPU memory is insufficient.")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [64])
@pytest.mark.parametrize("num_logprobs", [5])
def test_models(
    hf_runner,
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
) -> None:
    with hf_runner(model, dtype=dtype) as hf_model:
        hf_outputs = hf_model.generate_greedy_logprobs_limit(
            example_prompts, max_tokens, num_logprobs)

    with vllm_runner(model, dtype=dtype) as vllm_model:
        vllm_outputs = vllm_model.generate_greedy_logprobs(
            example_prompts, max_tokens, num_logprobs)
    check_logprobs_close(
        outputs_0_lst=hf_outputs,
        outputs_1_lst=vllm_outputs,
        name_0="hf",
        name_1="vllm",
    )
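For context, a MoE routing function takes the gating logits for a token and returns the weights and indices of the experts selected for that token. The sketch below is a plain softmax top-k router with the same call signature, shown only to illustrate the interface this test drives; it is not the sparsemixer-style routing that `phimoe_routing_function` actually implements (note the ground-truth weights above are not softmax probabilities), and the name `naive_topk_routing` is illustrative.

import torch


def naive_topk_routing(
    hidden_states: torch.Tensor,  # [num_tokens, hidden_size]; unused by this naive router
    gating_output: torch.Tensor,  # [..., num_experts] raw router logits
    topk: int,
    renormalize: bool,
):
    # Softmax over the expert dimension, then keep the top-k scores/indices.
    scores = torch.softmax(gating_output, dim=-1)
    topk_weights, topk_ids = torch.topk(scores, topk, dim=-1)
    if renormalize:
        # Rescale the kept weights so they sum to 1 per token.
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    return topk_weights, topk_ids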
130 changes: 130 additions & 0 deletions
...ayers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
@@ -0,0 +1,130 @@ | ||
{ | ||
"3328": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 16, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"1024": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 32, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"3072": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"256": { | ||
"BLOCK_SIZE_M": 32, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 128, | ||
"GROUP_SIZE_M": 8, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"768": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 8, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"1792": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 16, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"2560": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"2816": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 16, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"3584": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"1536": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 64, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"2048": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 64, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"512": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 8, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"3840": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 16, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"1280": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 64, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"2304": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"4096": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
} | ||
} |
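These JSON files hold pre-tuned Triton launch parameters for the fused-MoE kernel: the filename encodes the expert count (E=16), the N dimension of the expert GEMM (3200 here), the device, and the fp8 weight/activation dtype, while each top-level key is a benchmarked token-batch size. At run time the entry whose key is nearest to the actual batch size is used. A minimal sketch of that lookup follows; the helper name `load_moe_config` and the local file path are illustrative assumptions, not vLLM's API.

import json


def load_moe_config(path: str, num_tokens: int) -> dict:
    """Return the tuned kernel config benchmarked at the batch size
    closest to the actual number of tokens."""
    with open(path) as f:
        configs = {int(k): v for k, v in json.load(f).items()}
    nearest = min(configs, key=lambda m: abs(m - num_tokens))
    return configs[nearest]


# Example: 1000 tokens selects the "1024" entry from the file above.
cfg = load_moe_config(
    "E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json",
    num_tokens=1000)
print(cfg["BLOCK_SIZE_M"], cfg["num_warps"])  # 64 4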
130 changes: 130 additions & 0 deletions
...ayers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
@@ -0,0 +1,130 @@ | ||
{ | ||
"3840": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 8, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"1792": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 8, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"3584": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 16, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"512": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 16, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"3072": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"2048": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 16, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"2816": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 32, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 8, | ||
"num_stages": 4 | ||
}, | ||
"1280": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 64, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"768": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 1, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"4096": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 8, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"3328": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"2560": { | ||
"BLOCK_SIZE_M": 128, | ||
"BLOCK_SIZE_N": 128, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 8, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"1024": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 32, | ||
"GROUP_SIZE_M": 8, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
}, | ||
"2304": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 16, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"1536": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 32, | ||
"num_warps": 4, | ||
"num_stages": 2 | ||
}, | ||
"256": { | ||
"BLOCK_SIZE_M": 64, | ||
"BLOCK_SIZE_N": 256, | ||
"BLOCK_SIZE_K": 64, | ||
"GROUP_SIZE_M": 1, | ||
"num_warps": 4, | ||
"num_stages": 4 | ||
} | ||
} |