
Commit f101b40

Merge pull request vllm-project#9 from KuntaiDu/kuntai-disagg-refactor
Kuntai disagg refactor
2 parents caaaeb8 + eb751d6 commit f101b40

File tree

772 files changed: +72879 / -17171 lines


.buildkite/check-wheel-size.py

Lines changed: 21 additions & 14 deletions
```diff
@@ -1,36 +1,43 @@
 import os
+import sys
 import zipfile
 
-MAX_SIZE_MB = 250
+# Read the VLLM_MAX_SIZE_MB environment variable, defaulting to 250 MB
+VLLM_MAX_SIZE_MB = int(os.environ.get('VLLM_MAX_SIZE_MB', 250))
 
 
 def print_top_10_largest_files(zip_file):
+    """Print the top 10 largest files in the given zip file."""
     with zipfile.ZipFile(zip_file, 'r') as z:
         file_sizes = [(f, z.getinfo(f).file_size) for f in z.namelist()]
         file_sizes.sort(key=lambda x: x[1], reverse=True)
         for f, size in file_sizes[:10]:
-            print(f"{f}: {size/(1024*1024)} MBs uncompressed.")
+            print(f"{f}: {size / (1024 * 1024):.2f} MBs uncompressed.")
 
 
 def check_wheel_size(directory):
+    """Check the size of .whl files in the given directory."""
     for root, _, files in os.walk(directory):
-        for f in files:
-            if f.endswith(".whl"):
-                wheel_path = os.path.join(root, f)
-                wheel_size = os.path.getsize(wheel_path)
-                wheel_size_mb = wheel_size / (1024 * 1024)
-                if wheel_size_mb > MAX_SIZE_MB:
-                    print(
-                        f"Wheel {wheel_path} is too large ({wheel_size_mb} MB) "
-                        f"compare to the allowed size ({MAX_SIZE_MB} MB).")
+        for file_name in files:
+            if file_name.endswith(".whl"):
+                wheel_path = os.path.join(root, file_name)
+                wheel_size_mb = os.path.getsize(wheel_path) / (1024 * 1024)
+                if wheel_size_mb > VLLM_MAX_SIZE_MB:
+                    print(f"Not allowed: Wheel {wheel_path} is larger "
+                          f"({wheel_size_mb:.2f} MB) than the limit "
+                          f"({VLLM_MAX_SIZE_MB} MB).")
                     print_top_10_largest_files(wheel_path)
                     return 1
                 else:
                     print(f"Wheel {wheel_path} is within the allowed size "
-                          f"({wheel_size_mb} MB).")
+                          f"({wheel_size_mb:.2f} MB).")
     return 0
 
 
 if __name__ == "__main__":
-    import sys
-    sys.exit(check_wheel_size(sys.argv[1]))
+    if len(sys.argv) < 2:
+        print("Usage: python check-wheel-size.py <directory>")
+        sys.exit(1)
+
+    directory = sys.argv[1]
+    sys.exit(check_wheel_size(directory))
```
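The refactored script now reads its size limit from the environment instead of a hard-coded constant. Below is a minimal usage sketch; the `dist/` directory and the 300 MB override are illustrative assumptions, not part of this commit.

```python
# Sketch of driving the updated check-wheel-size.py.
# Assumptions: "dist/" is a hypothetical directory of built .whl files,
# and VLLM_MAX_SIZE_MB=300 is an illustrative override of the 250 MB default.
import os
import subprocess

env = dict(os.environ, VLLM_MAX_SIZE_MB="300")
result = subprocess.run(
    ["python", ".buildkite/check-wheel-size.py", "dist/"],
    env=env,
)
# Exit code 0 means every wheel is within the limit; 1 means at least one is too large.
print("exit code:", result.returncode)
```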

.buildkite/lm-eval-harness/configs/DeepSeek-V2-Lite-Chat.yaml

Lines changed: 1 addition & 0 deletions
```diff
@@ -9,3 +9,4 @@ tasks:
     value: 0.664
 limit: 1000
 num_fewshot: 5
+trust_remote_code: True
```

.buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-QQQ.yaml

Lines changed: 2 additions & 2 deletions
```diff
@@ -4,8 +4,8 @@ tasks:
 - name: "gsm8k"
   metrics:
   - name: "exact_match,strict-match"
-    value: 0.409
+    value: 0.419
  - name: "exact_match,flexible-extract"
-    value: 0.406
+    value: 0.416
 limit: 1000
 num_fewshot: 5
```
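The reference scores updated here are compared against measured CI results within the `RTOL` tolerance defined in `.buildkite/lm-eval-harness/test_lm_eval_correctness.py` (raised from 0.02 to 0.05 later in this commit). A sketch of that kind of relative-tolerance check follows; the measured score is hypothetical, and the exact comparison call used by the harness is not shown in this diff.

```python
# Sketch of a relative-tolerance check like the one applied to these reference values.
# Assumptions: 0.405 is a hypothetical measured score from a CI run.
import numpy

reference = 0.419   # updated exact_match,strict-match value from the config above
measured = 0.405    # hypothetical score produced by a CI run
print(numpy.isclose(reference, measured, rtol=0.05))  # True: within 5% relative tolerance
```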
```diff
@@ -1,11 +1,11 @@
-# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nvidia/Minitron-4B-Base -b auto -l 1000 -f 5 -t 1
-model_name: "nvidia/Minitron-4B-Base"
+# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m mgoin/Minitron-4B-Base-FP8 -b auto -l 1000 -f 5 -t 1
+model_name: "mgoin/Minitron-4B-Base-FP8"
 tasks:
 - name: "gsm8k"
   metrics:
   - name: "exact_match,strict-match"
-    value: 0.252
+    value: 0.233
  - name: "exact_match,flexible-extract"
-    value: 0.252
+    value: 0.236
 limit: 1000
 num_fewshot: 5
```
Lines changed: 1 addition & 2 deletions
```diff
@@ -1,10 +1,9 @@
 Meta-Llama-3-8B-Instruct.yaml
-Meta-Llama-3-8B-Instruct-FP8.yaml
 Meta-Llama-3-8B-Instruct-FP8-compressed-tensors.yaml
 Meta-Llama-3-8B-Instruct-INT8-compressed-tensors.yaml
 Meta-Llama-3-8B-Instruct-nonuniform-compressed-tensors.yaml
 Meta-Llama-3-8B-Instruct-Channelwise-compressed-tensors.yaml
-Minitron-4B-Base.yaml
+Minitron-4B-Base-FP8.yaml
 Qwen2-1.5B-Instruct-INT8-compressed-tensors.yaml
 Qwen2-1.5B-Instruct-FP8W8.yaml
 Meta-Llama-3-8B-QQQ.yaml
```

.buildkite/lm-eval-harness/test_lm_eval_correctness.py

Lines changed: 5 additions & 2 deletions
```diff
@@ -14,7 +14,7 @@
 import numpy
 import yaml
 
-RTOL = 0.02
+RTOL = 0.05
 TEST_DATA_FILE = os.environ.get(
     "LM_EVAL_TEST_DATA_FILE",
     ".buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct.yaml")
@@ -23,9 +23,12 @@
 
 
 def launch_lm_eval(eval_config):
+    trust_remote_code = eval_config.get('trust_remote_code', False)
+
     model_args = f"pretrained={eval_config['model_name']}," \
                  f"tensor_parallel_size={TP_SIZE}," \
-                 f"add_bos_token=true"
+                 f"add_bos_token=true," \
+                 f"trust_remote_code={trust_remote_code}"
 
     results = lm_eval.simple_evaluate(
         model="vllm",
```
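The new `trust_remote_code` key from the YAML configs (such as the `DeepSeek-V2-Lite-Chat.yaml` change above) flows straight into the `model_args` string passed to lm_eval. A small sketch of that flow; the `eval_config` dict and `TP_SIZE` value below are illustrative stand-ins for the parsed YAML config and the harness's tensor-parallel setting.

```python
# Sketch of how the trust_remote_code config key ends up in the lm_eval model_args string.
# Assumptions: eval_config and TP_SIZE are illustrative stand-ins.
eval_config = {
    "model_name": "deepseek-ai/DeepSeek-V2-Lite-Chat",
    "trust_remote_code": True,
}
TP_SIZE = 1

trust_remote_code = eval_config.get("trust_remote_code", False)
model_args = (f"pretrained={eval_config['model_name']},"
              f"tensor_parallel_size={TP_SIZE},"
              f"add_bos_token=true,"
              f"trust_remote_code={trust_remote_code}")
print(model_args)
# pretrained=deepseek-ai/DeepSeek-V2-Lite-Chat,tensor_parallel_size=1,add_bos_token=true,trust_remote_code=True
```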

.buildkite/nightly-benchmarks/README.md

Lines changed: 5 additions & 4 deletions
```diff
@@ -34,17 +34,18 @@ See [vLLM performance dashboard](https://perf.vllm.ai) for the latest performan
 
 Performance benchmark will be triggered when:
 - A PR being merged into vllm.
-- Every commit for those PRs with `perf-benchmarks` label.
+- Every commit for those PRs with `perf-benchmarks` label AND `ready` label.
 
 Nightly benchmark will be triggered when:
-- Every commit for those PRs with `nightly-benchmarks` label.
+- Every commit for those PRs with `perf-benchmarks` label and `nightly-benchmarks` label.
 
 
 
 
 ## Performance benchmark details
 
-See [descriptions.md](tests/descriptions.md) for detailed descriptions, and use `tests/latency-tests.json`, `tests/throughput-tests.json`, `tests/serving-tests.json` to configure the test cases.
+
+See [performance-benchmarks-descriptions.md](performance-benchmarks-descriptions.md) for detailed descriptions, and use `tests/latency-tests.json`, `tests/throughput-tests.json`, `tests/serving-tests.json` to configure the test cases.
 
 
 #### Latency test
@@ -68,7 +69,7 @@ Here is an example of one test inside `latency-tests.json`:
 
 In this example:
 - The `test_name` attributes is a unique identifier for the test. In `latency-tests.json`, it must start with `latency_`.
-- The `parameters` attribute control the command line arguments to be used for `benchmark_latency.py`. Note that please use underline `_` instead of the dash `-` when specifying the command line arguments, and `run-benchmarks-suite.sh` will convert the underline to dash when feeding the arguments to `benchmark_latency.py`. For example, the corresponding command line arguments for `benchmark_latency.py` will be `--model meta-llama/Meta-Llama-3-8B --tensor-parallel-size 1 --load-format dummy --num-iters-warmup 5 --num-iters 15`
+- The `parameters` attribute control the command line arguments to be used for `benchmark_latency.py`. Note that please use underline `_` instead of the dash `-` when specifying the command line arguments, and `run-performance-benchmarks.sh` will convert the underline to dash when feeding the arguments to `benchmark_latency.py`. For example, the corresponding command line arguments for `benchmark_latency.py` will be `--model meta-llama/Meta-Llama-3-8B --tensor-parallel-size 1 --load-format dummy --num-iters-warmup 5 --num-iters 15`
 
 Note that the performance numbers are highly sensitive to the value of the parameters. Please make sure the parameters are set correctly.
 
```
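As a concrete illustration of the underscore-to-dash convention described in the README diff above, here is a small sketch. The JSON test entry is illustrative, not copied from `tests/latency-tests.json`; the assembled command matches the example arguments quoted in the README.

```python
# Sketch of the underscore-to-dash conversion that run-performance-benchmarks.sh applies
# before invoking benchmark_latency.py. The test entry below is illustrative.
test = {
    "test_name": "latency_llama8B_tp1",          # must start with "latency_"
    "parameters": {
        "model": "meta-llama/Meta-Llama-3-8B",
        "tensor_parallel_size": 1,
        "load_format": "dummy",
        "num_iters_warmup": 5,
        "num_iters": 15,
    },
}

args = [f"--{key.replace('_', '-')} {value}" for key, value in test["parameters"].items()]
print("python benchmark_latency.py " + " ".join(args))
# python benchmark_latency.py --model meta-llama/Meta-Llama-3-8B --tensor-parallel-size 1
#   --load-format dummy --num-iters-warmup 5 --num-iters 15
```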

.buildkite/nightly-benchmarks/benchmark-pipeline.yaml

Lines changed: 1 addition & 1 deletion
```diff
@@ -21,7 +21,7 @@ steps:
       containers:
       - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
         command:
-        - bash .buildkite/nightly-benchmarks/run-benchmarks-suite.sh
+        - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
       resources:
         limits:
           nvidia.com/gpu: 8
```

.buildkite/nightly-benchmarks/tests/descriptions.md renamed to .buildkite/nightly-benchmarks/performance-benchmarks-descriptions.md

Lines changed: 7 additions & 12 deletions
```diff
@@ -1,47 +1,42 @@
 
 ## Latency tests
 
-This test suite aims to test vllm's end-to-end latency under a controlled setup.
-
 - Input length: 32 tokens.
 - Output length: 128 tokens.
 - Batch size: fixed (8).
-- Models: llama-3 8B, llama-3 70B, mixtral 8x7B.
+- Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
 - Evaluation metrics: end-to-end latency (mean, median, p99).
 
-### Latency benchmarking results
 
 {latency_tests_markdown_table}
 
-## Throughput tests
 
-This test suite aims to test vllm's throughput.
+## Throughput tests
 
 - Input length: randomly sample 200 prompts from ShareGPT dataset (with fixed random seed).
 - Output length: the corresponding output length of these 200 prompts.
 - Batch size: dynamically determined by vllm to achieve maximum throughput.
-- Models: llama-3 8B, llama-3 70B, mixtral 8x7B.
+- Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
 - Evaluation metrics: throughput.
 
-### Throughput benchmarking results
 
 {throughput_tests_markdown_table}
 
-## Serving tests
 
-This test suite aims to test vllm's real serving metrics.
+## Serving tests
 
 - Input length: randomly sample 200 prompts from ShareGPT dataset (with fixed random seed).
 - Output length: the corresponding output length of these 200 prompts.
 - Batch size: dynamically determined by vllm and the arrival pattern of the requests.
 - **Average QPS (query per second)**: 1, 4, 16 and inf. QPS = inf means all requests come at once. For other QPS values, the arrival time of each query is determined using a random Poisson process (with fixed random seed).
-- Models: llama-3 8B, llama-3 70B, mixtral 8x7B.
+- Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
+- We also added a speculative decoding test for llama-3 70B, under QPS 2
 - Evaluation metrics: throughput, TTFT (time to the first token, with mean, median and p99), ITL (inter-token latency, with mean, median and p99).
 
-### Serving benchmarking results
 
 {serving_tests_markdown_table}
 
+
 ## json version of the benchmarking tables
 
 This section contains the data of the markdown tables above in JSON format.
```

.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -174,8 +174,8 @@ def results_to_json(latency, throughput, serving):
     # document the result
     with open(results_folder / "benchmark_results.md", "w") as f:
 
-        results = read_markdown(
-            "../.buildkite/nightly-benchmarks/tests/descriptions.md")
+        results = read_markdown("../.buildkite/nightly-benchmarks/" +
+                                "performance-benchmarks-descriptions.md")
         results = results.format(
             latency_tests_markdown_table=latency_md_table,
             throughput_tests_markdown_table=throughput_md_table,
```
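The `{..._markdown_table}` placeholders in the renamed descriptions file are filled in by this `results.format(...)` call. A minimal sketch of that mechanism follows; the inline template and table strings are illustrative stand-ins for the real file contents, the `read_markdown` helper, and the generated tables.

```python
# Sketch of how the {..._markdown_table} placeholders in
# performance-benchmarks-descriptions.md get filled via str.format.
# Assumptions: the template and table strings below are illustrative stand-ins.
template = (
    "## Latency tests\n\n{latency_tests_markdown_table}\n\n"
    "## Throughput tests\n\n{throughput_tests_markdown_table}\n"
)
report = template.format(
    latency_tests_markdown_table="| Test name | Mean latency (ms) |\n|---|---|",
    throughput_tests_markdown_table="| Test name | Tput (req/s) |\n|---|---|",
)
print(report)
```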
