From 195f5f5c5ca8a9d0717a503fc425d402d7bef010 Mon Sep 17 00:00:00 2001
From: Woosuk Kwon
Date: Wed, 23 Apr 2025 11:12:42 -0700
Subject: [PATCH 1/2] [Minor] Use large max_num_seqs for A100/B100/B200

Signed-off-by: Woosuk Kwon
---
 vllm/engine/arg_utils.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py
index b6d0bfeac4a4..50b691a552e0 100644
--- a/vllm/engine/arg_utils.py
+++ b/vllm/engine/arg_utils.py
@@ -1630,7 +1630,10 @@ def _set_default_args_v1(self, usage_context: UsageContext) -> None:
             # This is only used to set default_max_num_batched_tokens
             device_name = "no-device"
 
-        if "h100" in device_name or "h200" in device_name:
+        large_mem_gpus = [
+            "a100", "h100", "h200", "b100", "b200", "mi300x", "mi325x"
+        ]
+        if any(gpu in device_name for gpu in large_mem_gpus):
             # For H100 and H200, we use larger default values.
             default_max_num_batched_tokens = {
                 UsageContext.LLM_CLASS: 16384,

From 95fd298c50451a8cc279bff5101b8e484cb1b4fb Mon Sep 17 00:00:00 2001
From: Woosuk Kwon
Date: Wed, 23 Apr 2025 15:31:44 -0700
Subject: [PATCH 2/2] Use memory

Signed-off-by: Woosuk Kwon
---
 vllm/engine/arg_utils.py | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py
index 119e7613dbd7..00328f56b713 100644
--- a/vllm/engine/arg_utils.py
+++ b/vllm/engine/arg_utils.py
@@ -35,7 +35,7 @@
 from vllm.test_utils import MODEL_WEIGHTS_S3_BUCKET, MODELS_ON_S3
 from vllm.transformers_utils.utils import check_gguf_file
 from vllm.usage.usage_lib import UsageContext
-from vllm.utils import FlexibleArgumentParser, is_in_ray_actor
+from vllm.utils import FlexibleArgumentParser, GiB_bytes, is_in_ray_actor
 
 # yapf: enable
 
@@ -1625,16 +1625,13 @@ def _set_default_args_v1(self, usage_context: UsageContext) -> None:
         # values for non-H100/H200 GPUs.
         try:
             from vllm.platforms import current_platform
-            device_name = current_platform.get_device_name().lower()
+            device_memory = current_platform.get_device_total_memory()
         except Exception:
             # This is only used to set default_max_num_batched_tokens
-            device_name = "no-device"
+            device_memory = 0
 
-        large_mem_gpus = [
-            "a100", "h100", "h200", "b100", "b200", "mi300x", "mi325x"
-        ]
-        if any(gpu in device_name for gpu in large_mem_gpus):
-            # For H100 and H200, we use larger default values.
+        if device_memory >= 70 * GiB_bytes:
+            # For GPUs like H100 and MI300x, use larger default values.
             default_max_num_batched_tokens = {
                 UsageContext.LLM_CLASS: 16384,
                 UsageContext.OPENAI_API_SERVER: 8192,
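
For reference, the check that the series converges on after PATCH 2/2 reduces to a single memory threshold. The sketch below is a minimal, self-contained restatement of that logic, not vLLM's actual code path: default_batched_tokens is a hypothetical helper, the local UsageContext enum stands in for vllm.usage.usage_lib.UsageContext, GiB_bytes is assumed to equal 1 << 30 as in vllm.utils, and the small-GPU fallback values are placeholders because that branch lies outside the quoted hunk.

from enum import Enum

# Assumption: matches GiB_bytes in vllm.utils.
GiB_bytes = 1 << 30


class UsageContext(str, Enum):
    # Stand-ins for the two contexts referenced in the diff.
    LLM_CLASS = "LLM_CLASS"
    OPENAI_API_SERVER = "OPENAI_API_SERVER"


def default_batched_tokens(device_memory: int,
                           usage_context: UsageContext) -> int:
    """Hypothetical helper: pick default_max_num_batched_tokens from
    the device's total memory in bytes, mirroring PATCH 2/2."""
    if device_memory >= 70 * GiB_bytes:
        # Large-memory GPUs (A100 80GB, H100, H200, B100, B200,
        # MI300x, MI325x) clear the 70 GiB threshold.
        defaults = {
            UsageContext.LLM_CLASS: 16384,
            UsageContext.OPENAI_API_SERVER: 8192,
        }
    else:
        # Placeholder values: the small-GPU branch is not shown in
        # the hunk, so these numbers are illustrative only.
        defaults = {
            UsageContext.LLM_CLASS: 8192,
            UsageContext.OPENAI_API_SERVER: 2048,
        }
    return defaults[usage_context]


# An 80 GiB device takes the large branch; a 24 GiB device does not.
assert default_batched_tokens(80 * GiB_bytes, UsageContext.LLM_CLASS) == 16384
assert default_batched_tokens(24 * GiB_bytes, UsageContext.LLM_CLASS) == 8192

Note one behavioral difference from the name-based list in PATCH 1/2: "a100" matched both A100 variants, whereas the 70 GiB threshold admits the 80 GB A100 (about 74.5 GiB) but not the 40 GB one.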