Commit 31a8a2a

[Misc] Clean up MM profiling warnings (#25222)
Signed-off-by: Roger Wang <hey@rogerw.io>
1 parent 1a0a04d commit 31a8a2a

File tree

1 file changed: +0, -29 lines

vllm/multimodal/profiling.py

Lines changed: 0 additions & 29 deletions
@@ -234,19 +234,6 @@ def get_decoder_dummy_data(
         prompt_token_ids = mm_inputs["prompt_token_ids"]
         total_len = len(prompt_token_ids)

-        # V0 does not support chunked prefill.
-        if total_len > seq_len and not envs.VLLM_USE_V1:
-            # `max_num_batched_tokens` is defined by `SchedulerConfig`
-            logger.warning_once(
-                "The sequence length used for profiling (max_num_batched_tokens / max_num_seqs = %d) "  # noqa: E501
-                "is too short to hold the multi-modal embeddings in the worst case (%d tokens in total, out of which %s are reserved for multi-modal embeddings). "  # noqa: E501
-                "This may cause certain multi-modal inputs to fail during inference, even when the input text is short. "  # noqa: E501
-                "To avoid this, you should increase `max_model_len`, reduce `max_num_seqs`, and/or reduce `mm_counts`.",  # noqa: E501
-                seq_len,
-                total_len,
-                str(self._get_mm_num_tokens(mm_inputs)),
-            )
-
         if total_len < seq_len:
             prompt_token_ids.extend([0] * (seq_len - total_len))

@@ -270,22 +257,6 @@ def _get_mm_max_tokens(
             mm_counts=mm_counts,
         )
         if max_tokens_per_item is not None:
-            if mm_counts is None:
-                total_mm_tokens = sum(max_tokens_per_item.values())
-            else:
-                total_mm_tokens = sum(max_tokens_per_item[k] * mm_counts[k]
-                                      for k in max_tokens_per_item.keys()
-                                      & mm_counts.keys())
-            if total_mm_tokens > seq_len:
-                logger.warning_once(
-                    "The sequence length (%d) is smaller than the pre-defined"
-                    " worst-case total number of multimodal tokens (%d). "
-                    "This may cause certain multi-modal inputs to fail during "
-                    "inference. To avoid this, you should increase "
-                    "`max_model_len` or reduce `mm_counts`.",
-                    seq_len,
-                    total_mm_tokens,
-                )
             return max_tokens_per_item

         mm_inputs = self._get_dummy_mm_inputs(seq_len, mm_counts)
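
For reference, the block removed from `_get_mm_max_tokens` compared `seq_len` against a worst-case total of multimodal tokens before emitting its warning. Below is a minimal standalone sketch of that computation; the function name and the example counts are illustrative only, not part of vLLM's API.

from typing import Mapping, Optional

def worst_case_mm_tokens(
    max_tokens_per_item: Mapping[str, int],
    mm_counts: Optional[Mapping[str, int]] = None,
) -> int:
    """Worst-case multimodal token total, weighted by per-modality item counts."""
    if mm_counts is None:
        # No per-modality counts given: assume one item per modality.
        return sum(max_tokens_per_item.values())
    # Only modalities present in both mappings contribute to the total.
    return sum(max_tokens_per_item[k] * mm_counts[k]
               for k in max_tokens_per_item.keys() & mm_counts.keys())

# Example: 2 images at 576 tokens each plus 1 video at 1024 tokens.
print(worst_case_mm_tokens({"image": 576, "video": 1024},
                           {"image": 2, "video": 1}))  # 2176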
