1 parent af19426 commit 9bf1aed
vllm/v1/core/sched/scheduler.py
@@ -463,10 +463,6 @@ def schedule(self) -> SchedulerOutput:
             # always padded to the maximum length. If we support other
             # encoder-decoder models, this will need to be updated if we
             # want to only allocate what is needed.
-            assert ("whisper"
-                    in self.vllm_config.model_config.model.lower()), (
-                        "Whisper is the only supported "
-                        "encoder-decoder model.")
             num_encoder_tokens =\
                 self.scheduler_config.max_num_encoder_input_tokens
         else:
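
For context, here is a minimal sketch of the behavior this branch reduces to after the change, reconstructed from the diff context above. The helper name encoder_token_budget, the is_encoder_decoder_request flag, and the zero budget on the decoder-only path are illustrative assumptions; only max_num_encoder_input_tokens comes from the diff itself.

def encoder_token_budget(scheduler_config, is_encoder_decoder_request: bool) -> int:
    """Hypothetical helper illustrating the post-change behavior: any
    encoder-decoder request gets the full padded encoder-token budget,
    not just Whisper."""
    if is_encoder_decoder_request:
        # Encoder input is always padded to the maximum length, so allocate
        # the configured cap regardless of which encoder-decoder model runs.
        return scheduler_config.max_num_encoder_input_tokens
    # Assumption: decoder-only requests need no encoder-token budget.
    return 0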