3 changes: 2 additions & 1 deletion vllm/platforms/interface.py
@@ -8,7 +8,7 @@
 import numpy as np
 import torch

-from vllm.inputs import PromptType
+from vllm.inputs import ProcessorInputs, PromptType
 from vllm.logger import init_logger

 if TYPE_CHECKING:
@@ -400,6 +400,7 @@ def validate_request(
         cls,
         prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
+        processed_inputs: ProcessorInputs,
     ) -> None:
         """Raises if this request is unsupported on this platform"""
3 changes: 2 additions & 1 deletion vllm/platforms/tpu.py
@@ -5,7 +5,7 @@
 import torch

 import vllm.envs as envs
-from vllm.inputs import PromptType
+from vllm.inputs import ProcessorInputs, PromptType
 from vllm.logger import init_logger
 from vllm.sampling_params import SamplingParams, SamplingType

@@ -150,6 +150,7 @@ def validate_request(
         cls,
         prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
+        processed_inputs: ProcessorInputs,
     ) -> None:
         """Raises if this request is unsupported on this platform"""
         if isinstance(params, SamplingParams):
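
With this change, Platform.validate_request (and the TPU override) receives the fully processed inputs in addition to the raw prompt and the sampling/pooling parameters. Below is a minimal sketch of how an out-of-tree platform might use the new signature; the MyPlatform class, the 8192-token cap, and the "prompt_token_ids" access are illustrative assumptions, not part of this PR.

# Sketch only: the field layout of ProcessorInputs (e.g. "prompt_token_ids")
# is assumed here; see vllm.inputs for the actual definition.
from typing import Union

from vllm.inputs import ProcessorInputs, PromptType
from vllm.platforms.interface import Platform
from vllm.pooling_params import PoolingParams
from vllm.sampling_params import SamplingParams


class MyPlatform(Platform):  # hypothetical platform, for illustration only

    @classmethod
    def validate_request(
        cls,
        prompt: PromptType,
        params: Union[SamplingParams, PoolingParams],
        processed_inputs: ProcessorInputs,
    ) -> None:
        """Raises if this request is unsupported on this platform"""
        # Check the tokenized request rather than the raw prompt;
        # the key name is an assumption for this sketch.
        token_ids = processed_inputs.get("prompt_token_ids", [])
        if len(token_ids) > 8192:
            raise ValueError(
                "Requests longer than 8192 tokens are not supported "
                "on this platform.")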
12 changes: 6 additions & 6 deletions vllm/v1/engine/processor.py
@@ -202,12 +202,6 @@ def process_inputs(

         # TODO(woosuk): Support pooling models.
         # TODO(woosuk): Support encoder-decoder models.
-
-        from vllm.platforms import current_platform
-        current_platform.validate_request(
-            prompt=prompt,
-            params=params,
-        )
         self._validate_lora(lora_request)
         self._validate_params(params)
         if priority != 0:
@@ -231,6 +225,12 @@
             prompt_adapter_request=prompt_adapter_request,
             return_mm_hashes=self.use_hash,
         )
+        from vllm.platforms import current_platform
+        current_platform.validate_request(
+            prompt=prompt,
+            params=params,
+            processed_inputs=processed_inputs,
+        )
         eos_token_id = self.input_preprocessor.get_eos_token_id(lora_request)

         self._validate_model_inputs(processed_inputs, lora_request)
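
Moving the call in processor.py to after input preprocessing means platform validation now runs once processed_inputs exists, so a platform's checks can inspect the tokenized (and multimodal-processed) request instead of only the raw prompt, while unsupported requests are still rejected at submission time. A rough caller-side sketch, assuming the public vllm.LLM entry point and that the platform's validate_request raises ValueError on rejection:

# Caller-side sketch; the model name and the ValueError assumption are
# illustrative, not taken from this PR.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m")
try:
    llm.generate("Hello, world", SamplingParams(max_tokens=16))
except ValueError as err:
    # On a platform whose validate_request rejects this request, the error
    # now surfaces after tokenization but before scheduling.
    print(f"Rejected by platform validation: {err}")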