3 files changed (+10, -8 lines).

File 1 of 3: the platform interface's `validate_request` hook gains a `processed_inputs` parameter.

```diff
@@ -8,7 +8,7 @@
 import numpy as np
 import torch
 
-from vllm.inputs import PromptType
+from vllm.inputs import ProcessorInputs, PromptType
 from vllm.logger import init_logger
 
 if TYPE_CHECKING:
@@ -400,6 +400,7 @@ def validate_request(
         cls,
         prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
+        processed_inputs: ProcessorInputs,
     ) -> None:
         """Raises if this request is unsupported on this platform"""
 
```
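A minimal sketch of how a platform subclass could override the widened hook. `DummyPlatform` and its n=1 rule are hypothetical, and the `vllm.platforms.interface` import path is an assumption based on context; only the signature itself comes from the diff.

```python
# Hedged sketch: a hypothetical platform overriding the widened hook.
from typing import Union

from vllm.inputs import ProcessorInputs, PromptType
from vllm.platforms.interface import Platform  # assumed module path
from vllm.pooling_params import PoolingParams
from vllm.sampling_params import SamplingParams


class DummyPlatform(Platform):
    """Illustrative platform; not part of vLLM."""

    @classmethod
    def validate_request(
        cls,
        prompt: PromptType,
        params: Union[SamplingParams, PoolingParams],
        processed_inputs: ProcessorInputs,
    ) -> None:
        """Raises if this request is unsupported on this platform"""
        # Both the raw prompt and the processed inputs are now in scope,
        # so a platform can validate either representation.
        if isinstance(params, SamplingParams) and params.n > 1:
            raise ValueError("DummyPlatform only supports n=1")  # invented rule
```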
File 2 of 3: a concrete platform implementation picks up the same signature change.

```diff
@@ -5,7 +5,7 @@
 import torch
 
 import vllm.envs as envs
-from vllm.inputs import PromptType
+from vllm.inputs import ProcessorInputs, PromptType
 from vllm.logger import init_logger
 from vllm.sampling_params import SamplingParams, SamplingType
 
@@ -150,6 +150,7 @@ def validate_request(
         cls,
         prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
+        processed_inputs: ProcessorInputs,
     ) -> None:
         """Raises if this request is unsupported on this platform"""
         if isinstance(params, SamplingParams):
```
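The body below the visible `isinstance` check is not shown, but the new argument is what lets checks like it move beyond the raw prompt and params. A hedged fragment of what a `processed_inputs`-based check might look like; the `prompt_token_ids` key and the length cap are assumptions for the sketch, not code from this file:

```python
# Hedged fragment: a possible platform check on the processed inputs. The
# "prompt_token_ids" key and _MAX_PROMPT_LEN are illustrative assumptions.
from vllm.inputs import ProcessorInputs

_MAX_PROMPT_LEN = 8192  # hypothetical platform limit, not from this PR


def _check_processed_inputs(processed_inputs: ProcessorInputs) -> None:
    # Assumes ProcessorInputs is dict-like at runtime, so .get() retrieves
    # the tokenized prompt when one is present.
    token_ids = processed_inputs.get("prompt_token_ids")
    if token_ids is not None and len(token_ids) > _MAX_PROMPT_LEN:
        raise ValueError("prompt exceeds this platform's supported length")
```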
File 3 of 3: in the engine's `process_inputs`, the platform validation call moves from before input preprocessing to after it, so the freshly produced `processed_inputs` can be passed to the hook.

```diff
@@ -202,12 +202,6 @@ def process_inputs(
 
         # TODO(woosuk): Support pooling models.
         # TODO(woosuk): Support encoder-decoder models.
-
-        from vllm.platforms import current_platform
-        current_platform.validate_request(
-            prompt=prompt,
-            params=params,
-        )
         self._validate_lora(lora_request)
         self._validate_params(params)
         if priority != 0:
@@ -231,6 +225,12 @@
             prompt_adapter_request=prompt_adapter_request,
             return_mm_hashes=self.use_hash,
         )
+        from vllm.platforms import current_platform
+        current_platform.validate_request(
+            prompt=prompt,
+            params=params,
+            processed_inputs=processed_inputs,
+        )
         eos_token_id = self.input_preprocessor.get_eos_token_id(lora_request)
 
         self._validate_model_inputs(processed_inputs, lora_request)
```
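The reordering is forced by data flow: `processed_inputs` does not exist until preprocessing returns, so the hook could not receive it at its old position. A condensed sketch of the resulting order; names not visible in the diff (the `preprocess` method name, the surrounding signature) are assumptions:

```python
# Condensed, hedged view of process_inputs() after this change. Only the
# ordering is the point; elided details are replaced with assumptions.
def process_inputs(self, prompt, params, lora_request=None,
                   prompt_adapter_request=None, priority=0):
    self._validate_lora(lora_request)
    self._validate_params(params)

    # 1. Preprocess first: this is what produces processed_inputs.
    processed_inputs = self.input_preprocessor.preprocess(  # assumed name
        prompt,
        prompt_adapter_request=prompt_adapter_request,
        return_mm_hashes=self.use_hash,
    )

    # 2. The platform hook runs afterwards, so it can see the processed
    #    form in addition to the raw prompt and params.
    from vllm.platforms import current_platform
    current_platform.validate_request(
        prompt=prompt,
        params=params,
        processed_inputs=processed_inputs,
    )
```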