Diff summary: 2 files changed, +4 −6 lines.
File 1 (TPU platform module — presumably vllm/platforms/tpu.py; verify against the PR):

@@ -7,7 +7,7 @@
 import vllm.envs as envs
 from vllm.inputs import PromptType
 from vllm.logger import init_logger
-from vllm.sampling_params import SamplingParams
+from vllm.sampling_params import SamplingParams, SamplingType

 from .interface import Platform, PlatformEnum, _Backend

@@ -149,3 +149,5 @@ def validate_request(
                 SamplingParams) and params.guided_decoding is not None:
             raise ValueError("Structured output is not supported on "
                              f"{cls.device_name}.")
+        if params.sampling_type == SamplingType.RANDOM_SEED:
+            raise ValueError("Torch XLA does not support per-request seed.")
File 2 (v1 engine processor — presumably vllm/v1/engine/processor.py; verify against the PR):

@@ -14,10 +14,9 @@
 from vllm.multimodal.inputs import PlaceholderRange
 from vllm.multimodal.processing import EncDecMultiModalProcessor
 from vllm.multimodal.utils import merge_and_sort_multimodal_metadata
-from vllm.platforms import current_platform
 from vllm.pooling_params import PoolingParams
 from vllm.prompt_adapter.request import PromptAdapterRequest
-from vllm.sampling_params import SamplingParams, SamplingType
+from vllm.sampling_params import SamplingParams
 from vllm.transformers_utils.tokenizer_group import BaseTokenizerGroup
 from vllm.v1.engine import EngineCoreRequest
 from vllm.v1.engine.mm_input_cache import MirroredProcessingCache

@@ -78,9 +77,6 @@ def _validate_sampling_params(
         params: SamplingParams,
     ) -> None:
         self._validate_structured_output(params)
-        if (current_platform.is_tpu()
-                and params.sampling_type == SamplingType.RANDOM_SEED):
-            raise ValueError("Torch XLA does not support per-request seed.")

         if params.allowed_token_ids is None:
             return
You can’t perform that action at this time.
0 commit comments