1 parent 702a015, commit cfee92a
python/ray/llm/tests/serve/mocks/mock_vllm_engine.py
@@ -584,7 +584,13 @@ async def _start_engine(self) -> EngineClient:
         return MockPDDisaggVLLMEngineClient(
             VllmConfig(
                 model_config=ModelConfig(
-                    model=self.llm_config.model_loading_config.model_id
+                    model=self.llm_config.model_loading_config.model_id,
+                    task="auto",
+                    tokenizer=None,
+                    tokenizer_mode="auto",
+                    trust_remote_code=False,
+                    dtype="auto",
+                    seed=None,
                 )
             )
         )
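
For reference, a minimal standalone sketch of the constructor call this diff produces. It assumes a local vLLM installation whose vllm.config module exposes ModelConfig and VllmConfig accepting these keyword arguments (this varies by vLLM version); the model id is a hypothetical placeholder, and the mock client class from the test file is omitted.

    # Sketch only: mirrors the VllmConfig the mock engine builds after this commit.
    # Whether ModelConfig accepts exactly these kwargs depends on the installed vLLM version.
    from vllm.config import ModelConfig, VllmConfig

    model_id = "facebook/opt-125m"  # hypothetical model id, stands in for
                                    # self.llm_config.model_loading_config.model_id

    vllm_config = VllmConfig(
        model_config=ModelConfig(
            model=model_id,
            task="auto",
            tokenizer=None,
            tokenizer_mode="auto",
            trust_remote_code=False,
            dtype="auto",
            seed=None,
        )
    )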