1 file changed: vllm/model_executor/model_loader (+9, -2)

@@ -165,7 +165,7 @@ def device_loading_context(module: torch.nn.Module,
     # New parameters or parameters already on target device are untouched
 
 
-_MODEL_ARCH_BY_HASH = dict[str, tuple[type[nn.Module], str]]()
+_MODEL_ARCH_BY_HASH = dict[int, tuple[type[nn.Module], str]]()
 """Caches the outputs of `_get_model_architecture`."""
 
 
@@ -215,7 +215,14 @@ def _get_model_architecture(
 
 def get_model_architecture(
         model_config: ModelConfig) -> tuple[type[nn.Module], str]:
-    key = model_config.compute_hash()
+    key = hash((
+        model_config.model,
+        model_config.convert_type,
+        model_config.runner_type,
+        model_config.trust_remote_code,
+        model_config.model_impl,
+        tuple(getattr(model_config.hf_config, "architectures", [])),
+    ))
     if key in _MODEL_ARCH_BY_HASH:
        return _MODEL_ARCH_BY_HASH[key]
 