File tree Expand file tree Collapse file tree 2 files changed +3
-6
lines changed
transformers_utils/configs Expand file tree Collapse file tree 2 files changed +3
-6
lines changed Original file line number Diff line number Diff line change @@ -70,7 +70,8 @@ def __init__(self,
7070
71 71     if self.model is not None:
72 72         for k, v in self.model.to_dict().items():
73    -          setattr(self, k, v)
   73 +          if not hasattr(self, k):
   74 +              setattr(self, k, v)
7475
7576 @classmethod
7677 def from_pretrained (
Original file line number / Diff line number / Diff line change
 9  9  from vllm.forward_context import set_forward_context
10 10  from vllm.logger import init_logger
11 11  from vllm.model_executor.model_loader import get_model
12    - from vllm.model_executor.models import supports_multimodal
13 12  from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
14 13  from vllm.triton_utils import tl, triton
15 14  from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata
@@ -311,10 +310,7 @@ def load_model(self, target_model: nn.Module) -> None:
311 310     if self.vllm_config.speculative_config.method != "eagle3" and \
312 311             hasattr(target_model, "lm_head"):
313 312         logger.info("Loading EAGLE LM head weights from the target model.")
314     -       if supports_multimodal(target_model):
315     -           self.model.lm_head = target_model.get_language_model().lm_head
316     -       else:
317     -           self.model.lm_head = target_model.lm_head
    313 +       self.model.lm_head = target_model.lm_head
318314
319315 @torch .inference_mode ()
320316 def dummy_run (
You can’t perform that action at this time.
0 commit comments