Skip to content

Commit

Permalink
Change Emu2 weight loading to use proper torch.bfloat16
Browse files — browse the repository at this point in the history
  • Loading branch information
Gnurro committed Apr 30, 2024
1 parent ff86080 commit d191b80
Showing 1 changed file with 1 addition and 1 deletion.
2 changes: 1 addition & 1 deletion in backends/huggingface_multimodal_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ def load_model(model_spec: backends.ModelSpec):
if hasattr(model_spec, 'trust_remote_code'):
if model_spec['trust_remote_code']:
if model_spec['model_type'] == "Emu2":
model = model_type.from_pretrained(hf_model_str, device_map="auto", torch_dtype="bfloat16",
model = model_type.from_pretrained(hf_model_str, device_map="auto", torch_dtype=torch.bfloat16,
trust_remote_code=model_spec['trust_remote_code'])
else:
model = model_type.from_pretrained(hf_model_str, device_map="auto", torch_dtype="auto",
Expand Down

0 comments on commit d191b80

Please sign in to comment.