diff --git a/tests/models/language/generation/test_gemma.py b/tests/models/language/generation/test_gemma.py
new file mode 100644
index 000000000000..ed0f0c19a041
--- /dev/null
+++ b/tests/models/language/generation/test_gemma.py
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+import numpy as np
+import pytest
+
+MODELS = ["google/gemma-2b", "google/gemma-2-2b", "google/gemma-3-4b-it"]
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_dummy_loader(vllm_runner, model: str) -> None:
+    with vllm_runner(
+            model,
+            load_format="dummy",
+    ) as llm:
+        normalizers = llm.collective_rpc(lambda self: self.worker.model_runner.
+                                         model.model.normalizer.cpu().item())
+        assert np.allclose(
+            normalizers,
+            llm.llm_engine.model_config.hf_config.hidden_size**0.5,
+            rtol=1e-3)
diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py
index 99ed51f8e70a..59c3102add4c 100644
--- a/vllm/model_executor/models/gemma.py
+++ b/vllm/model_executor/models/gemma.py
@@ -281,7 +281,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         # data type such as bfloat16, not float32.
         # See https://github.com/huggingface/transformers/pull/29402
         normalizer = self.config.hidden_size**0.5
-        self.register_buffer("normalizer", torch.tensor(normalizer))
+        self.register_buffer("normalizer",
+                             torch.tensor(normalizer),
+                             persistent=False)
         self.make_empty_intermediate_tensors = (
             make_empty_intermediate_tensors_factory(
                 ["hidden_states", "residual"], config.hidden_size))
diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py
index ce405041b3d4..8beefb2cd0bd 100644
--- a/vllm/model_executor/models/gemma2.py
+++ b/vllm/model_executor/models/gemma2.py
@@ -267,7 +267,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         # data type such as bfloat16, not float32.
         # See https://github.com/huggingface/transformers/pull/29402
         normalizer = self.config.hidden_size**0.5
-        self.register_buffer("normalizer", torch.tensor(normalizer))
+        self.register_buffer("normalizer",
+                             torch.tensor(normalizer),
+                             persistent=False)
         self.make_empty_intermediate_tensors = (
             make_empty_intermediate_tensors_factory(
                 ["hidden_states", "residual"], config.hidden_size))
diff --git a/vllm/model_executor/models/gemma3.py b/vllm/model_executor/models/gemma3.py
index e19e0026b3f9..954e48d25f67 100644
--- a/vllm/model_executor/models/gemma3.py
+++ b/vllm/model_executor/models/gemma3.py
@@ -371,7 +371,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         # data type such as bfloat16, not float32.
         # See https://github.com/huggingface/transformers/pull/29402
         normalizer = self.config.hidden_size**0.5
-        self.register_buffer("normalizer", torch.tensor(normalizer))
+        self.register_buffer("normalizer",
+                             torch.tensor(normalizer),
+                             persistent=False)
         self.make_empty_intermediate_tensors = (
             make_empty_intermediate_tensors_factory(
                 ["hidden_states", "residual"], config.hidden_size))
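
Note (not part of the patch): a minimal sketch of the PyTorch behavior the persistent=False change relies on. Buffers registered with persistent=False are excluded from state_dict(), so weight-loading paths that only restore checkpoint tensors (such as the dummy loader exercised by the new test) leave the value computed in __init__ untouched. The Toy module and hidden_size below are hypothetical, for illustration only.

import torch
import torch.nn as nn


class Toy(nn.Module):

    def __init__(self, hidden_size: int = 4) -> None:
        super().__init__()
        normalizer = hidden_size**0.5
        # persistent=False keeps the buffer out of state_dict(), so loaders
        # never see (or overwrite) it; the value computed here is preserved.
        self.register_buffer("normalizer",
                             torch.tensor(normalizer),
                             persistent=False)


toy = Toy()
assert "normalizer" not in toy.state_dict()
assert torch.isclose(toy.normalizer, torch.tensor(2.0))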