diff --git a/vllm/model_executor/layers/fused_moe/__init__.py b/vllm/model_executor/layers/fused_moe/__init__.py index 56ffaf861ac7..799f78284894 100644 --- a/vllm/model_executor/layers/fused_moe/__init__.py +++ b/vllm/model_executor/layers/fused_moe/__init__.py @@ -15,6 +15,7 @@ FusedMoEPermuteExpertsUnpermute, FusedMoEPrepareAndFinalize, ) +from vllm.model_executor.layers.fused_moe.shared_fused_moe import SharedFusedMoE from vllm.model_executor.layers.fused_moe.utils import activation_without_mul from vllm.triton_utils import HAS_TRITON @@ -42,6 +43,7 @@ def get_config() -> Optional[dict[str, Any]]: "FusedMoEPermuteExpertsUnpermute", "FusedMoEActivationFormat", "FusedMoEPrepareAndFinalize", + "SharedFusedMoE", "activation_without_mul", "override_config", "get_config", diff --git a/vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py b/vllm/model_executor/layers/fused_moe/shared_fused_moe.py similarity index 57% rename from vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py rename to vllm/model_executor/layers/fused_moe/shared_fused_moe.py index a8b09a5c3cdb..a678fdae8833 100644 --- a/vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/shared_fused_moe.py @@ -18,13 +18,21 @@ class SharedFusedMoE(FusedMoE): def __init__( self, - shared_experts: torch.nn.Module, + shared_experts: Optional[torch.nn.Module], use_overlapped: bool = True, **kwargs, ): super().__init__(**kwargs) self._shared_experts = shared_experts - self.use_overlapped = use_overlapped + # Disable shared expert overlap if EP is disabled or we are not using + # flashinfer + DP since there is nothing to be gained in this case. + # Disabling the overlap optimization also prevents the shared experts + # from being hidden from torch.compile. + self.use_overlapped = ( + use_overlapped + and not (self.use_ep or self.use_flashinfer_cutlass_kernels) + and self._shared_experts is not None + ) @property def shared_experts(self) -> Optional[torch.nn.Module]: @@ -36,16 +44,19 @@ def forward( router_logits: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: if not self.use_overlapped: - shared_out = self._shared_experts(hidden_states) - - # Reduce outputs if necessary, since the MLP should - # have been created with reduce_results=False. - if ( - self.reduce_results - and self.tp_size > 1 - and self.must_reduce_shared_expert_outputs() - ): - shared_out = tensor_model_parallel_all_reduce(shared_out) + if self._shared_experts is not None: + shared_out = self._shared_experts(hidden_states) + + # Reduce shared expert outputs if necessary, since the MLP + # should have been created with reduce_results=False. + if ( + self.reduce_results + and self.tp_size > 1 + and self.must_reduce_shared_expert_outputs() + ): + shared_out = tensor_model_parallel_all_reduce(shared_out) + else: + shared_out = None fused_out = super().forward( hidden_states=hidden_states, diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index 2123fd9eba15..73e004480398 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -741,6 +741,8 @@ def create_weights( layer.w13_input_scale = None layer.w2_input_scale = None + self.rocm_aiter_moe_enabled = False + def process_weights_after_loading(self, layer: Module) -> None: # Lazy import to avoid importing triton too early. 
from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( diff --git a/vllm/model_executor/layers/shared_fused_moe/__init__.py b/vllm/model_executor/layers/shared_fused_moe/__init__.py deleted file mode 100644 index b047e9cad04a..000000000000 --- a/vllm/model_executor/layers/shared_fused_moe/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# SPDX-FileCopyrightText: Copyright contributors to the vLLM project -from vllm.model_executor.layers.shared_fused_moe.shared_fused_moe import SharedFusedMoE - -__all__ = ["SharedFusedMoE"] diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index 7db118ca0745..734ae8cbd608 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -13,7 +13,7 @@ from vllm.config.multimodal import BaseDummyOptions from vllm.distributed import get_tensor_model_parallel_rank from vllm.model_executor.layers.activation import get_act_fn -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.linear import ColumnParallelLinear, RowParallelLinear from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig @@ -206,7 +206,7 @@ def forward( return out -class AriaFusedMoE(FusedMoE): +class AriaFusedMoE(SharedFusedMoE): def weight_loader( self, param: nn.Parameter, loaded_weight: torch.Tensor, shard_id: str ) -> None: @@ -260,7 +260,16 @@ def __init__( torch.empty((self.config.moe_num_experts, self.config.hidden_size)) ) + self.shared_experts = LlamaMLP( + config.hidden_size, + config.intermediate_size * config.moe_num_shared_experts, + "silu", + quant_config=quant_config, + bias=config.mlp_bias, + ) + self.experts = AriaFusedMoE( + shared_experts=self.shared_experts, num_experts=config.moe_num_experts, top_k=config.moe_topk, hidden_size=config.hidden_size, @@ -269,13 +278,6 @@ def __init__( reduce_results=True, prefix=f"{prefix}.experts", ) - self.shared_experts = LlamaMLP( - config.hidden_size, - config.intermediate_size * config.moe_num_shared_experts, - "silu", - quant_config=quant_config, - bias=config.mlp_bias, - ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ @@ -291,12 +293,12 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: router_output = torch.nn.functional.linear(hidden_states, self.router_weight) - hidden_states_copy = hidden_states.clone() - # NOTE: hidden_states will be modified inplace by `FusedMoE` sparse_expert_output = self.experts(hidden_states, router_output) - shared_expert_output = self.shared_experts(hidden_states_copy) - return sparse_expert_output + shared_expert_output + if self.shared_experts is not None: + return sparse_expert_output[0] + sparse_expert_output[1] + else: + return sparse_expert_output class AriaTextDecoderLayer(LlamaDecoderLayer): diff --git a/vllm/model_executor/models/bailing_moe.py b/vllm/model_executor/models/bailing_moe.py index 3911ba599069..c016d46e194f 100644 --- a/vllm/model_executor/models/bailing_moe.py +++ b/vllm/model_executor/models/bailing_moe.py @@ -43,7 +43,7 @@ tensor_model_parallel_all_reduce, ) from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( 
MergedColumnParallelLinear, @@ -276,22 +276,6 @@ def __init__( # default value for scoring_func self.score_function = "softmax" - self.experts = FusedMoE( - num_experts=self.num_experts, - top_k=self.top_k, - hidden_size=self.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=self.norm_expert_prob, - quant_config=quant_config, - prefix=f"{prefix}.experts", - scoring_func=self.score_function, - e_score_correction_bias=self.gate.expert_bias, - num_expert_group=self.n_group, - topk_group=self.topk_group, - use_grouped_topk=self.use_grouped_topk, - ) - if self.num_shared_experts > 0: if hasattr(config, "moe_shared_expert_intermediate_size"): intermediate_size = config.moe_shared_expert_intermediate_size @@ -308,11 +292,27 @@ def __init__( else: self.shared_experts = None + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, + num_experts=self.num_experts, + top_k=self.top_k, + hidden_size=self.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=self.norm_expert_prob, + quant_config=quant_config, + prefix=f"{prefix}.experts", + scoring_func=self.score_function, + e_score_correction_bias=self.gate.expert_bias, + num_expert_group=self.n_group, + topk_group=self.topk_group, + use_grouped_topk=self.use_grouped_topk, + ) + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: num_tokens, hidden_size = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_size) - if self.shared_experts: - shared_output = self.shared_experts(hidden_states) + # router_logits: (num_tokens, n_experts) router_logits = self.gate(hidden_states.to(self.router_dtype)) router_logits = router_logits.to(hidden_states.dtype) @@ -321,9 +321,14 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states=hidden_states, router_logits=router_logits ) + if self.shared_experts is not None: + shared_output, final_hidden_states = final_hidden_states + else: + shared_output = None + final_hidden_states *= self.routed_scaling_factor - if self.shared_experts: + if shared_output is not None: final_hidden_states = final_hidden_states + shared_output if self.tp_size > 1: @@ -475,7 +480,7 @@ def forward( return hidden_states def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index f149b02e5522..1eca6f9ca20e 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -49,7 +49,7 @@ from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import LayerNorm, RMSNorm from vllm.model_executor.layers.linear import ( ColumnParallelLinear, @@ -64,7 +64,6 @@ per_token_group_quant_fp8, ) from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.shared_fused_moe import SharedFusedMoE from vllm.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding, @@ -205,26 +204,6 @@ def __init__( ) if 
config.n_shared_experts is None: - self.experts = FusedMoE( - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func=config.scoring_func, - # we do scaling outside, set factor to 1.0 to avoid double mul - routed_scaling_factor=1.0, - e_score_correction_bias=self.gate.e_score_correction_bias, - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - is_sequence_parallel=self.is_sequence_parallel, - ) self.shared_experts = None else: intermediate_size = config.moe_intermediate_size * config.n_shared_experts @@ -239,27 +218,27 @@ def __init__( prefix=f"{prefix}.shared_experts", ) - self.experts = SharedFusedMoE( - shared_experts=self.shared_experts, - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func=config.scoring_func, - # we do scaling outside, set factor to 1.0 to avoid double mul - routed_scaling_factor=1.0, - e_score_correction_bias=self.gate.e_score_correction_bias, - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - is_sequence_parallel=self.is_sequence_parallel, - ) + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, + num_experts=config.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + use_grouped_topk=True, + num_expert_group=config.n_group, + topk_group=config.topk_group, + prefix=f"{prefix}.experts", + scoring_func=config.scoring_func, + # we do scaling outside, set factor to 1.0 to avoid double mul + routed_scaling_factor=1.0, + e_score_correction_bias=self.gate.e_score_correction_bias, + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts, + is_sequence_parallel=self.is_sequence_parallel, + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: num_tokens, hidden_dim = hidden_states.shape @@ -1293,7 +1272,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.num_moe_layers = config.num_hidden_layers - config.first_k_dense_replace self.num_expert_groups = config.n_group - self.moe_layers: list[FusedMoE] = [] + self.moe_layers: list[SharedFusedMoE] = [] example_moe = None for layer in self.model.layers: if isinstance(layer, PPMissingLayer): @@ -1381,7 +1360,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - expert_params_mapping = FusedMoE.make_expert_params_mapping( + expert_params_mapping = SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/dots1.py b/vllm/model_executor/models/dots1.py index 1ae7457fb215..55f8d4b231f7 100644 --- 
a/vllm/model_executor/models/dots1.py +++ b/vllm/model_executor/models/dots1.py @@ -42,7 +42,7 @@ tensor_model_parallel_all_reduce, ) from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -145,7 +145,21 @@ def __init__( else: self.gate.e_score_correction_bias = None - self.experts = FusedMoE( + if config.n_shared_experts is not None: + intermediate_size = config.moe_intermediate_size * config.n_shared_experts + self.shared_experts = Dots1MLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + reduce_results=False, + prefix=f"{prefix}.shared_experts", + ) + else: + self.shared_experts = None + + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, num_experts=config.n_routed_experts, top_k=config.num_experts_per_tok, hidden_size=config.hidden_size, @@ -163,29 +177,19 @@ e_score_correction_bias=self.gate.e_score_correction_bias, ) - if config.n_shared_experts is not None: - intermediate_size = config.moe_intermediate_size * config.n_shared_experts - self.shared_experts = Dots1MLP( - hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - reduce_results=False, - prefix=f"{prefix}.shared_experts", - ) - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: num_tokens, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) - if self.n_shared_experts is not None: - shared_output = self.shared_experts(hidden_states) + router_logits, _ = self.gate(hidden_states) - final_hidden_states = ( - self.experts(hidden_states=hidden_states, router_logits=router_logits) - * self.routed_scaling_factor - ) - if shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + + fused_moe_out = self.experts( + hidden_states=hidden_states, router_logits=router_logits + ) + + if self.shared_experts is not None: + shared_output, final_hidden_states = fused_moe_out + else: + shared_output = None + final_hidden_states = fused_moe_out + + final_hidden_states = final_hidden_states * self.routed_scaling_factor + + if shared_output is not None: + final_hidden_states = final_hidden_states + shared_output + if self.tp_size > 1: final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states) return final_hidden_states.view(num_tokens, hidden_dim) @@ -426,7 +430,7 @@ def forward( return hidden_states def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/ernie45_moe.py b/vllm/model_executor/models/ernie45_moe.py index 3cb93177a383..7516cb5abaf9 100644 --- a/vllm/model_executor/models/ernie45_moe.py +++ b/vllm/model_executor/models/ernie45_moe.py @@ -37,7 +37,7 @@ from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -145,18 +145,6 @@ def __init__( torch.empty(config.moe_num_experts, dtype=torch.float32) ) - self.experts = FusedMoE( - num_experts=config.moe_num_experts, - top_k=config.moe_k, -
hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=True, - quant_config=quant_config, - prefix=f"{prefix}.experts", - e_score_correction_bias=self.gate.e_score_correction_bias, - ) - if self.has_shared_experts: intermediate_size = ( config.moe_intermediate_size * config.moe_num_shared_experts @@ -167,16 +155,28 @@ def __init__( hidden_act=config.hidden_act, quant_config=quant_config, prefix=f"{prefix}.shared_experts", - reduce_results=self.experts.must_reduce_shared_expert_outputs(), + reduce_results=False, ) + else: + self.shared_experts = None + + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, + num_experts=config.moe_num_experts, + top_k=config.moe_k, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=True, + quant_config=quant_config, + prefix=f"{prefix}.experts", + e_score_correction_bias=self.gate.e_score_correction_bias, + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) - shared_output = None - if self.has_shared_experts: - shared_output = self.shared_experts(hidden_states) router_logits, _ = self.gate(hidden_states.to(dtype=torch.float32)) @@ -184,8 +184,8 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states=hidden_states, router_logits=router_logits ) - if self.has_shared_experts and shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + if self.has_shared_experts: + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.tp_size > 1: final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( @@ -460,7 +460,7 @@ def forward( def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/ernie45_vl_moe.py b/vllm/model_executor/models/ernie45_vl_moe.py index 51f49b8587e6..2c4989556140 100644 --- a/vllm/model_executor/models/ernie45_vl_moe.py +++ b/vllm/model_executor/models/ernie45_vl_moe.py @@ -37,7 +37,7 @@ from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size from vllm.logger import init_logger -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( QKVParallelLinear, @@ -74,7 +74,15 @@ class Ernie4_5_VLMoeMLP(Ernie4_5_MoeMLP): - pass + def __init__(self, shared_experts: Optional[torch.nn.Module] = None, **kwargs): + super().__init__(**kwargs) + self.shared_experts = shared_experts + + def forward(self, x): + if self.shared_experts is not None: + return self.shared_experts(x) + super().forward(x) + else: + return super().forward(x) class Ernie4_5_VLMoeAttention(nn.Module): @@ -223,6 +231,21 @@ def __init__( assert text_moe_layer_start_index <= text_moe_layer_end_index + if self.has_shared_experts: + intermediate_size = ( + config.moe_intermediate_size[0] * config.moe_num_shared_experts + ) + 
self.shared_experts = Ernie4_5_VLMoeMLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + prefix=f"{prefix}.shared_experts", + reduce_results=False, + ) + else: + self.shared_experts = None + if ( layer_idx >= text_moe_layer_start_index and layer_idx <= text_moe_layer_end_index @@ -236,7 +259,8 @@ def __init__( prefix=f"{prefix}.text_experts_gate", ) - self.text_experts = FusedMoE( + self.text_experts = SharedFusedMoE( + shared_experts=self.shared_experts, num_experts=config.moe_num_experts[0], top_k=config.moe_k, hidden_size=config.hidden_size, @@ -249,6 +273,7 @@ def __init__( ) else: self.text_experts = Ernie4_5_VLMoeMLP( + shared_experts=self.shared_experts, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, @@ -271,7 +296,8 @@ def __init__( prefix=f"{prefix}.vision_experts_gate", ) - self.vision_experts = FusedMoE( + self.vision_experts = SharedFusedMoE( + shared_experts=self.shared_experts, num_experts=config.moe_num_experts[1], top_k=config.moe_k, hidden_size=config.hidden_size, @@ -284,6 +310,7 @@ def __init__( ) else: self.vision_experts = Ernie4_5_VLMoeMLP( + shared_experts=self.shared_experts, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, @@ -292,19 +319,6 @@ def __init__( prefix=f"{prefix}.mlp", ) - if self.has_shared_experts: - intermediate_size = ( - config.moe_intermediate_size[0] * config.moe_num_shared_experts - ) - self.shared_experts = Ernie4_5_VLMoeMLP( - hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - prefix=f"{prefix}.shared_experts", - reduce_results=self.text_experts.must_reduce_shared_expert_outputs(), - ) - def forward( self, hidden_states: torch.Tensor, @@ -315,9 +329,6 @@ def forward( hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) - if self.has_shared_experts: - shared_output = self.shared_experts(hidden_states) - if visual_token_mask is not None and visual_token_mask.all(): # only vision modal input router_logits, _ = self.vision_experts_gate( @@ -362,8 +373,8 @@ def forward( hidden_states=hidden_states, router_logits=text_router_logits ) - if self.has_shared_experts and shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + if self.has_shared_experts: + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.tp_size > 1: final_hidden_states = ( @@ -649,7 +660,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - expert_params_mapping = FusedMoE.make_expert_params_mapping( + expert_params_mapping = SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/glm4_moe.py b/vllm/model_executor/models/glm4_moe.py index 5db6f297dbf2..b9cdee29417a 100644 --- a/vllm/model_executor/models/glm4_moe.py +++ b/vllm/model_executor/models/glm4_moe.py @@ -42,7 +42,7 @@ ) from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import 
RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -52,7 +52,6 @@ from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.shared_fused_moe import SharedFusedMoE from vllm.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding, @@ -176,46 +175,29 @@ def __init__( reduce_results=False, prefix=f"{prefix}.shared_experts", ) - self.experts = SharedFusedMoE( - shared_experts=self.shared_experts, - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func="sigmoid", - # we do scaling outside, set factor to 1.0 to avoid double mul - routed_scaling_factor=1.0, - e_score_correction_bias=self.gate.e_score_correction_bias, - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - ) else: - self.experts = FusedMoE( - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func="sigmoid", - # we do scaling outside, set factor to 1.0 to avoid double mul - routed_scaling_factor=1.0, - e_score_correction_bias=self.gate.e_score_correction_bias, - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - ) + self.shared_experts = None + + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, + num_experts=config.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + use_grouped_topk=True, + num_expert_group=config.n_group, + topk_group=config.topk_group, + prefix=f"{prefix}.experts", + scoring_func="sigmoid", + # we do scaling outside, set factor to 1.0 to avoid double mul + routed_scaling_factor=1.0, + e_score_correction_bias=self.gate.e_score_correction_bias, + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts, + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: num_tokens, hidden_dim = hidden_states.shape @@ -522,7 +504,7 @@ def make_empty_intermediate_tensors( def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", @@ -677,7 +659,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.num_moe_layers = config.num_hidden_layers - config.first_k_dense_replace self.num_expert_groups = config.n_group - self.moe_layers: list[FusedMoE] = [] + self.moe_layers: list[SharedFusedMoE] = [] example_moe = None for 
layer in self.model.layers: if isinstance(layer, PPMissingLayer): diff --git a/vllm/model_executor/models/hunyuan_v1.py b/vllm/model_executor/models/hunyuan_v1.py index d33406b7be2b..0b9b7287fdc3 100644 --- a/vllm/model_executor/models/hunyuan_v1.py +++ b/vllm/model_executor/models/hunyuan_v1.py @@ -43,7 +43,7 @@ tensor_model_parallel_all_reduce, ) from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( ColumnParallelLinear, @@ -414,19 +414,6 @@ def __init__( self.physical_expert_start + self.n_local_physical_experts ) - self.experts = FusedMoE( - num_experts=self.n_routed_experts, - top_k=top_k, - hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - reduce_results=False, - renormalize=top_k > 1, - quant_config=quant_config, - prefix=f"{prefix}.experts", - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - ) - self.gate = ReplicatedLinear( config.hidden_size, config.num_experts, @@ -454,22 +441,34 @@ def __init__( else: self.shared_mlp = None + self.experts = SharedFusedMoE( + shared_experts=self.shared_mlp, + num_experts=self.n_routed_experts, + top_k=top_k, + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + reduce_results=False, + renormalize=top_k > 1, + quant_config=quant_config, + prefix=f"{prefix}.experts", + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts, + ) + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # NOTE: hidden_states can have either 1D or 2D shape. orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) - shared_output = None - if self.shared_mlp is not None: - shared_output = self.shared_mlp(hidden_states) # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) final_hidden_states = self.experts( hidden_states=hidden_states, router_logits=router_logits ) - if shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + if self.shared_mlp is not None: + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] + if self.tp_size > 1: final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states) @@ -725,7 +724,7 @@ def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: if _is_moe(self.config): # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", @@ -1009,7 +1008,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # Set MoE hyperparameters self.expert_weights = [] self.num_expert_groups = 1 - self.moe_layers: list[FusedMoE] = [] + self.moe_layers: list[SharedFusedMoE] = [] example_layer = None for layer in self.model.layers: if isinstance(layer, PPMissingLayer): diff --git a/vllm/model_executor/models/llama4.py b/vllm/model_executor/models/llama4.py index 075f35a098a4..df7bd9b7f6d1 100644 --- a/vllm/model_executor/models/llama4.py +++ b/vllm/model_executor/models/llama4.py @@ -33,7 +33,7 @@ get_tensor_model_parallel_world_size, tensor_model_parallel_all_gather, ) -from 
vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( QKVParallelLinear, @@ -42,7 +42,6 @@ ) from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.shared_fused_moe import SharedFusedMoE from vllm.model_executor.model_loader.weight_utils import ( default_weight_loader, maybe_remap_kv_scale_name, @@ -399,7 +398,7 @@ def load_moe_expert_weights( params_dict: The dictionary of module parameters. loaded_params: The set of already loaded parameters. expert_params_mapping: The mapping of expert parameters. Must be - generated by FusedMoE.make_expert_params_mapping(). + generated by SharedFusedMoE.make_expert_params_mapping(). fused: Whether the expert weights are fused into a single weight tensor or are separate weight tensors for each expert. When fused is True, loaded_weight should have shape of: @@ -522,7 +521,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: fused_experts_params = False # Expert parameter mapping for the case where the expert weights are # not fused into a single weight tensor. - expert_params_mapping = FusedMoE.make_expert_params_mapping( + expert_params_mapping = SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", @@ -530,7 +529,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: ) # Expert parameter mapping for the case where the expert weights are # fused into a single weight tensor. - expert_params_mapping_fused = FusedMoE.make_expert_params_mapping( + expert_params_mapping_fused = SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_up_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="gate_up_proj", diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 61b203a08349..c57299a2d390 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -40,7 +40,7 @@ from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -79,6 +79,7 @@ def __init__( hidden_act: str, quant_config: Optional[QuantizationConfig] = None, reduce_results: bool = True, + expert_gate: Optional[torch.nn.Linear] = None, prefix: str = "", ) -> None: super().__init__() @@ -102,12 +103,17 @@ def __init__( f"Unsupported activation: {hidden_act}. Only silu is supported for now." ) self.act_fn = SiluAndMul() + self.expert_gate = expert_gate def forward(self, x): gate_up, _ = self.gate_up_proj(x) - x = self.act_fn(gate_up) - x, _ = self.down_proj(x) - return x + out = self.act_fn(gate_up) + out, _ = self.down_proj(out) + + if self.expert_gate is not None: + out = F.sigmoid(self.expert_gate(x)) * out + + return out class Qwen2MoeSparseMoeBlock(nn.Module): @@ -126,17 +132,6 @@ def __init__( f"the number of experts {config.num_experts}." 
) - self.experts = FusedMoE( - num_experts=config.num_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - prefix=f"{prefix}.experts", - ) - self.gate = ReplicatedLinear( config.hidden_size, config.num_experts, @@ -144,39 +139,47 @@ def __init__( quant_config=None, prefix=f"{prefix}.gate", ) + + self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + if config.shared_expert_intermediate_size > 0: self.shared_expert = Qwen2MoeMLP( hidden_size=config.hidden_size, intermediate_size=config.shared_expert_intermediate_size, hidden_act=config.hidden_act, quant_config=quant_config, - reduce_results=self.experts.must_reduce_shared_expert_outputs(), + reduce_results=False, + expert_gate=self.shared_expert_gate, prefix=f"{prefix}.shared_expert", ) else: self.shared_expert = None - self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + + self.experts = SharedFusedMoE( + shared_experts=self.shared_expert, + num_experts=config.num_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + prefix=f"{prefix}.experts", + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # NOTE: hidden_states can have either 1D or 2D shape. orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) - shared_output = None - if self.shared_expert is not None: - shared_output = self.shared_expert(hidden_states) - if self.shared_expert_gate is not None: - shared_output = ( - F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_output - ) # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) final_hidden_states = self.experts( hidden_states=hidden_states, router_logits=router_logits ) - if shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + if self.shared_expert is not None: + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.tp_size > 1: final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( # noqa E501 final_hidden_states @@ -418,7 +421,7 @@ def forward( def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/qwen3_next.py b/vllm/model_executor/models/qwen3_next.py index cea3faf45a14..50629bb2e4a2 100644 --- a/vllm/model_executor/models/qwen3_next.py +++ b/vllm/model_executor/models/qwen3_next.py @@ -7,7 +7,6 @@ from typing import Optional import torch -import torch.nn.functional as F from einops import rearrange from torch import nn from transformers.activations import ACT2FN @@ -36,7 +35,7 @@ chunk_gated_delta_rule, fused_recurrent_gated_delta_rule, ) -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import GemmaRMSNorm as Qwen3NextRMSNorm from vllm.model_executor.layers.linear import ( 
ColumnParallelLinear, @@ -136,20 +135,6 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = ""): self.physical_expert_start + self.n_local_physical_experts ) - self.experts = FusedMoE( - num_experts=self.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - prefix=f"{prefix}.experts", - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - is_sequence_parallel=self.is_sequence_parallel, - ) - self.gate = ReplicatedLinear( config.hidden_size, config.num_experts, @@ -158,18 +143,35 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = ""): prefix=f"{prefix}.gate", ) + self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + if config.shared_expert_intermediate_size > 0: self.shared_expert = Qwen3NextMLP( hidden_size=config.hidden_size, intermediate_size=config.shared_expert_intermediate_size, hidden_act=config.hidden_act, quant_config=quant_config, - reduce_results=self.experts.must_reduce_shared_expert_outputs(), + reduce_results=False, + expert_gate=self.shared_expert_gate, prefix=f"{prefix}.shared_expert", ) else: self.shared_expert = None - self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + + self.experts = SharedFusedMoE( + shared_experts=self.shared_expert, + num_experts=self.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + prefix=f"{prefix}.experts", + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts, + is_sequence_parallel=self.is_sequence_parallel, + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # NOTE: hidden_states can have either 1D or 2D shape. 
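Note on the Qwen2-MoE and Qwen3-Next hunks: the `sigmoid(shared_expert_gate(x))` scaling moves into the shared-expert MLP's own forward, so the fully gated shared expert can be handed to `SharedFusedMoE` as a single module and run on the overlapped path. A minimal standalone sketch of that wrapper follows; the class and parameter names are illustrative, not the real `Qwen2MoeMLP`/`Qwen3NextMLP` classes.

```python
# Sketch only: illustrative names, not the real Qwen MLP classes.
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn


class GatedSharedExpertMLP(nn.Module):
    """Shared-expert MLP that applies sigmoid(expert_gate(x)) to its own
    output, so the gate travels with the module into SharedFusedMoE."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        expert_gate: Optional[nn.Linear] = None,
    ) -> None:
        super().__init__()
        self.gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        # e.g. nn.Linear(hidden_size, 1, bias=False), owned by the MoE block.
        self.expert_gate = expert_gate

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate, up = self.gate_up_proj(x).chunk(2, dim=-1)
        out = self.down_proj(F.silu(gate) * up)
        # Gate on the original input x, not on the MLP output, matching the
        # Qwen2MoeMLP.forward change earlier in this patch.
        if self.expert_gate is not None:
            out = torch.sigmoid(self.expert_gate(x)) * out
        return out
```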
@@ -180,22 +182,14 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.is_sequence_parallel: hidden_states = sequence_parallel_chunk(hidden_states) - shared_output = None - if self.shared_expert is not None: - shared_output = self.shared_expert(hidden_states) - if self.shared_expert_gate is not None: - shared_output = ( - F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_output - ) - # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) final_hidden_states = self.experts( hidden_states=hidden_states, router_logits=router_logits ) - if shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + if self.shared_expert is not None: + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.is_sequence_parallel: final_hidden_states = tensor_model_parallel_all_gather( @@ -1008,7 +1002,7 @@ def forward( def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", @@ -1150,7 +1144,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # Set MoE hyperparameters self.expert_weights = [] - self.moe_layers: list[FusedMoE] = [] + self.moe_layers: list[SharedFusedMoE] = [] example_layer = None for layer in self.model.layers: if isinstance(layer, PPMissingLayer):
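For reference, the construction-and-unpacking pattern this patch repeats across models — build the shared-expert MLP with `reduce_results=False`, pass it to `SharedFusedMoE`, and split the `(shared_output, routed_output)` tuple in the block's forward — is sketched below. The block name, config fields, and the plain `nn.Sequential` shared expert are placeholders rather than code from any model above, and the sketch assumes a vLLM environment in which `SharedFusedMoE` can be constructed.

```python
# Minimal sketch of the SharedFusedMoE usage pattern; names are illustrative.
from typing import Optional

import torch
from torch import nn

from vllm.model_executor.layers.fused_moe import SharedFusedMoE


class ExampleSparseMoeBlock(nn.Module):
    def __init__(self, config, quant_config=None, prefix: str = "") -> None:
        super().__init__()
        # Router producing per-token expert logits.
        self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)

        # Shared experts are built first so they can be handed to the fused
        # layer. Real models use their own MLP class created with
        # reduce_results=False; a plain MLP stands in for it here.
        self.shared_experts: Optional[nn.Module] = None
        if config.num_shared_experts > 0:
            shared_intermediate = (
                config.moe_intermediate_size * config.num_shared_experts
            )
            self.shared_experts = nn.Sequential(
                nn.Linear(config.hidden_size, shared_intermediate, bias=False),
                nn.SiLU(),
                nn.Linear(shared_intermediate, config.hidden_size, bias=False),
            )

        # SharedFusedMoE owns the shared experts so their computation can be
        # overlapped with the fused-MoE dispatch when that is profitable.
        self.experts = SharedFusedMoE(
            shared_experts=self.shared_experts,
            num_experts=config.num_experts,
            top_k=config.num_experts_per_tok,
            hidden_size=config.hidden_size,
            intermediate_size=config.moe_intermediate_size,
            reduce_results=False,
            renormalize=config.norm_topk_prob,
            quant_config=quant_config,
            prefix=f"{prefix}.experts",
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        router_logits = self.gate(hidden_states)
        fused_moe_out = self.experts(
            hidden_states=hidden_states, router_logits=router_logits
        )
        # With shared experts attached, SharedFusedMoE returns
        # (shared_output, routed_output); combine them here instead of running
        # the shared MLP separately as the pre-patch model code did.
        if self.shared_experts is not None:
            shared_output, routed_output = fused_moe_out
            return routed_output + shared_output
        return fused_moe_out
```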