diff --git a/vllm/model_executor/layers/layernorm.py b/vllm/model_executor/layers/layernorm.py
index b3c65e34178a..e8d1fd635505 100644
--- a/vllm/model_executor/layers/layernorm.py
+++ b/vllm/model_executor/layers/layernorm.py
@@ -45,7 +45,6 @@ def fused_add_rms_norm(
 
 def rocm_aiter_rms_norm(x: torch.Tensor, weight: torch.Tensor,
                         variance_epsilon: float) -> torch.Tensor:
-    import aiter as rocm_aiter
 
     if x.dim() > 2:
         x_original_shape = x.shape
diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py
index c0ed473103ab..9497f15984b7 100644
--- a/vllm/model_executor/models/qwen2_5_omni_thinker.py
+++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py
@@ -146,11 +146,11 @@ def get_hf_processor(
             kwargs["fps"] = fps
         processor = self.ctx.get_hf_processor(
             Qwen2_5OmniProcessor,
-            image_processor=self.get_image_processor(
-                min_pixels=min_pixels,
-                max_pixels=max_pixels,
-                size=size,
-                use_fast=kwargs.get("use_fast", True)),
+            image_processor=self.get_image_processor(min_pixels=min_pixels,
+                                                     max_pixels=max_pixels,
+                                                     size=size,
+                                                     use_fast=kwargs.get(
+                                                         "use_fast", True)),
             **kwargs,
         )
         if not hasattr(processor, "audio_token"):
diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py
index 4faa0d2c366e..ff53a2775e3d 100644
--- a/vllm/model_executor/models/qwen2_5_vl.py
+++ b/vllm/model_executor/models/qwen2_5_vl.py
@@ -794,11 +794,11 @@ def get_hf_processor(
 
         return self.ctx.get_hf_processor(
             Qwen2_5_VLProcessor,
-            image_processor=self.get_image_processor(
-                min_pixels=min_pixels,
-                max_pixels=max_pixels,
-                size=size,
-                use_fast=kwargs.get("use_fast", True)),
+            image_processor=self.get_image_processor(min_pixels=min_pixels,
+                                                     max_pixels=max_pixels,
+                                                     size=size,
+                                                     use_fast=kwargs.get(
+                                                         "use_fast", True)),
             **kwargs,
         )
 
diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index 3b939a43e924..690b8e02c2fd 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -759,11 +759,11 @@ def get_hf_processor(
     ) -> Qwen2VLProcessor:
         return self.ctx.get_hf_processor(
             Qwen2VLProcessor,
-            image_processor=self.get_image_processor(
-                min_pixels=min_pixels,
-                max_pixels=max_pixels,
-                size=size,
-                use_fast=kwargs.get("use_fast", True)),
+            image_processor=self.get_image_processor(min_pixels=min_pixels,
+                                                     max_pixels=max_pixels,
+                                                     size=size,
+                                                     use_fast=kwargs.get(
+                                                         "use_fast", True)),
             **kwargs,
         )
 
diff --git a/vllm/v1/attention/backends/mla/rocm_aiter_mla.py b/vllm/v1/attention/backends/mla/rocm_aiter_mla.py
index 9fbca2e955e7..8ad4e542b45b 100644
--- a/vllm/v1/attention/backends/mla/rocm_aiter_mla.py
+++ b/vllm/v1/attention/backends/mla/rocm_aiter_mla.py
@@ -201,16 +201,9 @@ def _forward_decode(
 
         kv_buffer = kv_c_and_k_pe_cache.unsqueeze(2)
 
-        if self.num_heads == 16:
-            # AITER MLA decode kernel only supports
-            # max_seqlen_q=1 when using 16 heads.
-            max_seqlen_qo = 1
-        else:
-            # AITER MLA decode Kernel handles arbitrary
-            # max_seqlen_q values when using 128 heads.
-            assert attn_metadata.prefill is not None
-            max_seqlen_qo = attn_metadata.prefill.max_query_len
-
+        # max_seqlen_qo must be 1 except for MTP
+        # TODO: Find the best value for MTP
+        max_seqlen_qo = 1
         aiter_mla_decode_fwd(q, kv_buffer, o, self.scale,
                              attn_metadata.decode.qo_indptr, max_seqlen_qo,
                              attn_metadata.decode.paged_kv_indptr,