Commit b4eefa8

resolve the conflict and format problem
Signed-off-by: cty <ctynb@qq.com>
1 parent: b85b585

1 file changed: +3 -8 lines

vllm_ascend/worker/model_runner_v1.py

Lines changed: 3 additions & 8 deletions
@@ -64,14 +64,9 @@
 from vllm.v1.utils import bind_kv_cache
 from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
 from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
-from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange
-
-from vllm.v1.worker.utils import (gather_mm_placeholders, sanity_check_mm_encoder_outputs,
-                                  scatter_mm_placeholders)
-
-from vllm.multimodal.utils import group_mm_inputs_by_modality
-
-from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding
+from vllm.v1.worker.utils import (gather_mm_placeholders,
+                                  sanity_check_mm_encoder_outputs,
+                                  scatter_mm_placeholders)
 
 from vllm_ascend.ascend_config import get_ascend_config
 from vllm_ascend.attention.attention import AttentionMaskBuilder
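
In short, the hunk removes the MultiModalKwargs/PlaceholderRange, group_mm_inputs_by_modality, and MRotaryEmbedding imports that the merge had left in this block (presumably unused here or already imported elsewhere in the file) and keeps a single, properly wrapped import of the placeholder helpers from vllm.v1.worker.utils. A minimal sketch of how the affected import section should read after this commit, reconstructed from the hunk above rather than copied from the repository:

# Import section of vllm_ascend/worker/model_runner_v1.py (new lines 64-72),
# reconstructed from the diff above; assumes a vLLM source tree that
# provides the vllm.v1 worker modules referenced here.
from vllm.v1.utils import bind_kv_cache
from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
from vllm.v1.worker.utils import (gather_mm_placeholders,
                                  sanity_check_mm_encoder_outputs,
                                  scatter_mm_placeholders)

from vllm_ascend.ascend_config import get_ascend_config
from vllm_ascend.attention.attention import AttentionMaskBuilder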
