Commit f88399d

Author: q30056305
Message: solve codecheck/ruff issue

1 parent: 80a2561

File tree: 3 files changed (+2, -8 lines)

examples/offline_disaggregated_prefill_npu.py
Lines changed: 0 additions & 6 deletions

@@ -16,12 +16,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-"""
-This file demonstrates the example usage of disaggregated prefilling
-We will launch 2 vllm instances (NPU 0,1 for prefill and NPU 2,3 for decode),
-and then transfer the KV cache between them.
-"""
 import os
 import time
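The deleted docstring is the only description of what this example does: launch two vLLM instances, run prefill on NPUs 0,1 and decode on NPUs 2,3, and transfer the KV cache between them. As a reading aid, here is a heavily hedged sketch of that two-instance shape, modeled on upstream vLLM's disaggregated prefill example; the KVTransferConfig fields, the connector name, and the model are assumptions for illustration and are not this file's actual code:

# Illustrative sketch only, not the example file's code. KVTransferConfig
# field names follow upstream vLLM's disaggregated prefill example and may
# differ across vLLM / vllm-ascend versions.
import os

from vllm import LLM, SamplingParams
from vllm.config import KVTransferConfig


def run_prefill() -> None:
    # Pin the producer instance to the first two NPUs.
    os.environ["ASCEND_RT_VISIBLE_DEVICES"] = "0,1"
    llm = LLM(
        model="facebook/opt-125m",  # placeholder model name
        kv_transfer_config=KVTransferConfig(
            kv_connector="PyNcclConnector",  # assumed connector name
            kv_role="kv_producer",
            kv_rank=0,
            kv_parallel_size=2,
        ),
    )
    # The producer only needs to compute the KV cache, so one token suffices.
    llm.generate(["Hello, my name is"], SamplingParams(max_tokens=1))


def run_decode() -> None:
    # The consumer instance runs on the other two NPUs and receives the
    # producer's KV cache through the connector instead of recomputing it.
    os.environ["ASCEND_RT_VISIBLE_DEVICES"] = "2,3"
    llm = LLM(
        model="facebook/opt-125m",
        kv_transfer_config=KVTransferConfig(
            kv_connector="PyNcclConnector",
            kv_role="kv_consumer",
            kv_rank=1,
            kv_parallel_size=2,
        ),
    )
    llm.generate(["Hello, my name is"], SamplingParams(max_tokens=32))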

vllm_ascend/worker/worker.py
Lines changed: 1 addition & 1 deletion

@@ -24,7 +24,7 @@
 import torch.distributed
 from torch import nn
 from vllm import envs
-from vllm.config import ParallelConfig, VllmConfig
+from vllm.config import VllmConfig
 from vllm.distributed import (ensure_kv_transfer_initialized,
                               ensure_model_parallel_initialized,
                               init_distributed_environment,

vllm_ascend/worker/worker_v1.py
Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@
 import torch.nn as nn
 import torch_npu
 from vllm import envs
-from vllm.config import ParallelConfig, VllmConfig
+from vllm.config import VllmConfig
 from vllm.distributed import (ensure_kv_transfer_initialized,
                               ensure_model_parallel_initialized,
                               init_distributed_environment,
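Both worker diffs are the same one-line cleanup: ParallelConfig was imported but never used, which ruff flags as rule F401 (unused import) and which is presumably the "codecheck/ruff issue" named in the commit message. A minimal sketch of the pattern in a hypothetical demo.py, assuming vLLM is installed and ruff runs with a rule set that includes F401:

# demo.py -- hypothetical reproduction of the unused-import finding.
#
# With the pre-commit import line
#     from vllm.config import ParallelConfig, VllmConfig
# `ruff check demo.py` would report something like:
#     demo.py:1:25: F401 `vllm.config.ParallelConfig` imported but unused
#
# The fix, as in this commit, is to import only the name that is used:
from vllm.config import VllmConfig


def build_worker(vllm_config: VllmConfig) -> None:
    """Hypothetical consumer that only ever needs VllmConfig."""
    _ = vllm_config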

0 commit comments