Skip to content

Commit 2382ad2

Browse files
authored
[ci] fix linter (#13701)
Signed-off-by: youkaichao <youkaichao@gmail.com>
1 parent 3e472d8 commit 2382ad2

File tree

5 files changed

+11
-7
lines changed

5 files changed

+11
-7
lines changed

examples/offline_inference/data_parallel.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -48,15 +48,16 @@ def main(dp_size, dp_rank, dp_master_ip, dp_master_port, GPUs_per_dp_rank):
4848
max_tokens=16 * (dp_rank + 1))
4949

5050
# Create an LLM.
51-
llm = LLM(model="facebook/opt-125m", tensor_parallel_size=2, enforce_eager=True)
51+
llm = LLM(model="facebook/opt-125m",
52+
tensor_parallel_size=2,
53+
enforce_eager=True)
5254
outputs = llm.generate(prompts, sampling_params)
5355
# Print the outputs.
5456
for output in outputs:
5557
prompt = output.prompt
5658
generated_text = output.outputs[0].text
57-
print(
58-
f"DP rank {dp_rank}, Prompt: {prompt!r}, "
59-
f"Generated text: {generated_text!r}")
59+
print(f"DP rank {dp_rank}, Prompt: {prompt!r}, "
60+
f"Generated text: {generated_text!r}")
6061

6162

6263
if __name__ == "__main__":

vllm/config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1372,7 +1372,7 @@ def stateless_init_dp_group(self) -> "ProcessGroup":
13721372

13731373
@staticmethod
13741374
def has_unfinished_dp(dp_group: "ProcessGroup",
1375-
has_unfinished: bool) -> bool:
1375+
has_unfinished: bool) -> bool:
13761376
tensor = torch.tensor([has_unfinished],
13771377
dtype=torch.int32,
13781378
device="cpu")

vllm/utils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -518,6 +518,7 @@ def get_open_port() -> int:
518518
return port
519519
return _get_open_port()
520520

521+
521522
def _get_open_port() -> int:
522523
port = envs.VLLM_PORT
523524
if port is not None:

vllm/v1/engine/core_client.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ def wake_up(self) -> None:
8989

9090
def execute_dummy_batch(self) -> None:
9191
raise NotImplementedError
92-
92+
9393
async def execute_dummy_batch_async(self) -> None:
9494
raise NotImplementedError
9595

@@ -343,6 +343,7 @@ def wake_up(self) -> None:
343343
def execute_dummy_batch(self) -> None:
344344
self._call_utility("execute_dummy_batch")
345345

346+
346347
class AsyncMPClient(MPClient):
347348
"""Asyncio-compatible client for multi-proc EngineCore."""
348349

vllm/v1/worker/gpu_model_runner.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1167,7 +1167,8 @@ def _dummy_run(
11671167
for k, v in self.intermediate_tensors.items()
11681168
})
11691169

1170-
with set_forward_context(None, self.vllm_config, num_tokens=num_tokens):
1170+
with set_forward_context(None, self.vllm_config,
1171+
num_tokens=num_tokens):
11711172
hidden_states = model(
11721173
input_ids=input_ids,
11731174
positions=positions,

0 commit comments

Comments (0)