
Commit 5d0b40f
[CodeStyle] black -> ruff format migration - part 4 (#74657)
1 parent a1b2d05
20 files changed: +3 additions, -25 deletions
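
Nearly every hunk below makes the same mechanical edit: a single blank line sitting directly after a class or function signature is deleted. That pattern is inferred from the hunks themselves rather than from either formatter's documentation, but it matches the kind of normalization ruff format applies where black previously left the file untouched. A minimal before/after sketch in Python (names are illustrative only):

    # Layout on the "-" side of the hunks below: a blank line directly
    # after the signature.
    class Before:

        def method(self):
            return 1

    # Layout after this commit: the blank line is gone and the body
    # starts immediately under the signature.
    class After:
        def method(self):
            return 1

    assert Before().method() == After().method()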

test/auto_parallel/hybrid_strategy/parallel_api.py
Lines changed: 0 additions & 1 deletion

@@ -411,7 +411,6 @@ def parallel_model(self, layer):
                 or paddle.device.cuda.get_device_capability()[0] < 8
             )
         ):
-
             bck = 'p2p'
         if self.config.context_parallel is True:
             bck = 'p2p'

test/auto_parallel/hybrid_strategy/single_lora_model.py
Lines changed: 0 additions & 1 deletion

@@ -305,7 +305,6 @@ def extra_repr(self):


 class LoRAModel(nn.Layer):
-
     def __init__(self, model, lora_config) -> None:
         super().__init__()
         self.model = self.get_lora_model(model, lora_config)

test/auto_parallel/hybrid_strategy/to_distributed_api_for_llama.py
Lines changed: 0 additions & 3 deletions

@@ -428,7 +428,6 @@ def forward(
         hidden_states = inputs_embeds

         for idx, (decoder_layer) in enumerate(self.layers):
-
             layer_outputs = decoder_layer(
                 hidden_states,
                 position_ids,
@@ -505,7 +504,6 @@ def forward(self, hidden_states, tensor_parallel_output=None):


 class LlamaForCausalLM(paddle.nn.Layer):
-
     def __init__(
         self,
         param_prefix="",
@@ -537,7 +535,6 @@ def forward(
         attention_mask=None,
         labels=None,
     ):
-
         outputs = self.llama(
             input_ids,
             position_ids=position_ids,

test/auto_parallel/pir/auto_parallel_refined_recompute_pir_pass_unittest.py
Lines changed: 0 additions & 1 deletion

@@ -18,7 +18,6 @@


 class TestRefinedRecomputeLlamaAuto(TestRecomputeLlamaAuto):
-
     def run_test_cases(self):
         self.config.recompute = True
         self.config.recompute_granularity = "full"

test/auto_parallel/pir/test_op_role.py
Lines changed: 0 additions & 1 deletion

@@ -37,7 +37,6 @@ def test_single(self):
        with paddle.pir_utils.IrGuard():
            main_program = paddle.base.Program()
            with paddle.base.program_guard(main_program):
-
                # op_role = -1
                x0 = paddle.static.data(name='x0', shape=[1, 128, 512])
                x1 = paddle.nn.functional.relu(x0)

test/auto_parallel/pir/test_pir_1f1b_plan.py
Lines changed: 0 additions & 1 deletion

@@ -19,7 +19,6 @@


 class TestPIR1F1BPlan(unittest.TestCase):
-
     def test_standalone_executor_1f1b_plan_stage0(self):
         base.set_flags({'FLAGS_enable_pir_api': 1})
         config = {"num_micro_batches": 8, "pp_stage": 0, "pp_degree": 4}

test/auto_parallel/spmd_rules/test_einsum_rule.py
Lines changed: 0 additions & 1 deletion

@@ -24,7 +24,6 @@

 # case: bmm
 class TestEinsumSPMDRule(unittest.TestCase):
-
     def setUp(self):
         self.init_data()
         self.init_parallel_setting()

test/cinn/fake_model/naive_multi_fc.py
Lines changed: 0 additions & 1 deletion

@@ -15,7 +15,6 @@
 A fake model with multiple FC layers to test CINN on a more complex model.
 """

-
 import paddle
 from paddle import static

test/collective/new_api_per_op_and_group_intranode.py
Lines changed: 0 additions & 3 deletions

@@ -89,7 +89,6 @@ def test_scatter(ep_group: Group, mode: str):
     m, n = 4096, 8192

     if local_rank == 0:
-
         scatter_list = [
             paddle.ones(shape=[m, n], dtype=paddle.float32) * (i + 1)
             for i in range(num_local_ranks)
@@ -124,7 +123,6 @@ def test_reduce(ep_group: Group, mode: str):
     dist.reduce(gbl_x, dst=0, group=ep_group)

     if local_rank == 0:
-
         res = paddle.ones(shape=[m, n], dtype=paddle.float32) * (
             num_local_ranks * (num_local_ranks + 1) / 2
         )
@@ -208,7 +206,6 @@ def test_all_reduce(ep_group: Group, mode: str):


 def test_primitive():
-
     dist.init_parallel_env()

     ranks = [0, 1]

test/dataset/test_image.py
Lines changed: 1 addition & 0 deletions

@@ -17,6 +17,7 @@
 Description:
     This script test image resize,flip and chw.
 """
+
 import os
 import unittest
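
The two docstring-related hunks suggest one more normalization: the formatter appears to want exactly one blank line between a module docstring and the first import. test/cinn/fake_model/naive_multi_fc.py had two blank lines there and loses one; test/dataset/test_image.py had none and gains one. This reading is an inference from the hunks, not a quote of ruff format's spec. A minimal module layout matching the post-commit state:

    """Module docstring.

    A short description would go here.
    """

    import os

    print(os.name)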
