Commit 15d8a04

[CodeStyle] black -> ruff format migration - part 33 (#74747)
1 parent c7fca56 commit 15d8a04
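
The hunks below are all driven by one formatting difference between the two tools: when an assert statement is too long for a single line, black parenthesizes and wraps the condition, while ruff format keeps the condition on the assert line and parenthesizes the message instead. A minimal sketch of the two shapes (the functions and message here are illustrative, not taken from Paddle):

# black style: the condition is parenthesized and split across lines.
def check_black(value):
    assert (
        value > 0
    ), "value must be a positive number so that the computation is well defined"
    return value


# ruff format style: the condition stays on one line; the message is parenthesized.
def check_ruff(value):
    assert value > 0, (
        "value must be a positive number so that the computation is well defined"
    )
    return value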

65 files changed (+949, -939 lines)

.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions
@@ -91,7 +91,7 @@ repos:
         | python/paddle/[k-n].+
-        # | python/paddle/[o-t].+
+        | python/paddle/[o-t].+
         | python/paddle/[u-z].+

@@ -147,7 +147,7 @@ repos:
         # | python/paddle/[k-n].+
-        | python/paddle/[o-t].+
+        # | python/paddle/[o-t].+
         # | python/paddle/[u-z].+
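
These two hunks hand the `python/paddle/[o-t].+` range from one hook's include list to the other's: the first hunk uncomments the range (presumably in the ruff-format hook, continuing the earlier parts of this migration), and the second comments it out (presumably in the black hook). pre-commit matches these entries as one verbose regex against each file path; a rough sketch of that matching, with the pattern reconstructed for illustration rather than copied from the config:

import re

# Illustrative reconstruction of the include list -- see .pre-commit-config.yaml
# for the real expression and the full set of ranges.
FILES_PATTERN = re.compile(
    r"""(?x)^(
        python/paddle/[k-n].+
        | python/paddle/[o-t].+
        | python/paddle/[u-z].+
    )"""
)

# Paths in the newly enabled [o-t] range now match and are formatted by ruff.
print(bool(FILES_PATTERN.match("python/paddle/optimizer/lr.py")))    # True
print(bool(FILES_PATTERN.match("python/paddle/base/framework.py")))  # False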

python/paddle/optimizer/adamw.py

Lines changed: 3 additions & 3 deletions
@@ -255,9 +255,9 @@ def __init__(
         if self._parameter_list:
             if isinstance(self._parameter_list[0], dict):
                 for param_group in self._parameter_list:
-                    assert (
-                        'params' in param_group
-                    ), 'params should be set in parameters if parameter groups are optimized in different options'
+                    assert 'params' in param_group, (
+                        'params should be set in parameters if parameter groups are optimized in different options'
+                    )
                 self._dtype = self._parameter_list[0]['params'][0].dtype
             else:
                 self._dtype = self._parameter_list[0].dtype

python/paddle/optimizer/fusion_utils.py

Lines changed: 12 additions & 12 deletions
@@ -52,9 +52,9 @@ def get_current_device_type():
         device_type = current_device.get_device_type()
     except:
         device_type = "unknown"
-    assert (
-        device_type in alignment.keys()
-    ), f"tensor fusion helper now only support {alignment.keys()}, but got device {device_type} instead."
+    assert device_type in alignment.keys(), (
+        f"tensor fusion helper now only support {alignment.keys()}, but got device {device_type} instead."
+    )
     __current_device_type__ = device_type
     return __current_device_type__

@@ -210,13 +210,13 @@ def reset_meta(
         merged_model_params_meta,
         buffer_ipc_meta,
     ):
-        assert isinstance(
-            accumulators_meta, dict
-        ), "accumulators_meta must be a dict"
+        assert isinstance(accumulators_meta, dict), (
+            "accumulators_meta must be a dict"
+        )
         self.accumulators_meta = accumulators_meta
-        assert isinstance(
-            master_weights_meta, dict
-        ), "master_weights_meta must be a dict"
+        assert isinstance(master_weights_meta, dict), (
+            "master_weights_meta must be a dict"
+        )
         self.master_weights_meta = master_weights_meta
         assert (
             isinstance(merged_model_params_meta, dict)

@@ -242,9 +242,9 @@ def sync_partial_param(self, start, end):
         assert isinstance(start, int), "start must be an integer"
         assert isinstance(end, int), "end must be an integer"
         assert start >= 0, "start must be non-negative"
-        assert (
-            end <= self.buffer_length
-        ), "end must be less than or equal to the total buffer length"
+        assert end <= self.buffer_length, (
+            "end must be less than or equal to the total buffer length"
+        )
         task = async_offload_with_offset(
             src_tensor=self.buffer,
             dst_tensor=self.cpu_buffer,

python/paddle/optimizer/lr.py

Lines changed: 27 additions & 27 deletions
@@ -238,9 +238,9 @@ def state_dict(self) -> _LRStateDict:
                 continue
             value = self.__dict__[key]
             if isinstance(value, Tensor):
-                assert (
-                    value.size == 1
-                ), "numel of Tensor in state_dict must be 1"
+                assert value.size == 1, (
+                    "numel of Tensor in state_dict must be 1"
+                )
                 value = float(value)
             state_dict[key] = value

@@ -598,9 +598,9 @@ def __init__(
         last_epoch: int = -1,
         verbose: bool = False,
     ) -> None:
-        assert (
-            gamma > 0.0
-        ), " 'gamma' must be a positive number so that the learning rate will decay."
+        assert gamma > 0.0, (
+            " 'gamma' must be a positive number so that the learning rate will decay."
+        )
         self.gamma = gamma
         super().__init__(learning_rate, last_epoch, verbose)

@@ -812,14 +812,14 @@ def __init__(
         last_epoch: int = -1,
         verbose: bool = False,
     ):
-        assert decay_steps > 0 and isinstance(
-            decay_steps, int
-        ), " 'decay_steps' must be a positive integer."
+        assert decay_steps > 0 and isinstance(decay_steps, int), (
+            " 'decay_steps' must be a positive integer."
+        )
         self.decay_steps = decay_steps
         self.end_lr = end_lr
-        assert (
-            power > 0.0
-        ), " 'power' must be greater than 0.0 so that the learning rate will decay."
+        assert power > 0.0, (
+            " 'power' must be greater than 0.0 so that the learning rate will decay."
+        )
         self.power = power
         self.cycle = cycle
         super().__init__(learning_rate, last_epoch, verbose)

@@ -955,15 +955,15 @@ def __init__(
                 f"the type of learning_rate should be [int, float or LRScheduler], the current type is {learning_rate}"
             )
         self.learning_rate = learning_rate
-        assert warmup_steps > 0 and isinstance(
-            warmup_steps, int
-        ), " 'warmup_steps' must be a positive integer."
+        assert warmup_steps > 0 and isinstance(warmup_steps, int), (
+            " 'warmup_steps' must be a positive integer."
+        )
         self.warmup_steps = warmup_steps
         self.start_lr = start_lr
         self.end_lr = end_lr
-        assert (
-            end_lr > start_lr
-        ), f"end_lr {end_lr} must be greater than start_lr {start_lr}"
+        assert end_lr > start_lr, (
+            f"end_lr {end_lr} must be greater than start_lr {start_lr}"
+        )
         super().__init__(start_lr, last_epoch, verbose)

     def state_dict(self) -> _LRStateDict:

@@ -1085,9 +1085,9 @@ def __init__(
         last_epoch: int = -1,
         verbose: bool = False,
     ) -> None:
-        assert (
-            gamma > 0.0 and gamma < 1.0
-        ), " 'gamma' must be in interval (0.0, 1.0) so that the learning rate will decay."
+        assert gamma > 0.0 and gamma < 1.0, (
+            " 'gamma' must be in interval (0.0, 1.0) so that the learning rate will decay."
+        )
         self.gamma = gamma
         super().__init__(learning_rate, last_epoch, verbose)

@@ -1321,9 +1321,9 @@ def __init__(
         if gamma >= 1.0:
             raise ValueError('gamma should be < 1.0.')

-        assert step_size > 0 and isinstance(
-            step_size, int
-        ), " 'step_size' must be a positive integer."
+        assert step_size > 0 and isinstance(step_size, int), (
+            " 'step_size' must be a positive integer."
+        )
         self.step_size = step_size
         self.gamma = gamma
         super().__init__(learning_rate, last_epoch, verbose)

@@ -1784,9 +1784,9 @@ def __init__(
             raise TypeError(
                 f"The type of 'eta_min' in 'CosineAnnealingDecay' must be 'float, int', but received {type(eta_min)}."
             )
-        assert T_max > 0 and isinstance(
-            T_max, int
-        ), " 'T_max' must be a positive integer."
+        assert T_max > 0 and isinstance(T_max, int), (
+            " 'T_max' must be a positive integer."
+        )
         self.T_max = T_max
         self.eta_min = float(eta_min)
         super().__init__(learning_rate, last_epoch, verbose)

python/paddle/optimizer/momentum.py

Lines changed: 1 addition & 3 deletions
@@ -553,9 +553,7 @@ def _append_optimize_multi_tensor_op(
                 "use_nesterov": self._use_nesterov,
                 "regularization_method": self._regularization_method_dict[
                     key
-                ][
-                    param_group_idx
-                ],
+                ][param_group_idx],
                 "regularization_coeff": self._regularization_coeff_dict[
                     key
                 ][param_group_idx],

python/paddle/optimizer/optimizer.py

Lines changed: 31 additions & 33 deletions
@@ -94,14 +94,14 @@ def append_backward_new(
     from paddle.incubate.autograd.primx import Transform, orig2prim

     program = default_main_program()
-    assert (
-        program.num_blocks == 1
-    ), "The append_backward_new interface is designed to process only one block."
+    assert program.num_blocks == 1, (
+        "The append_backward_new interface is designed to process only one block."
+    )
     block = program.current_block()
     for el in loss_list:
-        assert (
-            el.block == block
-        ), 'variable in loss_list should be in current block of main program'
+        assert el.block == block, (
+            'variable in loss_list should be in current block of main program'
+        )

     orig2prim(block)
     ad = Transform(block)

@@ -280,9 +280,9 @@ def __init__(
         if self._parameter_list:
             if isinstance(self._parameter_list[0], dict):
                 for param_group in self._parameter_list:
-                    assert (
-                        'params' in param_group
-                    ), 'params should be set in parameters if parameter groups are optimized in different options'
+                    assert 'params' in param_group, (
+                        'params should be set in parameters if parameter groups are optimized in different options'
+                    )
                 self._dtype = self._parameter_list[0]['params'][0].dtype
             else:
                 self._dtype = self._parameter_list[0].dtype

@@ -477,9 +477,9 @@ def set_state_dict(self, state_dict: dict[str, Tensor]) -> None:
         if isinstance(self._learning_rate, LRScheduler):
             lr_state_dict = state_dict.get("LR_Scheduler", None)
             if not isinstance(self._learning_rate, LambdaDecay):
-                assert (
-                    lr_state_dict is not None
-                ), "LR_Scheduler state must be included in the state dict except LambdaDecay"
+                assert lr_state_dict is not None, (
+                    "LR_Scheduler state must be included in the state dict except LambdaDecay"
+                )
             if lr_state_dict:
                 self._learning_rate.set_state_dict(lr_state_dict)

@@ -495,9 +495,9 @@ def set_state_dict(self, state_dict: dict[str, Tensor]) -> None:
         self._accumulators_holder = state_dict
         for k, v in self._accumulators.items():
             for para_name, var_tmp in v.items():
-                assert (
-                    var_tmp.name in state_dict
-                ), f"optimizer Tensor {var_tmp.name} not found"
+                assert var_tmp.name in state_dict, (
+                    f"optimizer Tensor {var_tmp.name} not found"
+                )

                 var = var_tmp.value()
                 tensor = var.get_tensor()

@@ -1112,9 +1112,9 @@ def _add_accumulator(

         if framework.in_dygraph_mode():
             if len(self._accumulators_holder) > 0:
-                assert (
-                    var_name in self._accumulators_holder
-                ), f"Optimizer set error, {var_name} should in state dict"
+                assert var_name in self._accumulators_holder, (
+                    f"Optimizer set error, {var_name} should in state dict"
+                )
                 var.set_value(self._accumulators_holder.pop(var_name))

         # load scale value for xpu

@@ -1231,9 +1231,9 @@ def _create_optimization_pass(
         target_block = global_block
         current_block = framework.default_main_program().current_block()
         if current_block.idx != global_block.idx:
-            assert (
-                current_block.backward_block_idx != -1
-            ), "current block is not global_block, but it doesn't have backward block."
+            assert current_block.backward_block_idx != -1, (
+                "current block is not global_block, but it doesn't have backward block."
+            )
             target_block = framework.default_main_program().blocks[
                 current_block.backward_block_idx
             ]

@@ -1669,9 +1669,7 @@ def _apply_optimize(
                 paddle.static.default_main_program(),
                 paddle.static.default_startup_program(),
             ):
-                auto_dp = (
-                    paddle.distributed.auto_parallel.auto_dp_utils.in_auto_dp_mode()
-                )
+                auto_dp = paddle.distributed.auto_parallel.auto_dp_utils.in_auto_dp_mode()
                 if auto_dp:
                     paddle.distributed.auto_parallel.auto_dp_utils._convert_fake_replicate_grad_to_partial(
                         params_grads

@@ -1943,9 +1941,9 @@ def minimize(
                 >>> adam.clear_grad()

         """
-        assert isinstance(
-            loss, (Variable, paddle.pir.Value)
-        ), "The loss should be an Tensor."
+        assert isinstance(loss, (Variable, paddle.pir.Value)), (
+            "The loss should be an Tensor."
+        )

         parameter_list = parameters if parameters else self._parameter_list

@@ -1969,9 +1967,9 @@ def _declarative_step(self):
         params = (
             paddle.static.default_main_program().global_block().all_parameters()
         )
-        assert not isinstance(
-            self._parameter_list[0], dict
-        ), "Only list of parameters is supported while using optimizer in @paddle.jit.static."
+        assert not isinstance(self._parameter_list[0], dict), (
+            "Only list of parameters is supported while using optimizer in @paddle.jit.static."
+        )
         selected_params = {param.name for param in self._parameter_list}
         parameters = [param for param in params if param.trainable]
         parameters = list(

@@ -2141,9 +2139,9 @@ def _is_dtype_fp16_or_bf16(self, dtype):
         :param dtype: instance of core.VarDesc.VarType
         :return: True if dtype is one of fp16 or bf16, False otherwise
         """
-        assert isinstance(
-            dtype, (core.VarDesc.VarType, core.DataType)
-        ), "The dtype should be an instance of core.VarDesc.VarType or core.DataType."
+        assert isinstance(dtype, (core.VarDesc.VarType, core.DataType)), (
+            "The dtype should be an instance of core.VarDesc.VarType or core.DataType."
+        )
         if isinstance(dtype, core.VarDesc.VarType):
             return (
                 dtype == core.VarDesc.VarType.FP16

python/paddle/pir/math_op_patch.py

Lines changed: 6 additions & 6 deletions
@@ -1026,9 +1026,9 @@ def indices(self):
         return _C_ops.sparse_indices(self)

     def set_shape(self, shape):
-        assert (
-            paddle.base.dygraph.base.in_to_static_mode()
-        ), "We only support call 'set_shape' in to_static mode."
+        assert paddle.base.dygraph.base.in_to_static_mode(), (
+            "We only support call 'set_shape' in to_static mode."
+        )

         if self.is_dense_tensor_type() or self.is_selected_row_type():
             type = paddle.pir.create_shaped_type(self.type(), shape)

@@ -1074,9 +1074,9 @@ def _to(
         if blocking is None:
             blocking = True
         else:
-            assert isinstance(
-                blocking, bool
-            ), "blocking value error, must be the True, False or None"
+            assert isinstance(blocking, bool), (
+                "blocking value error, must be the True, False or None"
+            )

         def transform(t, device, dtype, blocking):
             if dtype is None:

python/paddle/profiler/utils.py

Lines changed: 1 addition & 3 deletions
@@ -128,9 +128,7 @@ def begin(self) -> None:
         if self.event_type not in _AllowedEventTypeList:
             warn(
                 "Only TracerEvent Type in [{}, {}, {}, {}, {}, {},{}]\
-                    can be recorded.".format(
-                    *_AllowedEventTypeList
-                )
+                    can be recorded.".format(*_AllowedEventTypeList)
             )
             self.event = None
         else:

python/paddle/quantization/config.py

Lines changed: 3 additions & 1 deletion
@@ -285,7 +285,9 @@ def add_qat_layer_mapping(
         """
         assert isinstance(source, type) and issubclass(
             source, paddle.nn.Layer
-        ), "The source layer to be placed should be a subclass of paddle.nn.Layer"
+        ), (
+            "The source layer to be placed should be a subclass of paddle.nn.Layer"
+        )
         assert isinstance(target, type) and issubclass(
             target, paddle.nn.Layer
         ), "The target layer should be a subclass of paddle.nn.qat.Layer"
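
This last hunk differs slightly from the others: the isinstance/issubclass condition already spans several lines, so only the assertion message gains parentheses while the condition keeps its existing wrapping. A self-contained sketch of the resulting shape (the Base class and function here are stand-ins, not Paddle code):

class Base:  # stand-in for paddle.nn.Layer, for illustration only
    pass


def add_layer_mapping(source, target):
    # The long condition keeps its own line breaks; only the message is
    # wrapped in parentheses after the migration.
    assert isinstance(source, type) and issubclass(
        source, Base
    ), (
        "The source layer to be placed should be a subclass of Base"
    )
    return {source: target}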
