[CodeStyle][Ruff][BUAA][G-[31-40]] Fix ruff RUF005 diagnostic for 10 files in `python/paddle/` (#67385)
MufanColin authored Aug 15, 2024
1 parent 89d51ba commit e8e7aa2
Showing 9 changed files with 23 additions and 26 deletions.
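
Every hunk in this commit applies the same mechanical rewrite: Ruff's RUF005 ("collection-literal-concatenation") flags code that concatenates a sequence with a literal, such as `entrance_var + ["microbatch_id"]`, and prefers unpacking the sequence into the literal instead. A minimal sketch of the pattern, with illustrative names and values that are not taken from this commit:

```python
entrance_var = ["x", "y"]  # hypothetical list of variable names
sample_shape = (2, 3)      # hypothetical shape tuple

# Flagged by RUF005: building a new collection via `+` concatenation.
send_vars = entrance_var + ["microbatch_id"]
out_shape = sample_shape + (4, 4)

# Preferred spelling: unpack the existing collection into one literal.
send_vars = [*entrance_var, "microbatch_id"]  # ['x', 'y', 'microbatch_id']
out_shape = (*sample_shape, 4, 4)             # (2, 3, 4, 4)
```

The unpacked form also merges several sequences in one literal, which is how `[varname] + reshaped_varnames + origin_varnames` becomes `[varname, *reshaped_varnames, *origin_varnames]` below.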
2 changes: 1 addition & 1 deletion python/paddle/distributed/ps/utils/public.py
@@ -1354,7 +1354,7 @@ def insert_communicate_op(
     outputs={"Out": []},
     attrs={
         "mode": "forward" if is_forward else "backward",
-        "send_var_name": entrance_var + ["microbatch_id"],
+        "send_var_name": [*entrance_var, "microbatch_id"],
         "recv_var_name": [],
         "message_name": comm_info["block_input_var_name"],
         "next_endpoints": next_heter_worker_endpoints,
2 changes: 1 addition & 1 deletion python/paddle/distributed/transpiler/collective.py
@@ -814,7 +814,7 @@ def _insert_allgather_ops(self):
     param = block.vars[op_role_var[i]]
     new_grad_var = block.create_var(
         name=op_role_var[i] + "_allgather",
-        shape=[self.allgather_ranks] + list(param.shape),
+        shape=[self.allgather_ranks, *list(param.shape)],
         persistable=False,
         dtype=core.VarDesc.VarType.FP32,
         stop_gradient=True,
2 changes: 1 addition & 1 deletion python/paddle/distributed/utils/launch_utils.py
@@ -466,7 +466,7 @@ def start_local_trainers(
 
     logger.debug(f"trainer proc env:{current_env}")
 
-    cmd = [sys.executable, "-u", training_script] + training_script_args
+    cmd = [sys.executable, "-u", training_script, *training_script_args]
 
     logger.info(f"start trainer proc:{cmd} env:{proc_env}")
 
5 changes: 3 additions & 2 deletions python/paddle/distribution/lkj_cholesky.py
@@ -73,7 +73,8 @@ def vec_to_tril_matrix(
     # Calculate the dimension of the square matrix based on the last but one dimension of `p`
     # Define the output shape, which adds two dimensions for the square matrix
     shape0 = flatten_shape // last_dim
-    output_shape = sample_shape + (
+    output_shape = (
+        *sample_shape,
         shape0 // reduce(operator.mul, sample_shape),
         dim,
         dim,
@@ -251,7 +252,7 @@ def _onion(self, sample_shape: Sequence[int]) -> Tensor:
     # u_hypersphere[..., 0, :].fill_(0.0)
     # u_hypersphere[..., 0, :] = 0.0
     u_hypersphere_other = u_hypersphere[..., 1:, :]
-    zero_shape = tuple(u_hypersphere.shape[:-2]) + (1, self.dim)
+    zero_shape = (*tuple(u_hypersphere.shape[:-2]), 1, self.dim)
     zero_row = paddle.zeros(shape=zero_shape, dtype=u_hypersphere.dtype)
     u_hypersphere = paddle.concat([zero_row, u_hypersphere_other], axis=-2)
 
7 changes: 1 addition & 6 deletions python/paddle/distribution/multinomial.py
@@ -159,12 +159,7 @@ def sample(self, shape: Iterable[int] = ()) -> Tensor:
     if not isinstance(shape, Iterable):
         raise TypeError('sample shape must be Iterable object.')
 
-    samples = self._categorical.sample(
-        [
-            self.total_count,
-        ]
-        + list(shape)
-    )
+    samples = self._categorical.sample([self.total_count, *list(shape)])
     return (
         paddle.nn.functional.one_hot(samples, self.probs.shape[-1])
         .cast(self.probs.dtype)
23 changes: 12 additions & 11 deletions python/paddle/distribution/multivariate_normal.py
@@ -133,7 +133,7 @@ def __init__(
         scale_tril.shape[:-2], loc.shape[:-1]
     )
     self.scale_tril = scale_tril.expand(
-        batch_shape + [scale_tril.shape[-2], scale_tril.shape[-1]]
+        [*batch_shape, scale_tril.shape[-2], scale_tril.shape[-1]]
     )
 elif covariance_matrix is not None:
     if covariance_matrix.dim() < 2:
@@ -146,8 +146,11 @@ def __init__(
         covariance_matrix.shape[:-2], loc.shape[:-1]
     )
     self.covariance_matrix = covariance_matrix.expand(
-        batch_shape
-        + [covariance_matrix.shape[-2], covariance_matrix.shape[-1]]
+        [
+            *batch_shape,
+            covariance_matrix.shape[-2],
+            covariance_matrix.shape[-1],
+        ]
     )
 else:
     if precision_matrix.dim() < 2:
@@ -160,15 +163,13 @@ def __init__(
         precision_matrix.shape[:-2], loc.shape[:-1]
     )
     self.precision_matrix = precision_matrix.expand(
-        batch_shape
-        + [precision_matrix.shape[-2], precision_matrix.shape[-1]]
+        [
+            *batch_shape,
+            precision_matrix.shape[-2],
+            precision_matrix.shape[-1],
+        ]
     )
-    self.loc = loc.expand(
-        batch_shape
-        + [
-            -1,
-        ]
-    )
+    self.loc = loc.expand([*batch_shape, -1])
 event_shape = self.loc.shape[-1:]
 
 if scale_tril is not None:
@@ -554,7 +554,7 @@ def _save_dense_params(self, executor, dirname, context, main_program):
         optimizer.type, varname
     )
 
-    for var_name in [varname] + reshaped_varnames + origin_varnames:
+    for var_name in [varname, *reshaped_varnames, *origin_varnames]:
         var = self._origin_main_program.global_block().vars[var_name]
         block.append_op(
             type='recv_save',
@@ -904,7 +904,7 @@ def add_large_scale_op(
     names_str = ",".join(value_names)
     dims_str = ",".join([str(dim) for dim in value_dims])
     ids_name = f"kSparseIDs@{param}"
-    cached_str = ",".join(acture_names + [ids_name])
+    cached_str = ",".join([*acture_names, ids_name])
     init_attr_str = get_initializer_attrs(acture_names)
 
     meta_str = ":".join(
@@ -1264,7 +1264,7 @@ def insert_communicate_op(
     outputs={"Out": []},
     attrs={
         "mode": "forward" if is_forward else "backward",
-        "send_var_name": entrance_var + ["microbatch_id"],
+        "send_var_name": [*entrance_var, "microbatch_id"],
         "recv_var_name": [],
         "message_name": comm_info["block_input_var_name"],
         "next_endpoints": next_heter_worker_endpoints,
@@ -1339,7 +1339,7 @@ def replace_ops_by_communicate_op(
     outputs={"Out": []},
     attrs={
         "mode": "forward",
-        "send_var_name": entrance_var + ["microbatch_id"],
+        "send_var_name": [*entrance_var, "microbatch_id"],
         "recv_var_name": [],
         "message_name": comm_info["block_input_var_name"],
         "next_endpoints": next_heter_worker_endpoints,
