2 changes: 1 addition & 1 deletion python/paddle/incubate/fp8/deep_gemm/jit/compiler.py
@@ -45,7 +45,7 @@ def get_jit_include_dir() -> str:
@functools.cache
def get_deep_gemm_version() -> str:
# Update include directories
- include_dir = f"{get_jit_include_dir()+'/../../../../include/paddle/fluid/fp8/deep_gemm/include'}"
+ include_dir = f"{get_jit_include_dir()}/../../../../include/paddle/fluid/fp8/deep_gemm/include"
assert os.path.exists(
include_dir
), f"Cannot find GEMM include directory {include_dir}"
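The compiler.py hunk moves the relative-path suffix out of the f-string replacement field: the old line concatenated the literal to `get_jit_include_dir()` inside the braces (and wrapped the whole thing in a redundant single-field f-string), while the new line interpolates the call and keeps the suffix as literal text. Both spellings build the same path; a minimal sketch of the equivalence, with the helper stubbed for illustration:

```python
# Minimal sketch, not the Paddle implementation: get_jit_include_dir is stubbed.
def get_jit_include_dir() -> str:
    return "/opt/paddle/jit/include"

suffix = "/../../../../include/paddle/fluid/fp8/deep_gemm/include"

old_style = f"{get_jit_include_dir() + suffix}"  # concatenation inside the replacement field
new_style = f"{get_jit_include_dir()}{suffix}"   # plain interpolation, as in the new line

assert old_style == new_style
```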
7 changes: 4 additions & 3 deletions python/paddle/incubate/fp8/deep_gemm/jit/interleave_ffma.py
@@ -104,9 +104,10 @@ def modify_segment(m, name, ffma_lines):
for i in range(num_lines // 2):
dst_reg = parse_registers(ffma_lines[i * 2])[-2]
low_line, high_line = ffma_lines[i * 2], ffma_lines[i * 2 + 1]
- low_hex, high_hex = extract_hex_from_line(
- low_line
- ), extract_hex_from_line(high_line)
+ low_hex, high_hex = (
+ extract_hex_from_line(low_line),
+ extract_hex_from_line(high_line),
+ )
le_bytes.append(
low_hex.to_bytes(8, "little") + high_hex.to_bytes(8, "little")
)
7 changes: 4 additions & 3 deletions python/paddle/incubate/fp8/deep_gemm/jit_kernels/gemm.py
@@ -118,9 +118,10 @@ def get_best_configs(
for block_m in block_ms:
for block_n in block_ns:
success = False
- num_waves, best_num_waves = get_num_waves(
- block_m, block_n
- ), get_num_waves(best_block_m, best_block_n)
+ num_waves, best_num_waves = (
+ get_num_waves(block_m, block_n),
+ get_num_waves(best_block_m, best_block_n),
+ )
if best_block_m is None or best_block_n is None:
success = True
elif num_waves < best_num_waves:
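The interleave_ffma.py and gemm.py hunks apply the same pattern: the right-hand side of a two-target assignment is wrapped in an explicit tuple, so each element can sit on its own line instead of the formatter breaking inside a call. Nothing changes semantically; a small sketch with a stand-in helper (not the real `get_num_waves` / `extract_hex_from_line`):

```python
# Stand-in helper for illustration only.
def measure(a: int, b: int) -> int:
    return a * b

# Before: implicit tuple, so a line break has to land inside a call.
x, y = measure(
    1, 2
), measure(3, 4)

# After: explicit parenthesized tuple, one element per line.
x2, y2 = (
    measure(1, 2),
    measure(3, 4),
)

assert (x, y) == (x2, y2)
```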
45 changes: 21 additions & 24 deletions python/paddle/incubate/nn/layer/fused_transformer.py
@@ -147,10 +147,9 @@ def __init__(
name: str | None = None,
) -> None:
super().__init__()
- assert embed_dim > 0, (
- "Expected embed_dim to be greater than 0, "
- f"but received {embed_dim}"
- )
+ assert (
+ embed_dim > 0
+ ), f"Expected embed_dim to be greater than 0, but received {embed_dim}"
self._dtype = self._helper.get_default_dtype()
self._bias_attr = bias_attr
self._weight_attr = weight_attr
@@ -338,13 +337,12 @@ def __init__(
) -> None:
super().__init__()

- assert embed_dim > 0, (
- "Expected embed_dim to be greater than 0, "
- f"but received {embed_dim}"
- )
- assert num_heads > 0, (
- "Expected nhead to be greater than 0, " f"but received {num_heads}"
- )
+ assert (
+ embed_dim > 0
+ ), f"Expected embed_dim to be greater than 0, but received {embed_dim}"
+ assert (
+ num_heads > 0
+ ), f"Expected nhead to be greater than 0, but received {num_heads}"

self.normalize_before = normalize_before
self._dtype = self._helper.get_default_dtype()
@@ -830,12 +828,12 @@ def __init__(
self._config.pop("__class__", None) # py3

super().__init__()
- assert d_model > 0, (
- "Expected d_model to be greater than 0, " f"but received {d_model}"
- )
- assert nhead > 0, (
- "Expected nhead to be greater than 0, " f"but received {nhead}"
- )
+ assert (
+ d_model > 0
+ ), f"Expected d_model to be greater than 0, but received {d_model}"
+ assert (
+ nhead > 0
+ ), f"Expected nhead to be greater than 0, but received {nhead}"
assert dim_feedforward > 0, (
"Expected dim_feedforward to be greater than 0, "
f"but received {dim_feedforward}"
@@ -1306,13 +1304,12 @@ def __init__(
) -> None:
super().__init__()

- assert embed_dim > 0, (
- "Expected embed_dim to be greater than 0, "
- f"but received {embed_dim}"
- )
- assert num_heads > 0, (
- "Expected nhead to be greater than 0, " f"but received {num_heads}"
- )
+ assert (
+ embed_dim > 0
+ ), f"Expected embed_dim to be greater than 0, but received {embed_dim}"
+ assert (
+ num_heads > 0
+ ), f"Expected nhead to be greater than 0, but received {num_heads}"
assert (
dim_feedforward > 0
), f"Expected dim_feedforward to be greater than 0, but received {dim_feedforward}"
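The fused_transformer.py hunks all follow one recipe: put the parentheses around the condition rather than around a message built from implicitly concatenated string pieces, so the message becomes a single f-string. Both layouts are valid; the one thing a rewrite like this must never produce is a parenthesized `(condition, message)` tuple, which is always truthy. A minimal sketch:

```python
embed_dim = 4  # example value, not taken from the PR

# New layout used in these hunks: parenthesized condition, single f-string message.
assert (
    embed_dim > 0
), f"Expected embed_dim to be greater than 0, but received {embed_dim}"

# Pitfall to avoid when reflowing asserts: wrapping condition and message in one
# pair of parentheses builds a two-element tuple, so the assert can never fail.
# assert (embed_dim > 0, "Expected embed_dim to be greater than 0")
```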
4 changes: 2 additions & 2 deletions python/paddle/incubate/operators/graph_khop_sampler.py
@@ -130,7 +130,7 @@ def graph_khop_sampler(
if return_eids:
if sorted_eids is None:
raise ValueError(
- "`sorted_eid` should not be None " "if return_eids is True."
+ "`sorted_eid` should not be None if return_eids is True."
)
(
edge_src,
@@ -171,7 +171,7 @@ def graph_khop_sampler(
if return_eids:
if sorted_eids is None:
raise ValueError(
- "`sorted_eid` should not be None " "if return_eids is True."
+ "`sorted_eid` should not be None if return_eids is True."
)
check_variable_and_dtype(
sorted_eids, "Eids", ("int32", "int64"), "graph_khop_sampler"
3 changes: 1 addition & 2 deletions python/paddle/incubate/operators/graph_sample_neighbors.py
@@ -157,8 +157,7 @@ def graph_sample_neighbors(
if flag_perm_buffer:
if perm_buffer is None:
raise ValueError(
- "`perm_buffer` should not be None if `flag_perm_buffer`"
- "is True."
+ "`perm_buffer` should not be None if `flag_perm_buffer` is True."
)

if in_dynamic_or_pir_mode():
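Several hunks (graph_khop_sampler.py, graph_sample_neighbors.py, and the nn/functional changes further down) merge adjacent string literals into one. Adjacent literals are concatenated at compile time with no separator added, which makes it easy to drop a space when a message is wrapped across lines: the old graph_sample_neighbors message actually rendered as "...`flag_perm_buffer`is True." with no space. A short demonstration:

```python
# Adjacent string literals are joined at compile time with no separator added.
old_msg = (
    "`perm_buffer` should not be None if `flag_perm_buffer`"
    "is True."
)
new_msg = "`perm_buffer` should not be None if `flag_perm_buffer` is True."

print(old_msg)  # ...`flag_perm_buffer`is True.   <- missing space
print(new_msg)  # ...`flag_perm_buffer` is True.
assert old_msg != new_msg
```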
20 changes: 9 additions & 11 deletions python/paddle/incubate/optimizer/pipeline.py
@@ -481,10 +481,9 @@ def _get_op_device_attr(self, op):
else None
)
if device:
- assert device[0:3] == 'gpu', (
- "Now, only gpu devices are "
- "supported in pipeline parallelism."
- )
+ assert (
+ device[0:3] == 'gpu'
+ ), "Now, only gpu devices are supported in pipeline parallelism."
return device

def _add_op_device_attr_for_op(self, op, idx, block):
@@ -669,17 +668,16 @@ def _check_validation(self, block):
), f"op ({op.type}) has no {self._op_device_key} attribute."

device = op.attr(self._op_device_key)
- assert device, (
- "op_device attribute for op " f"{op.type} has not been set."
- )
+ assert (
+ device
+ ), f"op_device attribute for op {op.type} has not been set."
if device == f"{self._device}:all":
continue

dev_type = device.split(':')[0]
- assert dev_type == "gpu", (
- "Now only gpu devices are supported "
- "for pipeline parallelism."
- )
+ assert (
+ dev_type == "gpu"
+ ), "Now only gpu devices are supported for pipeline parallelism."

if device not in device_list:
device_list.append(device)
7 changes: 3 additions & 4 deletions python/paddle/io/dataloader/dataloader_iter.py
@@ -376,10 +376,9 @@ def __init__(self, loader):
self._persistent_workers = loader._persistent_workers
self._resume_worker_cnt = 0

- assert self._num_workers > 0, (
- "Multi-process DataLoader "
- f"invalid num_workers({self._num_workers})"
- )
+ assert (
+ self._num_workers > 0
+ ), f"Multi-process DataLoader invalid num_workers({self._num_workers})"

# subprocess wrokers' result queue
self._data_queue = None
25 changes: 15 additions & 10 deletions python/paddle/io/dataloader/dataset.py
@@ -87,14 +87,16 @@ def __init__(self) -> None:

def __getitem__(self, idx: int) -> _T:
raise NotImplementedError(
- "'{}' not implement in class "
- "{}".format('__getitem__', self.__class__.__name__)
+ "'{}' not implement in class {}".format(
+ '__getitem__', self.__class__.__name__
+ )
)

def __len__(self) -> int:
raise NotImplementedError(
- "'{}' not implement in class "
- "{}".format('__len__', self.__class__.__name__)
+ "'{}' not implement in class {}".format(
+ '__len__', self.__class__.__name__
+ )
)

if TYPE_CHECKING:
@@ -268,20 +270,23 @@ def __init__(self) -> None:

def __iter__(self) -> Iterator[_T]:
raise NotImplementedError(
- "'{}' not implement in class "
- "{}".format('__iter__', self.__class__.__name__)
+ "'{}' not implement in class {}".format(
+ '__iter__', self.__class__.__name__
+ )
)

def __getitem__(self, idx: int) -> Never:
raise RuntimeError(
- "'{}' should not be called for IterableDataset"
- "{}".format('__getitem__', self.__class__.__name__)
+ "'{}' should not be called for IterableDataset{}".format(
+ '__getitem__', self.__class__.__name__
+ )
)

def __len__(self) -> Never:
raise RuntimeError(
- "'{}' should not be called for IterableDataset"
- "{}".format('__len__', self.__class__.__name__)
+ "'{}' should not be called for IterableDataset{}".format(
+ '__len__', self.__class__.__name__
+ )
)


2 changes: 1 addition & 1 deletion python/paddle/jit/dy2static/error.py
@@ -211,7 +211,7 @@ def numpy_api_check(self, format_exception, error_line):
func_str = None
for frame in tb:
searched_name = re.search(
- fr'({RE_PYMODULE})*{frame.name}',
+ rf'({RE_PYMODULE})*{frame.name}',
error_line,
)
if searched_name:
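The two regex hunks only reorder the string prefix from `fr'...'` to `rf'...'`. Python accepts the prefix letters in either order, and the resulting raw f-strings are identical, so this is purely a spelling normalization (presumably to match the formatter or linter preference used elsewhere in the repo). A quick check, with both values fabricated for illustration:

```python
frame_name = "visit_FunctionDef"      # illustrative value only
RE_PYMODULE = r'[A-Za-z0-9_]+\.'      # illustrative pattern, not Paddle's definition

# fr'...' and rf'...' denote exactly the same raw f-string.
assert fr'({RE_PYMODULE})*{frame_name}' == rf'({RE_PYMODULE})*{frame_name}'
```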
16 changes: 11 additions & 5 deletions python/paddle/jit/dy2static/pir_partial_program.py
@@ -315,10 +315,13 @@ def split_forward_backward(self):
), "Please ensure only split once! don't call split_forward_backward manually."
self.has_splited = True
self.update_op_range()
- [
- fwd_prog,
- bwd_prog,
- ], prog_attr = paddle.base.libpaddle.pir.split_program(
+ (
+ [
+ fwd_prog,
+ bwd_prog,
+ ],
+ prog_attr,
+ ) = paddle.base.libpaddle.pir.split_program(
self.program,
self.x_values,
self.param_values,
@@ -622,7 +625,10 @@ def __call__(self, program):
)
names = paddle.utils.map_structure(
lambda value: ValuePreservePass.attach_preserved_name(
- value, program, value2name, name_generator # noqa: F821
+ value,
+ program,
+ value2name, # noqa: F821
+ name_generator,
),
self.values,
)
@@ -78,7 +78,7 @@ def visit_FunctionDef(self, node):
# match case like:
# @a.d.g.deco
re_tmp = re.match(
- fr'({RE_PYMODULE})*({RE_PYNAME})$',
+ rf'({RE_PYMODULE})*({RE_PYNAME})$',
deco_full_name,
)
deco_name = re_tmp.group(2)
@@ -523,11 +523,14 @@ def compile_function(
from ..breakpoint import BreakpointManager

BreakpointManager().on_event("compile_function")
- graph_fn, (
- statement_ir,
- symbolic_inputs,
- _,
- symbolic_outputs,
+ (
+ graph_fn,
+ (
+ statement_ir,
+ symbolic_inputs,
+ _,
+ symbolic_outputs,
+ ),
) = compile_graph_result
compiled_fn_name = f"___graph_fn_{statement_ir.name}"
# prepare function and inputs
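The pir_partial_program.py and compile_function hunks make the matching change on the assignment target: the whole (possibly nested) unpacking pattern is wrapped in parentheses. This binds exactly the same names; it just gives the formatter a bracket to break lines inside. A small sketch with dummy values (`compile_graph_result` here is fabricated for illustration):

```python
# Dummy stand-in for the real compile_graph_result tuple.
compile_graph_result = ("fn", ("sir", ["x"], None, ["out"]))

# Old style: bare nested target on one line.
graph_fn, (statement_ir, symbolic_inputs, _, symbolic_outputs) = compile_graph_result

# New style: the whole target is parenthesized so each name gets its own line.
(
    graph_fn2,
    (
        statement_ir2,
        symbolic_inputs2,
        _,
        symbolic_outputs2,
    ),
) = compile_graph_result

assert (graph_fn, statement_ir) == (graph_fn2, statement_ir2)
```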
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/common.py
@@ -657,7 +657,7 @@ def _is_list_or_tuple_(data):
if len(x.shape) == 5:
if len(out_shape) != 3:
raise ValueError(
- "size length should be 3 for " "input 5-D tensor."
+ "size length should be 3 for input 5-D tensor."
)
if contain_var:
attrs['out_d'] = size_list[0]
9 changes: 3 additions & 6 deletions python/paddle/nn/functional/conv.py
@@ -963,8 +963,7 @@ def conv1d_transpose(
else:
if output_padding != 0:
raise ValueError(
- 'output_padding option is mutually exclusive with '
- 'output_size'
+ 'output_padding option is mutually exclusive with output_size'
)
if isinstance(output_size, (list, tuple, int)):
output_size = [*convert_to_list(output_size, 1, 'output_size'), 1]
@@ -1236,8 +1235,7 @@ def conv2d_transpose(
else:
if output_padding != 0:
raise ValueError(
- 'output_padding option is mutually exclusive with '
- 'output_size'
+ 'output_padding option is mutually exclusive with output_size'
)
if isinstance(output_size, (list, tuple)):
if _contain_var(output_size):
@@ -1710,8 +1708,7 @@ def conv3d_transpose(
else:
if output_padding != 0:
raise ValueError(
- 'output_padding option is mutually exclusive with '
- 'output_size'
+ 'output_padding option is mutually exclusive with output_size'
)
if isinstance(output_size, (list, tuple, int)):
output_size = convert_to_list(output_size, 3, 'output_size')
12 changes: 4 additions & 8 deletions python/paddle/nn/functional/loss.py
@@ -3917,9 +3917,7 @@ def triplet_margin_with_distance_loss(

if not (input.shape == positive.shape == negative.shape):
raise ValueError(
- "input's shape must equal to "
- "positive's shape and "
- "negative's shape"
+ "input's shape must equal to positive's shape and negative's shape"
)

distance_function = (
@@ -4064,9 +4062,7 @@ def triplet_margin_loss(

if not (input.shape == positive.shape == negative.shape):
raise ValueError(
- "input's shape must equal to "
- "positive's shape and "
- "negative's shape"
+ "input's shape must equal to positive's shape and negative's shape"
)

distance_function = paddle.nn.PairwiseDistance(p, epsilon=epsilon)
@@ -4420,7 +4416,7 @@ def soft_margin_loss(
)

if not (input.shape == label.shape):
- raise ValueError("input's shape must equal to " "label's shape")
+ raise ValueError("input's shape must equal to label's shape")

label = paddle.cast(label, input.dtype)
out = paddle.log(1 + paddle.exp(-label * input))
@@ -4678,7 +4674,7 @@ def adaptive_log_softmax_with_loss(
)
else:
raise ValueError(
- '0D or 1D label tensor expected, ' 'multi-label not supported'
+ '0D or 1D label tensor expected, multi-label not supported'
)

is_batched = target_dim > 0