[PIR] Adapt paddle.assign to pir #57780

Merged: 4 commits, Sep 27, 2023
82 changes: 64 additions & 18 deletions python/paddle/tensor/creation.py
@@ -2146,15 +2146,24 @@ def assign(x, output=None):
         [2.5 2.5]]
     """
     # speed up
-    if x is output and isinstance(x, Variable):
+    if x is output and isinstance(x, (Variable, paddle.pir.OpResult)):
         return x

     input = x
     helper = LayerHelper('assign', **locals())
     check_type(
         input,
         'input',
-        (Variable, np.ndarray, list, tuple, float, int, bool),
+        (
+            Variable,
+            paddle.pir.OpResult,
+            np.ndarray,
+            list,
+            tuple,
+            float,
+            int,
+            bool,
+        ),
         'assign',
     )
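A minimal sketch of what the widened checks above enable, assuming a build where PIR is the active IR in static mode (this usage is illustrative, not code from the PR):

import paddle

paddle.enable_static()
x = paddle.full([2], 1.5)        # an OpResult under pir, a Variable otherwise
y = paddle.assign(x)             # passes the widened check_type either way
z = paddle.assign(x, output=x)   # fast path: x is output, so x is returned as-is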

@@ -2167,12 +2176,17 @@ def assign(x, output=None):
     # but in_dynamic_mode()==False under @to_static, which means
     # isinstance(Tensor, Variable) == False. It will cause return None
     # after this api.
-    if isinstance(input, (Variable, core.eager.Tensor)):
+    if isinstance(input, (Variable, core.eager.Tensor, paddle.pir.OpResult)):
         if in_dynamic_mode():
             if output is None:
                 output = _C_ops.assign(input)
             else:
                 _C_ops.assign_out_(input, output)
+        elif in_pir_mode():
+            if output is None:
+                output = _C_ops.assign(input)
+            else:
+                output = _C_ops.assign_out_(input, output)
         else:
             check_dtype(
                 input.dtype,
@@ -2200,19 +2214,25 @@ def assign(x, output=None):
             )
     elif isinstance(input, np.ndarray):
         # We now support the form of [var, VAR...] if the Var.shape=[1,]
-        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
+        if len(input.shape) > 0 and any(
+            isinstance(x, (Variable, paddle.pir.OpResult)) for x in input
+        ):
             # We only deal with the case where the list is nested one level, convert all scalars into variables, and then use stack to process. It is necessary to ensure the consistency of types.
             if not all(
                 x.shape == (1,)
                 for x in input
-                if isinstance(x, (Variable, core.eager.Tensor))
+                if isinstance(
+                    x, (Variable, core.eager.Tensor, paddle.pir.OpResult)
+                )
             ):
                 raise TypeError(
                     "Unsupport paddle.assign([Variable, Variable...]) with non-scalar variable."
                 )

             def convert_scalar(x):
-                if not isinstance(x, (Variable, core.eager.Tensor)):
+                if not isinstance(
+                    x, (Variable, core.eager.Tensor, paddle.pir.OpResult)
+                ):
                     return assign(x)
                 return x
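A hedged sketch of the list form this branch accepts, assumed from the comments above (each tensor entry must have shape (1,); scalars go through convert_scalar and the pieces are then stacked; exact accepted forms may differ):

import paddle

paddle.enable_static()
v = paddle.full([1], 4.0)            # shape-(1,) graph value
out = paddle.assign([v, 2.0, 3.0])   # scalars become tensors, then stacked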

@@ -2237,16 +2257,33 @@ def convert_scalar(x):
                     "it to float32"
                 )
                 dtype = core.VarDesc.VarType.FP32
-        if dtype == core.VarDesc.VarType.BOOL:
+
+        if dtype == core.DataType.FLOAT64:
+            # Setting FP64 numpy data is not supported in Paddle, so we
+            # use FP32 here
+            warnings.warn(
+                "paddle.assign doesn't support float64 input now due "
+                "to current platform protobuf data limitation, we convert "
+                "it to float32"
+            )
+            dtype = core.DataType.FLOAT32
+
+        if dtype == core.VarDesc.VarType.BOOL or dtype == core.DataType.BOOL:

Review comment (Contributor), suggested change:
-        if dtype == core.VarDesc.VarType.BOOL or dtype == core.DataType.BOOL:
+        if dtype in [core.VarDesc.VarType.BOOL, core.DataType.BOOL]:

Reply (Contributor Author): Thanks, will fix in the next PR.
             value_name = "bool_values"
             values = [int(v) for v in input.flat]
-        elif dtype == core.VarDesc.VarType.FP32:
+        elif (
+            dtype == core.VarDesc.VarType.FP32 or dtype == core.DataType.FLOAT32
+        ):
             value_name = "fp32_values"
             values = [float(v) for v in input.flat]
-        elif dtype == core.VarDesc.VarType.INT32:
+        elif (
+            dtype == core.VarDesc.VarType.INT32 or dtype == core.DataType.INT32
+        ):
             value_name = "int32_values"
             values = [int(v) for v in input.flat]
-        elif dtype == core.VarDesc.VarType.INT64:
+        elif (
+            dtype == core.VarDesc.VarType.INT64 or dtype == core.DataType.INT64
+        ):
             value_name = "int64_values"
             values = [int(v) for v in input.flat]
         else:
@@ -2260,16 +2297,25 @@ def convert_scalar(x):
                 "The size of input is too big. Please consider "
                 "saving it to file and 'load_op' to load it"
             )
-        if in_dynamic_mode():
+        if in_dynamic_or_pir_mode():
             if output is None:
                 output = zeros(list(input.shape), dtype)
-            _C_ops.assign_value_(
-                output,
-                list(input.shape),
-                dtype,
-                values,
-                _current_expected_place(),
-            )
+            if in_dynamic_mode():
+                _C_ops.assign_value_(
+                    output,
+                    list(input.shape),
+                    dtype,
+                    values,
+                    _current_expected_place(),
+                )
+            else:
+                output = _C_ops.assign_value_(
+                    output,
+                    list(input.shape),
+                    dtype,
+                    values,
+                    _current_expected_place(),
+                )
         else:
             if output is None:
                 output = helper.create_variable_for_type_inference(

Review comment (Contributor), on the new in_dynamic_mode() split:
Is there no return value here under the dynamic graph? Or does the call in fact return `output` itself? If it is the latter, could the two branches be unified into one?

Reply (Contributor Author):
There is no return in the dynamic graph because the op is in-place; under the new IR, an in-place op must return a new value to represent its result, so the code is split into two branches.
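To make the two-branch reasoning above concrete, a small sketch mirroring the test wrapper further down (illustrative, not code from this PR): in dynamic mode the in-place call mutates its first argument, so the return value can be dropped; under PIR the op result is a fresh value that the caller must rebind, or the assignment is lost from the graph.

import paddle
from paddle import base, framework

paddle.disable_static()
tensor = paddle.Tensor()
# Dynamic mode: `tensor` itself is filled in place, so ignoring the
# return value is safe here.
paddle._C_ops.assign_value_(
    tensor,
    [2],
    base.core.VarDesc.VarType.FP32,
    [1.0, 2.0],
    framework._current_expected_place(),
)
# Under PIR the same call must be written as
#     output = _C_ops.assign_value_(...)
# because the mutated buffer is represented by the op's new result value.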
12 changes: 6 additions & 6 deletions test/legacy_test/test_assign_op.py
@@ -42,12 +42,12 @@ def init_input_configs(self):

     def test_forward(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_new_ir=True)
         paddle.disable_static()

     def test_backward(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True, check_new_ir=True)
         paddle.disable_static()


@@ -71,12 +71,12 @@ def setUp(self):

     def test_forward(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_new_ir=True)
         paddle.disable_static()

     def test_backward(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True, check_new_ir=True)
         paddle.disable_static()


@@ -97,12 +97,12 @@ def setUp(self):

     def test_forward(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_new_ir=True)
         paddle.disable_static()

     def test_backward(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True, check_new_ir=True)
         paddle.disable_static()


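The same two flags repeat in every case in this file; a condensed sketch of the pattern (the setUp fields follow the usual OpTest conventions for the assign op and are assumed, not copied from this diff):

import numpy as np
import paddle
from op_test import OpTest  # the OpTest harness in this directory (import path assumed)


class TestAssignPattern(OpTest):
    def setUp(self):
        self.op_type = "assign"
        self.python_api = paddle.assign          # assumed OpTest wiring
        self.public_python_api = paddle.assign
        self.prim_op_type = "prim"
        x = np.random.random((3, 4)).astype("float32")
        self.inputs = {"X": x}
        self.outputs = {"Out": x}

    def test_forward(self):
        paddle.enable_static()
        self.check_output(check_new_ir=True)     # also verify under the new IR
        paddle.disable_static()

    def test_backward(self):
        paddle.enable_static()
        self.check_grad(["X"], "Out", check_prim=True, check_new_ir=True)
        paddle.disable_static()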
9 changes: 7 additions & 2 deletions test/legacy_test/test_assign_value_op.py
@@ -25,7 +25,12 @@

 def assign_value_wrapper(
     shape=[], dtype=base.core.VarDesc.VarType.FP32, values=0.0
 ):
-    tensor = paddle.Tensor()
+    if paddle.framework.in_dynamic_mode():
+        tensor = paddle.Tensor()
+    else:
+        np_type = paddle.base.data_feeder._PADDLE_DTYPE_2_NUMPY_DTYPE[dtype]
+        tensor = paddle.zeros(list(shape), np_type)
+        dtype = paddle.pir.core.convert_np_dtype_to_dtype_(np_type)
     return paddle._C_ops.assign_value_(
         tensor, shape, dtype, values, framework._current_expected_place()
     )
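The core of the new static branch is the dtype round-trip; a short sketch of what those two helpers do, with names taken from the diff and exact return values assumed:

import paddle

legacy = paddle.base.core.VarDesc.VarType.FP32
np_type = paddle.base.data_feeder._PADDLE_DTYPE_2_NUMPY_DTYPE[legacy]
# np_type is a numpy dtype name such as 'float32'
pir_dtype = paddle.pir.core.convert_np_dtype_to_dtype_(np_type)
# pir_dtype is the DataType value that new-IR kernels expect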
@@ -49,7 +54,7 @@ def init_data(self):
         self.attrs["fp32_values"] = [float(v) for v in self.value.flat]

     def test_forward(self):
-        self.check_output(check_cinn=True)
+        self.check_output(check_cinn=True, check_new_ir=True)


class TestAssignValueOp2(TestAssignValueOp):