【PIR API adaptor No.132】Migrate paddle.Tensor.logcumsumexp #58695

Merged 2 commits on Nov 7, 2023
2 changes: 1 addition & 1 deletion python/paddle/tensor/math.py
@@ -4190,7 +4190,7 @@ def logcumsumexp(x, axis=None, dtype=None, name=None):
     if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
         x = cast(x, dtype)
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         if axis is None:
             axis = -1
         return _C_ops.logcumsumexp(x, axis, flatten, False, False)
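The only functional change in math.py is the dispatch guard: in_dynamic_mode() routed only the dynamic (eager) graph to the C++ kernel, while in_dynamic_or_pir_mode() also routes the new PIR static graph through _C_ops.logcumsumexp, leaving whatever follows this branch to the legacy static-graph IR. A minimal sketch of the pattern — the function name here is hypothetical and the legacy branch is elided:

# Sketch of the PIR adaptor pattern, not the full paddle source.
from paddle import _C_ops
from paddle.base.framework import in_dynamic_or_pir_mode

def logcumsumexp_dispatch(x, axis=None, flatten=False):
    if in_dynamic_or_pir_mode():
        # Dynamic graph and the new PIR static graph both call straight
        # into the C++ op; under PIR the call is traced into the new IR.
        if axis is None:
            axis = -1
        return _C_ops.logcumsumexp(x, axis, flatten, False, False)
    # else: the legacy static graph builds the op via LayerHelper.append_op(...)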
29 changes: 19 additions & 10 deletions test/legacy_test/test_logcumsumexp_op.py
@@ -22,6 +22,7 @@
 import paddle
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 def np_naive_logcumsumexp(x: np.ndarray, axis: Optional[int] = None):
@@ -145,7 +146,9 @@ def run_imperative(self):
         np.testing.assert_allclose(z, y.numpy(), rtol=1e-05)
 
     def run_static(self, use_gpu=False):
-        with base.program_guard(base.Program()):
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
+        with paddle.static.program_guard(main, startup):
             data_np = np.random.random((5, 4)).astype(np.float32)
             x = paddle.static.data('X', [5, 4])
             y = paddle.logcumsumexp(x)
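run_static now builds its graph in explicitly constructed programs rather than an anonymous base.program_guard(base.Program()); keeping a reference to main is what later lets the test pass the program directly to Executor.run, which works under both the legacy IR and PIR. The standalone shape of the pattern, assuming a CPU place:

import numpy as np
import paddle

paddle.enable_static()
main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data('X', [5, 4], dtype='float32')
    y = paddle.logcumsumexp(x)

exe = paddle.static.Executor(paddle.CPUPlace())
data = np.random.random((5, 4)).astype('float32')
# logcumsumexp has no parameters, so running `startup` is unnecessary here.
(out,) = exe.run(main, feed={'X': data}, fetch_list=[y])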
@@ -156,15 +159,15 @@
 
             place = base.CUDAPlace(0) if use_gpu else base.CPUPlace()
             exe = base.Executor(place)
-            exe.run(base.default_startup_program())
             out = exe.run(
+                main,
                 feed={'X': data_np},
                 fetch_list=[
-                    y.name,
-                    y2.name,
-                    y3.name,
-                    y4.name,
-                    y5.name,
+                    y,
+                    y2,
+                    y3,
+                    y4,
+                    y5,
                 ],
             )
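The fetch_list entries change from name strings to the returned variables themselves. Old-IR Variables carry a stable .name, but PIR values do not expose one the same way, and Executor.run accepts the objects directly in both modes, so fetching by object is the portable spelling:

# Old-IR only: fetch by name string.
out = exe.run(main, feed={'X': data_np}, fetch_list=[y.name])

# Portable under both the legacy IR and PIR: fetch the object itself.
out = exe.run(main, feed={'X': data_np}, fetch_list=[y])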

@@ -178,13 +181,15 @@
             z = np_logcumsumexp(data_np, axis=-2)
             np.testing.assert_allclose(z, out[4], rtol=1e-05)
 
+    @test_with_pir_api
     def test_cpu(self):
         paddle.disable_static(paddle.base.CPUPlace())
         self.run_imperative()
         paddle.enable_static()
 
         self.run_static()
 
+    @test_with_pir_api
     def test_gpu(self):
         if not base.core.is_compiled_with_cuda():
             return
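test_with_pir_api (imported above from paddle.pir_utils) makes each decorated test run twice: once under the legacy static-graph IR and once with PIR enabled. A simplified sketch of the idea, assuming paddle.pir_utils.IrGuard is the context manager that switches the IR — the real decorator may differ in detail:

import functools
import paddle

def run_under_both_irs(test_func):  # hypothetical stand-in for test_with_pir_api
    @functools.wraps(test_func)
    def wrapper(*args, **kwargs):
        test_func(*args, **kwargs)          # first pass: legacy IR
        with paddle.pir_utils.IrGuard():    # assumed: guard enables PIR
            test_func(*args, **kwargs)      # second pass: PIR
    return wrapper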
@@ -194,23 +199,26 @@ def test_gpu(self):
 
         self.run_static(use_gpu=True)
 
+    # @test_with_pir_api
     def test_name(self):
         with base.program_guard(base.Program()):
             x = paddle.static.data('x', [3, 4])
             y = paddle.logcumsumexp(x, name='out')
             self.assertTrue('out' in y.name)
 
+    @test_with_pir_api
     def test_type_error(self):
-        with base.program_guard(base.Program()):
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
+        with paddle.static.program_guard(main, startup):
             with self.assertRaises(TypeError):
                 data_np = np.random.random((100, 100), dtype=np.int32)
                 x = paddle.static.data('X', [100, 100], dtype='int32')
                 y = paddle.logcumsumexp(x)
 
                 place = base.CUDAPlace(0)
                 exe = base.Executor(place)
-                exe.run(base.default_startup_program())
-                out = exe.run(feed={'X': data_np}, fetch_list=[y.name])
+                out = exe.run(main, feed={'X': data_np}, fetch_list=[y])
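Note that test_name keeps its decorator commented out: the test asserts on y.name, and PIR values do not carry the old IR's user-assigned names, so it is presumably left on the legacy path until a PIR equivalent exists. test_type_error, by contrast, migrates fully, building an explicit main program and fetching by object, in line with run_static above.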


def logcumsumexp_wrapper(
@@ -296,6 +304,7 @@ def check_main(self, x_np, dtype, axis=None):
         paddle.enable_static()
         return y_np, x_g_np
 
+    @test_with_pir_api
     def test_main(self):
         if not paddle.is_compiled_with_cuda():
             return