From 1ed7fc14155724ec33558b845181bca6db082935 Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Fri, 3 Nov 2023 09:31:40 +0000
Subject: [PATCH 01/11] first commit

---
 python/paddle/nn/functional/loss.py  |  6 +++---
 test/legacy_test/test_warpctc_op.py  | 17 +++++++++++++----
 test/legacy_test/test_warprnnt_op.py | 13 +++++++++++--
 3 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 5faba8b2f31310..185f1d04738027 100644
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -22,7 +22,7 @@
 from paddle.utils import deprecated
 
 from ...base.data_feeder import check_variable_and_dtype
-from ...base.framework import _current_expected_place, in_pir_mode
+from ...base.framework import _current_expected_place, in_pir_mode, in_dynamic_or_pir_mode
 from ...base.layer_helper import LayerHelper
 from ...common_ops_import import Variable
 from ...tensor.manipulation import reshape
@@ -1889,7 +1889,7 @@ def warpctc(
         input_length=None,
         label_length=None,
     ):
-        if in_dynamic_mode():
+        if in_dynamic_or_pir_mode():
             if input_length is None or label_length is None:
                 raise ValueError(
                     "input_length and label_length must not be None in dygraph mode!"
@@ -2013,7 +2013,7 @@ def rnnt_loss(
     def warprnnt(
         input, label, input_length, label_length, blank=0, fastemit_lambda=0.001
     ):
-        if in_dynamic_mode():
+        if in_dynamic_or_pir_mode():
             loss_out = _C_ops.warprnnt(
                 input,
                 label,
diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index cb60214bbc6d0a..f47bc1ed8fbee6 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -22,6 +22,7 @@
 import paddle
 import paddle.nn.functional as F
 from paddle.base import Program, core, program_guard
+from paddle.pir_utils import test_with_pir_api
 
 CUDA_BLOCK_SIZE = 32
 
@@ -274,7 +275,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir = True)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
@@ -284,6 +285,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.009,
                 check_dygraph=False,
+                check_pir = True,
             )
         else:
             self.check_grad(
@@ -291,6 +293,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.007,
                 check_dygraph=False,
+                check_pir = True,
             )
 
 
@@ -394,7 +397,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir = True)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
@@ -404,6 +407,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.009,
                 check_dygraph=False,
+                check_pir = True,
             )
         else:
             self.check_grad(
@@ -411,6 +415,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.007,
                 check_dygraph=False,
+                check_pir = True,
             )
 
 
@@ -516,14 +521,16 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir = True)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
-        self.check_grad(["Logits"], "Loss")
+        self.check_grad(["Logits"], "Loss", check_pir = True)
 
 
 class TestWarpCTCOpError(unittest.TestCase):
+
+    @test_with_pir_api
     def test_errors(self):
         paddle.enable_static()
         with program_guard(Program(), Program()):
@@ -610,6 +617,7 @@ def test_dygraph_with_lod():
 
 
 class TestCTCLossAPICase(unittest.TestCase):
+    @test_with_pir_api
     def test_class_api(self):
         self.batch_size = 3
         self.num_classes = 15
@@ -660,6 +668,7 @@ def test_class_api(self):
         np.testing.assert_allclose(loss_pd, loss_np, rtol=1e-05, atol=1)
 
     def test_eager_ctcloss(self):
+        @test_with_pir_api
         def test_functinal_api():
             self.batch_size = 4
             self.num_classes = CUDA_BLOCK_SIZE + 2
diff --git a/test/legacy_test/test_warprnnt_op.py b/test/legacy_test/test_warprnnt_op.py
index ced735b4310aba..4b97eb64204183 100644
--- a/test/legacy_test/test_warprnnt_op.py
+++ b/test/legacy_test/test_warprnnt_op.py
@@ -20,6 +20,7 @@
 import paddle
 from paddle import _C_ops
 from paddle.base import Program, core, program_guard
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 
@@ -227,7 +228,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir = True)
 
     def test_check_grad(self):
         self.outputs["warprnntgrad"] = self.gradient
@@ -236,19 +237,21 @@ def test_check_grad(self):
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
+                check_pir = True,
             )
         else:
             self.check_grad(
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
+                check_pir = True,
             )
 
 
 class TestWarpRNNTFP64Op(TestWarpRNNTOp):
     def test_check_output(self):
         self.acts.astype(np.float64)
-        self.check_output()
+        self.check_output(check_pir = True)
 
     def test_check_grad(self):
         self.acts.astype(np.float64)
@@ -258,16 +261,20 @@ def test_check_grad(self):
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
+                check_pir = True,
             )
         else:
             self.check_grad(
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
+                check_pir = True,
             )
 
 
 class TestWarpRNNTOpError(unittest.TestCase):
+
+    @test_with_pir_api
     def test_errors(self):
         print("test_errors")
         with program_guard(Program(), Program()):
@@ -450,6 +457,7 @@ def config(self):
             dtype=np.float64,
         )
 
+    @test_with_pir_api
     def test_functinal_api(self):
         self.config()
 
@@ -492,6 +500,7 @@ def test_functinal_api(self):
         )
         np.testing.assert_allclose(loss_pd_sum, loss_np_sum, rtol=1e-05, atol=1)
 
+    @test_with_pir_api
     def test_class_api(self):
         self.config()
 

From 200010b615689ed94c2a6f3d5b6172350de55b57 Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Sat, 4 Nov 2023 13:06:39 +0000
Subject: [PATCH 02/11] fix codestyle && rm TestWarpRNNTOpError.test_errors

---
 python/paddle/nn/functional/loss.py  |  6 +++++-
 test/legacy_test/test_warpctc_op.py  | 17 ++++++++---------
 test/legacy_test/test_warprnnt_op.py | 14 ++++++--------
 3 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 185f1d04738027..286914dc9f513c 100644
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -22,7 +22,11 @@
 from paddle.utils import deprecated
 
 from ...base.data_feeder import check_variable_and_dtype
-from ...base.framework import _current_expected_place, in_pir_mode, in_dynamic_or_pir_mode
+from ...base.framework import (
+    _current_expected_place,
+    in_dynamic_or_pir_mode,
+    in_pir_mode,
+)
 from ...base.layer_helper import LayerHelper
 from ...common_ops_import import Variable
 from ...tensor.manipulation import reshape
diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index f47bc1ed8fbee6..0f5553a29e1a77 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -275,7 +275,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output(check_pir = True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
@@ -285,7 +285,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.009,
                 check_dygraph=False,
-                check_pir = True,
+                check_pir=True,
             )
         else:
             self.check_grad(
@@ -293,7 +293,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.007,
                 check_dygraph=False,
-                check_pir = True,
+                check_pir=True,
             )
 
 
@@ -397,7 +397,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output(check_pir = True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
@@ -407,7 +407,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.009,
                 check_dygraph=False,
-                check_pir = True,
+                check_pir=True,
             )
         else:
             self.check_grad(
@@ -415,7 +415,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.007,
                 check_dygraph=False,
-                check_pir = True,
+                check_pir=True,
             )
 
 
@@ -521,15 +521,14 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output(check_pir = True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
-        self.check_grad(["Logits"], "Loss", check_pir = True)
+        self.check_grad(["Logits"], "Loss", check_pir=True)
 
 
 class TestWarpCTCOpError(unittest.TestCase):
-
     @test_with_pir_api
     def test_errors(self):
         paddle.enable_static()
diff --git a/test/legacy_test/test_warprnnt_op.py b/test/legacy_test/test_warprnnt_op.py
index 4b97eb64204183..17ee6928c34eb6 100644
--- a/test/legacy_test/test_warprnnt_op.py
+++ b/test/legacy_test/test_warprnnt_op.py
@@ -228,7 +228,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output(check_pir = True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.outputs["warprnntgrad"] = self.gradient
@@ -237,21 +237,21 @@ def test_check_grad(self):
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_pir = True,
+                check_pir=True,
             )
         else:
             self.check_grad(
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_pir = True,
+                check_pir=True,
             )
 
 
 class TestWarpRNNTFP64Op(TestWarpRNNTOp):
     def test_check_output(self):
         self.acts.astype(np.float64)
-        self.check_output(check_pir = True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.acts.astype(np.float64)
@@ -261,20 +261,18 @@ def test_check_grad(self):
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_pir = True,
+                check_pir=True,
             )
         else:
             self.check_grad(
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_pir = True,
+                check_pir=True,
             )
 
 
 class TestWarpRNNTOpError(unittest.TestCase):
-
-    @test_with_pir_api
     def test_errors(self):
         print("test_errors")
         with program_guard(Program(), Program()):

From cc75327ceb69c0a49dc716a93e90ab0c7de71dcc Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Sun, 5 Nov 2023 17:30:50 +0000
Subject: [PATCH 03/11] rm test_with_pir_api of TestWarpCTCOpError.test_errors

---
 test/legacy_test/test_warpctc_op.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index 0f5553a29e1a77..07ff3cb2979594 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -21,7 +21,8 @@
 import paddle
 import paddle.nn.functional as F
-from paddle.base import Program, core, program_guard
+from paddle import static
+from paddle.base import core
 from paddle.pir_utils import test_with_pir_api
 
 CUDA_BLOCK_SIZE = 32
 
@@ -529,10 +530,13 @@ def test_check_grad(self):
 
 
 class TestWarpCTCOpError(unittest.TestCase):
-    @test_with_pir_api
     def test_errors(self):
         paddle.enable_static()
-        with program_guard(Program(), Program()):
+        main_program = static.Program()
+        startup_program = static.Program()
+        with static.program_guard(
+            main_program=main_program, startup_program=startup_program
+        ):
             logits = paddle.static.data(
                 name='logits', shape=[5, 16, 6], dtype='float32'
             )

From 0bac4db62b4c7407cb16f14c0287386d18cf6b37 Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Tue, 14 Nov 2023 06:55:04 +0000
Subject: [PATCH 04/11] rm check_pir=True of
 TestWarpCTCOp.test_check_output|test_check_grad

---
 test/legacy_test/test_warpctc_op.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index 07ff3cb2979594..20365b2c1512d5 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -276,7 +276,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output(check_pir=True)
+        self.check_output()
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
@@ -286,7 +286,6 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.009,
                 check_dygraph=False,
-                check_pir=True,
             )
         else:
             self.check_grad(
@@ -294,7 +293,6 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.007,
                 check_dygraph=False,
-                check_pir=True,
             )

From dd215d381af1c6ebb6fef476b8a88bb73d5a34ba Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Tue, 14 Nov 2023 11:45:48 +0000
Subject: [PATCH 05/11] rm @test_with_pir_api

---
 test/legacy_test/test_warpctc_op.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index 20365b2c1512d5..0d7efd2149365c 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -669,7 +669,6 @@ def test_class_api(self):
         np.testing.assert_allclose(loss_pd, loss_np, rtol=1e-05, atol=1)
 
     def test_eager_ctcloss(self):
-        @test_with_pir_api
         def test_functinal_api():
             self.batch_size = 4
             self.num_classes = CUDA_BLOCK_SIZE + 2

From fe023e5ec55d6b2c0807f28f4034be9a70f7a42d Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Wed, 15 Nov 2023 02:41:42 +0000
Subject: [PATCH 06/11] rm test_with_pir_api of
 test_eager_ctcloss.test_functinal_api

---
 test/legacy_test/test_warpctc_op.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index 07ff3cb2979594..d2b59d91c29c91 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -671,8 +671,7 @@ def test_class_api(self):
         np.testing.assert_allclose(loss_pd, loss_np, rtol=1e-05, atol=1)
 
     def test_eager_ctcloss(self):
-        @test_with_pir_api
-        def test_functinal_api():
+        def test_functional_api():
             self.batch_size = 4
             self.num_classes = CUDA_BLOCK_SIZE + 2
             self.logits_length = np.array([4, 1, 3, 3], dtype=np.int64)
@@ -742,7 +741,7 @@ def test_functinal_api():
                 loss_pd_sum, loss_np_sum, rtol=1e-05, atol=1
             )
 
-        test_functinal_api()
+        test_functional_api()
 
 
 if __name__ == "__main__":

From 9d9180f3ed72cb73732966b594b747dc658a2c46 Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Wed, 15 Nov 2023 07:01:08 +0000
Subject: [PATCH 07/11] TestWarpRNNTFP64Op | TestWarpRNNTOp check_pir=False

---
 test/legacy_test/test_warpctc_op.py  | 7 +++----
 test/legacy_test/test_warprnnt_op.py | 4 ----
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index 88c8d2734bd40e..2378575c57b2e6 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -21,7 +21,6 @@
 import paddle
 import paddle.nn.functional as F
-from paddle import static
 from paddle.base import core
 from paddle.pir_utils import test_with_pir_api
 
 CUDA_BLOCK_SIZE = 32
@@ -529,9 +529,9 @@ def test_check_grad(self):
 class TestWarpCTCOpError(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
-        main_program = static.Program()
-        startup_program = static.Program()
-        with static.program_guard(
+        main_program = paddle.static.Program()
+        startup_program = paddle.static.Program()
+        with paddle.static.program_guard(
             main_program=main_program, startup_program=startup_program
         ):
             logits = paddle.static.data(
diff --git a/test/legacy_test/test_warprnnt_op.py b/test/legacy_test/test_warprnnt_op.py
index 17ee6928c34eb6..f75c2328309a9c 100644
--- a/test/legacy_test/test_warprnnt_op.py
+++ b/test/legacy_test/test_warprnnt_op.py
@@ -237,14 +237,12 @@ def test_check_grad(self):
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_pir=True,
             )
         else:
             self.check_grad(
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_pir=True,
             )
 
 
@@ -261,14 +259,12 @@ def test_check_grad(self):
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_pir=True,
             )
         else:
             self.check_grad(
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_pir=True,
             )

From 1c87f29e56b95e8403f6df39c681f449df4a777a Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Thu, 23 Nov 2023 12:53:41 +0000
Subject: [PATCH 08/11] rm test_with_pir_api

---
 test/legacy_test/test_warpctc_op.py  | 2 --
 test/legacy_test/test_warprnnt_op.py | 3 ---
 2 files changed, 5 deletions(-)

diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index 2378575c57b2e6..1d3f610b33ea76 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -22,7 +22,6 @@
 import paddle
 import paddle.nn.functional as F
 from paddle.base import core
-from paddle.pir_utils import test_with_pir_api
 
 CUDA_BLOCK_SIZE = 32
 
@@ -617,7 +616,6 @@ def test_dygraph_with_lod():
 
 
 class TestCTCLossAPICase(unittest.TestCase):
-    @test_with_pir_api
     def test_class_api(self):
         self.batch_size = 3
         self.num_classes = 15
diff --git a/test/legacy_test/test_warprnnt_op.py b/test/legacy_test/test_warprnnt_op.py
index 51f835f92826c2..a108cbca51b994 100644
--- a/test/legacy_test/test_warprnnt_op.py
+++ b/test/legacy_test/test_warprnnt_op.py
@@ -20,7 +20,6 @@
 import paddle
 from paddle import _C_ops
 from paddle.base import Program, core, program_guard
-from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 
@@ -449,7 +448,6 @@ def config(self):
             dtype=np.float64,
         )
 
-    @test_with_pir_api
     def test_functinal_api(self):
         self.config()
 
@@ -492,7 +490,6 @@ def test_functinal_api(self):
         )
         np.testing.assert_allclose(loss_pd_sum, loss_np_sum, rtol=1e-05, atol=1)
 
-    @test_with_pir_api
     def test_class_api(self):
         self.config()

From 53fd9d5c915ca70e017319ecb8717f08c947d250 Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Thu, 23 Nov 2023 12:56:25 +0000
Subject: [PATCH 09/11] rm empty int32

---
 python/paddle/tensor/linalg.py | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 08345581d86d74..091bde960bacb2 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -3500,13 +3500,7 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
             x, y, rcond, driver
         )
         if driver == "gels":
-            if in_dynamic_mode():
-                rank = paddle.empty(shape=[0], dtype=paddle.int32)
-
-            else:
-                rank = paddle.empty(
-                    shape=[0], dtype=paddle.base.core.DataType.INT32
-                )
+            rank = paddle.empty(shape=[0], dtype="int32")
             singular_values = paddle.empty(shape=[0], dtype=x.dtype)
         elif driver == "gelsy":
             singular_values = paddle.empty(shape=[0], dtype=x.dtype)

From a055ba94cbf9a6a68276bc4502d906b6493e1e15 Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Fri, 24 Nov 2023 02:42:39 +0000
Subject: [PATCH 10/11] add check_pir=True

---
 test/legacy_test/test_warpctc_op.py  |  4 +++-
 test/legacy_test/test_warprnnt_op.py | 12 +++---------
 2 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index 1d3f610b33ea76..5cd31026c72062 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -274,7 +274,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
@@ -284,6 +284,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.009,
                 check_dygraph=False,
+                check_pir=True,
             )
         else:
             self.check_grad(
@@ -291,6 +292,7 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.007,
                 check_dygraph=False,
+                check_pir=True,
             )
diff --git a/test/legacy_test/test_warprnnt_op.py b/test/legacy_test/test_warprnnt_op.py
index a108cbca51b994..df50f510d6c8f3 100644
--- a/test/legacy_test/test_warprnnt_op.py
+++ b/test/legacy_test/test_warprnnt_op.py
@@ -233,9 +233,7 @@ def test_check_grad(self):
         self.outputs["warprnntgrad"] = self.gradient
         if core.is_compiled_with_rocm():
             self.check_grad(
-                ["input"],
-                "loss",
-                numeric_grad_delta=0.009,
+                ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
             )
         else:
             self.check_grad(
@@ -253,15 +251,11 @@ def test_check_grad(self):
         self.outputs["warprnntgrad"] = self.gradient
         if core.is_compiled_with_rocm():
             self.check_grad(
-                ["input"],
-                "loss",
-                numeric_grad_delta=0.009,
+                ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
             )
         else:
             self.check_grad(
-                ["input"],
-                "loss",
-                numeric_grad_delta=0.009,
+                ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
             )

From 8c96e50dc8b071df1fcb3dfc5f6dd1beb4de18c9 Mon Sep 17 00:00:00 2001
From: drryanhuang
Date: Fri, 24 Nov 2023 15:23:52 +0000
Subject: [PATCH 11/11] rm check_pir=True because of tuple

---
 test/legacy_test/test_warpctc_op.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py
index 5cd31026c72062..1d3f610b33ea76 100644
--- a/test/legacy_test/test_warpctc_op.py
+++ b/test/legacy_test/test_warpctc_op.py
@@ -274,7 +274,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output(check_pir=True)
+        self.check_output()
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
@@ -284,7 +284,6 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.009,
                 check_dygraph=False,
-                check_pir=True,
             )
         else:
             self.check_grad(
@@ -292,7 +291,6 @@ def test_check_grad(self):
                 "Loss",
                 max_relative_error=0.007,
                 check_dygraph=False,
-                check_pir=True,
             )
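For reference only — not part of the patch series above — a minimal dygraph sketch of the `paddle.nn.functional.ctc_loss` path whose internal `warpctc` dispatch patch 01 widens from `in_dynamic_mode()` to `in_dynamic_or_pir_mode()`. All shapes and values below are illustrative assumptions, not taken from the patches.

```python
# Minimal sketch (assumed values): exercising the CTC loss API whose warpctc
# helper now also takes the dynamic branch when PIR is enabled.
import numpy as np
import paddle
import paddle.nn.functional as F

paddle.disable_static()  # dynamic mode; under PIR the same dispatch branch runs

T, B, C = 5, 2, 4  # time steps, batch size, classes including blank (index 0)
logits = paddle.to_tensor(
    np.random.rand(T, B, C).astype("float32"), stop_gradient=False
)
labels = paddle.to_tensor(np.array([[1, 2, 2], [1, 3, 0]], dtype="int32"))
input_lengths = paddle.to_tensor(np.array([5, 5], dtype="int64"))
label_lengths = paddle.to_tensor(np.array([3, 2], dtype="int64"))

loss = F.ctc_loss(
    logits, labels, input_lengths, label_lengths, blank=0, reduction="mean"
)
loss.backward()  # runs the warpctc forward and its WarpCTCGrad backward
print(float(loss))
```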