From 16ca6c7c8cf6a69b306d5e2a72fd8f28515846ca Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Mon, 14 Oct 2024 22:34:50 +0800 Subject: [PATCH 01/39] add is_inference --- paconvert/api_mapping.json | 14 ++++++++++- paconvert/api_matcher.py | 25 +++++++++++++++++++ tests/test_Tensor_is_inference.py | 41 +++++++++++++++++++++++++++++++ tests/test_is_inference.py | 41 +++++++++++++++++++++++++++++++ 4 files changed, 120 insertions(+), 1 deletion(-) create mode 100644 tests/test_Tensor_is_inference.py create mode 100644 tests/test_is_inference.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index a35ff8b29..c7dca808b 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -2088,7 +2088,19 @@ "paddle_api": "paddle.Tensor.is_floating_point", "min_input_args": 0 }, - "torch.Tensor.is_inference": {}, + "torch.Tensor.is_inference": { + "Matcher": "Is_InferenceMatcher", + "paddle_api": "paddle.Tensor.stop_gradient", + "min_input_args": 0 + }, + "torch.is_inference": { + "Matcher": "Is_InferenceMatcher", + "paddle_api": "paddle.Tensor.stop_gradient", + "min_input_args": 1, + "args_list":[ + "input" + ] + }, "torch.Tensor.is_pinned": { "Matcher": "Is_PinnedMatcher", "min_input_args": 0 diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 947b3bb8f..11d4e9ccb 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -753,6 +753,31 @@ def get_paddle_nodes(self, args, kwargs): return ast.parse(code).body +class Is_InferenceMatcher(BaseMatcher): + def generate_aux_code(self): + API_TEMPLATE = textwrap.dedent( + """ + def is_inference(input): + x = not input.stop_gradient + return x + """ + ) + + return API_TEMPLATE + + def generate_code(self, kwargs): + self.write_aux_code() + if len(kwargs) == 0: + kwargs = {"input": self.paddleClass} + API_TEMPLATE = textwrap.dedent( + """ + paddle_aux.is_inference({}) + """ + ) + code = API_TEMPLATE.format(kwargs["input"]) + return code + + class IInfoMatcher(BaseMatcher): def generate_code(self, kwargs): return "{}(dtype={})".format(self.get_paddle_api(), kwargs["type"]) diff --git a/tests/test_Tensor_is_inference.py b/tests/test_Tensor_is_inference.py new file mode 100644 index 000000000..13924f580 --- /dev/null +++ b/tests/test_Tensor_is_inference.py @@ -0,0 +1,41 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.Tensor.is_inference") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = x.is_inference() + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]).is_inference() + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_is_inference.py b/tests/test_is_inference.py new file mode 100644 index 000000000..9f2de957a --- /dev/null +++ b/tests/test_is_inference.py @@ -0,0 +1,41 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.is_inference") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = torch.is_inference(x) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = torch.is_inference(input = x) + """ + ) + obj.run(pytorch_code, ["result"]) From 91de87be07a851abb277c1653ba46436d8b11a45 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Mon, 14 Oct 2024 22:48:43 +0800 Subject: [PATCH 02/39] update --- paconvert/api_matcher.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 11d4e9ccb..85ba89a95 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -755,15 +755,14 @@ def get_paddle_nodes(self, args, kwargs): class Is_InferenceMatcher(BaseMatcher): def generate_aux_code(self): - API_TEMPLATE = textwrap.dedent( + CODE_TEMPLATE = textwrap.dedent( """ - def is_inference(input): - x = not input.stop_gradient - return x + def is_inference(x): + return not x.stop_gradient + """ ) - - return API_TEMPLATE + return CODE_TEMPLATE def generate_code(self, kwargs): self.write_aux_code() @@ -771,7 +770,7 @@ def generate_code(self, kwargs): kwargs = {"input": self.paddleClass} API_TEMPLATE = textwrap.dedent( """ - paddle_aux.is_inference({}) + paddle_aux.is_inference(x={}) """ ) code = API_TEMPLATE.format(kwargs["input"]) From e035d2d4193956bbba82efe5482a39895350e38b Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Mon, 14 Oct 2024 23:55:30 +0800 Subject: [PATCH 03/39] update --- paconvert/api_matcher.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 85ba89a95..5ecdcf241 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -757,11 +757,11 @@ class Is_InferenceMatcher(BaseMatcher): def generate_aux_code(self): CODE_TEMPLATE = textwrap.dedent( """ - def is_inference(x): + def is_inference_aux_func(x): 
return not x.stop_gradient - """ ) + return CODE_TEMPLATE def generate_code(self, kwargs): @@ -770,7 +770,7 @@ def generate_code(self, kwargs): kwargs = {"input": self.paddleClass} API_TEMPLATE = textwrap.dedent( """ - paddle_aux.is_inference(x={}) + paddle_aux.is_inference_aux_func(x={}) """ ) code = API_TEMPLATE.format(kwargs["input"]) From 29b37e131f6bab8fad4c99c7fb70308424ee620e Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 09:26:47 +0800 Subject: [PATCH 04/39] update --- paconvert/api_mapping.json | 2 -- paconvert/api_matcher.py | 18 ++++-------------- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index c7dca808b..9d92110d3 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -2090,12 +2090,10 @@ }, "torch.Tensor.is_inference": { "Matcher": "Is_InferenceMatcher", - "paddle_api": "paddle.Tensor.stop_gradient", "min_input_args": 0 }, "torch.is_inference": { "Matcher": "Is_InferenceMatcher", - "paddle_api": "paddle.Tensor.stop_gradient", "min_input_args": 1, "args_list":[ "input" diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 5ecdcf241..5227704c7 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -754,23 +754,13 @@ def get_paddle_nodes(self, args, kwargs): class Is_InferenceMatcher(BaseMatcher): - def generate_aux_code(self): - CODE_TEMPLATE = textwrap.dedent( - """ - def is_inference_aux_func(x): - return not x.stop_gradient - """ - ) - - return CODE_TEMPLATE - def generate_code(self, kwargs): - self.write_aux_code() - if len(kwargs) == 0: - kwargs = {"input": self.paddleClass} + if "input" not in kwargs: + kwargs["input"] = self.paddleClass API_TEMPLATE = textwrap.dedent( """ - paddle_aux.is_inference_aux_func(x={}) + x_inference = not {}.stop_gradient + x_inference """ ) code = API_TEMPLATE.format(kwargs["input"]) From 492a2e9b7867842c32aba67eec02125002f084d3 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 10:09:59 +0800 Subject: [PATCH 05/39] add geometric_ --- paconvert/api_mapping.json | 15 ++++++- tests/test_Tensor_geometric_.py | 74 +++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 tests/test_Tensor_geometric_.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 9d92110d3..584f4f023 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -1711,7 +1711,20 @@ "other": "y" } }, - "torch.Tensor.geometric_": {}, + "torch.Tensor.geometric_": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.Tensor.geometric_", + "min_input_args": 1, + "args_list": [ + "p", + "*", + "generator" + ], + "kwargs_change": { + "p": "probs", + "generator":"" + } + }, "torch.Tensor.geqrf": {}, "torch.Tensor.ger": { "Matcher": "GenericMatcher", diff --git a/tests/test_Tensor_geometric_.py b/tests/test_Tensor_geometric_.py new file mode 100644 index 000000000..f550e3b34 --- /dev/null +++ b/tests/test_Tensor_geometric_.py @@ -0,0 +1,74 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.Tensor.geometric_") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]).geometric_(0.5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.geometric_(0.5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.geometric_(p=0.5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.geometric_(p=0.5, generator=None) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.geometric_(generator=None, p=0.5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) From 8063b46b31e8b46d9ccf8338d5c27057efca6e29 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 10:23:27 +0800 Subject: [PATCH 06/39] add cauchy_ --- paconvert/api_mapping.json | 17 +++++++- tests/test_Tensor_cauchy_.py | 85 ++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 tests/test_Tensor_cauchy_.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 584f4f023..a621f262d 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -844,7 +844,22 @@ "memory_format" ] }, - "torch.Tensor.cauchy_": {}, + "torch.Tensor.cauchy_": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.Tensor.cauchy_", + "min_input_args": 0, + "args_list": [ + "median", + "sigma", + "*", + "generator" + ], + "kwargs_change": { + "median": "loc", + "sigma":"scale", + "generator":"" + } + }, "torch.Tensor.cdouble": { "Matcher": "TensorCdoubleMatcher", "paddle_api": "paddle.Tensor.astype", diff --git a/tests/test_Tensor_cauchy_.py b/tests/test_Tensor_cauchy_.py new file mode 100644 index 000000000..3fcccd75b --- /dev/null +++ b/tests/test_Tensor_cauchy_.py @@ -0,0 +1,85 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.Tensor.cauchy_") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]).cauchy_() + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.cauchy_() + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.cauchy_(median=0) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.cauchy_(median=0, sigma=1) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.cauchy_(median=0, sigma=1, generator=None) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.cauchy_(median=0, generator=None, sigma=1) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) From 702f3b3630b9ff8ab12c86075d93d7547fa693c7 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 10:49:37 +0800 Subject: [PATCH 07/39] add random_ --- paconvert/api_mapping.json | 17 ++++++++- tests/test_Tensor_random_.py | 74 ++++++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+), 1 deletion(-) create mode 100644 tests/test_Tensor_random_.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index a621f262d..598c654ca 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -3204,7 +3204,22 @@ "Matcher": "UnchangeMatcher", "min_input_args": 0 }, - "torch.Tensor.random_": {}, + "torch.Tensor.random_": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.Tensor.uniform_", + "min_input_args": 0, + "args_list": [ + "from", + "to", + "*", + "generator" + ], + "kwargs_change": { + "from": "min", + "to": "max", + "generator": "" + } + }, "torch.Tensor.ravel": { "Matcher": "GenericMatcher", "paddle_api": "paddle.Tensor.flatten", diff --git a/tests/test_Tensor_random_.py b/tests/test_Tensor_random_.py new file mode 100644 index 000000000..2350d9443 --- /dev/null +++ b/tests/test_Tensor_random_.py @@ -0,0 +1,74 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.Tensor.random_") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]).random_(0, 5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.random_(0, 5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.random_(0, to=5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + result = input.random_(from=0, to=5, generator=None) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + input = torch.tensor([1, 2, 3]) + result = input.random_(from=0, generator=None, to=5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) From 97acd4bc85288bfce0d474ebfab7fe5d8860aad6 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 11:06:20 +0800 Subject: [PATCH 08/39] update --- tests/test_Tensor_random_.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_Tensor_random_.py b/tests/test_Tensor_random_.py index 2350d9443..cd9dd9d35 100644 --- a/tests/test_Tensor_random_.py +++ b/tests/test_Tensor_random_.py @@ -57,7 +57,7 @@ def test_case_4(): """ import torch input = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]) - result = input.random_(from=0, to=5, generator=None) + result = input.random_(0, to=5, generator=None) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -68,7 +68,7 @@ def test_case_5(): """ import torch input = torch.tensor([1, 2, 3]) - result = input.random_(from=0, generator=None, to=5) + result = input.random_(0, generator=None, to=5) """ ) obj.run(pytorch_code, ["result"], check_value=False) From cc89b2696b1d4a417a336d4c1973461963932c36 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 12:22:10 +0800 Subject: [PATCH 09/39] update --- tests/test_Tensor_random_.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/tests/test_Tensor_random_.py b/tests/test_Tensor_random_.py index cd9dd9d35..dc706b9c5 100644 --- a/tests/test_Tensor_random_.py +++ b/tests/test_Tensor_random_.py @@ -61,14 +61,3 @@ def test_case_4(): """ ) obj.run(pytorch_code, ["result"], check_value=False) - - -def test_case_5(): - pytorch_code = textwrap.dedent( - """ - import torch - input = torch.tensor([1, 2, 3]) - result = input.random_(0, generator=None, to=5) - """ - ) - obj.run(pytorch_code, ["result"], check_value=False) From 05cf0bebe9283557112b6526c89ea43f46f0d0e2 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 12:38:29 +0800 Subject: [PATCH 10/39] add chi2 --- paconvert/api_mapping.json | 12 +++++ tests/test_distributions_chi2_Chi2.py | 75 +++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 tests/test_distributions_chi2_Chi2.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 598c654ca..a1aa58768 100644 --- a/paconvert/api_mapping.json +++ 
b/paconvert/api_mapping.json @@ -6484,6 +6484,18 @@ "total_count": "1" } }, + "torch.distributions.chi2.Chi2":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.distribution.Chi2", + "min_input_args": 1, + "args_list": [ + "df", + "validate_args" + ], + "kwargs_change": { + "validate_args": "" + } + }, "torch.distributions.Categorical": { "Matcher": "GenericMatcher", "paddle_api": "paddle.distribution.Categorical", diff --git a/tests/test_distributions_chi2_Chi2.py b/tests/test_distributions_chi2_Chi2.py new file mode 100644 index 000000000..66957ee5c --- /dev/null +++ b/tests/test_distributions_chi2_Chi2.py @@ -0,0 +1,75 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.distributions.chi2.Chi2") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.chi2.Chi2(x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.chi2.Chi2(df=x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.chi2.Chi2(df=x, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.chi2.Chi2(validate_args=None, df=x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) From a972b1c1540805aa4a27f8ab28ce4496c38b4254 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 12:51:00 +0800 Subject: [PATCH 11/39] add Constraint --- paconvert/api_mapping.json | 5 ++++ paconvert/api_matcher.py | 24 ++++++++++++++++ ...st_distributions_constraints_Constraint.py | 28 +++++++++++++++---- 3 files changed, 52 insertions(+), 5 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index a1aa58768..c9f034a76 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6538,6 +6538,11 @@ "cache_size": "" } }, + "torch.distributions.constraints.Constraint" : { + "Matcher": "DistributionsConstrainMatcher", + "paddle_api": "paddle.distribution.constraint.Constraint", + "abstract": true + }, "torch.distributions.ContinuousBernoulli": { "Matcher": "GenericMatcher", "paddle_api": "paddle.distribution.ContinuousBernoulli", diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 5227704c7..77d8c6dce 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -767,6 +767,30 @@ def generate_code(self, kwargs): return code +class DistributionsConstrainMatcher(BaseMatcher): + def 
generate_aux_code(self): + API_TEMPLATE = textwrap.dedent( + """ + import paddle + def Distributions_Constraint(): + class DistributionsConstrain: + def check(self, value): + return paddle.distribution.constraint.Constraint()(value) + return DistributionsConstrain() + """ + ) + + return API_TEMPLATE + def generate_code(self, kwargs): + self.write_aux_code() + API_TEMPLATE = textwrap.dedent( + """ + paddle_aux.Distributions_Constraint() + """ + ) + return API_TEMPLATE + + class IInfoMatcher(BaseMatcher): def generate_code(self, kwargs): return "{}(dtype={})".format(self.get_paddle_api(), kwargs["type"]) diff --git a/tests/test_distributions_constraints_Constraint.py b/tests/test_distributions_constraints_Constraint.py index 683ecce39..f4b88ab5f 100644 --- a/tests/test_distributions_constraints_Constraint.py +++ b/tests/test_distributions_constraints_Constraint.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,12 +23,30 @@ def test_case_1(): pytorch_code = textwrap.dedent( """ import torch - result = torch.distributions.constraints.Constraint() + try: + result = torch.distributions.constraints.Constraint().check(1) + except NotImplementedError: + result = torch.tensor(1) """ ) obj.run( pytorch_code, - ["result"], - unsupport=True, - reason="paddle does not support this function temporarily", + ["result"] + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + try: + con = torch.distributions.constraints.Constraint() + result = con.check(value=1) + except NotImplementedError: + result = torch.tensor(1) + """ + ) + obj.run( + pytorch_code, + ["result"] ) From f4aa1d1f040064ed90080baa26c31024cadbe330 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 13:06:58 +0800 Subject: [PATCH 12/39] add Gamma --- paconvert/api_mapping.json | 13 +++++ tests/test_distributions_gamma_Gamma.py | 75 +++++++++++++++++++++++++ 2 files changed, 88 insertions(+) create mode 100644 tests/test_distributions_gamma_Gamma.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index c9f034a76..6ab34cb2b 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6543,6 +6543,19 @@ "paddle_api": "paddle.distribution.constraint.Constraint", "abstract": true }, + "torch.distributions.gamma.Gamma":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.distribution.Gamma", + "min_input_args": 2, + "args_list": [ + "concentration", + "rate", + "validate_args" + ], + "kwargs_change": { + "validate_args": "" + } + }, "torch.distributions.ContinuousBernoulli": { "Matcher": "GenericMatcher", "paddle_api": "paddle.distribution.ContinuousBernoulli", diff --git a/tests/test_distributions_gamma_Gamma.py b/tests/test_distributions_gamma_Gamma.py new file mode 100644 index 000000000..d1466a13a --- /dev/null +++ b/tests/test_distributions_gamma_Gamma.py @@ -0,0 +1,75 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.distributions.gamma.Gamma") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.gamma.Gamma(x, x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.gamma.Gamma(concentration=x, rate=x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.gamma.Gamma(concentration=x, rate=x, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.gamma.Gamma(rate=x, concentration=x, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) From f2e7ddbaf5b2105172e2b0932fa73dfb679dc542 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 13:44:43 +0800 Subject: [PATCH 13/39] update --- paconvert/api_mapping.json | 2 +- paconvert/api_matcher.py | 33 +++++++++++++++++++++++++++------ 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 6ab34cb2b..a09cc70ef 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -2117,7 +2117,7 @@ "min_input_args": 0 }, "torch.Tensor.is_inference": { - "Matcher": "Is_InferenceMatcher", + "Matcher": "TensorIs_InferenceMatcher", "min_input_args": 0 }, "torch.is_inference": { diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 77d8c6dce..b52bc650e 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -753,17 +753,38 @@ def get_paddle_nodes(self, args, kwargs): return ast.parse(code).body -class Is_InferenceMatcher(BaseMatcher): +class TensorIs_InferenceMatcher(BaseMatcher): + def generate_aux_code(self): + API_TEMPLATE = textwrap.dedent( + """ + def Is_Inference(x): + is_inference = not x.stop_gradient + return is_inference + """ + ) + + return API_TEMPLATE def generate_code(self, kwargs): - if "input" not in kwargs: - kwargs["input"] = self.paddleClass + self.write_aux_code() + code = "paddle_aux.Is_Inference(x={})".format(self.paddleClass) + return code + + +class Is_InferenceMatcher(BaseMatcher): + def generate_aux_code(self): API_TEMPLATE = textwrap.dedent( """ - x_inference = not {}.stop_gradient - x_inference + def Is_Inference(x): + is_inference = not x.stop_gradient + return is_inference """ ) - code = API_TEMPLATE.format(kwargs["input"]) + + return API_TEMPLATE + def generate_code(self, kwargs): + self.write_aux_code() + perm = get_unique_name("perm") + code = "paddle_aux.Is_Inference(x={})".format(kwargs["input"]) return code From 329b4b216ba92ccd72aad7e69918ef2d04ab9231 Mon Sep 17 00:00:00 2001 From: 
decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 14:06:19 +0800 Subject: [PATCH 14/39] add Poisson LKJCholesky --- paconvert/api_mapping.json | 25 +++++++ paconvert/api_matcher.py | 4 +- ..._distributions_lkj_cholesky_LKJCholesky.py | 75 +++++++++++++++++++ tests/test_distributions_poisson_Poisson.py | 75 +++++++++++++++++++ 4 files changed, 177 insertions(+), 2 deletions(-) create mode 100644 tests/test_distributions_lkj_cholesky_LKJCholesky.py create mode 100644 tests/test_distributions_poisson_Poisson.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index a09cc70ef..8afcb4b28 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6434,6 +6434,31 @@ }, "min_input_args": 2 }, + "torch.distributions.lkj_cholesky.LKJCholesky":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.distribution.LKJCholesky", + "min_input_args": 1, + "args_list": [ + "dim", + "concentration", + "validate_args" + ], + "kwargs_change": { + "validate_args": "" + } + }, + "torch.distributions.poisson.Poisson":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.distribution.Poisson", + "min_input_args": 1, + "args_list": [ + "rate", + "validate_args" + ], + "kwargs_change": { + "validate_args": "" + } + }, "torch.distributions.Bernoulli": { "Matcher": "GenericMatcher", "paddle_api": "paddle.distribution.Bernoulli", diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index b52bc650e..09fdecb3d 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -757,7 +757,7 @@ class TensorIs_InferenceMatcher(BaseMatcher): def generate_aux_code(self): API_TEMPLATE = textwrap.dedent( """ - def Is_Inference(x): + def Tensor_Is_Inference(x): is_inference = not x.stop_gradient return is_inference """ @@ -766,7 +766,7 @@ def Is_Inference(x): return API_TEMPLATE def generate_code(self, kwargs): self.write_aux_code() - code = "paddle_aux.Is_Inference(x={})".format(self.paddleClass) + code = "paddle_aux.Tensor_Is_Inference(x={})".format(self.paddleClass) return code diff --git a/tests/test_distributions_lkj_cholesky_LKJCholesky.py b/tests/test_distributions_lkj_cholesky_LKJCholesky.py new file mode 100644 index 000000000..72e64013a --- /dev/null +++ b/tests/test_distributions_lkj_cholesky_LKJCholesky.py @@ -0,0 +1,75 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
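+#
+# The cases below call LKJCholesky positionally, by keyword, and with
+# validate_args, which the mapping above drops because Paddle has no
+# counterpart; samples are random, so every case compares with check_value=False.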
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.distributions.lkj_cholesky.LKJCholesky") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.lkj_cholesky.LKJCholesky(3, x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.lkj_cholesky.LKJCholesky(dim=3, concentration=x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.lkj_cholesky.LKJCholesky(dim=3, concentration=x, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.lkj_cholesky.LKJCholesky(concentration=x, dim=3, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) diff --git a/tests/test_distributions_poisson_Poisson.py b/tests/test_distributions_poisson_Poisson.py new file mode 100644 index 000000000..10d6ba91d --- /dev/null +++ b/tests/test_distributions_poisson_Poisson.py @@ -0,0 +1,75 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
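+#
+# torch.distributions.poisson.Poisson maps to paddle.distribution.Poisson with
+# validate_args dropped; sampling is stochastic, so every case runs with
+# check_value=False.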
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.distributions.poisson.Poisson") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.poisson.Poisson(x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.poisson.Poisson(rate=x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.poisson.Poisson(rate=x, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.poisson.Poisson(validate_args=None, rate=x, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) \ No newline at end of file From 870d76db2ca395465f2a569ccefd4b41c74267ca Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 14:42:57 +0800 Subject: [PATCH 15/39] update --- paconvert/api_mapping.json | 7 ++--- paconvert/api_matcher.py | 30 +++++++++++++++++++++ tests/test_distributions_poisson_Poisson.py | 2 +- 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 8afcb4b28..a604a9ca9 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6435,17 +6435,14 @@ "min_input_args": 2 }, "torch.distributions.lkj_cholesky.LKJCholesky":{ - "Matcher": "GenericMatcher", + "Matcher": "LKJCholeskyMatcher", "paddle_api": "paddle.distribution.LKJCholesky", "min_input_args": 1, "args_list": [ "dim", "concentration", "validate_args" - ], - "kwargs_change": { - "validate_args": "" - } + ] }, "torch.distributions.poisson.Poisson":{ "Matcher": "GenericMatcher", diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 09fdecb3d..955a3ea01 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -770,6 +770,36 @@ def generate_code(self, kwargs): return code +class LKJCholeskyMatcher(BaseMatcher): + def generate_aux_code(self): + API_TEMPLATE = textwrap.dedent( + """ + def LKJCholesky_Aux_Func(dim, concentration, sample_method='onion'): + class LKJCholesky_Aux_Class: + def __init__(self, dim, concentration, sample_method='onion'): + self.lkj = paddle.distribution.LKJCholesky(dim, concentration, sample_method) + def sample(self): + return paddle.unsqueeze(self.lkj.sample(), axis=0) + return LKJCholesky_Aux_Class(dim, concentration, sample_method) + """ + ) + + return API_TEMPLATE + def generate_code(self, kwargs): + self.write_aux_code() + if "validate_args" in kwargs: + del kwargs["validate_args"] + kwargs = self.kwargs_to_str(kwargs) + API_TEMPLATE = textwrap.dedent( + """ + paddle_aux.LKJCholesky_Aux_Func({}) + """ + ) + code = API_TEMPLATE.format(kwargs) + return code + + + class Is_InferenceMatcher(BaseMatcher): def generate_aux_code(self): API_TEMPLATE = textwrap.dedent( diff --git a/tests/test_distributions_poisson_Poisson.py b/tests/test_distributions_poisson_Poisson.py index 10d6ba91d..b11597f55 100644 --- a/tests/test_distributions_poisson_Poisson.py +++ b/tests/test_distributions_poisson_Poisson.py @@ -66,7 +66,7 @@ def test_case_4(): """ 
import torch x = torch.tensor([1.0]) - result = torch.distributions.poisson.Poisson(validate_args=None, rate=x, validate_args=None).sample() + result = torch.distributions.poisson.Poisson(validate_args=None, rate=x).sample() """ ) obj.run( From 411edfd1e035a22cc68fd3a28110a94243721d38 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 15 Oct 2024 15:06:10 +0800 Subject: [PATCH 16/39] add StudentT PositiveDefiniteTransform --- paconvert/api_mapping.json | 24 ++++++ paconvert/api_matcher.py | 68 +++++++++++++++++ tests/test_distributions_studentT_StudentT.py | 75 +++++++++++++++++++ ...ns_transforms_PositiveDefiniteTransform.py | 50 +++++++++++++ 4 files changed, 217 insertions(+) create mode 100644 tests/test_distributions_studentT_StudentT.py create mode 100644 tests/test_distributions_transforms_PositiveDefiniteTransform.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index a604a9ca9..538d0494c 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6444,6 +6444,30 @@ "validate_args" ] }, + "torch.distributions.studentT.StudentT":{ + "Matcher": "StudentTMatcher", + "paddle_api": "paddle.distribution.StudentT", + "min_input_args": 1, + "args_list": [ + "df", + "loc", + "scale", + "validate_args" + ], + "kwargs_change": { + "validate_args": "" + } + }, + "torch.distributions.transforms.PositiveDefiniteTransform":{ + "Matcher": "TransformsPositiveDefiniteTransformMatcher", + "min_input_args": 0, + "args_list": [ + "cache_size" + ], + "kwargs_change": { + "cache_size": "" + } + }, "torch.distributions.poisson.Poisson":{ "Matcher": "GenericMatcher", "paddle_api": "paddle.distribution.Poisson", diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 955a3ea01..5698c7f5b 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -768,12 +768,80 @@ def generate_code(self, kwargs): self.write_aux_code() code = "paddle_aux.Tensor_Is_Inference(x={})".format(self.paddleClass) return code + + +class StudentTMatcher(BaseMatcher): + def generate_aux_code(self): + API_TEMPLATE = textwrap.dedent( + """ + import paddle + def StudentT_Aux_Func(df, loc, scale): + class StudentT_Aux_Class: + def __init__(self, df, loc, scale): + self.df = df + self.loc = paddle.to_tensor(loc) + self.scale = paddle.to_tensor(scale) + self.sT = paddle.distribution.StudentT(self.df, self.loc, self.scale) + def sample(self): + return paddle.reshape(self.sT.sample(), self.df.shape) + return StudentT_Aux_Class(df, loc, scale) + """ + ) + + return API_TEMPLATE + def generate_code(self, kwargs): + self.write_aux_code() + if "validate_args" in kwargs: + del kwargs["validate_args"] + if "loc" not in kwargs: + kwargs["loc"] = 0.1 + if "scale" not in kwargs: + kwargs["scale"] = 1.0 + kwargs = self.kwargs_to_str(kwargs) + API_TEMPLATE = textwrap.dedent( + """ + paddle_aux.StudentT_Aux_Func({}) + """ + ) + code = API_TEMPLATE.format(kwargs) + return code + + +class TransformsPositiveDefiniteTransformMatcher(BaseMatcher): + def generate_aux_code(self): + API_TEMPLATE = textwrap.dedent( + """ + import paddle + class TransformsPositiveDefiniteTransform: + def __call__(self, x): + x = x.tril(-1) + x.diagonal(axis1=-2, axis2=-1).exp().diag_embed() + shape_list = list(range(x.ndim)) + shape_list[-1], shape_list[-2] = shape_list[-2], shape_list[-1] + y = x.transpose(perm=shape_list) + return x @ y + + def inv(self, y): + y = paddle.linalg.cholesky(y) + return y.tril(-1) + y.diagonal(axis1=-2, axis2=-1).log().diag_embed() + """ + ) + + return 
API_TEMPLATE + def generate_code(self, kwargs): + self.write_aux_code() + API_TEMPLATE = textwrap.dedent( + """ + paddle_aux.TransformsPositiveDefiniteTransform() + """ + ) + return API_TEMPLATE class LKJCholeskyMatcher(BaseMatcher): def generate_aux_code(self): API_TEMPLATE = textwrap.dedent( """ + import paddle def LKJCholesky_Aux_Func(dim, concentration, sample_method='onion'): class LKJCholesky_Aux_Class: def __init__(self, dim, concentration, sample_method='onion'): diff --git a/tests/test_distributions_studentT_StudentT.py b/tests/test_distributions_studentT_StudentT.py new file mode 100644 index 000000000..c57473fe9 --- /dev/null +++ b/tests/test_distributions_studentT_StudentT.py @@ -0,0 +1,75 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.distributions.studentT.StudentT") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.studentT.StudentT(x).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.studentT.StudentT(df=x, loc=0.1, scale=1.0).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.studentT.StudentT(df=x, loc=0.1, scale=1.0, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([1.0]) + result = torch.distributions.studentT.StudentT(scale=1.0, loc=0.1, df=x, validate_args=None).sample() + """ + ) + obj.run( + pytorch_code, + ["result"], check_value=False + ) \ No newline at end of file diff --git a/tests/test_distributions_transforms_PositiveDefiniteTransform.py b/tests/test_distributions_transforms_PositiveDefiniteTransform.py new file mode 100644 index 000000000..5329d048e --- /dev/null +++ b/tests/test_distributions_transforms_PositiveDefiniteTransform.py @@ -0,0 +1,50 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
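+#
+# Both cases apply the transform to a lower-triangular matrix and invert the
+# result through inv(); the converted code uses the paddle_aux helper emitted
+# by TransformsPositiveDefiniteTransformMatcher, and cache_size is ignored.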
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.distributions.transforms.PositiveDefiniteTransform") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([[1.0, 0.0], [2.0, 1.0]]) + a = torch.distributions.transforms.PositiveDefiniteTransform() + result = a(x) + result_inv = a.inv(result) + """ + ) + obj.run( + pytorch_code, + ["result", "result_inv"] + ) + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([[1.0, 0.0], [2.0, 1.0]]) + a = torch.distributions.transforms.PositiveDefiniteTransform(cache_size=0) + result = a(x) + result_inv = a.inv(result) + """ + ) + obj.run( + pytorch_code, + ["result", "result_inv"] + ) \ No newline at end of file From 54a14efaa5fdd37bc77028c3acc420835e4a50b3 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 12:09:34 +0800 Subject: [PATCH 17/39] update --- tests/test_distributions_constraints_Constraint.py | 2 +- tests/test_distributions_lkj_cholesky_LKJCholesky.py | 2 +- tests/test_distributions_studentT_StudentT.py | 2 +- .../test_distributions_transforms_PositiveDefiniteTransform.py | 2 +- tests/test_is_inference.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_distributions_constraints_Constraint.py b/tests/test_distributions_constraints_Constraint.py index f4b88ab5f..5d52a9f35 100644 --- a/tests/test_distributions_constraints_Constraint.py +++ b/tests/test_distributions_constraints_Constraint.py @@ -16,7 +16,7 @@ from apibase import APIBase -obj = APIBase("torch.distributions.constraints.Constraint") +obj = APIBase("torch.distributions.constraints.Constraint", is_aux_api=True) def test_case_1(): diff --git a/tests/test_distributions_lkj_cholesky_LKJCholesky.py b/tests/test_distributions_lkj_cholesky_LKJCholesky.py index 72e64013a..29ec6c09b 100644 --- a/tests/test_distributions_lkj_cholesky_LKJCholesky.py +++ b/tests/test_distributions_lkj_cholesky_LKJCholesky.py @@ -16,7 +16,7 @@ from apibase import APIBase -obj = APIBase("torch.distributions.lkj_cholesky.LKJCholesky") +obj = APIBase("torch.distributions.lkj_cholesky.LKJCholesky", is_aux_api=True) def test_case_1(): diff --git a/tests/test_distributions_studentT_StudentT.py b/tests/test_distributions_studentT_StudentT.py index c57473fe9..67dad5a76 100644 --- a/tests/test_distributions_studentT_StudentT.py +++ b/tests/test_distributions_studentT_StudentT.py @@ -16,7 +16,7 @@ from apibase import APIBase -obj = APIBase("torch.distributions.studentT.StudentT") +obj = APIBase("torch.distributions.studentT.StudentT", is_aux_api=True) def test_case_1(): diff --git a/tests/test_distributions_transforms_PositiveDefiniteTransform.py b/tests/test_distributions_transforms_PositiveDefiniteTransform.py index 5329d048e..27054768a 100644 --- a/tests/test_distributions_transforms_PositiveDefiniteTransform.py +++ b/tests/test_distributions_transforms_PositiveDefiniteTransform.py @@ -16,7 +16,7 @@ from apibase import APIBase -obj = APIBase("torch.distributions.transforms.PositiveDefiniteTransform") +obj = APIBase("torch.distributions.transforms.PositiveDefiniteTransform", is_aux_api=True) def test_case_1(): diff --git a/tests/test_is_inference.py b/tests/test_is_inference.py index 9f2de957a..74981183d 100644 --- a/tests/test_is_inference.py +++ b/tests/test_is_inference.py @@ -17,7 +17,7 @@ from apibase import APIBase -obj = APIBase("torch.is_inference") +obj = APIBase("torch.is_inference", is_aux_api=True) def test_case_1(): From 
9d1787da718c71ed5bc026f4f5ff7edf334d8a5a Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 12:22:09 +0800 Subject: [PATCH 18/39] update --- paconvert/api_mapping.json | 2 +- paconvert/api_matcher.py | 22 +++------------------- tests/test_Tensor_is_inference.py | 2 +- 3 files changed, 5 insertions(+), 21 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 538d0494c..e045019a3 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -2117,7 +2117,7 @@ "min_input_args": 0 }, "torch.Tensor.is_inference": { - "Matcher": "TensorIs_InferenceMatcher", + "Matcher": "Is_InferenceMatcher", "min_input_args": 0 }, "torch.is_inference": { diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 5698c7f5b..780d00ca4 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -751,24 +751,7 @@ def get_paddle_nodes(self, args, kwargs): for i in range(1, len(new_args)): code = "{}({}, {})".format(self.get_paddle_api(), code, new_args[i]) return ast.parse(code).body - - -class TensorIs_InferenceMatcher(BaseMatcher): - def generate_aux_code(self): - API_TEMPLATE = textwrap.dedent( - """ - def Tensor_Is_Inference(x): - is_inference = not x.stop_gradient - return is_inference - """ - ) - - return API_TEMPLATE - def generate_code(self, kwargs): - self.write_aux_code() - code = "paddle_aux.Tensor_Is_Inference(x={})".format(self.paddleClass) - return code - + class StudentTMatcher(BaseMatcher): def generate_aux_code(self): @@ -881,7 +864,8 @@ def Is_Inference(x): return API_TEMPLATE def generate_code(self, kwargs): self.write_aux_code() - perm = get_unique_name("perm") + if "input" not in kwargs: + kwargs["input"] = self.paddleClass code = "paddle_aux.Is_Inference(x={})".format(kwargs["input"]) return code diff --git a/tests/test_Tensor_is_inference.py b/tests/test_Tensor_is_inference.py index 13924f580..73c2ad767 100644 --- a/tests/test_Tensor_is_inference.py +++ b/tests/test_Tensor_is_inference.py @@ -17,7 +17,7 @@ from apibase import APIBase -obj = APIBase("torch.Tensor.is_inference") +obj = APIBase("torch.Tensor.is_inference", is_aux_api=True) def test_case_1(): From 92da59ffb9c1fb9c7c9584cef84051ef4473ba1a Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 12:43:17 +0800 Subject: [PATCH 19/39] add remote --- paconvert/api_mapping.json | 12 +++ paconvert/api_matcher.py | 39 ++++++++++ tests/test_distributed_rpc_remote.py | 109 +++++++++++++++++++++++++++ 3 files changed, 160 insertions(+) create mode 100644 tests/test_distributed_rpc_remote.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index e045019a3..15c55b3ce 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6351,6 +6351,18 @@ ], "min_input_args": 1 }, + "torch.distributed.rpc.remote":{ + "Matcher": "RpcRemoteMatcher", + "paddle_api": "paddle.distributed.rpc.rpc_async", + "min_input_args": 2, + "args_list": [ + "to", + "func", + "args", + "kwargs", + "timeout" + ] + }, "torch.distributed.rpc.shutdown": { "Matcher": "GenericMatcher", "paddle_api": "paddle.distributed.rpc.shutdown", diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 780d00ca4..1b3010175 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -5208,6 +5208,45 @@ def generate_code(self, kwargs): return code +class RpcRemoteMatcher(BaseMatcher): + def generate_aux_code(self): + CODE_TEMPLATE = textwrap.dedent( + """ + import paddle + import 
paddle.distributed.rpc as rpc + class rpc_remote: + def __init__(to, func, args=None, kwargs=None, timeout=-1): + self.remote = rpc.rpc_async(to=to, fn=func, args=args, kwargs=kwargs, timeout=timeout) + + def to_here(): + return self.remote.wait() + """ + ) + return CODE_TEMPLATE + + def generate_code(self, kwargs): + self.write_aux_code() + if "args" not in kwargs.keys(): + kwargs["args"] = None + if "kwargs" not in kwargs.keys(): + kwargs["kwargs"] = None + if "timeout" not in kwargs.keys(): + kwargs["timeout"] = -1 + API_TEMPLATE = textwrap.dedent( + """ + paddle_aux.rpc_remote(to={}, func={}, args={}, kwargs={}, timeout={}) + """ + ) + code = API_TEMPLATE.format( + kwargs["to"], + kwargs["func"], + kwargs["args"], + kwargs["kwargs"], + kwargs["timeout"] + ) + return code + + class GetNumThreadsMatcher(BaseMatcher): def generate_code(self, kwargs): API_TEMPLATE = textwrap.dedent( diff --git a/tests/test_distributed_rpc_remote.py b/tests/test_distributed_rpc_remote.py new file mode 100644 index 000000000..588a5cbbb --- /dev/null +++ b/tests/test_distributed_rpc_remote.py @@ -0,0 +1,109 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.distributed.rpc.remote") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import os + import torch + from torch.distributed import rpc + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '29500' + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + rpc.init_rpc( + "worker1", + rank=0, + world_size=1 + ) + r = rpc.remote( + "worker1", + torch.add, + args=(torch.tensor(2), torch.tensor(3)) + ) + result = r.to_here() + rpc.shutdown() + """ + ) + obj.run( + pytorch_code, + ["result"] + ) + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import os + import torch + from torch.distributed import rpc + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '29500' + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + rpc.init_rpc( + "worker1", + rank=0, + world_size=1 + ) + r = rpc.remote( + to="worker1", + func=torch.add, + args=(torch.tensor(2), torch.tensor(3)), + kwargs=None, + timeout=-1 + ) + result = r.to_here() + rpc.shutdown() + """ + ) + obj.run( + pytorch_code, + ["result"] + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import os + import torch + from torch.distributed import rpc + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '29500' + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + rpc.init_rpc( + "worker1", + rank=0, + world_size=1 + ) + r = rpc.remote( + to="worker1", + func=torch.add, + args=(torch.tensor(2), torch.tensor(3)), + timeout=-1, + kwargs=None + ) + result = r.to_here() + rpc.shutdown() + """ + ) + obj.run( + pytorch_code, + ["result"] + ) From 271be5dc4b151eed0101849ff904a7bbd7013abb Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 
12:43:25 +0800 Subject: [PATCH 20/39] add remote --- tests/test_distributed_rpc_remote.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_distributed_rpc_remote.py b/tests/test_distributed_rpc_remote.py index 588a5cbbb..780863925 100644 --- a/tests/test_distributed_rpc_remote.py +++ b/tests/test_distributed_rpc_remote.py @@ -16,7 +16,7 @@ from apibase import APIBase -obj = APIBase("torch.distributed.rpc.remote") +obj = APIBase("torch.distributed.rpc.remote", is_aux_api=True) def test_case_1(): From 60b6724025d281734b08c0948cda1a87a37a08d8 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 12:58:46 +0800 Subject: [PATCH 21/39] add remote --- tests/test_distributed_rpc_remote.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/test_distributed_rpc_remote.py b/tests/test_distributed_rpc_remote.py index 780863925..fa8eebe4a 100644 --- a/tests/test_distributed_rpc_remote.py +++ b/tests/test_distributed_rpc_remote.py @@ -39,7 +39,6 @@ def test_case_1(): args=(torch.tensor(2), torch.tensor(3)) ) result = r.to_here() - rpc.shutdown() """ ) obj.run( @@ -69,7 +68,6 @@ def test_case_2(): timeout=-1 ) result = r.to_here() - rpc.shutdown() """ ) obj.run( @@ -78,7 +76,7 @@ def test_case_2(): ) -def test_case_2(): +def test_case_3(): pytorch_code = textwrap.dedent( """ import os @@ -100,7 +98,6 @@ def test_case_2(): kwargs=None ) result = r.to_here() - rpc.shutdown() """ ) obj.run( From 3077803cc22cc2f1420931968a50d7f53810fb4f Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 18:08:35 +0800 Subject: [PATCH 22/39] update --- tests/test_distributed_rpc_remote.py | 52 ++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 6 deletions(-) diff --git a/tests/test_distributed_rpc_remote.py b/tests/test_distributed_rpc_remote.py index fa8eebe4a..f7e4cebac 100644 --- a/tests/test_distributed_rpc_remote.py +++ b/tests/test_distributed_rpc_remote.py @@ -24,10 +24,23 @@ def test_case_1(): """ import os import torch + import socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + start = 25000 + end = 30000 + for port in range(start, end): + try: + s.bind(('localhost', port)) + s.close() + break + except socket.error: + continue + print("port: " + str(port)) + from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '29500' - os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + os.environ['MASTER_PORT'] = str(port) + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:' + str(port) rpc.init_rpc( "worker1", rank=0, @@ -39,6 +52,7 @@ def test_case_1(): args=(torch.tensor(2), torch.tensor(3)) ) result = r.to_here() + result = rpc.shutdown() """ ) obj.run( @@ -51,10 +65,23 @@ def test_case_2(): """ import os import torch + import socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + start = 25000 + end = 30000 + for port in range(start, end): + try: + s.bind(('localhost', port)) + s.close() + break + except socket.error: + continue + print("port: " + str(port)) + from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '29500' - os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + os.environ['MASTER_PORT'] = str(port) + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:' + str(port) rpc.init_rpc( "worker1", rank=0, @@ -81,10 +108,23 @@ def test_case_3(): """ import os import torch + import socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + start = 25000 + 
end = 30000 + for port in range(start, end): + try: + s.bind(('localhost', port)) + s.close() + break + except socket.error: + continue + print("port: " + str(port)) + from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '29500' - os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + os.environ['MASTER_PORT'] = str(port) + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:' + str(port) rpc.init_rpc( "worker1", rank=0, From c2e0a3c0f375f23fa85a17d5c213167a9be2901d Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 18:24:06 +0800 Subject: [PATCH 23/39] update --- tests/test_distributed_rpc_remote.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_distributed_rpc_remote.py b/tests/test_distributed_rpc_remote.py index f7e4cebac..733df28eb 100644 --- a/tests/test_distributed_rpc_remote.py +++ b/tests/test_distributed_rpc_remote.py @@ -52,7 +52,7 @@ def test_case_1(): args=(torch.tensor(2), torch.tensor(3)) ) result = r.to_here() - result = rpc.shutdown() + rpc.shutdown() """ ) obj.run( @@ -95,6 +95,7 @@ def test_case_2(): timeout=-1 ) result = r.to_here() + rpc.shutdown() """ ) obj.run( @@ -138,6 +139,7 @@ def test_case_3(): kwargs=None ) result = r.to_here() + rpc.shutdown() """ ) obj.run( From 6a60ce082238bab0a036ab8c8779787adeee6067 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 18:30:13 +0800 Subject: [PATCH 24/39] update --- paconvert/api_matcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 1b3010175..5d196036f 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -5215,10 +5215,10 @@ def generate_aux_code(self): import paddle import paddle.distributed.rpc as rpc class rpc_remote: - def __init__(to, func, args=None, kwargs=None, timeout=-1): + def __init__(self, to, func, args=None, kwargs=None, timeout=-1): self.remote = rpc.rpc_async(to=to, fn=func, args=args, kwargs=kwargs, timeout=timeout) - def to_here(): + def to_here(self): return self.remote.wait() """ ) From 6db5c8edc3df1dfd77316c553849b839e9cdfed2 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 18:44:02 +0800 Subject: [PATCH 25/39] update --- paconvert/api_matcher.py | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 5d196036f..85f814ab6 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -5212,11 +5212,9 @@ class RpcRemoteMatcher(BaseMatcher): def generate_aux_code(self): CODE_TEMPLATE = textwrap.dedent( """ - import paddle - import paddle.distributed.rpc as rpc class rpc_remote: - def __init__(self, to, func, args=None, kwargs=None, timeout=-1): - self.remote = rpc.rpc_async(to=to, fn=func, args=args, kwargs=kwargs, timeout=timeout) + def __init__(self, remote_obj): + self.remote = remote_obj def to_here(self): return self.remote.wait() @@ -5226,23 +5224,15 @@ def to_here(self): def generate_code(self, kwargs): self.write_aux_code() - if "args" not in kwargs.keys(): - kwargs["args"] = None - if "kwargs" not in kwargs.keys(): - kwargs["kwargs"] = None - if "timeout" not in kwargs.keys(): - kwargs["timeout"] = -1 + kwargs = self.kwargs_to_str(kwargs) API_TEMPLATE = textwrap.dedent( """ - paddle_aux.rpc_remote(to={}, func={}, args={}, kwargs={}, timeout={}) + remote_obj = paddle.distributed.rpc.rpc_async({}) + 
paddle_aux.rpc_remote(remote_obj) """ ) code = API_TEMPLATE.format( - kwargs["to"], - kwargs["func"], - kwargs["args"], - kwargs["kwargs"], - kwargs["timeout"] + kwargs ) return code From a743e69ffd7a5625a7ed68c1c50094689415a88d Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 18:51:21 +0800 Subject: [PATCH 26/39] update --- paconvert/api_matcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 85f814ab6..f5ebc49a8 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -5224,6 +5224,7 @@ def to_here(self): def generate_code(self, kwargs): self.write_aux_code() + kwargs['fn'] = kwargs.pop('func') kwargs = self.kwargs_to_str(kwargs) API_TEMPLATE = textwrap.dedent( """ From 082eca9aa59cb8e9e5398234df50802306926641 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 19:06:44 +0800 Subject: [PATCH 27/39] update --- tests/test_distributed_rpc_remote.py | 66 +++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 5 deletions(-) diff --git a/tests/test_distributed_rpc_remote.py b/tests/test_distributed_rpc_remote.py index 733df28eb..502d39341 100644 --- a/tests/test_distributed_rpc_remote.py +++ b/tests/test_distributed_rpc_remote.py @@ -37,6 +37,9 @@ def test_case_1(): continue print("port: " + str(port)) + def add(a, b): + return a+b + from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = str(port) @@ -48,8 +51,8 @@ def test_case_1(): ) r = rpc.remote( "worker1", - torch.add, - args=(torch.tensor(2), torch.tensor(3)) + add, + args=(2, 3) ) result = r.to_here() rpc.shutdown() @@ -78,6 +81,9 @@ def test_case_2(): continue print("port: " + str(port)) + def add(a, b): + return a+b + from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = str(port) @@ -90,7 +96,7 @@ def test_case_2(): r = rpc.remote( to="worker1", func=torch.add, - args=(torch.tensor(2), torch.tensor(3)), + args=(2, 3), kwargs=None, timeout=-1 ) @@ -122,6 +128,9 @@ def test_case_3(): continue print("port: " + str(port)) + def add(a, b): + return a+b + from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = str(port) @@ -133,8 +142,8 @@ def test_case_3(): ) r = rpc.remote( to="worker1", - func=torch.add, - args=(torch.tensor(2), torch.tensor(3)), + func=add, + args=(2, 3), timeout=-1, kwargs=None ) @@ -146,3 +155,50 @@ def test_case_3(): pytorch_code, ["result"] ) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import os + import torch + import socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + start = 25000 + end = 30000 + for port in range(start, end): + try: + s.bind(('localhost', port)) + s.close() + break + except socket.error: + continue + print("port: " + str(port)) + + def add(a, b): + return a+b + + from torch.distributed import rpc + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = str(port) + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:' + str(port) + rpc.init_rpc( + "worker1", + rank=0, + world_size=1 + ) + r = rpc.remote( + to="worker1", + func=add, + args=None, + timeout=-1, + kwargs={"a": 2, "b": 3} + ) + result = r.to_here() + rpc.shutdown() + """ + ) + obj.run( + pytorch_code, + ["result"] + ) From 6212cd93ebd402cf35902fb97b5f2049d6e3b83f Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 21:53:06 +0800 Subject: [PATCH 28/39] update --- 
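Note: these test changes replace the locally defined add helper with the built-in min as the remote call target.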
tests/test_distributed_rpc_remote.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/tests/test_distributed_rpc_remote.py b/tests/test_distributed_rpc_remote.py index 502d39341..5ddc065dd 100644 --- a/tests/test_distributed_rpc_remote.py +++ b/tests/test_distributed_rpc_remote.py @@ -37,9 +37,6 @@ def test_case_1(): continue print("port: " + str(port)) - def add(a, b): - return a+b - from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = str(port) @@ -51,8 +48,8 @@ def add(a, b): ) r = rpc.remote( "worker1", - add, - args=(2, 3) + min, + args=(2, 1) ) result = r.to_here() rpc.shutdown() @@ -81,9 +78,6 @@ def test_case_2(): continue print("port: " + str(port)) - def add(a, b): - return a+b - from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = str(port) @@ -95,8 +89,8 @@ def add(a, b): ) r = rpc.remote( to="worker1", - func=torch.add, - args=(2, 3), + func=min, + args=(2, 1), kwargs=None, timeout=-1 ) @@ -128,9 +122,6 @@ def test_case_3(): continue print("port: " + str(port)) - def add(a, b): - return a+b - from torch.distributed import rpc os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = str(port) @@ -142,8 +133,8 @@ def add(a, b): ) r = rpc.remote( to="worker1", - func=add, - args=(2, 3), + func=min, + args=(2, 1), timeout=-1, kwargs=None ) From 890793e2a3c71e7bce4589de79b9c29482c8e466 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Wed, 16 Oct 2024 22:11:02 +0800 Subject: [PATCH 29/39] update --- tests/test_distributed_rpc_remote.py | 47 ---------------------------- 1 file changed, 47 deletions(-) diff --git a/tests/test_distributed_rpc_remote.py b/tests/test_distributed_rpc_remote.py index 5ddc065dd..2de74017a 100644 --- a/tests/test_distributed_rpc_remote.py +++ b/tests/test_distributed_rpc_remote.py @@ -146,50 +146,3 @@ def test_case_3(): pytorch_code, ["result"] ) - - -def test_case_4(): - pytorch_code = textwrap.dedent( - """ - import os - import torch - import socket - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - start = 25000 - end = 30000 - for port in range(start, end): - try: - s.bind(('localhost', port)) - s.close() - break - except socket.error: - continue - print("port: " + str(port)) - - def add(a, b): - return a+b - - from torch.distributed import rpc - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = str(port) - os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:' + str(port) - rpc.init_rpc( - "worker1", - rank=0, - world_size=1 - ) - r = rpc.remote( - to="worker1", - func=add, - args=None, - timeout=-1, - kwargs={"a": 2, "b": 3} - ) - result = r.to_here() - rpc.shutdown() - """ - ) - obj.run( - pytorch_code, - ["result"] - ) From e4864749dd2860439433295b47354a16b9d44b9b Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Thu, 17 Oct 2024 13:38:24 +0800 Subject: [PATCH 30/39] update --- paconvert/api_mapping.json | 11 ++++ paconvert/api_matcher.py | 23 +++++++ ..._distributed_optim_DistributedOptimizer.py | 66 +++++++++++++++++++ 3 files changed, 100 insertions(+) create mode 100644 tests/test_distributed_optim_DistributedOptimizer.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 15c55b3ce..d6a7ddc35 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6363,6 +6363,17 @@ "timeout" ] }, + "torch.distributed.optim.DistributedOptimizer":{ + "Matcher": "DistributedOptimizerMatcher", + "paddle_api": 
"paddle.distributed.shard_optimizer", + "min_input_args": 3, + "args_list": [ + "optimizer_class", + "params_rref", + "args", + "kwargs" + ] + }, "torch.distributed.rpc.shutdown": { "Matcher": "GenericMatcher", "paddle_api": "paddle.distributed.rpc.shutdown", diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index f5ebc49a8..3b9207dab 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -741,6 +741,29 @@ def generate_code(self, kwargs): return code +class DistributedOptimizerMatcher(BaseMatcher): + def generate_code(self, kwargs): + print(kwargs) + if 'lr' in kwargs.keys(): + kwargs['learning_rate'] = kwargs.pop('lr') + if 'params' in kwargs.keys(): + kwargs['parameters'] = kwargs.pop('params') + if 'eps' in kwargs.keys(): + kwargs['epsilon'] = kwargs.pop('eps') + kwargs.pop('params_rref') + opt_class = kwargs.pop('optimizer_class') + API_TEMPLATE = textwrap.dedent( + """ + paddle.distributed.fleet.init(is_collective=True) + strategy = fleet.DistributedStrategy() + opt = {}({}) + paddle.distributed.fleet.distributed_optimizer(opt, strategy=strategy) + """ + ) + code = API_TEMPLATE.format(opt_class, **kwargs) + return code + + class BroadcastShapesMatcher(BaseMatcher): def get_paddle_nodes(self, args, kwargs): if len(args) == 1 and isinstance(args[0], ast.Starred): diff --git a/tests/test_distributed_optim_DistributedOptimizer.py b/tests/test_distributed_optim_DistributedOptimizer.py new file mode 100644 index 000000000..24f0452f8 --- /dev/null +++ b/tests/test_distributed_optim_DistributedOptimizer.py @@ -0,0 +1,66 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase +from optimizer_helper import generate_optimizer_test_code + +obj = APIBase("torch.distributed.optim.DistributedOptimizer") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import os + import torch + import torch.distributed as dist + import torch.nn as nn + import torch.distributed.rpc as rpc + from torch.distributed.optim import DistributedOptimizer + + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '29500' + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + rpc.init_rpc( + "worker1", + rank=0, + world_size=1 + ) + + input_size = 10 + output_size = 1 + model = linear = nn.Linear(input_size, output_size) + + data = torch.randn(batch_size, input_size) + + loss_fn = nn.MSELoss() + target = torch.randn(batch_size, output_size) + + params_rref = rpc.RRef(model) + optimizer_class = torch.optim.SGD + optimizer_args = (params_rref,) + optimizer_kwargs = {'lr': 0.01} + optimizer = DistributedOptimizer(optimizer_class, params_rref, *optimizer_args, **optimizer_kwargs) + + output = params_rref.rpc_sync().forward(data) + loss = loss_fn(output, target) + optimizer.zero_grad() + loss.backward() + optimizer.step() + rpc.shutdown() + + """ + ) + obj.run(pytorch_code, ["result"], rtol=1.0e-5) From 0c741fecc12e2aac3e55c411d1ddf20aaf822dba Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Thu, 17 Oct 2024 20:31:18 +0800 Subject: [PATCH 31/39] update --- paconvert/api_matcher.py | 18 ++-------- ..._distributed_optim_DistributedOptimizer.py | 36 ++++++++++++------- 2 files changed, 26 insertions(+), 28 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 3b9207dab..a884efb06 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -821,10 +821,7 @@ def generate_aux_code(self): class TransformsPositiveDefiniteTransform: def __call__(self, x): x = x.tril(-1) + x.diagonal(axis1=-2, axis2=-1).exp().diag_embed() - shape_list = list(range(x.ndim)) - shape_list[-1], shape_list[-2] = shape_list[-2], shape_list[-1] - y = x.transpose(perm=shape_list) - return x @ y + return x @ x.T def inv(self, y): y = paddle.linalg.cholesky(y) @@ -875,21 +872,10 @@ def generate_code(self, kwargs): class Is_InferenceMatcher(BaseMatcher): - def generate_aux_code(self): - API_TEMPLATE = textwrap.dedent( - """ - def Is_Inference(x): - is_inference = not x.stop_gradient - return is_inference - """ - ) - - return API_TEMPLATE def generate_code(self, kwargs): - self.write_aux_code() if "input" not in kwargs: kwargs["input"] = self.paddleClass - code = "paddle_aux.Is_Inference(x={})".format(kwargs["input"]) + code = "{}.stop_gradient".format(kwargs["input"]) return code diff --git a/tests/test_distributed_optim_DistributedOptimizer.py b/tests/test_distributed_optim_DistributedOptimizer.py index 24f0452f8..5419ffcc0 100644 --- a/tests/test_distributed_optim_DistributedOptimizer.py +++ b/tests/test_distributed_optim_DistributedOptimizer.py @@ -28,39 +28,51 @@ def test_case_1(): import torch.distributed as dist import torch.nn as nn import torch.distributed.rpc as rpc + from torch.optim import SGD from torch.distributed.optim import DistributedOptimizer os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '29500' os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + # 初始化RPC rpc.init_rpc( "worker1", rank=0, world_size=1 ) - input_size = 10 - output_size = 1 - model = linear = nn.Linear(input_size, output_size) + class SimpleModel(torch.nn.Module): + def 
__init__(self): + super(SimpleModel, self).__init__() + self.param = nn.Linear(10, 1) - data = torch.randn(batch_size, input_size) + def forward(self, x): + return self.param(x) + + # 初始化 + data = torch.randn(1, 10) + target = torch.randn(1, 1) + model = SimpleModel() - loss_fn = nn.MSELoss() - target = torch.randn(batch_size, output_size) - - params_rref = rpc.RRef(model) - optimizer_class = torch.optim.SGD + # 创建远程模型 + remote_model_rref = rpc.remote("worker1", model, args=(data)) + # 创建分布式优化器 + optimizer_class = SGD optimizer_args = (params_rref,) optimizer_kwargs = {'lr': 0.01} optimizer = DistributedOptimizer(optimizer_class, params_rref, *optimizer_args, **optimizer_kwargs) - output = params_rref.rpc_sync().forward(data) + # 输出 + output = remote_model_rref.to_here() + loss_fn = nn.MSELoss() loss = loss_fn(output, target) + optimizer.zero_grad() loss.backward() - optimizer.step() + optimizer.step(worker1) rpc.shutdown() + result = 1 """ ) - obj.run(pytorch_code, ["result"], rtol=1.0e-5) + obj.run(pytorch_code, ["result"]) From eca644cfaa2ad4c429344e0dfa6f45ddbd126eb8 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Thu, 17 Oct 2024 21:39:28 +0800 Subject: [PATCH 32/39] update --- paconvert/api_matcher.py | 2 +- tests/test_Tensor_is_inference.py | 4 ++-- tests/test_is_inference.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index a884efb06..b8a79edab 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -784,7 +784,7 @@ def generate_aux_code(self): def StudentT_Aux_Func(df, loc, scale): class StudentT_Aux_Class: def __init__(self, df, loc, scale): - self.df = df + self.df = paddle.to_tensor(df) self.loc = paddle.to_tensor(loc) self.scale = paddle.to_tensor(scale) self.sT = paddle.distribution.StudentT(self.df, self.loc, self.scale) diff --git a/tests/test_Tensor_is_inference.py b/tests/test_Tensor_is_inference.py index 73c2ad767..314c2b871 100644 --- a/tests/test_Tensor_is_inference.py +++ b/tests/test_Tensor_is_inference.py @@ -28,7 +28,7 @@ def test_case_1(): result = x.is_inference() """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): @@ -38,4 +38,4 @@ def test_case_2(): result = torch.tensor([-0.6341, -1.4208, -1.0900, 0.5826]).is_inference() """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_is_inference.py b/tests/test_is_inference.py index 74981183d..2e1f12f84 100644 --- a/tests/test_is_inference.py +++ b/tests/test_is_inference.py @@ -28,7 +28,7 @@ def test_case_1(): result = torch.is_inference(x) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): pytorch_code = textwrap.dedent( @@ -38,4 +38,4 @@ def test_case_2(): result = torch.is_inference(input = x) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) From 1ebbedcf627e595a0013a6bbcef1dd9e9a038351 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Thu, 17 Oct 2024 22:13:11 +0800 Subject: [PATCH 33/39] add DistributedOptimizer --- paconvert/api_mapping.json | 2 +- ..._distributed_optim_DistributedOptimizer.py | 44 ++++++------------- 2 files changed, 14 insertions(+), 32 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index d6a7ddc35..e4f40a4dd 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ 
-6365,7 +6365,7 @@ }, "torch.distributed.optim.DistributedOptimizer":{ "Matcher": "DistributedOptimizerMatcher", - "paddle_api": "paddle.distributed.shard_optimizer", + "paddle_api": "paddle.distributed.fleet.distributed_optimizer", "min_input_args": 3, "args_list": [ "optimizer_class", diff --git a/tests/test_distributed_optim_DistributedOptimizer.py b/tests/test_distributed_optim_DistributedOptimizer.py index 5419ffcc0..e002eebe2 100644 --- a/tests/test_distributed_optim_DistributedOptimizer.py +++ b/tests/test_distributed_optim_DistributedOptimizer.py @@ -28,51 +28,33 @@ def test_case_1(): import torch.distributed as dist import torch.nn as nn import torch.distributed.rpc as rpc - from torch.optim import SGD from torch.distributed.optim import DistributedOptimizer os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '29500' os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' - # 初始化RPC rpc.init_rpc( "worker1", rank=0, world_size=1 ) - class SimpleModel(torch.nn.Module): - def __init__(self): - super(SimpleModel, self).__init__() - self.param = nn.Linear(10, 1) + data = nn.Parameter(torch.tensor([[-0.4229, 1.2159, -1.3944, 0.8764, -2.5841, -2.1045, -0.7999, 0.1856, + 0.6989, 0.3954]]), requires_grad=True) + rref1 = rpc.remote("worker1", torch.add, args=(data, 3)) + target = torch.tensor([[1.1352]]) + Loss_fuc = torch.nn.MSELoss() - def forward(self, x): - return self.param(x) - - # 初始化 - data = torch.randn(1, 10) - target = torch.randn(1, 1) - model = SimpleModel() + optimizer = DistributedOptimizer( + torch.optim.SGD, + [rref1], + lr=0.01 + ) - # 创建远程模型 - remote_model_rref = rpc.remote("worker1", model, args=(data)) - # 创建分布式优化器 - optimizer_class = SGD - optimizer_args = (params_rref,) - optimizer_kwargs = {'lr': 0.01} - optimizer = DistributedOptimizer(optimizer_class, params_rref, *optimizer_args, **optimizer_kwargs) - - # 输出 - output = remote_model_rref.to_here() - loss_fn = nn.MSELoss() - loss = loss_fn(output, target) - - optimizer.zero_grad() - loss.backward() - optimizer.step(worker1) + # 打印损失 + print(f"Iteration {i}, Loss: {loss.item()}") rpc.shutdown() - - result = 1 + result = True """ ) obj.run(pytorch_code, ["result"]) From d60ebc6d95f7a4c86c5615c5e059e866b5696508 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Fri, 18 Oct 2024 22:45:45 +0800 Subject: [PATCH 34/39] update --- ..._distributed_optim_DistributedOptimizer.py | 60 ------------------- 1 file changed, 60 deletions(-) delete mode 100644 tests/test_distributed_optim_DistributedOptimizer.py diff --git a/tests/test_distributed_optim_DistributedOptimizer.py b/tests/test_distributed_optim_DistributedOptimizer.py deleted file mode 100644 index e002eebe2..000000000 --- a/tests/test_distributed_optim_DistributedOptimizer.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import textwrap - -from apibase import APIBase -from optimizer_helper import generate_optimizer_test_code - -obj = APIBase("torch.distributed.optim.DistributedOptimizer") - - -def test_case_1(): - pytorch_code = textwrap.dedent( - """ - import os - import torch - import torch.distributed as dist - import torch.nn as nn - import torch.distributed.rpc as rpc - from torch.distributed.optim import DistributedOptimizer - - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '29500' - os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' - rpc.init_rpc( - "worker1", - rank=0, - world_size=1 - ) - - data = nn.Parameter(torch.tensor([[-0.4229, 1.2159, -1.3944, 0.8764, -2.5841, -2.1045, -0.7999, 0.1856, - 0.6989, 0.3954]]), requires_grad=True) - rref1 = rpc.remote("worker1", torch.add, args=(data, 3)) - target = torch.tensor([[1.1352]]) - Loss_fuc = torch.nn.MSELoss() - - optimizer = DistributedOptimizer( - torch.optim.SGD, - [rref1], - lr=0.01 - ) - - # 打印损失 - print(f"Iteration {i}, Loss: {loss.item()}") - rpc.shutdown() - result = True - """ - ) - obj.run(pytorch_code, ["result"]) From 4af9f36af6776bcd770db7ad24dc148730796f64 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Mon, 21 Oct 2024 15:32:36 +0800 Subject: [PATCH 35/39] update --- paconvert/api_mapping.json | 4 +- paconvert/api_matcher.py | 23 ------ ..._distributed_optim_DistributedOptimizer.py | 78 +++++++++++++++++++ 3 files changed, 81 insertions(+), 24 deletions(-) create mode 100644 tests/test_distributed_optim_DistributedOptimizer.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index e4f40a4dd..a5ccc54a7 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6365,13 +6365,15 @@ }, "torch.distributed.optim.DistributedOptimizer":{ "Matcher": "DistributedOptimizerMatcher", - "paddle_api": "paddle.distributed.fleet.distributed_optimizer", "min_input_args": 3, "args_list": [ "optimizer_class", "params_rref", "args", "kwargs" + ], + "unsupport_args": [ + "params_rref" ] }, "torch.distributed.rpc.shutdown": { diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index b8a79edab..01e4724fc 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -741,29 +741,6 @@ def generate_code(self, kwargs): return code -class DistributedOptimizerMatcher(BaseMatcher): - def generate_code(self, kwargs): - print(kwargs) - if 'lr' in kwargs.keys(): - kwargs['learning_rate'] = kwargs.pop('lr') - if 'params' in kwargs.keys(): - kwargs['parameters'] = kwargs.pop('params') - if 'eps' in kwargs.keys(): - kwargs['epsilon'] = kwargs.pop('eps') - kwargs.pop('params_rref') - opt_class = kwargs.pop('optimizer_class') - API_TEMPLATE = textwrap.dedent( - """ - paddle.distributed.fleet.init(is_collective=True) - strategy = fleet.DistributedStrategy() - opt = {}({}) - paddle.distributed.fleet.distributed_optimizer(opt, strategy=strategy) - """ - ) - code = API_TEMPLATE.format(opt_class, **kwargs) - return code - - class BroadcastShapesMatcher(BaseMatcher): def get_paddle_nodes(self, args, kwargs): if len(args) == 1 and isinstance(args[0], ast.Starred): diff --git a/tests/test_distributed_optim_DistributedOptimizer.py b/tests/test_distributed_optim_DistributedOptimizer.py new file mode 100644 index 000000000..1df7dc880 --- /dev/null +++ b/tests/test_distributed_optim_DistributedOptimizer.py @@ -0,0 +1,78 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase +from optimizer_helper import generate_optimizer_test_code + +obj = APIBase("torch.distributed.optim.DistributedOptimizer") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import os + import torch + import torch.distributed as dist + import torch.nn as nn + import torch.distributed.rpc as rpc + from torch.distributed.optim import DistributedOptimizer + + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '29500' + os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:29501' + rpc.init_rpc( + "worker1", + rank=0, + world_size=1 + ) + + class SimpleLinearModel(nn.Module): + def __init__(self, input_size, output_size): + super(SimpleLinearModel, self).__init__() + self.linear = nn.Linear(input_size, output_size) + + def forward(self, x): + return self.linear(x) + + input_size = 10 + output_size = 1 + model = SimpleLinearModel(input_size, output_size) + + data = torch.randn(batch_size, input_size) + + loss_fn = nn.MSELoss() + target = torch.randn(batch_size, output_size) + + params_rref = rpc.RRef(model) + optimizer_class = torch.optim.SGD + optimizer_args = (params_rref,) + optimizer_kwargs = {'lr': 0.01} + optimizer = DistributedOptimizer(optimizer_class, params_rref, *optimizer_args, **optimizer_kwargs) + + output = params_rref.rpc_sync().forward(data) + loss = loss_fn(output, target) + optimizer.zero_grad() + loss.backward() + optimizer.step() + rpc.shutdown() + + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle does not support tensor in DistributedOptimizer",) From 3e2e385b4b6e45485863cb783f2b283ae1bd269d Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Mon, 21 Oct 2024 16:09:05 +0800 Subject: [PATCH 36/39] update --- ..._distributed_optim_DistributedOptimizer.py | 41 +++++-------------- 1 file changed, 10 insertions(+), 31 deletions(-) diff --git a/tests/test_distributed_optim_DistributedOptimizer.py b/tests/test_distributed_optim_DistributedOptimizer.py index 1df7dc880..4426b1141 100644 --- a/tests/test_distributed_optim_DistributedOptimizer.py +++ b/tests/test_distributed_optim_DistributedOptimizer.py @@ -25,8 +25,7 @@ def test_case_1(): """ import os import torch - import torch.distributed as dist - import torch.nn as nn + from torch import optim import torch.distributed.rpc as rpc from torch.distributed.optim import DistributedOptimizer @@ -38,35 +37,15 @@ def test_case_1(): rank=0, world_size=1 ) - - class SimpleLinearModel(nn.Module): - def __init__(self, input_size, output_size): - super(SimpleLinearModel, self).__init__() - self.linear = nn.Linear(input_size, output_size) - - def forward(self, x): - return self.linear(x) - - input_size = 10 - output_size = 1 - model = SimpleLinearModel(input_size, output_size) - - data = torch.randn(batch_size, input_size) - - loss_fn = nn.MSELoss() - target = torch.randn(batch_size, output_size) - - params_rref = rpc.RRef(model) - optimizer_class = torch.optim.SGD - 
optimizer_args = (params_rref,) - optimizer_kwargs = {'lr': 0.01} - optimizer = DistributedOptimizer(optimizer_class, params_rref, *optimizer_args, **optimizer_kwargs) - - output = params_rref.rpc_sync().forward(data) - loss = loss_fn(output, target) - optimizer.zero_grad() - loss.backward() - optimizer.step() + # Forward pass. + rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3)) + rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1)) + # Optimizer. + dist_optim = DistributedOptimizer( + optim.SGD, + [rref1, rref2], + lr=0.05, + ) rpc.shutdown() """ From ecb389dceeb98823e32d7f86d2fa848976471f79 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Mon, 21 Oct 2024 16:55:55 +0800 Subject: [PATCH 37/39] update --- paconvert/api_mapping.json | 13 ------------- .../test_distributed_optim_DistributedOptimizer.py | 3 +-- 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index a5ccc54a7..15c55b3ce 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -6363,19 +6363,6 @@ "timeout" ] }, - "torch.distributed.optim.DistributedOptimizer":{ - "Matcher": "DistributedOptimizerMatcher", - "min_input_args": 3, - "args_list": [ - "optimizer_class", - "params_rref", - "args", - "kwargs" - ], - "unsupport_args": [ - "params_rref" - ] - }, "torch.distributed.rpc.shutdown": { "Matcher": "GenericMatcher", "paddle_api": "paddle.distributed.rpc.shutdown", diff --git a/tests/test_distributed_optim_DistributedOptimizer.py b/tests/test_distributed_optim_DistributedOptimizer.py index 4426b1141..7c4f2c62f 100644 --- a/tests/test_distributed_optim_DistributedOptimizer.py +++ b/tests/test_distributed_optim_DistributedOptimizer.py @@ -27,7 +27,6 @@ def test_case_1(): import torch from torch import optim import torch.distributed.rpc as rpc - from torch.distributed.optim import DistributedOptimizer os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '29500' @@ -41,7 +40,7 @@ def test_case_1(): rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3)) rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1)) # Optimizer. 
- dist_optim = DistributedOptimizer( + dist_optim = torch.distributed.optim.DistributedOptimizer( optim.SGD, [rref1, rref2], lr=0.05, From cc0062e165178e1e3f91a648508d9b9d22b98dde Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 22 Oct 2024 15:58:50 +0800 Subject: [PATCH 38/39] upadte api_matcher --- paconvert/api_matcher.py | 44 +++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 01e4724fc..c268b8010 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -758,16 +758,14 @@ def generate_aux_code(self): API_TEMPLATE = textwrap.dedent( """ import paddle - def StudentT_Aux_Func(df, loc, scale): - class StudentT_Aux_Class: - def __init__(self, df, loc, scale): - self.df = paddle.to_tensor(df) - self.loc = paddle.to_tensor(loc) - self.scale = paddle.to_tensor(scale) - self.sT = paddle.distribution.StudentT(self.df, self.loc, self.scale) - def sample(self): - return paddle.reshape(self.sT.sample(), self.df.shape) - return StudentT_Aux_Class(df, loc, scale) + class StudentT_Aux_Class: + def __init__(self, df, loc, scale): + self.df = paddle.to_tensor(df) + self.loc = paddle.to_tensor(loc) + self.scale = paddle.to_tensor(scale) + self.sT = paddle.distribution.StudentT(self.df, self.loc, self.scale) + def sample(self): + return paddle.reshape(self.sT.sample(), self.df.shape) """ ) @@ -783,7 +781,7 @@ def generate_code(self, kwargs): kwargs = self.kwargs_to_str(kwargs) API_TEMPLATE = textwrap.dedent( """ - paddle_aux.StudentT_Aux_Func({}) + paddle_aux.StudentT_Aux_Class({}) """ ) code = API_TEMPLATE.format(kwargs) @@ -822,13 +820,11 @@ def generate_aux_code(self): API_TEMPLATE = textwrap.dedent( """ import paddle - def LKJCholesky_Aux_Func(dim, concentration, sample_method='onion'): - class LKJCholesky_Aux_Class: - def __init__(self, dim, concentration, sample_method='onion'): - self.lkj = paddle.distribution.LKJCholesky(dim, concentration, sample_method) - def sample(self): - return paddle.unsqueeze(self.lkj.sample(), axis=0) - return LKJCholesky_Aux_Class(dim, concentration, sample_method) + class LKJCholesky_Aux_Class: + def __init__(self, dim, concentration, sample_method='onion'): + self.lkj = paddle.distribution.LKJCholesky(dim, concentration, sample_method) + def sample(self): + return paddle.unsqueeze(self.lkj.sample(), axis=0) """ ) @@ -840,7 +836,7 @@ def generate_code(self, kwargs): kwargs = self.kwargs_to_str(kwargs) API_TEMPLATE = textwrap.dedent( """ - paddle_aux.LKJCholesky_Aux_Func({}) + paddle_aux.LKJCholesky_Aux_Class({}) """ ) code = API_TEMPLATE.format(kwargs) @@ -861,11 +857,9 @@ def generate_aux_code(self): API_TEMPLATE = textwrap.dedent( """ import paddle - def Distributions_Constraint(): - class DistributionsConstrain: - def check(self, value): - return paddle.distribution.constraint.Constraint()(value) - return DistributionsConstrain() + class DistributionsConstrain: + def check(self, value): + return paddle.distribution.constraint.Constraint()(value) """ ) @@ -874,7 +868,7 @@ def generate_code(self, kwargs): self.write_aux_code() API_TEMPLATE = textwrap.dedent( """ - paddle_aux.Distributions_Constraint() + paddle_aux.DistributionsConstrain() """ ) return API_TEMPLATE From 55139a74a8224f24ddc2bd138567dafb765ad163 Mon Sep 17 00:00:00 2001 From: decade-afk <3995409050@qq.com> Date: Tue, 22 Oct 2024 18:01:00 +0800 Subject: [PATCH 39/39] update --- paconvert/api_matcher.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) 
diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py
index c268b8010..1f5791e88 100644
--- a/paconvert/api_matcher.py
+++ b/paconvert/api_matcher.py
@@ -5208,8 +5208,7 @@ def generate_code(self, kwargs):
         kwargs = self.kwargs_to_str(kwargs)
         API_TEMPLATE = textwrap.dedent(
             """
-            remote_obj = paddle.distributed.rpc.rpc_async({})
-            paddle_aux.rpc_remote(remote_obj)
+            paddle_aux.rpc_remote(paddle.distributed.rpc.rpc_async({}))
             """
         )
         code = API_TEMPLATE.format(