From cf42abfab90155cc889bca346b3439be235ef033 Mon Sep 17 00:00:00 2001
From: LokeZhou
Date: Tue, 22 Aug 2023 08:36:17 +0000
Subject: [PATCH] Conversion rule for torch.nn.parallel.DistributedDataParallel

---
 paconvert/api_mapping.json                    | 31 ++++++++
 ...est_nn_parallel_DistributedDataParallel.py | 78 +++++++++++++++++++
 2 files changed, 109 insertions(+)
 create mode 100644 tests/test_nn_parallel_DistributedDataParallel.py

diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json
index 88b0494ec..fb4297ba9 100644
--- a/paconvert/api_mapping.json
+++ b/paconvert/api_mapping.json
@@ -9157,6 +9157,37 @@
             "n": 2
         }
     },
+    "torch.nn.parallel.DistributedDataParallel": {
+        "Matcher": "GenericMatcher",
+        "paddle_api": "paddle.DataParallel",
+        "args_list": [
+            "module",
+            "device_ids",
+            "output_device",
+            "dim",
+            "broadcast_buffers",
+            "process_group",
+            "bucket_cap_mb",
+            "find_unused_parameters",
+            "check_reduction",
+            "gradient_as_bucket_view",
+            "static_graph"
+        ],
+        "kwargs_change": {
+            "module": "layers",
+            "device_ids": "",
+            "bucket_cap_mb": "comm_buffer_size",
+            "check_reduction": ""
+        },
+        "unsupport_args": [
+            "output_device",
+            "dim",
+            "broadcast_buffers",
+            "process_group",
+            "gradient_as_bucket_view",
+            "static_graph"
+        ]
+    },
     "torch.nn.utils.clip_grad_norm_": {
         "Matcher": "GenericMatcher",
         "paddle_api": "paddle.nn.utils.clip_grad_norm_",
diff --git a/tests/test_nn_parallel_DistributedDataParallel.py b/tests/test_nn_parallel_DistributedDataParallel.py
new file mode 100644
index 000000000..18721b099
--- /dev/null
+++ b/tests/test_nn_parallel_DistributedDataParallel.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import textwrap
+
+from apibase import APIBase
+
+obj = APIBase("paddle.DataParallel")
+
+
+def _test_case_1():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        torch.distributed.init_process_group(
+            "nccl",
+            init_method="tcp://127.0.0.1:23456",
+            rank=0,
+            world_size=1
+        )
+        model = torch.nn.Linear(1, 1, bias=False).cuda()
+        model = torch.nn.parallel.DistributedDataParallel(model)
+        result = True
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def _test_case_2():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        torch.distributed.init_process_group(
+            "nccl",
+            init_method="tcp://127.0.0.1:23456",
+            rank=0,
+            world_size=1
+        )
+        model = torch.nn.Linear(1, 1, bias=False).cuda()
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[0])
+        result = True
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_3():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        torch.distributed.init_process_group(
+            "nccl",
+            init_method="tcp://127.0.0.1:23456",
+            rank=0,
+            world_size=1
+        )
+        model = torch.nn.Linear(1, 1, bias=False).cuda()
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[0], output_device=0)
+        result = True
+        """
+    )
+    obj.run(
+        pytorch_code,
+        ["result"],
+        unsupport=True,
+        reason="paddle does not support the parameter output_device",
+    )
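
-- 
A minimal sketch of the rewrite this mapping should produce, assuming
GenericMatcher applies "kwargs_change" as listed above ("module" renamed to
"layers", "bucket_cap_mb" renamed to "comm_buffer_size", "device_ids" and
"check_reduction" dropped); the exact code emitted by the converter may
differ:

    # PyTorch input handled by the new rule
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[0], bucket_cap_mb=25, find_unused_parameters=True
    )

    # Paddle equivalent after conversion: device_ids is dropped and
    # bucket_cap_mb becomes comm_buffer_size (both are sizes in MB)
    model = paddle.DataParallel(
        model, comm_buffer_size=25, find_unused_parameters=True
    )

Note that _test_case_1 and _test_case_2 carry a leading underscore, so pytest
does not collect them by default; running them would require a live NCCL
process group. test_case_3 passes unsupport=True, so it should only assert
that the converter reports the unsupported output_device argument rather than
execute the generated code.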