diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json
index 82b20f7dd..3fe4d05d2 100644
--- a/paconvert/api_mapping.json
+++ b/paconvert/api_mapping.json
@@ -9403,6 +9403,37 @@
             "n": 2
         }
     },
+    "torch.nn.parallel.DistributedDataParallel": {
+        "Matcher": "GenericMatcher",
+        "paddle_api": "paddle.DataParallel",
+        "args_list": [
+            "module",
+            "device_ids",
+            "output_device",
+            "dim",
+            "broadcast_buffers",
+            "process_group",
+            "bucket_cap_mb",
+            "find_unused_parameters",
+            "check_reduction",
+            "gradient_as_bucket_view",
+            "static_graph"
+        ],
+        "kwargs_change": {
+            "module": "layers",
+            "device_ids": "",
+            "bucket_cap_mb": "comm_buffer_size",
+            "check_reduction": ""
+        },
+        "unsupport_args": [
+            "output_device",
+            "dim",
+            "broadcast_buffers",
+            "process_group",
+            "gradient_as_bucket_view",
+            "static_graph"
+        ]
+    },
     "torch.nn.utils.clip_grad_norm_": {
         "Matcher": "GenericMatcher",
         "paddle_api": "paddle.nn.utils.clip_grad_norm_",
diff --git a/tests/test_nn_parallel_DistributedDataParallel.py b/tests/test_nn_parallel_DistributedDataParallel.py
new file mode 100644
index 000000000..18721b099
--- /dev/null
+++ b/tests/test_nn_parallel_DistributedDataParallel.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import textwrap
+
+from apibase import APIBase
+
+obj = APIBase("paddle.DataParallel")
+
+
+def _test_case_1():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        torch.distributed.init_process_group(
+            "nccl",
+            init_method="tcp://127.0.0.1:23456",
+            rank=0,
+            world_size=1
+        )
+        model = torch.nn.Linear(1, 1, bias=False).cuda()
+        model = torch.nn.parallel.DistributedDataParallel(model)
+        result = True
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def _test_case_2():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        torch.distributed.init_process_group(
+            "nccl",
+            init_method="tcp://127.0.0.1:23456",
+            rank=0,
+            world_size=1
+        )
+        model = torch.nn.Linear(1, 1, bias=False).cuda()
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[0])
+        result = True
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_3():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        torch.distributed.init_process_group(
+            "nccl",
+            init_method="tcp://127.0.0.1:23456",
+            rank=0,
+            world_size=1
+        )
+        model = torch.nn.Linear(1, 1, bias=False).cuda()
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[0], output_device=0)
+        result = True
+        """
+    )
+    obj.run(
+        pytorch_code,
+        ["result"],
+        unsupport=True,
+        reason="paddle does not support the parameter",
+    )
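
For reference, below is a minimal sketch of the before/after translation this mapping is expected to produce, assuming GenericMatcher's usual handling: keys listed in "kwargs_change" are renamed, keys mapped to an empty string (device_ids, check_reduction) are dropped, and any argument named in "unsupport_args" makes the call report an unsupported parameter, which is what test_case_3 exercises. The sketch is illustrative only and not part of this diff; the bucket_cap_mb value and the find_unused_parameters flag are made-up example arguments.

    # Illustrative sketch of the expected conversion; the snippets are kept as plain
    # strings so the example stays self-contained and needs neither torch nor paddle.

    # PyTorch input handled by the new mapping.
    pytorch_snippet = """
    model = torch.nn.parallel.DistributedDataParallel(
        module=model, device_ids=[0], bucket_cap_mb=25, find_unused_parameters=True
    )
    """

    # Roughly the expected paconvert output: module -> layers,
    # bucket_cap_mb -> comm_buffer_size, device_ids dropped (mapped to "" above).
    expected_paddle_snippet = """
    model = paddle.DataParallel(
        layers=model, comm_buffer_size=25, find_unused_parameters=True
    )
    """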