Conversion rule for torch.nn.parallel.DistributedDataParallel (#240)
LokeZhou authored Aug 29, 2023
1 parent 6b39905 commit bba951b
Showing 2 changed files with 109 additions and 0 deletions.
31 changes: 31 additions & 0 deletions paconvert/api_mapping.json
@@ -9403,6 +9403,37 @@
"n": 2
}
},
"torch.nn.parallel.DistributedDataParallel": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.DataParallel",
"args_list": [
"module",
"device_ids",
"output_device",
"dim",
"broadcast_buffers",
"process_group",
"bucket_cap_mb",
"find_unused_parameters",
"check_reduction",
"gradient_as_bucket_view",
"static_graph"
],
"kwargs_change": {
"module": "layers",
"device_ids": "",
"bucket_cap_mb": "comm_buffer_size",
"check_reduction": ""
},
"unsupport_args": [
"output_device",
"dim",
"broadcast_buffers",
"process_group",
"gradient_as_bucket_view",
"static_graph"
]
},
"torch.nn.utils.clip_grad_norm_": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.utils.clip_grad_norm_",
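For reference, here is a minimal sketch of the rewrite this mapping entry describes. It assumes GenericMatcher renames keywords per kwargs_change (module → layers, bucket_cap_mb → comm_buffer_size) and drops keys mapped to the empty string (device_ids, check_reduction); the argument values are illustrative and the exact code PaConvert emits may differ:

# Hypothetical PyTorch input (running it would require an initialized
# process group, as in the tests below):
import torch
model = torch.nn.Linear(1, 1)
ddp = torch.nn.parallel.DistributedDataParallel(
    model, device_ids=[0], bucket_cap_mb=25, check_reduction=False
)

# Paddle code after applying the mapping: `module` is passed positionally,
# `bucket_cap_mb` is renamed to `comm_buffer_size`, and `device_ids` /
# `check_reduction` are dropped.
import paddle
model = paddle.nn.Linear(1, 1)
ddp = paddle.DataParallel(model, comm_buffer_size=25)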
78 changes: 78 additions & 0 deletions tests/test_nn_parallel_DistributedDataParallel.py
@@ -0,0 +1,78 @@
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import textwrap

from apibase import APIBase

obj = APIBase("paddle.DataParallel")


def _test_case_1():
pytorch_code = textwrap.dedent(
"""
import torch
torch.distributed.init_process_group(
"nccl",
init_method="tcp://127.0.0.1:23456",
rank=0,
world_size=1
)
model = torch.nn.Linear(1, 1, bias=False).cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
result = True
"""
)
obj.run(pytorch_code, ["result"])


def _test_case_2():
pytorch_code = textwrap.dedent(
"""
import torch
torch.distributed.init_process_group(
"nccl",
init_method="tcp://127.0.0.1:23456",
rank=0,
world_size=1
)
model = torch.nn.Linear(1, 1, bias=False).cuda()
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[0])
result = True
"""
)
obj.run(pytorch_code, ["result"])


def test_case_3():
pytorch_code = textwrap.dedent(
"""
import torch
torch.distributed.init_process_group(
"nccl",
init_method="tcp://127.0.0.1:23456",
rank=0,
world_size=1
)
model = torch.nn.Linear(1, 1, bias=False).cuda()
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[0], output_device=0)
result = True
"""
)
obj.run(
pytorch_code,
["result"],
unsupport=True,
reason="paddle not support the parameter",
)
