Unit test case enhancement 4 (#458)
* CaseAdd4

* fix softmax

* CaseAdd4

* CaseAdd4
Xuxuanang authored Sep 3, 2024
1 parent 78fd805 commit 6a0bdf9
Showing 19 changed files with 872 additions and 48 deletions.
13 changes: 8 additions & 5 deletions paconvert/api_mapping.json
@@ -11057,11 +11057,13 @@
"layer_norm_eps",
"batch_first",
"norm_first",
"bias",
"device",
"dtype"
],
"kwargs_change": {
"norm_first": "normalize_before",
"bias": "bias_attr",
"device": "",
"dtype": ""
},
@@ -11093,11 +11095,13 @@
"layer_norm_eps",
"batch_first",
"norm_first",
"bias",
"device",
"dtype"
],
"kwargs_change": {
"norm_first": "normalize_before",
"bias": "bias_attr",
"device": "",
"dtype": ""
},
@@ -11137,11 +11141,13 @@
"layer_norm_eps",
"batch_first",
"norm_first",
"bias",
"device",
"dtype"
],
"kwargs_change": {
"norm_first": "normalize_before",
"bias": "bias_attr",
"device": "",
"dtype": ""
},
@@ -12095,7 +12101,7 @@
}
},
"torch.nn.functional.log_softmax": {
"Matcher": "RequireDimMatcher",
"Matcher": "SoftmaxMatcher",
"paddle_api": "paddle.nn.functional.log_softmax",
"args_list": [
"input",
@@ -12218,9 +12224,6 @@
"kwargs_change": {
"input": "x"
},
"unsupport_args": [
"output_size"
],
"min_input_args": 3
},
"torch.nn.functional.max_unpool2d": {
@@ -12608,7 +12611,7 @@
"min_input_args": 2
},
"torch.nn.functional.softmax": {
"Matcher": "RequireDimMatcher",
"Matcher": "SoftmaxMatcher",
"paddle_api": "paddle.nn.functional.softmax",
"args_list": [
"input",
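Taken together, these mapping updates rename the Transformer layers' newer `bias` keyword to Paddle's `bias_attr` and switch `torch.nn.functional.softmax` / `log_softmax` from `RequireDimMatcher` to `SoftmaxMatcher` (the "fix softmax" change noted in the commit message). A rough before/after sketch of the translation these entries imply — the `dim` → `axis` handling is assumed here, since that part of the mapping is not visible in this hunk, and the converter's actual output may differ in detail:

```python
# PyTorch input (what a user would feed to the converter)
import torch

layer = torch.nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=2048,
                                          norm_first=False, bias=False)
probs = torch.nn.functional.softmax(torch.rand(2, 3), dim=-1)

# Approximate Paddle code implied by the mapping above
# (a sketch, not verbatim converter output)
import paddle

layer = paddle.nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=2048,
                                          normalize_before=False, bias_attr=False)
probs = paddle.nn.functional.softmax(paddle.rand([2, 3]), axis=-1)
```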
69 changes: 69 additions & 0 deletions tests/test_nn_Transformer.py
@@ -148,3 +148,72 @@ def test_case_7():
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_8():
pytorch_code = textwrap.dedent(
"""
import torch
transformer_model = torch.nn.Transformer(d_model=512,
nhead=8, num_encoder_layers=6,
num_decoder_layers=6, dim_feedforward=2048,
dropout=0.1, activation='relu',
custom_encoder=None, custom_decoder=None,
layer_norm_eps=1e-05, batch_first=False,
norm_first=False, bias=False,
device=None, dtype=None)
src = torch.rand((10, 32, 512))
tgt = torch.rand((10, 32, 512))
result = transformer_model(src, tgt)
"""
)
obj.run(
pytorch_code,
["result"],
check_value=False,
unsupport=True,
reason="paddle unsupport layer_norm_eps args",
)


def test_case_9():
pytorch_code = textwrap.dedent(
"""
import torch
transformer_model = torch.nn.Transformer(512,
8, 6, 6, 2048,
0.1, 'relu',
None, None,
1e-05, False,
False, False,
None, None)
src = torch.rand((10, 32, 512))
tgt = torch.rand((10, 32, 512))
result = transformer_model(src, tgt)
"""
)
obj.run(
pytorch_code,
["result"],
check_value=False,
unsupport=True,
reason="paddle unsupport layer_norm_eps args",
)


def test_case_10():
pytorch_code = textwrap.dedent(
"""
import torch
transformer_model = torch.nn.Transformer(d_model=512,
nhead=8, num_encoder_layers=6,
num_decoder_layers=6, dim_feedforward=2048,
dropout=0.1, activation='relu',
custom_encoder=None, custom_decoder=None,
norm_first=False, bias=True, device=None, dtype=None)
src = torch.rand((10, 32, 512))
tgt = torch.rand((10, 32, 512))
result = transformer_model(src, tgt)
"""
)
obj.run(pytorch_code, ["result"], check_value=False)
57 changes: 57 additions & 0 deletions tests/test_nn_TransformerDecoderLayer.py
@@ -78,3 +78,60 @@ def test_case_4():
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_5():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
x = torch.ones(10, 32,512)
tgt = torch.ones(10, 32, 512)
model = nn.TransformerDecoderLayer(d_model=512, nhead=8,dim_feedforward=2048, dropout=0.1,
activation="relu", layer_norm_eps=1e-06, batch_first=False,
norm_first=False, bias=True, device=None, dtype=None)
result = model(tgt,x)
"""
)
obj.run(
pytorch_code,
["result"],
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_6():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
x = torch.ones(10, 32,512)
tgt = torch.ones(10, 32, 512)
model = nn.TransformerDecoderLayer(512, 8,2048, 0.1, "relu", 1e-06, False,
False, True, None, None)
result = model(tgt,x)
"""
)
obj.run(
pytorch_code,
["result"],
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_7():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
x = torch.ones(10, 32,512)
tgt = torch.ones(10, 32, 512)
model = nn.TransformerDecoderLayer(d_model=512, nhead=8,dim_feedforward=2048, dropout=0.1,
activation="relu", layer_norm_eps=1e-06,
norm_first=False, bias=True, device=None, dtype=None)
result = model(tgt,x)
"""
)
obj.run(pytorch_code, ["result"], check_value=False)
61 changes: 61 additions & 0 deletions tests/test_nn_TransformerEncoderLayer.py
@@ -123,3 +123,64 @@ def test_case_7():
"""
)
obj.run(pytorch_code, ["result"], check_value=False)


def test_case_8():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
tgt = torch.ones(10, 32, 512)
model = nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=2048, dropout=0.1,
activation="relu", layer_norm_eps=1e-05, batch_first=False,
norm_first=False, bias=True, device=None, dtype=None)
result = model(tgt)
"""
)
obj.run(
pytorch_code,
["result"],
check_value=False,
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_9():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
tgt = torch.ones(10, 32, 512)
model = nn.TransformerEncoderLayer(512, 8, 2048, 0.1,
"relu", 1e-05, False,
False, True, "cpu", None)
result = model(tgt)
"""
)
obj.run(
pytorch_code,
["result"],
check_value=False,
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_10():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
tgt = torch.ones(10, 32, 512)
model = nn.TransformerEncoderLayer(512, 8,
2048,
0.1, 'relu',
norm_first=False,
device=None,
bias=True,
dtype=torch.float32)
result = model(tgt)
"""
)
obj.run(pytorch_code, ["result"], check_value=False)
18 changes: 18 additions & 0 deletions tests/test_nn_Upsample.py
@@ -127,3 +127,21 @@ def test_case_6():
obj.run(
pytorch_code, unsupport=True, reason="paddle unsupport recompute_scale_factor "
)


def test_case_7():
pytorch_code = textwrap.dedent(
"""
import torch
input = torch.tensor([[[[ 1.1524, 0.4714, 0.2857],
[-1.2533, -0.9829, -1.0981],
[ 0.1507, -1.1431, -2.0361]],
[[ 0.1024, -0.4482, 0.4137],
[ 0.9385, 0.4565, 0.7702],
[ 0.4135, -0.2587, 0.0482]]]])
m = torch.nn.Upsample(scale_factor=2, align_corners=True, mode='bilinear')
result = m(input)
"""
)
obj.run(pytorch_code, ["result"])
62 changes: 56 additions & 6 deletions tests/test_nn_functional_embedding.py
@@ -58,11 +58,61 @@ def test_case_3():
pytorch_code = textwrap.dedent(
"""
import torch
w0 = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
result = torch.nn.functional.embedding(x,embedding_matrix,padding_idx=0,max_norm=2)
import numpy as np
embedding_matrix = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
x = torch.tensor(np.array([[0,1],[2,3]]))
result = torch.nn.functional.embedding(x, embedding_matrix, padding_idx=0, max_norm=2)
"""
)
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport max_norm")


def test_case_4():
pytorch_code = textwrap.dedent(
"""
import torch
import numpy as np
embedding_matrix = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
x = torch.tensor(np.array([[0,1],[2,3]]))
result = torch.nn.functional.embedding(input=x, weight=embedding_matrix, padding_idx=0, max_norm=2, norm_type=2.0, scale_grad_by_freq=False, sparse=True)
"""
)
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport max_norm ")


def test_case_5():
pytorch_code = textwrap.dedent(
"""
import torch
import numpy as np
embedding_matrix = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
x = torch.tensor(np.array([[0,1],[2,3]]))
result = torch.nn.functional.embedding(input=x, padding_idx=0, max_norm=2, weight=embedding_matrix, scale_grad_by_freq=False, norm_type=2.0, sparse=True)
"""
)
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport max_norm ")


def test_case_6():
pytorch_code = textwrap.dedent(
"""
import torch
import numpy as np
embedding_matrix = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
x = torch.tensor(np.array([[0,1],[2,3]]))
result = torch.nn.functional.embedding(x, embedding_matrix, 0, 2, 2.0, False, True)
"""
)
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport")
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport max_norm ")
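All of the embedding cases above are marked unsupported because `max_norm` has no counterpart in `paddle.nn.functional.embedding` (whose signature, as far as I know, also lacks `norm_type` and `scale_grad_by_freq`). As a point of comparison, a minimal sketch of the closest direct Paddle call — an illustration only, not what the converter emits — simply drops those arguments:

```python
import numpy as np
import paddle

embedding_matrix = paddle.to_tensor([[0., 0., 0.],
                                     [1., 1., 1.],
                                     [2., 2., 2.],
                                     [3., 3., 3.]])
x = paddle.to_tensor(np.array([[0, 1], [2, 3]]))
# paddle.nn.functional.embedding covers weight, padding_idx and sparse,
# but has no equivalent for max_norm, norm_type, or scale_grad_by_freq.
result = paddle.nn.functional.embedding(x, weight=embedding_matrix, padding_idx=0, sparse=True)
```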