Unit test case enhancement 4 (单测case增强4) #458

Merged · 4 commits · Sep 3, 2024
Changes from 3 commits
13 changes: 8 additions & 5 deletions paconvert/api_mapping.json
@@ -11054,11 +11054,13 @@
"layer_norm_eps",
"batch_first",
"norm_first",
"bias",
"device",
"dtype"
],
"kwargs_change": {
"norm_first": "normalize_before",
"bias": "bias_attr",
"device": "",
"dtype": ""
},
@@ -11090,11 +11092,13 @@
"layer_norm_eps",
"batch_first",
"norm_first",
"bias",
"device",
"dtype"
],
"kwargs_change": {
"norm_first": "normalize_before",
"bias": "bias_attr",
"device": "",
"dtype": ""
},
@@ -11134,11 +11138,13 @@
"layer_norm_eps",
"batch_first",
"norm_first",
"bias",
"device",
"dtype"
],
"kwargs_change": {
"norm_first": "normalize_before",
"bias": "bias_attr",
"device": "",
"dtype": ""
},
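For context, here is a hand-written sketch of what the kwargs_change entries above are meant to produce for one of the Transformer-family APIs touched in this file (an illustration only, not PaConvert output; it assumes torch >= 2.1 for the bias argument and that paddle.nn.TransformerEncoderLayer exposes normalize_before and bias_attr with matching semantics):

import paddle
import torch

# PyTorch source as it might appear before conversion; bias, norm_first,
# device and dtype are the arguments covered by the mapping above.
torch_layer = torch.nn.TransformerEncoderLayer(
    d_model=512, nhead=8, dim_feedforward=2048,
    norm_first=True, bias=False, device=None, dtype=None,
)

# The converted call the mapping aims for: norm_first -> normalize_before,
# bias -> bias_attr, while device and dtype (mapped to "") are simply dropped.
paddle_layer = paddle.nn.TransformerEncoderLayer(
    d_model=512, nhead=8, dim_feedforward=2048,
    normalize_before=True, bias_attr=False,
)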
@@ -12092,7 +12098,7 @@
}
},
"torch.nn.functional.log_softmax": {
"Matcher": "RequireDimMatcher",
"Matcher": "SoftmaxMatcher",
"paddle_api": "paddle.nn.functional.log_softmax",
"args_list": [
"input",
@@ -12215,9 +12221,6 @@
"kwargs_change": {
"input": "x"
},
"unsupport_args": [
"output_size"
],
"min_input_args": 3
},
"torch.nn.functional.max_unpool2d": {
@@ -12605,7 +12608,7 @@
"min_input_args": 2
},
"torch.nn.functional.softmax": {
"Matcher": "RequireDimMatcher",
"Matcher": "SoftmaxMatcher",
Collaborator: Is RequireDimMatcher unused now? If so, it could be deleted.

Contributor Author: I'd rather delete it in a follow-up PR. This Matcher is still involved in a few other unit tests, so removing it means also updating the tests of several other operators, and doing all of that here would make this PR touch too many files.

"paddle_api": "paddle.nn.functional.softmax",
"args_list": [
"input",
4 changes: 2 additions & 2 deletions paconvert/api_matcher.py
@@ -3829,8 +3829,8 @@ def generate_code(self, kwargs):
 class SoftmaxMatcher(BaseMatcher):
     def generate_code(self, kwargs):
         if "dim" not in kwargs or "None" in kwargs["dim"]:
-            return None
-
+            kwargs.pop("dim", "None")
+            kwargs["axis"] = 0
         return GenericMatcher.generate_code(self, kwargs)


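To make the dim=None path concrete, here is a small hand-written sketch of the behavior the new branch relies on (an illustration under assumptions, not PaConvert output; it needs both torch and paddle installed and leans on torch's documented fallback that dim=None selects dim 0 for 3-D inputs, which appears to be why the new test cases below use 3-D tensors):

import numpy as np
import paddle
import torch

x = np.random.rand(2, 3, 4).astype("float32")

# PyTorch: dim=None triggers the legacy fallback (_get_softmax_dim), which
# picks dim 0 for a 3-D input.
torch_out = torch.nn.functional.softmax(torch.tensor(x), dim=None)

# The converted form the matcher produces: "dim" is popped and axis=0 is injected.
paddle_out = paddle.nn.functional.softmax(paddle.to_tensor(x), axis=0)

# For 3-D inputs like the ones in the new test cases the two should agree.
np.testing.assert_allclose(torch_out.numpy(), paddle_out.numpy(), rtol=1e-6)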
23 changes: 18 additions & 5 deletions tests/test_nn_LogSoftmax.py
@@ -70,9 +70,22 @@ def test_case_3():
result = model(x)
"""
)
obj.run(
pytorch_code,
["result"],
unsupport=True,
reason="When dim is None, paddle and pytorch generate different results",
obj.run(pytorch_code, ["result"])


def test_case_4():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
x = torch.tensor([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]])
model = nn.LogSoftmax(dim=None)
Collaborator: Please also test the case where dim is not passed.

Contributor Author: test_case_3 is exactly the case without dim. Also, this PR touches this file mainly to keep CI from failing; the changes to these two files are in fact the same as in the 单测case增强3 PR.

result = model(x)
"""
)
obj.run(pytorch_code, ["result"])
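As a side note on the exchange above, the matcher's condition ("dim" not in kwargs or "None" in kwargs["dim"]) is meant to cover both spellings of the no-dim case; a minimal illustration (hypothetical snippet, not one of this PR's tests):

import torch.nn as nn

# Both of these go through the same SoftmaxMatcher branch:
model_a = nn.LogSoftmax()          # dim omitted entirely ("dim" not in kwargs)
model_b = nn.LogSoftmax(dim=None)  # dim passed explicitly as None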
23 changes: 18 additions & 5 deletions tests/test_nn_Softmax.py
@@ -70,9 +70,22 @@ def test_case_3():
result = model(x)
"""
)
obj.run(
pytorch_code,
["result"],
unsupport=True,
reason="When dim is None, paddle and pytorch generate different results",
obj.run(pytorch_code, ["result"])


def test_case_4():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
x = torch.tensor([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 10.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]])
model = nn.Softmax(dim=None)
Collaborator: Please also test the case where dim is not passed.

Contributor Author: test_case_3 is exactly the case without dim.

result = model(x)
"""
)
obj.run(pytorch_code, ["result"])
51 changes: 51 additions & 0 deletions tests/test_nn_Transformer.py
@@ -148,3 +148,54 @@ def test_case_7():
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_8():
Collaborator: Is the newly added bias parameter actually exercised by these tests?

pytorch_code = textwrap.dedent(
"""
import torch
transformer_model = torch.nn.Transformer(d_model=512,
nhead=8, num_encoder_layers=6,
num_decoder_layers=6, dim_feedforward=2048,
dropout=0.1, activation='relu',
custom_encoder=None, custom_decoder=None,
layer_norm_eps=1e-05, batch_first=False,
norm_first=False, bias=False,
device=None, dtype=None)
src = torch.rand((10, 32, 512))
tgt = torch.rand((10, 32, 512))
result = transformer_model(src, tgt)
"""
)
obj.run(
pytorch_code,
["result"],
check_value=False,
unsupport=True,
reason="paddle unsupport layer_norm_eps args",
)


def test_case_9():
pytorch_code = textwrap.dedent(
"""
import torch
transformer_model = torch.nn.Transformer(512,
8, 6, 6, 2048,
0.1, 'relu',
None, None,
1e-05, False,
False, False,
None, None)
src = torch.rand((10, 32, 512))
tgt = torch.rand((10, 32, 512))
result = transformer_model(src, tgt)
"""
)
obj.run(
pytorch_code,
["result"],
check_value=False,
unsupport=True,
reason="paddle unsupport layer_norm_eps args",
Collaborator: Please add a test for batch_first.

Contributor Author: That is covered in test_case_7, and paddle does not seem to support this parameter at the moment anyway.

)
41 changes: 41 additions & 0 deletions tests/test_nn_TransformerDecoderLayer.py
@@ -78,3 +78,44 @@ def test_case_4():
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_5():
Collaborator: Is the newly added bias parameter actually exercised by these tests?

pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
x = torch.ones(10, 32,512)
tgt = torch.ones(10, 32, 512)
model = nn.TransformerDecoderLayer(d_model=512, nhead=8,dim_feedforward=2048, dropout=0.1,
activation="relu", layer_norm_eps=1e-06, batch_first=False,
norm_first=False, bias=True, device=None, dtype=None)
result = model(tgt,x)
"""
)
obj.run(
pytorch_code,
["result"],
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_6():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
x = torch.ones(10, 32,512)
tgt = torch.ones(10, 32, 512)
model = nn.TransformerDecoderLayer(512, 8,2048, 0.1, "relu", 1e-06, False,
False, True, None, None)
result = model(tgt,x)
"""
)
obj.run(
pytorch_code,
["result"],
unsupport=True,
reason="paddle unsupport batch_first args",
)
42 changes: 42 additions & 0 deletions tests/test_nn_TransformerEncoderLayer.py
@@ -123,3 +123,45 @@ def test_case_7():
"""
)
obj.run(pytorch_code, ["result"], check_value=False)


def test_case_8():
Collaborator: Is the newly added bias parameter actually exercised by these tests?

pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
tgt = torch.ones(10, 32, 512)
model = nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=2048, dropout=0.1,
activation="relu", layer_norm_eps=1e-05, batch_first=False,
norm_first=False, bias=True, device=None, dtype=None)
result = model(tgt)
"""
)
obj.run(
pytorch_code,
["result"],
check_value=False,
unsupport=True,
reason="paddle unsupport batch_first args",
)


def test_case_9():
pytorch_code = textwrap.dedent(
"""
import torch
import torch.nn as nn
tgt = torch.ones(10, 32, 512)
model = nn.TransformerEncoderLayer(512, 8, 2048, 0.1,
"relu", 1e-05, False,
False, True, "cpu", None)
result = model(tgt)
"""
)
obj.run(
pytorch_code,
["result"],
check_value=False,
unsupport=True,
reason="paddle unsupport batch_first args",
)
18 changes: 18 additions & 0 deletions tests/test_nn_Upsample.py
@@ -127,3 +127,21 @@ def test_case_6():
obj.run(
pytorch_code, unsupport=True, reason="paddle unsupport recompute_scale_factor "
)


def test_case_7():
pytorch_code = textwrap.dedent(
"""
import torch
input = torch.tensor([[[[ 1.1524, 0.4714, 0.2857],
[-1.2533, -0.9829, -1.0981],
[ 0.1507, -1.1431, -2.0361]],

[[ 0.1024, -0.4482, 0.4137],
[ 0.9385, 0.4565, 0.7702],
[ 0.4135, -0.2587, 0.0482]]]])
m = torch.nn.Upsample(scale_factor=2, align_corners=True, mode='bilinear')
result = m(input)
"""
)
obj.run(pytorch_code, ["result"])
62 changes: 56 additions & 6 deletions tests/test_nn_functional_embedding.py
@@ -58,11 +58,61 @@ def test_case_3():
pytorch_code = textwrap.dedent(
"""
import torch
w0 = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
result = torch.nn.functional.embedding(x,embedding_matrix,padding_idx=0,max_norm=2)
import numpy as np
embedding_matrix = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
x = torch.tensor(np.array([[0,1],[2,3]]))
result = torch.nn.functional.embedding(x, embedding_matrix, padding_idx=0, max_norm=2)
"""
)
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport max_norm")


def test_case_4():
pytorch_code = textwrap.dedent(
"""
import torch
import numpy as np
embedding_matrix = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
x = torch.tensor(np.array([[0,1],[2,3]]))
result = torch.nn.functional.embedding(input=x, weight=embedding_matrix, padding_idx=0, max_norm=2, norm_type=2.0, scale_grad_by_freq=False, sparse=True)
"""
)
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport max_norm ")


def test_case_5():
pytorch_code = textwrap.dedent(
"""
import torch
import numpy as np
embedding_matrix = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
x = torch.tensor(np.array([[0,1],[2,3]]))
result = torch.nn.functional.embedding(input=x, padding_idx=0, max_norm=2, weight=embedding_matrix, scale_grad_by_freq=False, norm_type=2.0, sparse=True)
"""
)
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport max_norm ")


def test_case_6():
pytorch_code = textwrap.dedent(
"""
import torch
import numpy as np
embedding_matrix = torch.Tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
x = torch.tensor(np.array([[0,1],[2,3]]))
result = torch.nn.functional.embedding(x, embedding_matrix, 0, 2, 2.0, False, True)
"""
)
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport")
obj.run(pytorch_code, unsupport=True, reason="paddle unsupport max_norm ")
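All of the new embedding cases above are flagged unsupported because of max_norm. For reference, a hand-written sketch of the subset that does have a direct paddle counterpart (assuming paddle.nn.functional.embedding's x / weight / padding_idx / sparse parameters; illustration only, not part of the test suite):

import numpy as np
import paddle

embedding_matrix = paddle.to_tensor(
    np.array([[0.0, 0.0, 0.0],
              [1.0, 1.0, 1.0],
              [2.0, 2.0, 2.0],
              [3.0, 3.0, 3.0]], dtype="float32")
)
x = paddle.to_tensor(np.array([[0, 1], [2, 3]], dtype="int64"))

# Counterpart of torch.nn.functional.embedding(x, weight, padding_idx=0, sparse=True)
# once max_norm / norm_type / scale_grad_by_freq are left out.
result = paddle.nn.functional.embedding(x, embedding_matrix, padding_idx=0, sparse=True)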