Skip to content

Commit

Permalink
单测case覆盖 (#453)
Browse files Browse the repository at this point in the history
* case add

* case add

* case add

* case add

* case add

* fix testcase
  • Loading branch information
Xuxuanang authored Aug 23, 2024
1 parent 32919f0 commit 75a684c
Show file tree
Hide file tree
Showing 19 changed files with 569 additions and 35 deletions.
10 changes: 7 additions & 3 deletions paconvert/api_mapping.json
Original file line number Diff line number Diff line change
Expand Up @@ -6029,7 +6029,11 @@
"args_list": [
"graceful",
"timeout"
]
],
"kwargs_change": {
"graceful": "",
"timeout": ""
}
},
"torch.distributed.scatter": {
"Matcher": "ScatterMatcher",
Expand Down Expand Up @@ -11808,8 +11812,8 @@
"input",
"grid",
"mode",
"align_corners",
"padding_mode"
"padding_mode",
"align_corners"
],
"kwargs_change": {
"input": "x"
Expand Down
16 changes: 6 additions & 10 deletions tests/test_cuda_amp_autocast.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,9 +90,8 @@ def test_case_4():


@pytest.mark.skipif(
condition=not paddle.device.is_compiled_with_cuda()
or not paddle.device.cuda.get_device_properties(0).major >= 8,
reason="computational capabilities less 8",
condition=not paddle.device.is_compiled_with_cuda(),
reason="can only run on paddle with CUDA",
)
def test_case_5():
pytorch_code = textwrap.dedent(
Expand All @@ -110,11 +109,9 @@ def test_case_5():
obj.run(pytorch_code, ["result"])


# generated by validate_unittest autofix, based on test_case_5
@pytest.mark.skipif(
condition=not paddle.device.is_compiled_with_cuda()
or not paddle.device.cuda.get_device_properties(0).major >= 8,
reason="computational capabilities less 8",
condition=not paddle.device.is_compiled_with_cuda(),
reason="can only run on paddle with CUDA",
)
def test_case_6():
pytorch_code = textwrap.dedent(
Expand All @@ -134,9 +131,8 @@ def test_case_6():

# generated by validate_unittest autofix, based on test_case_5
@pytest.mark.skipif(
condition=not paddle.device.is_compiled_with_cuda()
or not paddle.device.cuda.get_device_properties(0).major >= 8,
reason="computational capabilities less 8",
condition=not paddle.device.is_compiled_with_cuda(),
reason="can only run on paddle with CUDA",
)
def test_case_7():
pytorch_code = textwrap.dedent(
Expand Down
12 changes: 12 additions & 0 deletions tests/test_diff.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,3 +130,15 @@ def test_case_9():
"""
)
obj.run(pytorch_code, ["result"])


def test_case_10():
    # All-positional torch.diff call: n=1, dim=0, prepend=b, append=b.
    code = textwrap.dedent(
        """
        import torch
        x = torch.tensor([1, 3, 2])
        b = torch.tensor([4, 5])
        result = torch.diff(x, 1, 0, b, b)
        """
    )
    obj.run(code, ["result"])
105 changes: 99 additions & 6 deletions tests/test_distributed_rpc_shutdown.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,17 +14,11 @@

import textwrap

import paddle
import pytest
from apibase import APIBase

obj = APIBase("torch.distributed.rpc.shutdown")


@pytest.mark.skipif(
condition=paddle.is_compiled_with_cinn(),
reason="WITH_RPC = OFF, if WITH_CINN = ON.",
)
def test_case_1():
pytorch_code = textwrap.dedent(
"""
Expand Down Expand Up @@ -56,3 +50,102 @@ def test_case_1():
"""
)
obj.run(pytorch_code, ["result"])


def test_case_2():
    # rpc.shutdown with keyword arguments in declared order
    # (graceful=False, timeout=2); a free port is probed first so
    # init_rpc can bind locally.
    code = textwrap.dedent(
        """
        import os
        import torch
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        start = 25000
        end = 30000
        for port in range(start, end):
            try:
                s.bind(('localhost', port))
                s.close()
                break
            except socket.error:
                continue
        print("port: " + str(port))
        from torch.distributed import rpc
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(port)
        os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:' + str(port)
        rpc.init_rpc(
            "worker1",
            rank=0,
            world_size=1
        )
        result = rpc.shutdown(graceful=False, timeout=2)
        """
    )
    obj.run(code, ["result"])


def test_case_3():
    # Same call as test_case_2 but with the keyword arguments swapped
    # (timeout before graceful) to check order-independent mapping.
    code = textwrap.dedent(
        """
        import os
        import torch
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        start = 25000
        end = 30000
        for port in range(start, end):
            try:
                s.bind(('localhost', port))
                s.close()
                break
            except socket.error:
                continue
        print("port: " + str(port))
        from torch.distributed import rpc
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(port)
        os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:' + str(port)
        rpc.init_rpc(
            "worker1",
            rank=0,
            world_size=1
        )
        result = rpc.shutdown(timeout=2, graceful=False)
        """
    )
    obj.run(code, ["result"])


def test_case_4():
    # Positional variant of the shutdown call: graceful=True, timeout=1.
    code = textwrap.dedent(
        """
        import os
        import torch
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        start = 25000
        end = 30000
        for port in range(start, end):
            try:
                s.bind(('localhost', port))
                s.close()
                break
            except socket.error:
                continue
        print("port: " + str(port))
        from torch.distributed import rpc
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(port)
        os.environ['PADDLE_MASTER_ENDPOINT'] = 'localhost:' + str(port)
        rpc.init_rpc(
            "worker1",
            rank=0,
            world_size=1
        )
        result = rpc.shutdown(True, 1)
        """
    )
    obj.run(code, ["result"])
42 changes: 42 additions & 0 deletions tests/test_distributions_AffineTransform.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,3 +45,45 @@ def test_case_2():
"""
)
obj.run(pytorch_code, ["result"])


def test_case_3():
    # AffineTransform constructed with all four arguments positional.
    code = textwrap.dedent(
        """
        import torch
        x = torch.tensor(1.)
        y = torch.tensor(2.)
        affine = torch.distributions.AffineTransform(x, y, 1, 1)
        result = affine.forward_shape([1, 2])
        """
    )
    obj.run(code, ["result"])


def test_case_4():
    # AffineTransform with keyword arguments in declared order.
    code = textwrap.dedent(
        """
        import torch
        x = torch.tensor(1.)
        y = torch.tensor(2.)
        affine = torch.distributions.AffineTransform(loc=x, scale=y, event_dim=1, cache_size=1)
        result = affine.forward_shape([1, 2])
        """
    )
    obj.run(code, ["result"])


def test_case_5():
    # AffineTransform with keyword arguments in shuffled order.
    code = textwrap.dedent(
        """
        import torch
        x = torch.tensor(1.)
        y = torch.tensor(2.)
        affine = torch.distributions.AffineTransform(event_dim=1, cache_size=1, loc=x, scale=y)
        result = affine.forward_shape([1, 2])
        """
    )
    obj.run(code, ["result"])
24 changes: 24 additions & 0 deletions tests/test_distributions_ExpTransform.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,27 @@ def test_case_1():
"""
)
obj.run(pytorch_code, ["result"])


def test_case_2():
    # ExpTransform with cache_size passed by keyword.
    code = textwrap.dedent(
        """
        import torch
        exp = torch.distributions.ExpTransform(cache_size=1)
        result = exp.forward_shape([1, 2])
        """
    )
    obj.run(code, ["result"])


def test_case_3():
    # ExpTransform with cache_size passed positionally.
    code = textwrap.dedent(
        """
        import torch
        exp = torch.distributions.ExpTransform(1)
        result = exp.forward_shape([1, 2])
        """
    )
    obj.run(code, ["result"])
36 changes: 36 additions & 0 deletions tests/test_distributions_PowerTransform.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,39 @@ def test_case_1():
"""
)
obj.run(pytorch_code, ["result"])


def test_case_2():
    # PowerTransform with keyword arguments in declared order.
    code = textwrap.dedent(
        """
        import torch
        power = torch.distributions.PowerTransform(exponent=torch.tensor(2.),cache_size=1)
        result = power.forward_shape([1, 2])
        """
    )
    obj.run(code, ["result"])


def test_case_3():
    # PowerTransform with keyword arguments in shuffled order.
    code = textwrap.dedent(
        """
        import torch
        power = torch.distributions.PowerTransform(cache_size=1, exponent=torch.tensor(2.))
        result = power.forward_shape([1, 2])
        """
    )
    obj.run(code, ["result"])


def test_case_4():
    # PowerTransform with both arguments positional.
    code = textwrap.dedent(
        """
        import torch
        power = torch.distributions.PowerTransform(torch.tensor(2.), 0)
        result = power.forward_shape([1, 2])
        """
    )
    obj.run(code, ["result"])
13 changes: 13 additions & 0 deletions tests/test_histogramdd.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,3 +87,16 @@ def test_case_5():
"""
)
obj.run(pytorch_code, ["result"])


def test_case_6():
    # Original snippet built a `weights` tensor that was never passed to
    # torch.histogramdd — a dead local in the generated code. Dropped it so
    # the case tests exactly the (input, bins) form it actually exercises.
    pytorch_code = textwrap.dedent(
        """
        import torch
        x = torch.tensor([[0., 1.], [1., 0.], [2.,0.], [2., 2.]])
        bins = [3,3]
        result = torch.histogramdd(x, bins)
        """
    )
    obj.run(pytorch_code, ["result"])
33 changes: 33 additions & 0 deletions tests/test_hub_download_url_to_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,3 +65,36 @@ def test_case_3():
"""
)
obj.run(pytorch_code, ["result"])


def test_case_4():
    # download_url_to_file with every argument passed by keyword,
    # in declared order; progress=False keeps test output quiet.
    code = textwrap.dedent(
        """
        import torch
        result = torch.hub.download_url_to_file(url='https://paddle-paconvert.bj.bcebos.com/model.params', dst='/tmp/temporary_file',
            hash_prefix="e1bf0a03102811bb2168e9952fe4edfa09cceb3343278bd4e5876b33b6889e9b", progress=False)
        """
    )
    obj.run(code, ["result"])


def test_case_5():
    # Was a byte-identical duplicate of test_case_4 and added no coverage.
    # Rewritten as the all-positional form of the same call
    # (url, dst, hash_prefix, progress), matching the suite convention of
    # covering keyword, shuffled-keyword and positional variants.
    pytorch_code = textwrap.dedent(
        """
        import torch
        result = torch.hub.download_url_to_file('https://paddle-paconvert.bj.bcebos.com/model.params', '/tmp/temporary_file',
            "e1bf0a03102811bb2168e9952fe4edfa09cceb3343278bd4e5876b33b6889e9b", False)
        """
    )
    obj.run(pytorch_code, ["result"])


def test_case_6():
    # download_url_to_file with keyword arguments in shuffled order
    # (dst/hash_prefix before url).
    code = textwrap.dedent(
        """
        import torch
        result = torch.hub.download_url_to_file(dst='/tmp/temporary_file',
            hash_prefix="e1bf0a03102811bb2168e9952fe4edfa09cceb3343278bd4e5876b33b6889e9b", url='https://paddle-paconvert.bj.bcebos.com/model.params', progress=False)
        """
    )
    obj.run(code, ["result"])
14 changes: 14 additions & 0 deletions tests/test_inference_mode.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,3 +73,17 @@ def test_case_4():
"""
)
obj.run(pytorch_code, ["result"])


def test_case_5():
    # inference_mode used as a decorator with mode=False, so the wrapped
    # function's output should keep requires_grad.
    code = textwrap.dedent(
        """
        import torch
        x = torch.ones(1, 2, 3, requires_grad=True)
        @torch.inference_mode(mode= False)
        def doubler(x):
            return x * 2
        result = (doubler(x).requires_grad, doubler(x))
        """
    )
    obj.run(code, ["result"])
Loading

0 comments on commit 75a684c

Please sign in to comment.