Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

new way of test cases, *test=kunlun #39444

Merged
merged 3 commits into from
Feb 14, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
187 changes: 101 additions & 86 deletions python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,92 +24,103 @@
import paddle
from paddle.fluid import Program, program_guard


class TestClipOp(XPUOpTest):
    """Base test for the XPU `clip` op: Out = np.clip(X, min, max).

    Bounds can come either from op attributes ('min'/'max') or from optional
    tensor inputs ('Min'/'Max'); tensor inputs, when present, take precedence.
    Subclasses override initTestCase() to vary shape and bounds.
    """

    def set_xpu(self):
        # Mark the generated class as an XPU test and pin the device.
        self.__class__.use_xpu = True
        self.place = paddle.XPUPlace(0)

    def setUp(self):
        self.set_xpu()
        self.max_relative_error = 0.006

        # initTestCase() may add 'Min'/'Max' tensor inputs, so the dict must
        # exist before it runs.
        self.inputs = {}
        self.initTestCase()

        self.op_type = "clip"
        self.attrs = {}
        self.attrs['min'] = self.min
        self.attrs['max'] = self.max
        # Tensor inputs override the scalar attributes when present.
        if 'Min' in self.inputs:
            min_v = self.inputs['Min']
        else:
            min_v = self.attrs['min']

        if 'Max' in self.inputs:
            max_v = self.inputs['Max']
        else:
            max_v = self.attrs['max']

        input = np.random.random(self.shape).astype("float32")
        # Nudge values lying within the gradient-check tolerance of a clip
        # boundary to 0.5 so the numeric gradient is well-defined there.
        input[np.abs(input - min_v) < self.max_relative_error] = 0.5
        input[np.abs(input - max_v) < self.max_relative_error] = 0.5
        self.inputs['X'] = input
        self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}

    def test_check_output(self):
        # XPU op tests run under the static graph; restore dynamic mode after.
        paddle.enable_static()
        self.check_output_with_place(self.place)
        paddle.disable_static()

    def test_check_grad_normal(self):
        paddle.enable_static()
        self.check_grad_with_place(self.place, ['X'], 'Out')
        paddle.disable_static()

    def initTestCase(self):
        # Default case: 3-D input with both tensor bounds supplied, so the
        # 'Min' tensor (0.1) overrides the 'min' attr (0.3) in setUp.
        self.shape = (4, 10, 10)
        self.max = 0.8
        self.min = 0.3
        self.inputs['Max'] = np.array([0.8]).astype('float32')
        self.inputs['Min'] = np.array([0.1]).astype('float32')


class TestCase1(TestClipOp):
    """Clip a 3-D input with attribute-only bounds [0.0, 0.7]."""

    def initTestCase(self):
        # No 'Min'/'Max' tensor inputs: bounds come from the attrs alone.
        self.min = 0.0
        self.max = 0.7
        self.shape = (8, 16, 8)


class TestCase2(TestClipOp):
    """Clip a 2-D input to the full [0.0, 1.0] range of the random data."""

    def initTestCase(self):
        self.min = 0.0
        self.max = 1.0
        self.shape = (8, 16)


class TestCase3(TestClipOp):
    """Clip a 3-D input with attribute-only bounds [0.2, 0.7]."""

    def initTestCase(self):
        self.min = 0.2
        self.max = 0.7
        self.shape = (4, 8, 16)


class TestCase4(TestClipOp):
    """Clip with tensor-supplied bounds overriding the attribute bounds."""

    def initTestCase(self):
        self.shape = (4, 8, 8)
        self.min = 0.2
        self.max = 0.7
        # Tensor inputs win over attrs in the base setUp, so the effective
        # bounds are [0.3, 0.8].
        self.inputs['Min'] = np.array([0.3]).astype('float32')
        self.inputs['Max'] = np.array([0.8]).astype('float32')


class TestCase5(TestClipOp):
    """Degenerate clip where min == max: every output element is 0.5."""

    def initTestCase(self):
        self.min = 0.5
        self.max = 0.5
        self.shape = (4, 8, 16)
import op_test
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


class XPUTestClipOp(XPUOpTestWrapper):
    """Wrapper that groups the XPU `clip` op tests.

    create_test_class() instantiates the nested test classes once per
    supported dtype, injecting the dtype string as `self.in_type`.
    """

    def __init__(self):
        self.op_name = 'clip'
        # Concrete per-dtype classes are generated from the nested classes
        # below rather than created dynamically from a config list.
        self.use_dynamic_create_class = False

    class TestClipOp(XPUOpTest):
        """Base clip test: Out = np.clip(X, min_v, max_v)."""

        def setUp(self):
            # Order matters: init_data() sets min/max, set_attrs() copies
            # them into attrs, and set_inputs() then resolves the effective
            # bounds (tensor inputs take precedence over attrs).
            self.init_dtype()
            self.set_xpu()
            self.op_type = "clip"
            self.place = paddle.XPUPlace(0)
            self.inputs = {}
            self.init_data()
            self.set_attrs()
            self.set_inputs()
            self.outputs = {
                'Out': np.clip(self.inputs['X'], self.min_v, self.max_v)
            }

        def set_xpu(self):
            self.__class__.use_xpu = True
            # Gradient checking is skipped for these generated classes.
            self.__class__.no_need_check_grad = True
            # NOTE(review): stores the dtype tag in `op_type` — appears to be
            # the get_test_cover_info coverage convention; confirm upstream.
            self.__class__.op_type = self.dtype

        def init_data(self):
            # Default case; subclasses override shape/bounds.
            self.shape = (4, 10, 10)
            self.max = 0.8
            self.min = 0.3

        def set_inputs(self):
            # 'Min'/'Max' tensor inputs (if set by init_data) override the
            # scalar attributes.
            if 'Min' in self.inputs:
                min_v = self.inputs['Min']
            else:
                min_v = self.attrs['min']

            if 'Max' in self.inputs:
                max_v = self.inputs['Max']
            else:
                max_v = self.attrs['max']

            self.min_v = min_v
            self.max_v = max_v
            self.max_relative_error = 0.006
            # NOTE(review): X is always float32 here even though classes are
            # generated per supported dtype — verify this is intentional.
            input = np.random.random(self.shape).astype("float32")
            # Move values within tolerance of a clip boundary to 0.5 so the
            # reference output is insensitive to rounding at the edges.
            input[np.abs(input - min_v) < self.max_relative_error] = 0.5
            input[np.abs(input - max_v) < self.max_relative_error] = 0.5
            self.inputs['X'] = input

        def set_attrs(self):
            self.attrs = {}
            self.attrs['min'] = self.min
            self.attrs['max'] = self.max

        def init_dtype(self):
            # `in_type` is injected by create_test_class() per dtype.
            self.dtype = self.in_type

        def test_check_output(self):
            # Run under static graph, then restore dynamic mode.
            paddle.enable_static()
            self.check_output_with_place(self.place)
            paddle.disable_static()

    class TestClipOp1(TestClipOp):
        # 3-D input, attribute bounds [0.0, 0.7].
        def init_data(self):
            self.shape = (8, 16, 8)
            self.max = 0.7
            self.min = 0.0

    class TestClipOp2(TestClipOp):
        # 2-D input, bounds spanning the full random-data range.
        def init_data(self):
            self.shape = (8, 16)
            self.max = 1.0
            self.min = 0.0

    class TestClipOp3(TestClipOp):
        # 3-D input, attribute bounds [0.2, 0.7].
        def init_data(self):
            self.shape = (4, 8, 16)
            self.max = 0.7
            self.min = 0.2

    class TestClipOp4(TestClipOp):
        # Tensor bounds override attrs: effective range is [0.3, 0.8].
        def init_data(self):
            self.shape = (4, 8, 8)
            self.max = 0.7
            self.min = 0.2
            self.inputs['Max'] = np.array([0.8]).astype('float32')
            self.inputs['Min'] = np.array([0.3]).astype('float32')

    class TestClipOp5(TestClipOp):
        # Degenerate case: min == max collapses the output to 0.5 everywhere.
        def init_data(self):
            self.shape = (4, 8, 16)
            self.max = 0.5
            self.min = 0.5


class TestClipOpError(unittest.TestCase):
Expand Down Expand Up @@ -212,5 +223,9 @@ def _executed_api(self, x, min=None, max=None):
return x.clip_(min, max)


# Generate one concrete test class per dtype the XPU `clip` kernel supports,
# registering them in this module's namespace so unittest discovers them.
support_types = get_xpu_op_support_types('clip')
for stype in support_types:
    create_test_class(globals(), XPUTestClipOp, stype)

if __name__ == '__main__':
    unittest.main()
120 changes: 74 additions & 46 deletions python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,54 +18,78 @@
import numpy as np
import sys
sys.path.append("..")
from op_test_xpu import XPUOpTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle
from paddle.static import Program, program_guard


class TestXPUScaleOp(XPUOpTest):
    """scale op test on XPU: Out = X * scale."""

    def setUp(self):
        self.op_type = "scale"
        self.init_type()
        x = np.random.random((10, 10)).astype(self.dtype)
        scale_value = -2.3
        self.inputs = {'X': x}
        self.attrs = {'scale': scale_value, 'use_xpu': True}
        # Cast the python float through self.dtype so the reference result
        # is computed at the same precision the kernel uses.
        self.outputs = {'Out': x * self.dtype(scale_value)}

    def init_type(self):
        # Dtype hook for subclasses to override.
        self.dtype = np.float32

    def test_check_output(self):
        if not paddle.is_compiled_with_xpu():
            return
        self.check_output_with_place(paddle.XPUPlace(0))

    def test_check_grad(self):
        if not paddle.is_compiled_with_xpu():
            return
        self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out')

import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import compiler, Program, program_guard

# class TestXPUScaleOpInt64(TestXPUScaleOp):
# def init_type(self):
# self.dtype = np.int64


class TestScaleFp16Op(TestXPUScaleOp):
    """float16 variant of the XPU scale test, with looser tolerances."""

    def init_type(self):
        # BUG FIX: the base class setUp() calls init_type(); the previous
        # override was named init_dtype_type() and therefore never ran, so
        # this "fp16" test silently exercised float32. Overriding the
        # correct hook makes the fp16 dtype actually take effect.
        self.dtype = np.float16

    def test_check_output(self):
        place = core.XPUPlace(0)
        # fp16 results need a wider absolute tolerance than the default.
        self.check_output_with_place(place, atol=0.002)

    def test_check_grad(self):
        place = core.XPUPlace(0)
        # fp16 gradients are checked with a relaxed relative error bound.
        self.check_grad_with_place(place, ["X"], "Out", max_relative_error=0.05)
import op_test
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


class XPUTestScaleOp(XPUOpTestWrapper):
    """Wrapper that groups the XPU `scale` op tests.

    create_test_class() instantiates the nested test classes once per
    supported dtype, injecting the dtype string as `self.in_type`.
    """

    def __init__(self):
        self.op_name = 'scale'
        # Concrete per-dtype classes are generated from the nested classes
        # below rather than created dynamically from a config list.
        self.use_dynamic_create_class = False

    class TestScaleOp(XPUOpTest):
        """Base scale test: Out = X * scale."""

        def setUp(self):
            self.init_dtype()
            self.set_xpu()
            self.op_type = "scale"
            self.place = paddle.XPUPlace(0)
            self.set_inputs()
            self.set_attrs()
            # Cast the python float through self.dtype so the reference
            # result matches the kernel's computation precision.
            self.outputs = {
                'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
            }

        def set_xpu(self):
            self.__class__.use_xpu = True
            # Gradient checking is skipped for these generated classes.
            self.__class__.no_need_check_grad = True
            # NOTE(review): stores the dtype tag in `op_type` — appears to be
            # the get_test_cover_info coverage convention; confirm upstream.
            self.__class__.op_type = self.dtype

        def set_inputs(self):
            self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}

        def init_dtype(self):
            # Map the framework-injected dtype string onto a numpy scalar
            # type; `in_type` is provided by create_test_class().
            if "float16" == self.in_type:
                self.dtype = np.float16
            if "float32" == self.in_type:
                self.dtype = np.float32
            if "int64" == self.in_type:
                self.dtype = np.int64

        def set_attrs(self):
            # Scale-factor hook; subclasses override with other factors.
            self.attrs = {'scale': -2.3}

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place)

    class TestScaleOp1(TestScaleOp):
        # Positive scale factor.
        def set_attrs(self):
            self.attrs = {'scale': 3.5}

    class TestScaleOp2(TestScaleOp):
        def set_attrs(self):
            self.attrs = {'scale': 6.77}

    class TestScaleOp3(TestScaleOp):
        # Negative scale factor.
        def set_attrs(self):
            self.attrs = {'scale': -9.19}

    class TestScaleOp4(TestScaleOp):
        # Zero scale: output is all zeros.
        def set_attrs(self):
            self.attrs = {'scale': 0.0}

    class TestScaleOp5(TestScaleOp):
        # Small-magnitude negative scale.
        def set_attrs(self):
            self.attrs = {'scale': -0.003}


class TestScaleApiStatic(unittest.TestCase):
Expand Down Expand Up @@ -108,5 +132,9 @@ def _executed_api(self, x, scale=1.0, bias=0.0):
return x.scale_(scale, bias)


# Generate one concrete test class per dtype the XPU `scale` kernel supports,
# registering them in this module's namespace so unittest discovers them.
support_types = get_xpu_op_support_types('scale')
for stype in support_types:
    create_test_class(globals(), XPUTestScaleOp, stype)

if __name__ == "__main__":
    unittest.main()
Loading