66 changes: 47 additions & 19 deletions test/legacy_test/op_test.py
@@ -389,29 +389,38 @@ def convert_uint16_to_float(in_list):
     return np.reshape(out, in_list.shape)


-def get_places(string_format=False):
+def get_places():
     places = []
-    if not string_format:
-        if (
-            os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
-            in ['1', 'true', 'on']
-            or not core.is_compiled_with_cuda()
-        ):
-            places.append(base.CPUPlace())
-        if core.is_compiled_with_cuda():
-            places.append(base.CUDAPlace(0))
-    else:
-        if (
-            os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
-            in ['1', 'true', 'on']
-            or not paddle.is_compiled_with_cuda()
-        ):
-            places.append('cpu')
-        if paddle.is_compiled_with_cuda():
-            places.append('gpu')
+    if (
+        os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
+        in ['1', 'true', 'on']
+        or not core.is_compiled_with_cuda()
+    ):
+        places.append(base.CPUPlace())
+    if core.is_compiled_with_cuda():
+        places.append(base.CUDAPlace(0))
+    if is_custom_device():
+        dev_type = paddle.device.get_all_custom_device_type()[0]
+        places.append(base.CustomPlace(dev_type, 0))
     return places


+def get_devices():
+    devices = []
+    if (
+        os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
+        in ['1', 'true', 'on']
+        or not paddle.is_compiled_with_cuda()
+    ):
+        devices.append('cpu')
+    if paddle.is_compiled_with_cuda():
+        devices.append('gpu')
+    if is_custom_device():
+        dev_type = paddle.device.get_all_custom_device_type()[0]
+        devices.append(f'{dev_type}:0')
+    return devices
+
+
 def get_device_place():
     if core.is_compiled_with_cuda():
         return base.CUDAPlace(0)
@@ -423,6 +432,15 @@ def get_device_place():
     return base.CPUPlace()


+def is_custom_device():
+    custom_dev_types = paddle.device.get_all_custom_device_type()
+    if custom_dev_types and paddle.device.is_compiled_with_custom_device(
+        custom_dev_types[0]
+    ):
+        return True
+    return False
+
+
 @contextmanager
 def auto_parallel_test_guard(test_info_path, generated_test_file_path):
     test_info_file, generated_test_file = None, None
@@ -2902,6 +2920,13 @@ def _get_places(self):
                     return [place]
                 else:
                     return []
+            elif is_custom_device():
+                dev_type = paddle.device.get_all_custom_device_type()[0]
+                place = core.CustomPlace(dev_type, 0)
+                if core.is_float16_supported(place):
+                    return [place]
+                else:
+                    return []
             else:
                 return []
         places = []
@@ -2931,6 +2956,9 @@ def _get_places(self):
             and not cpu_only
         ):
             places.append(core.CUDAPlace(0))
+        if is_custom_device():
+            dev_type = paddle.device.get_all_custom_device_type()[0]
+            places.append(core.CustomPlace(dev_type, 0))
         return places

     def check_output(
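Note: this refactor splits the old get_places(string_format=True) flag into two helpers: get_places() yields Place objects (CPUPlace / CUDAPlace / CustomPlace) for static-graph executors, get_devices() yields device strings ('cpu', 'gpu', '<custom_type>:0') for paddle.set_device, and is_custom_device() reports whether a custom-device backend is compiled in. A minimal usage sketch, assuming it runs inside test/legacy_test where op_test is importable:

    import paddle
    from op_test import get_devices, get_places, is_custom_device

    # Place objects feed static-graph APIs, e.g. paddle.static.Executor(place).
    for place in get_places():
        print(type(place).__name__)

    # Device strings feed dygraph tests via paddle.set_device.
    for device in get_devices():
        paddle.set_device(device)

    # True only when a custom-device plugin (e.g. an NPU backend) is compiled in.
    print(is_custom_device())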
20 changes: 13 additions & 7 deletions test/legacy_test/test_activation_op.py
@@ -23,6 +23,7 @@
     convert_float_to_uint16,
     get_device_place,
     get_places,
+    is_custom_device,
 )
 from scipy.special import erf, expit
 from utils import static_guard
@@ -497,7 +498,8 @@ def init_shape(self):


 @unittest.skipIf(
-    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
+    not (core.is_compiled_with_cuda() or is_custom_device())
+    or core.is_compiled_with_rocm(),
     "core is not compiled with CUDA",
 )
 class TestSigmoidBF16(OpTest):
@@ -1765,7 +1767,8 @@ def init_dtype(self):


 @unittest.skipIf(
-    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
+    not (core.is_compiled_with_cuda() or is_custom_device())
+    or core.is_compiled_with_rocm(),
     "core is not compiled with CUDA",
 )
 class TestSqrtBF16(OpTest):
@@ -2037,7 +2040,7 @@ def setUp(self):
         self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)}
         self.outputs = {'Out': out}
         self.convert_input_output()
-        if not core.is_compiled_with_cuda():
+        if not (core.is_compiled_with_cuda() or is_custom_device()):
             self.__class__.no_need_check_grad = True

     def init_shape(self):
@@ -2091,7 +2094,7 @@ def setUp(self):
         self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)}
         self.outputs = {'Out': out}
         self.convert_input_output()
-        if not core.is_compiled_with_cuda():
+        if not (core.is_compiled_with_cuda() or is_custom_device()):
             self.__class__.no_need_check_grad = True

     def init_shape(self):
@@ -4563,7 +4566,8 @@ def init_shape(self):


 @unittest.skipIf(
-    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
+    not (core.is_compiled_with_cuda() or is_custom_device())
+    or core.is_compiled_with_rocm(),
     "core is not compiled with CUDA",
 )
 class TestSquareBF16(OpTest):
@@ -4917,7 +4921,8 @@ def init_shape(self):


 @unittest.skipIf(
-    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
+    not (core.is_compiled_with_cuda() or is_custom_device())
+    or core.is_compiled_with_rocm(),
     "core is not compiled with CUDA",
 )
 class TestSoftplusBF16(OpTest):
@@ -5595,7 +5600,8 @@ def test_errors(self):
 # ------------------ Test Cudnn Activation----------------------
 def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
     @unittest.skipIf(
-        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
+        not (core.is_compiled_with_cuda() or is_custom_device()),
+        "core is not compiled with CUDA",
     )
     class TestActCudnn(parent):
         def init_kernel_type(self):
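Note: every guard touched in this file converges on one pattern: run when either CUDA or a custom device is available, and keep skipping under ROCm. A condensed sketch of that pattern (TestSomeBF16Op is a hypothetical stand-in; the skip message is kept verbatim from the diff even though it no longer mentions custom devices):

    import unittest

    from op_test import is_custom_device
    from paddle.base import core

    @unittest.skipIf(
        not (core.is_compiled_with_cuda() or is_custom_device())
        or core.is_compiled_with_rocm(),
        "core is not compiled with CUDA",
    )
    class TestSomeBF16Op(unittest.TestCase):  # hypothetical stand-in class
        def test_placeholder(self):
            pass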
4 changes: 2 additions & 2 deletions test/legacy_test/test_adadelta_op.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, get_device_place, get_places
+from op_test import OpTest, get_device_place, get_devices

 import paddle
 from paddle import base
@@ -294,7 +294,7 @@ def _test_adadelta_op_dygraph_place_amp(self, place, use_amp=False):
         paddle.enable_static()

     def test_main(self):
-        for place in get_places(string_format=True):
+        for place in get_devices():
             use_amp_list = [True, False]
             for use_amp in use_amp_list:
                 self._test_adadelta_op_dygraph_place_amp(place, use_amp)
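Note: the remaining files in this diff apply the same mechanical swap, get_places(string_format=True) -> get_devices(), so the dygraph AMP loops iterate device strings that now also cover custom devices. Schematically (the loop body is illustrative, not taken from any single test):

    import paddle
    from op_test import get_devices

    for device in get_devices():  # 'cpu', 'gpu', or '<custom_type>:0'
        paddle.set_device(device)
        for use_amp in [True, False]:
            # run one dygraph optimizer step under this device/AMP combination
            pass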
4 changes: 2 additions & 2 deletions test/legacy_test/test_adagrad_op.py
@@ -17,7 +17,7 @@

 import numpy as np
 from op import Operator
-from op_test import OpTest, get_device_place, get_places
+from op_test import OpTest, get_device_place, get_devices, get_places

 import paddle
 from paddle.base import core
@@ -242,7 +242,7 @@ def _test_adagrad_op_dygraph_place_amp(self, place, use_amp=False):
         paddle.enable_static()

     def test_main(self):
-        for place in get_places(string_format=True):
+        for place in get_devices():
             use_amp_list = [True, False]
             for use_amp in use_amp_list:
                 self._test_adagrad_op_dygraph_place_amp(place, use_amp)
4 changes: 2 additions & 2 deletions test/legacy_test/test_adam_op.py
@@ -16,7 +16,7 @@

 import numpy as np
 from op import Operator
-from op_test import OpTest, get_places
+from op_test import OpTest, get_devices, get_places

 import paddle
 from paddle import base
@@ -1296,7 +1296,7 @@ def _adam_optimize_static(
         return out

     def _get_places(self):
-        return get_places(string_format=True)
+        return get_devices()

     def _check_with_place_amp(self, place, use_amp):
         # test dygraph mode
4 changes: 2 additions & 2 deletions test/legacy_test/test_adamax_op.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, get_device_place, get_places
+from op_test import OpTest, get_device_place, get_devices

 import paddle

@@ -275,7 +275,7 @@ def _test_adamax_op_dygraph_place_amp(self, place, use_amp=False):
         paddle.enable_static()

     def _get_places(self):
-        return get_places(string_format=True)
+        return get_devices()

     def test_main(self):
         for place in self._get_places():
4 changes: 2 additions & 2 deletions test/legacy_test/test_adamw_op.py
@@ -18,7 +18,7 @@
 from functools import partial

 import numpy as np
-from op_test import OpTest, get_places
+from op_test import OpTest, get_devices

 import paddle
 from paddle import base, nn
@@ -758,7 +758,7 @@ def _test_adamw_op_dygraph_place_amp(self, place, use_amp=False):
         optimizer.clear_grad()

     def _get_places(self):
-        places = get_places(string_format=True)
+        places = get_devices()
         if paddle.is_compiled_with_xpu():
             places.append('xpu')
         return places
4 changes: 2 additions & 2 deletions test/legacy_test/test_adaptive_log_softmax_with_loss.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import get_places
+from op_test import get_devices, get_places

 import paddle
 import paddle.optimizer as optim
@@ -58,7 +58,7 @@ def predict(self, input):
 class TestNNAdaptiveLogSoftmaxWithLossAPI(unittest.TestCase):
     def setUp(self):
         paddle.seed(2024)
-        self.place = get_places(string_format=True)
+        self.place = get_devices()
         self.log_np = np.random.randn(4, 8).astype('float32')
         self.predict_np = np.abs(np.random.randn(64, 8).astype('float32'))
4 changes: 3 additions & 1 deletion test/legacy_test/test_blha_get_max_len_op.py
@@ -15,6 +15,7 @@
 import unittest

 import numpy as np
+from op_test import is_custom_device

 import paddle
 from paddle.base import core
@@ -109,7 +110,8 @@ def test_static_api(self):


 @unittest.skipIf(
-    not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(),
+    not (core.is_compiled_with_cuda() or is_custom_device())
+    and not core.is_compiled_with_xpu(),
     "Only support XPU or GPU in CUDA mode.",
 )
 class TestBlhaGetMaxLenOp_ZeroSize(unittest.TestCase):
6 changes: 3 additions & 3 deletions test/legacy_test/test_cartesian_prod.py
@@ -16,7 +16,7 @@
 from itertools import product

 import numpy as np
-from op_test import get_places
+from op_test import get_devices

 import paddle
 from paddle.base import core
@@ -36,7 +36,7 @@ def setUp(self):
         self.c_np = np.random.random(self.c_shape).astype(self.dtype_np)
         self.d_np = np.empty(0, self.dtype_np)

-        self.place = get_places(string_format=True)
+        self.place = get_devices()

     def init_setting(self):
         self.dtype_np = 'float32'
@@ -119,7 +119,7 @@ def setUp(self):
         self.a_np = np.random.random(self.a_shape).astype(self.dtype_np)
         self.b_np = np.empty(0, self.dtype_np)

-        self.place = get_places(string_format=True)
+        self.place = get_devices()

     def init_setting(self):
         self.dtype_np = 'float32'
8 changes: 4 additions & 4 deletions test/legacy_test/test_cauchy_inplace.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import get_places
+from op_test import get_devices

 import paddle

@@ -35,7 +35,7 @@ def test_fp64():
             tensor_fp64.cauchy_()
             self.assertEqual(tensor_fp64.dtype, paddle.float64)

-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             test_fp32()
             test_fp64()
@@ -92,7 +92,7 @@ def test_cauchy_inplace_distribution(self):
 class TestCauchyInplaceEmptyTensor(unittest.TestCase):
     def test_cauchy_inplace_op_empty_tensor(self):
         test_shapes = [(200, 1), (1, 200)]
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             for test_shape in test_shapes:
                 tensor = paddle.empty(shape=test_shape)
@@ -118,7 +118,7 @@ def test_grad():
             cauchy_grad = tensor_b.grad.numpy()
             self.assertTrue((cauchy_grad == 0).all())

-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             test_grad()
