diff --git a/test/legacy_test/op_test.py b/test/legacy_test/op_test.py index 2381070b95d3bf..5e917cd0f3c31b 100644 --- a/test/legacy_test/op_test.py +++ b/test/legacy_test/op_test.py @@ -389,29 +389,38 @@ def convert_uint16_to_float(in_list): return np.reshape(out, in_list.shape) -def get_places(string_format=False): +def get_places(): places = [] - if not string_format: - if ( - os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower() - in ['1', 'true', 'on'] - or not core.is_compiled_with_cuda() - ): - places.append(base.CPUPlace()) - if core.is_compiled_with_cuda(): - places.append(base.CUDAPlace(0)) - else: - if ( - os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower() - in ['1', 'true', 'on'] - or not paddle.is_compiled_with_cuda() - ): - places.append('cpu') - if paddle.is_compiled_with_cuda(): - places.append('gpu') + if ( + os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower() + in ['1', 'true', 'on'] + or not core.is_compiled_with_cuda() + ): + places.append(base.CPUPlace()) + if core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) + if is_custom_device(): + dev_type = paddle.device.get_all_custom_device_type()[0] + places.append(base.CustomPlace(dev_type, 0)) return places +def get_devices(): + devices = [] + if ( + os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower() + in ['1', 'true', 'on'] + or not paddle.is_compiled_with_cuda() + ): + devices.append('cpu') + if paddle.is_compiled_with_cuda(): + devices.append('gpu') + if is_custom_device(): + dev_type = paddle.device.get_all_custom_device_type()[0] + devices.append(f'{dev_type}:0') + return devices + + def get_device_place(): if core.is_compiled_with_cuda(): return base.CUDAPlace(0) @@ -423,6 +432,15 @@ def get_device_place(): return base.CPUPlace() +def is_custom_device(): + custom_dev_types = paddle.device.get_all_custom_device_type() + if custom_dev_types and paddle.device.is_compiled_with_custom_device( + custom_dev_types[0] + ): + return True + return False + + @contextmanager def auto_parallel_test_guard(test_info_path, generated_test_file_path): test_info_file, generated_test_file = None, None @@ -2902,6 +2920,13 @@ def _get_places(self): return [place] else: return [] + elif is_custom_device(): + dev_type = paddle.device.get_all_custom_device_type()[0] + place = core.CustomPlace(dev_type, 0) + if core.is_float16_supported(place): + return [place] + else: + return [] else: return [] places = [] @@ -2931,6 +2956,9 @@ def _get_places(self): and not cpu_only ): places.append(core.CUDAPlace(0)) + if is_custom_device(): + dev_type = paddle.device.get_all_custom_device_type()[0] + places.append(core.CustomPlace(dev_type, 0)) return places def check_output( diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py index a03b55c29008ea..a40ce6f718094d 100644 --- a/test/legacy_test/test_activation_op.py +++ b/test/legacy_test/test_activation_op.py @@ -23,6 +23,7 @@ convert_float_to_uint16, get_device_place, get_places, + is_custom_device, ) from scipy.special import erf, expit from utils import static_guard @@ -497,7 +498,8 @@ def init_shape(self): @unittest.skipIf( - not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(), + not (core.is_compiled_with_cuda() or is_custom_device()) + or core.is_compiled_with_rocm(), "core is not compiled with CUDA", ) class TestSigmoidBF16(OpTest): @@ -1765,7 +1767,8 @@ def init_dtype(self): @unittest.skipIf( - not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(), + not (core.is_compiled_with_cuda() or 
is_custom_device()) + or core.is_compiled_with_rocm(), "core is not compiled with CUDA", ) class TestSqrtBF16(OpTest): @@ -2037,7 +2040,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): self.__class__.no_need_check_grad = True def init_shape(self): @@ -2091,7 +2094,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): self.__class__.no_need_check_grad = True def init_shape(self): @@ -4563,7 +4566,8 @@ def init_shape(self): @unittest.skipIf( - not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(), + not (core.is_compiled_with_cuda() or is_custom_device()) + or core.is_compiled_with_rocm(), "core is not compiled with CUDA", ) class TestSquareBF16(OpTest): @@ -4917,7 +4921,8 @@ def init_shape(self): @unittest.skipIf( - not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(), + not (core.is_compiled_with_cuda() or is_custom_device()) + or core.is_compiled_with_rocm(), "core is not compiled with CUDA", ) class TestSoftplusBF16(OpTest): @@ -5595,7 +5600,8 @@ def test_errors(self): # ------------------ Test Cudnn Activation---------------------- def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestActCudnn(parent): def init_kernel_type(self): diff --git a/test/legacy_test/test_adadelta_op.py b/test/legacy_test/test_adadelta_op.py index 1650f246c25755..9dfa5d3e6380e1 100644 --- a/test/legacy_test/test_adadelta_op.py +++ b/test/legacy_test/test_adadelta_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest, get_device_place, get_places +from op_test import OpTest, get_device_place, get_devices import paddle from paddle import base @@ -294,7 +294,7 @@ def _test_adadelta_op_dygraph_place_amp(self, place, use_amp=False): paddle.enable_static() def test_main(self): - for place in get_places(string_format=True): + for place in get_devices(): use_amp_list = [True, False] for use_amp in use_amp_list: self._test_adadelta_op_dygraph_place_amp(place, use_amp) diff --git a/test/legacy_test/test_adagrad_op.py b/test/legacy_test/test_adagrad_op.py index 0b5d1fef458200..c5497d51f25bd7 100644 --- a/test/legacy_test/test_adagrad_op.py +++ b/test/legacy_test/test_adagrad_op.py @@ -17,7 +17,7 @@ import numpy as np from op import Operator -from op_test import OpTest, get_device_place, get_places +from op_test import OpTest, get_device_place, get_devices, get_places import paddle from paddle.base import core @@ -242,7 +242,7 @@ def _test_adagrad_op_dygraph_place_amp(self, place, use_amp=False): paddle.enable_static() def test_main(self): - for place in get_places(string_format=True): + for place in get_devices(): use_amp_list = [True, False] for use_amp in use_amp_list: self._test_adagrad_op_dygraph_place_amp(place, use_amp) diff --git a/test/legacy_test/test_adam_op.py b/test/legacy_test/test_adam_op.py index c2ebbea1653ad3..4875c0dda23c83 100644 --- a/test/legacy_test/test_adam_op.py +++ b/test/legacy_test/test_adam_op.py @@ -16,7 +16,7 @@ import numpy as np from op import Operator -from op_test 
import OpTest, get_places +from op_test import OpTest, get_devices, get_places import paddle from paddle import base @@ -1296,7 +1296,7 @@ def _adam_optimize_static( return out def _get_places(self): - return get_places(string_format=True) + return get_devices() def _check_with_place_amp(self, place, use_amp): # test dygraph mode diff --git a/test/legacy_test/test_adamax_op.py b/test/legacy_test/test_adamax_op.py index 8b3532794d0f28..5670e4b2751b71 100644 --- a/test/legacy_test/test_adamax_op.py +++ b/test/legacy_test/test_adamax_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest, get_device_place, get_places +from op_test import OpTest, get_device_place, get_devices import paddle @@ -275,7 +275,7 @@ def _test_adamax_op_dygraph_place_amp(self, place, use_amp=False): paddle.enable_static() def _get_places(self): - return get_places(string_format=True) + return get_devices() def test_main(self): for place in self._get_places(): diff --git a/test/legacy_test/test_adamw_op.py b/test/legacy_test/test_adamw_op.py index 904d87815427ec..1523468a75460d 100644 --- a/test/legacy_test/test_adamw_op.py +++ b/test/legacy_test/test_adamw_op.py @@ -18,7 +18,7 @@ from functools import partial import numpy as np -from op_test import OpTest, get_places +from op_test import OpTest, get_devices import paddle from paddle import base, nn @@ -758,7 +758,7 @@ def _test_adamw_op_dygraph_place_amp(self, place, use_amp=False): optimizer.clear_grad() def _get_places(self): - places = get_places(string_format=True) + places = get_devices() if paddle.is_compiled_with_xpu(): places.append('xpu') return places diff --git a/test/legacy_test/test_adaptive_log_softmax_with_loss.py b/test/legacy_test/test_adaptive_log_softmax_with_loss.py index 6210e1d469bda5..29728b8b25476d 100644 --- a/test/legacy_test/test_adaptive_log_softmax_with_loss.py +++ b/test/legacy_test/test_adaptive_log_softmax_with_loss.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_devices, get_places import paddle import paddle.optimizer as optim @@ -58,7 +58,7 @@ def predict(self, input): class TestNNAdaptiveLogSoftmaxWithLossAPI(unittest.TestCase): def setUp(self): paddle.seed(2024) - self.place = get_places(string_format=True) + self.place = get_devices() self.log_np = np.random.randn(4, 8).astype('float32') self.predict_np = np.abs(np.random.randn(64, 8).astype('float32')) diff --git a/test/legacy_test/test_blha_get_max_len_op.py b/test/legacy_test/test_blha_get_max_len_op.py index ab8b410a8c15ab..790e654dd4f1f6 100644 --- a/test/legacy_test/test_blha_get_max_len_op.py +++ b/test/legacy_test/test_blha_get_max_len_op.py @@ -15,6 +15,7 @@ import unittest import numpy as np +from op_test import is_custom_device import paddle from paddle.base import core @@ -109,7 +110,8 @@ def test_static_api(self): @unittest.skipIf( - not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(), + not (core.is_compiled_with_cuda() or is_custom_device()) + and not core.is_compiled_with_xpu(), "Only support XPU or GPU in CUDA mode.", ) class TestBlhaGetMaxLenOp_ZeroSize(unittest.TestCase): diff --git a/test/legacy_test/test_cartesian_prod.py b/test/legacy_test/test_cartesian_prod.py index 7246df017f8f7d..f7d0548a76527b 100644 --- a/test/legacy_test/test_cartesian_prod.py +++ b/test/legacy_test/test_cartesian_prod.py @@ -16,7 +16,7 @@ from itertools import product import numpy as np -from op_test import get_places +from op_test import get_devices import paddle from 
paddle.base import core @@ -36,7 +36,7 @@ def setUp(self): self.c_np = np.random.random(self.c_shape).astype(self.dtype_np) self.d_np = np.empty(0, self.dtype_np) - self.place = get_places(string_format=True) + self.place = get_devices() def init_setting(self): self.dtype_np = 'float32' @@ -119,7 +119,7 @@ def setUp(self): self.a_np = np.random.random(self.a_shape).astype(self.dtype_np) self.b_np = np.empty(0, self.dtype_np) - self.place = get_places(string_format=True) + self.place = get_devices() def init_setting(self): self.dtype_np = 'float32' diff --git a/test/legacy_test/test_cauchy_inplace.py b/test/legacy_test/test_cauchy_inplace.py index 4aa41ce0ca130e..ebe03c9acf8f57 100644 --- a/test/legacy_test/test_cauchy_inplace.py +++ b/test/legacy_test/test_cauchy_inplace.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_devices import paddle @@ -35,7 +35,7 @@ def test_fp64(): tensor_fp64.cauchy_() self.assertEqual(tensor_fp64.dtype, paddle.float64) - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) test_fp32() test_fp64() @@ -92,7 +92,7 @@ def test_cauchy_inplace_distribution(self): class TestCauchyInplaceEmptyTensor(unittest.TestCase): def test_cauchy_inplace_op_empty_tensor(self): test_shapes = [(200, 1), (1, 200)] - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) for test_shape in test_shapes: tensor = paddle.empty(shape=test_shape) @@ -118,7 +118,7 @@ def test_grad(): cauchy_grad = tensor_b.grad.numpy() self.assertTrue((cauchy_grad == 0).all()) - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) test_grad() diff --git a/test/legacy_test/test_class_center_sample_op.py b/test/legacy_test/test_class_center_sample_op.py index ad8a19acc15770..8302df224bb2de 100644 --- a/test/legacy_test/test_class_center_sample_op.py +++ b/test/legacy_test/test_class_center_sample_op.py @@ -15,10 +15,9 @@ import unittest import numpy as np -from op_test import OpTest, paddle_static_guard +from op_test import OpTest, get_places, paddle_static_guard import paddle -from paddle.base import core def class_center_sample_numpy(label, classes_list, num_samples): @@ -135,9 +134,7 @@ def setUp(self): self.initParams() np.random.seed(self.seed) paddle.framework.random._manual_program_seed(2021) - self.places = [paddle.base.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(paddle.base.CUDAPlace(0)) + self.places = get_places() def initParams(self): self.batch_size = 10 @@ -235,9 +232,7 @@ class TestClassCenterSampleAPIError(unittest.TestCase): def setUp(self): self.initParams() np.random.seed(self.seed) - self.places = [paddle.base.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(paddle.base.CUDAPlace(0)) + self.places = get_places() def initParams(self): self.batch_size = 20 @@ -275,9 +270,7 @@ class TestClassCenterSampleAPIError1(unittest.TestCase): def setUp(self): self.initParams() np.random.seed(self.seed) - self.places = [paddle.base.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(paddle.base.CUDAPlace(0)) + self.places = get_places() def initParams(self): self.batch_size = 5 diff --git a/test/legacy_test/test_combinations.py b/test/legacy_test/test_combinations.py index 1390fa90265895..f2f0e49fdd2748 100644 --- a/test/legacy_test/test_combinations.py +++ b/test/legacy_test/test_combinations.py @@ -16,7 +16,7 @@ from itertools import 
combinations, combinations_with_replacement import numpy as np -from op_test import get_places +from op_test import get_devices import paddle from paddle.base import Program @@ -47,7 +47,7 @@ def setUp(self): self.modify_setting() self.x_np = np.random.random(self.x_shape).astype(self.dtype_np) - self.place = get_places(string_format=True) + self.place = get_devices() def init_setting(self): self.dtype_np = 'float64' @@ -120,7 +120,7 @@ def modify_setting(self): class TestCombinationsEmpty(unittest.TestCase): def setUp(self): - self.place = get_places(string_format=True) + self.place = get_devices() def test_dygraph(self): paddle.disable_static() diff --git a/test/legacy_test/test_cross_op.py b/test/legacy_test/test_cross_op.py index 573021b0d07f88..601bb87927cef5 100644 --- a/test/legacy_test/test_cross_op.py +++ b/test/legacy_test/test_cross_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16 +from op_test import OpTest, convert_float_to_uint16, is_custom_device import paddle from paddle import base @@ -77,7 +77,8 @@ def init_output(self): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestCrossFP16Op(TestCrossOp): def initTestCase(self): diff --git a/test/legacy_test/test_determinant_op.py b/test/legacy_test/test_determinant_op.py index 7301fbeafd0610..1362f4a6dd30a9 100644 --- a/test/legacy_test/test_determinant_op.py +++ b/test/legacy_test/test_determinant_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest +from op_test import OpTest, get_places import paddle @@ -430,9 +430,7 @@ def setUp(self): self.x = np.vectorize(complex)( np.random.random(self.shape), np.random.random(self.shape) ).astype(self.dtype) - self.places = [paddle.CPUPlace()] - if paddle.base.core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() self.out_grad = ( np.array([1 + 0j, 1 + 0j] * 3 * 3) .reshape(2, 3, 3) @@ -502,9 +500,7 @@ def setUp(self): self.x = np.vectorize(complex)( np.random.random(self.shape), np.random.random(self.shape) ).astype(self.dtype) - self.places = [paddle.CPUPlace()] - if paddle.base.core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() self.out_grad = np.array([3 + 0j, 3 + 0j] * 6).reshape(2, 6) self.x_grad_ref_dy = self.get_numeric_grad( self.x, self.shape, self.out_grad diff --git a/test/legacy_test/test_elementwise_mod_op.py b/test/legacy_test/test_elementwise_mod_op.py index 982b1a310093b9..3620215c186114 100644 --- a/test/legacy_test/test_elementwise_mod_op.py +++ b/test/legacy_test/test_elementwise_mod_op.py @@ -16,7 +16,12 @@ import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float +from op_test import ( + OpTest, + convert_float_to_uint16, + convert_uint16_to_float, + is_custom_device, +) from utils import dygraph_guard, static_guard import paddle @@ -124,7 +129,8 @@ def test_check_output(self): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestElementwiseModFP16Op(TestElementwiseModOp): def init_dtype(self): diff --git a/test/legacy_test/test_elementwise_mul_op.py b/test/legacy_test/test_elementwise_mul_op.py index 9f4fcb43bec869..a4f365ea92b1a8 100644 --- 
a/test/legacy_test/test_elementwise_mul_op.py +++ b/test/legacy_test/test_elementwise_mul_op.py @@ -15,7 +15,12 @@ import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci +from op_test import ( + OpTest, + convert_float_to_uint16, + is_custom_device, + skip_check_grad_ci, +) import paddle from paddle import base @@ -472,7 +477,8 @@ def init_input_attr_output(self): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestElementwiseMulOpFp16(ElementwiseMulOp): def init_dtype(self): diff --git a/test/legacy_test/test_embedding_scale_grad_by_freq.py b/test/legacy_test/test_embedding_scale_grad_by_freq.py index 63e408a88422be..e996fc66c41033 100644 --- a/test/legacy_test/test_embedding_scale_grad_by_freq.py +++ b/test/legacy_test/test_embedding_scale_grad_by_freq.py @@ -15,6 +15,7 @@ import unittest import numpy as np +from op_test import get_places import paddle from paddle.nn.functional import embedding @@ -32,9 +33,7 @@ def ref_embedding_scale_grad_(x, weight_unscaled_grad): class TestEmbeddingAPIScaleGradByFreq(unittest.TestCase): def setUp(self): self.init_data() - self.places = [paddle.CPUPlace()] - if paddle.core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() def init_data(self): self.dtype = "float32" diff --git a/test/legacy_test/test_fused_gate_attention_op.py b/test/legacy_test/test_fused_gate_attention_op.py index 43ee9ab844ee08..49f44c7f9b9d40 100644 --- a/test/legacy_test/test_fused_gate_attention_op.py +++ b/test/legacy_test/test_fused_gate_attention_op.py @@ -20,7 +20,12 @@ import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float +from op_test import ( + OpTest, + convert_float_to_uint16, + convert_uint16_to_float, + is_custom_device, +) from test_sparse_attention_op import get_cuda_version import paddle @@ -30,7 +35,8 @@ @unittest.skipIf( - not core.is_compiled_with_cuda(), "Paddle is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "Paddle is not compiled with CUDA", ) class TestFusedGateAttentionOp(OpTest): def setUp(self): @@ -474,7 +480,7 @@ def setUp(self): ] def test_api(self): - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): pass query = paddle.rand(shape=self.query_shape, dtype="float32") diff --git a/test/legacy_test/test_fused_rotary_position_embedding.py b/test/legacy_test/test_fused_rotary_position_embedding.py index ce26cdff7ec858..b3a9ed4a09ffee 100644 --- a/test/legacy_test/test_fused_rotary_position_embedding.py +++ b/test/legacy_test/test_fused_rotary_position_embedding.py @@ -16,6 +16,7 @@ import numpy as np import parameterized as param +from op_test import is_custom_device import paddle from paddle.base import core @@ -158,7 +159,8 @@ def paddle_fused_rotary_position_embedding( @unittest.skipIf( - not core.is_compiled_with_cuda() and not paddle.is_compiled_with_rocm(), + not (core.is_compiled_with_cuda() or is_custom_device()) + and not paddle.is_compiled_with_rocm(), "core is not compiled with CUDA or ROCM ", ) @param.parameterized_class( @@ -693,7 +695,8 @@ def test_error2(): @unittest.skipIf( - not core.is_compiled_with_cuda() and not paddle.is_compiled_with_rocm(), + not (core.is_compiled_with_cuda() or is_custom_device()) + and not paddle.is_compiled_with_rocm(), "core is 
not compiled with CUDA or ROCM ", ) class TestFusedRotaryPositionEmbeddingZeroSize(unittest.TestCase): diff --git a/test/legacy_test/test_gaussian_random_op.py b/test/legacy_test/test_gaussian_random_op.py index 59b80920234233..c4f860bcc7e973 100644 --- a/test/legacy_test/test_gaussian_random_op.py +++ b/test/legacy_test/test_gaussian_random_op.py @@ -15,7 +15,12 @@ import unittest import numpy as np -from op_test import OpTest, convert_uint16_to_float, paddle_static_guard +from op_test import ( + OpTest, + convert_uint16_to_float, + is_custom_device, + paddle_static_guard, +) import paddle from paddle import base @@ -61,7 +66,8 @@ def verify_output(self, outs): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestGaussianRandomFP16Op(OpTest): def setUp(self): @@ -111,7 +117,8 @@ def gauss_wrapper(shape, mean, std, seed, dtype=np.uint16, name=None): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestGaussianRandomBF16Op(OpTest): def setUp(self): diff --git a/test/legacy_test/test_geometric_inplace.py b/test/legacy_test/test_geometric_inplace.py index 9b5177eac04b8b..baed59705189aa 100644 --- a/test/legacy_test/test_geometric_inplace.py +++ b/test/legacy_test/test_geometric_inplace.py @@ -16,7 +16,7 @@ import numpy as np import scipy.stats -from op_test import get_places +from op_test import get_devices import paddle @@ -36,7 +36,7 @@ def test_fp64(): tensor_fp64.geometric_(probs=0.3) self.assertEqual(tensor_fp64.dtype, paddle.float64) - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) test_fp32() test_fp64() @@ -96,7 +96,7 @@ def test_geometric_inplace_distribution(self): class TestGeometricInplaceEmptyTensor(unittest.TestCase): def test_geometric_inplace_op_empty_tensor(self): test_shapes = [(200, 1), (1, 200)] - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) for test_shape in test_shapes: tensor = paddle.empty(shape=test_shape) @@ -122,7 +122,7 @@ def test_grad(): geometric_grad = tensor_b.grad.numpy() self.assertTrue((geometric_grad == 0).all()) - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) test_grad() diff --git a/test/legacy_test/test_group_norm_op_v2.py b/test/legacy_test/test_group_norm_op_v2.py index 2ae1a72c2c2b29..1a6c5aeafd8781 100644 --- a/test/legacy_test/test_group_norm_op_v2.py +++ b/test/legacy_test/test_group_norm_op_v2.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_places, is_custom_device from utils import dygraph_guard import paddle @@ -243,7 +243,7 @@ def test_numerical_accuracy(self): class TestGroupNormAPIV2_With_General_Dimensions_fp16(unittest.TestCase): def test_numerical_accuracy(self): # fp16 only supported in cuda - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): return paddle.disable_static() shapes = [ @@ -286,7 +286,7 @@ def test_numerical_accuracy(self): class TestGroupNormAPIV2_With_NCL_fp16(unittest.TestCase): def test_numerical_accuracy(self): - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): return paddle.disable_static() shape = (2, 6, 4) @@ -327,7 
+327,7 @@ def test_numerical_accuracy(self): class TestGroupNormAPIV2_With_NCDHW_fp16(unittest.TestCase): def test_numerical_accuracy(self): - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): return paddle.disable_static() shape = (2, 6, 4, 2, 2) @@ -368,7 +368,7 @@ def test_numerical_accuracy(self): class TestGroupNormAPIV2_With_NLC_fp16(unittest.TestCase): def test_numerical_accuracy(self): - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): return paddle.disable_static() shape = (2, 4, 6) @@ -409,7 +409,7 @@ def test_numerical_accuracy(self): class TestGroupNormAPIV2_With_NHWC_fp16(unittest.TestCase): def test_numerical_accuracy(self): - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): return paddle.disable_static() shape = (2, 4, 2, 6) @@ -450,7 +450,7 @@ def test_numerical_accuracy(self): class TestGroupNormAPIV2_With_NDHWC_fp16(unittest.TestCase): def test_numerical_accuracy(self): - if not core.is_compiled_with_cuda(): + if not (core.is_compiled_with_cuda() or is_custom_device()): return paddle.disable_static() shape = (2, 4, 2, 2, 6) diff --git a/test/legacy_test/test_imperative_triple_grad.py b/test/legacy_test/test_imperative_triple_grad.py index 2cec3112913fd2..a873b58768279e 100644 --- a/test/legacy_test/test_imperative_triple_grad.py +++ b/test/legacy_test/test_imperative_triple_grad.py @@ -16,6 +16,7 @@ from unittest import TestCase import numpy as np +from op_test import get_devices import paddle from paddle import base @@ -327,9 +328,7 @@ def setUp(self): self.input_numpy_dout = None self.input_numpy_ddx = None self.input_numpy_ddy = None - self.places = ["cpu"] - if paddle.is_compiled_with_cuda(): - self.places.append("gpu") + self.places = get_devices() def actual(self): x = paddle.to_tensor( @@ -657,9 +656,7 @@ def setUp(self): self.input_numpy_dout = None self.input_numpy_ddx = None self.input_numpy_ddy = None - self.places = ["cpu"] - if paddle.is_compiled_with_cuda(): - self.places.append("gpu") + self.places = get_devices() def actual(self): x = paddle.to_tensor( @@ -961,9 +958,7 @@ def setUp(self): self.input_numpy_dout = None self.input_numpy_ddx = None self.input_numpy_ddy = None - self.places = ["cpu"] - if paddle.is_compiled_with_cuda(): - self.places.append("gpu") + self.places = get_devices() def actual(self): x = paddle.to_tensor( diff --git a/test/legacy_test/test_index_add_op.py b/test/legacy_test/test_index_add_op.py index c98652902aa845..b3383e1ce14cef 100644 --- a/test/legacy_test/test_index_add_op.py +++ b/test/legacy_test/test_index_add_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16 +from op_test import OpTest, convert_float_to_uint16, get_devices import paddle from paddle.base import core @@ -199,10 +199,7 @@ def setType(self): self.index_type = np.int32 def setPlace(self): - self.place = [] - self.place.append('cpu') - if paddle.is_compiled_with_cuda(): - self.place.append('gpu') + self.place = get_devices() def config(self): self.axis = 0 diff --git a/test/legacy_test/test_index_fill.py b/test/legacy_test/test_index_fill.py index 32035caa8c3975..147439e7aa929d 100644 --- a/test/legacy_test/test_index_fill.py +++ b/test/legacy_test/test_index_fill.py @@ -16,7 +16,7 @@ from itertools import combinations import numpy as np -from op_test import get_places +from op_test import get_devices import paddle from paddle.base import Program @@ 
-44,7 +44,7 @@ def setUp(self): self.index_type ) - self.place = get_places(string_format=True) + self.place = get_devices() if self.dtype_np == 'float16' and 'cpu' in self.place: self.place.remove('cpu') @@ -150,7 +150,7 @@ def setUp(self): self.index_type ) - self.place = get_places(string_format=True) + self.place = get_devices() if self.dtype_np == 'float16' and 'cpu' in self.place: self.place.remove('cpu') diff --git a/test/legacy_test/test_index_put_op.py b/test/legacy_test/test_index_put_op.py index 8ef3499026e2b3..722742f2e84f97 100644 --- a/test/legacy_test/test_index_put_op.py +++ b/test/legacy_test/test_index_put_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_devices import paddle @@ -120,7 +120,7 @@ def init_dtype_type(self): self.accumulate = False def setPlace(self): - self.place = get_places(string_format=True) + self.place = get_devices() if self.dtype_np is np.float16 and "cpu" in self.place: self.place.remove("cpu") @@ -620,7 +620,7 @@ def init_dtype_type(self): self.accumulate = False def setPlace(self): - self.place = get_places(string_format=True) + self.place = get_devices() def test_dygraph_forward(self): paddle.disable_static() @@ -661,7 +661,7 @@ def setUp(self): self.setPlace() def setPlace(self): - self.place = get_places(string_format=True) + self.place = get_devices() def test_backward(self): paddle.disable_static() @@ -1019,7 +1019,7 @@ def init_dtype_type(self): self.index_type_pd = paddle.int64 def setPlace(self): - self.place = get_places(string_format=True) + self.place = get_devices() if self.dtype_np is np.float16 and "cpu" in self.place: self.place.remove("cpu") diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py index 41ea4ebbf7625d..fa176448470075 100755 --- a/test/legacy_test/test_inplace.py +++ b/test/legacy_test/test_inplace.py @@ -2090,9 +2090,7 @@ def test_broadcast_error(self): class TestDygraphInplaceSet(unittest.TestCase): def setUp(self): self.init_data() - self.places = [paddle.CPUPlace()] - if paddle.base.core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() self.support_dtypes = [ 'float32', 'float64', @@ -2274,7 +2272,7 @@ def leaf_inplace_error(): class TestDygraphInplaceSetFP16(TestDygraphInplaceSet): def setUp(self): self.init_data() - self.places = [paddle.CUDAPlace(0)] + self.places = get_places() def init_data(self): self.x_np = np.random.uniform(-5, 5, [7, 20, 2]) @@ -2304,7 +2302,7 @@ def test_inplace_api(self): class TestDygraphInplaceSetBF16(TestDygraphInplaceSet): def setUp(self): self.init_data() - self.places = [paddle.CUDAPlace(0)] + self.places = get_places() def init_data(self): self.x_np = np.random.uniform(-5, 5, [7, 20, 2]) @@ -2329,9 +2327,7 @@ def test_inplace_api(self): class TestDygraphInplaceResize(unittest.TestCase): def setUp(self): self.init_data() - self.places = [paddle.CPUPlace()] - if paddle.base.core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() self.support_dtypes = [ 'float32', 'float64', @@ -2444,7 +2440,7 @@ def argument_error(): class TestDygraphInplaceResizeFP16(TestDygraphInplaceResize): def setUp(self): self.init_data() - self.places = [paddle.CUDAPlace(0)] + self.places = get_places() def init_data(self): self.x_np = np.random.uniform(-5, 5, [3, 10, 2]) @@ -2472,7 +2468,7 @@ def test_inplace_api(self): class TestDygraphInplaceResizeBF16(TestDygraphInplaceResize): def setUp(self): self.init_data() - 
self.places = [paddle.CUDAPlace(0)] + self.places = get_places() def init_data(self): self.x_np = np.random.uniform(-5, 5, [3, 10, 2]) diff --git a/test/legacy_test/test_ldexp.py b/test/legacy_test/test_ldexp.py index d4edd57e0cb39f..47d3025cd047bc 100644 --- a/test/legacy_test/test_ldexp.py +++ b/test/legacy_test/test_ldexp.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_devices, get_places import paddle @@ -86,7 +86,7 @@ def check_dtype(input, desired_dtype): class TestLdexpAPIWithDynamic(unittest.TestCase): def setUp(self): - self.places = get_places(string_format=True) + self.places = get_devices() def test_ldexp_dynamic(self): np.random.seed(7) @@ -136,7 +136,7 @@ def test_ldexp_dynamic(self): class TestLdexpAPIWithStatic(unittest.TestCase): def setUp(self): - self.places = get_places(string_format=True) + self.places = get_devices() def test_ldexp_static(self): np.random.seed(7) diff --git a/test/legacy_test/test_linalg_vecdot.py b/test/legacy_test/test_linalg_vecdot.py index 2dafe849ad2bcd..7a251943e6a990 100644 --- a/test/legacy_test/test_linalg_vecdot.py +++ b/test/legacy_test/test_linalg_vecdot.py @@ -17,6 +17,7 @@ import unittest import numpy as np +from op_test import get_places import paddle from paddle.base import core @@ -34,9 +35,7 @@ def setUp(self): self.init_config() self.generate_input() self.generate_expected_output() - self.places = [paddle.CPUPlace()] - if paddle.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() def generate_input(self): np.random.seed(123) diff --git a/test/legacy_test/test_log_normal_inplace.py b/test/legacy_test/test_log_normal_inplace.py index 5cb29367ee7929..e2b25289a34128 100644 --- a/test/legacy_test/test_log_normal_inplace.py +++ b/test/legacy_test/test_log_normal_inplace.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_devices import paddle @@ -44,7 +44,7 @@ def test_fp64(): tensor_fp64.log_normal_() self.assertEqual(tensor_fp64.dtype, paddle.float64) - places = get_places(string_format=True) + places = get_devices() for place in places: paddle.set_device(place) test_fp32() @@ -105,7 +105,7 @@ def test_log_normal_inplace_op_distribution(self): class TestLogNormalRandomInplaceOpEmptyTensor(unittest.TestCase): def test_log_normal_inplace_op_empty_tensor(self): - places = get_places(string_format=True) + places = get_devices() test_shapes = [(200, 0), (0, 200)] for place in places: paddle.set_device(place) @@ -133,7 +133,7 @@ def test_grad(): log_normal_grad = tensor_b.grad.numpy() self.assertTrue((log_normal_grad == 0).all()) - places = get_places(string_format=True) + places = get_devices() for place in places: paddle.set_device(place) test_grad() diff --git a/test/legacy_test/test_margin_cross_entropy_op.py b/test/legacy_test/test_margin_cross_entropy_op.py index e8f3de35941639..e7bbb93e7a072f 100644 --- a/test/legacy_test/test_margin_cross_entropy_op.py +++ b/test/legacy_test/test_margin_cross_entropy_op.py @@ -15,7 +15,13 @@ import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16, paddle_static_guard +from op_test import ( + OpTest, + convert_float_to_uint16, + get_places, + is_custom_device, + paddle_static_guard, +) import paddle from paddle.base import core @@ -329,16 +335,15 @@ def test_check_grad(self): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or 
is_custom_device()), + "core is not compiled with CUDA", ) class TestMarginCrossEntropyOpV2(unittest.TestCase): def setUp(self): self.initParams() np.random.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) - self.places = [] - if core.is_compiled_with_cuda(): - self.places.append(paddle.base.CUDAPlace(0)) + self.places = get_places() def initParams(self): self.python_out_sig = ["Loss"] @@ -501,16 +506,15 @@ def init_reduction(self): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestMarginCrossEntropyOpAPIError(unittest.TestCase): def setUp(self): self.initParams() np.random.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) - self.places = [] - if core.is_compiled_with_cuda(): - self.places.append(paddle.base.CUDAPlace(0)) + self.places = get_places() def initParams(self): self.python_api = python_api diff --git a/test/legacy_test/test_matmul_0_size_op.py b/test/legacy_test/test_matmul_0_size_op.py index fc3f3c3230044b..795ffb1d9ce89a 100644 --- a/test/legacy_test/test_matmul_0_size_op.py +++ b/test/legacy_test/test_matmul_0_size_op.py @@ -14,13 +14,16 @@ import unittest +from op_test import is_custom_device + import paddle from paddle import _C_ops from paddle.base import core @unittest.skipIf( - not core.is_compiled_with_cuda(), "mamtul 0 size only with in cuda" + not (core.is_compiled_with_cuda() or is_custom_device()), + "mamtul 0 size only with in cuda", ) class TestMatmulDygraph(unittest.TestCase): def test_matmul(self): diff --git a/test/legacy_test/test_max_op.py b/test/legacy_test/test_max_op.py index 64e3cd15362003..741024f8059de4 100644 --- a/test/legacy_test/test_max_op.py +++ b/test/legacy_test/test_max_op.py @@ -156,9 +156,7 @@ def setUp(self): self.expect_res = np.max( self.data, axis=tuple(self.axis), keepdims=self.keepdims ) - self.places = [core.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(core.CUDAPlace(0)) + self.places = get_places() def test_static(self): with static_guard(): diff --git a/test/legacy_test/test_mean_op.py b/test/legacy_test/test_mean_op.py index 464f8852ab3861..01ecd450383ec7 100644 --- a/test/legacy_test/test_mean_op.py +++ b/test/legacy_test/test_mean_op.py @@ -828,9 +828,7 @@ def setUp(self): self.x_np = np.random.randint(-1, 10000, self.x_shape).astype( self.dtype ) - self.places = [paddle.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() def test_dygraph(self): for place in self.places: @@ -864,9 +862,7 @@ def setUp(self): self.x_np = np.random.randint(-1, 10000, self.x_shape).astype( self.dtype ) - self.places = [paddle.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() class TestMeanAPIBool(TestMeanAPIInt32): @@ -874,9 +870,7 @@ def setUp(self): self.x_shape = [2, 3, 4, 5] self.dtype = "bool" self.x_np = np.random.uniform(-1, 1, self.x_shape).astype(self.dtype) - self.places = [paddle.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() class TestMeanWithTensorAxis1(TestReduceOPTensorAxisBase): diff --git a/test/legacy_test/test_merged_adam_op.py b/test/legacy_test/test_merged_adam_op.py index e590f7cfa9c900..e474a8978b4fea 100644 --- a/test/legacy_test/test_merged_adam_op.py +++ b/test/legacy_test/test_merged_adam_op.py @@ -15,7 +15,7 @@ 
import unittest import numpy as np -from op_test import get_places +from op_test import get_devices import paddle from paddle import _C_ops @@ -205,7 +205,7 @@ def run_op(use_merged): def test_main(self): for multi_precision in [False, True]: - for place in get_places(string_format=True): + for place in get_devices(): self.check_with_place(place, multi_precision) diff --git a/test/legacy_test/test_min_op.py b/test/legacy_test/test_min_op.py index ef0cc06b117ab7..f162bfcc347938 100644 --- a/test/legacy_test/test_min_op.py +++ b/test/legacy_test/test_min_op.py @@ -143,9 +143,7 @@ def setUp(self): self.expect_res = np.min( self.data, axis=tuple(self.axis), keepdims=self.keepdims ) - self.places = [core.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(core.CUDAPlace(0)) + self.places = get_places() def test_static(self): with static_guard(): diff --git a/test/legacy_test/test_mode_op.py b/test/legacy_test/test_mode_op.py index 227e966b47c05a..8064c53ac5bd9e 100644 --- a/test/legacy_test/test_mode_op.py +++ b/test/legacy_test/test_mode_op.py @@ -15,7 +15,12 @@ import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float +from op_test import ( + OpTest, + convert_float_to_uint16, + convert_uint16_to_float, + is_custom_device, +) import paddle from paddle import base @@ -121,7 +126,8 @@ def test_check_grad(self): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestModeFP16Op(TestModeOp): def init_dtype(self): @@ -168,7 +174,8 @@ def init_args(self): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestModeFP16OpLastdim(TestModeFP16Op): def init_args(self): @@ -177,7 +184,8 @@ def init_args(self): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestModeBF16OpLastdim(TestModeBF16Op): def init_args(self): diff --git a/test/legacy_test/test_momentum_op.py b/test/legacy_test/test_momentum_op.py index fb68dc9d91a23c..ec7411770ff3a9 100644 --- a/test/legacy_test/test_momentum_op.py +++ b/test/legacy_test/test_momentum_op.py @@ -16,7 +16,7 @@ import numpy as np from op import Operator -from op_test import OpTest, get_places +from op_test import OpTest, get_devices, get_places import paddle from paddle import base @@ -1036,7 +1036,7 @@ def _check_with_param_group(self, place, use_amp): np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) def test_main(self): - for place in get_places(string_format=True): + for place in get_devices(): use_amp_list = [True, False] for use_amp in use_amp_list: self._check_with_place_amp(place, use_amp) diff --git a/test/legacy_test/test_multi_label_soft_margin_loss.py b/test/legacy_test/test_multi_label_soft_margin_loss.py index 29cf724d7e69f3..5f4e8b6e33fa55 100644 --- a/test/legacy_test/test_multi_label_soft_margin_loss.py +++ b/test/legacy_test/test_multi_label_soft_margin_loss.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_devices import paddle @@ -145,7 +145,7 @@ def test_MultiLabelSoftMarginLoss(self): input = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64) label = np.random.randint(0, 2, 
size=(5, 5)).astype(np.float64) - places = get_places(string_format=True) + places = get_devices() reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: diff --git a/test/legacy_test/test_nadam_op.py b/test/legacy_test/test_nadam_op.py index 509eba6dc66176..e84723ffed7e4a 100644 --- a/test/legacy_test/test_nadam_op.py +++ b/test/legacy_test/test_nadam_op.py @@ -16,7 +16,7 @@ from copy import deepcopy import numpy as np -from op_test import OpTest, get_device_place, get_places +from op_test import OpTest, get_device_place, get_devices, get_places import paddle from paddle import base @@ -460,7 +460,7 @@ def _test_nadam_dygraph_place_amp(self, place, use_amp=False): optimizer.clear_grad() def test_main(self): - for place in get_places(string_format=True): + for place in get_devices(): use_amp_list = [True, False] for use_amp in use_amp_list: self._test_nadam_dygraph_place_amp(place, use_amp) diff --git a/test/legacy_test/test_normal_inplace.py b/test/legacy_test/test_normal_inplace.py index 762595bdd52ae8..775d38fdbaff4d 100644 --- a/test/legacy_test/test_normal_inplace.py +++ b/test/legacy_test/test_normal_inplace.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_devices import paddle @@ -43,7 +43,7 @@ def test_fp64(): tensor_fp64.normal_() self.assertEqual(tensor_fp64.dtype, paddle.float64) - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) test_fp32() test_fp64() @@ -64,7 +64,7 @@ def test_fp64(): tensor_fp64.normal_() self.assertEqual(tensor_fp64.dtype, paddle.complex128) - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) test_fp32() test_fp64() @@ -164,7 +164,7 @@ def test_normal_inplace_op_distribution(self): class TestNormalRandomInplaceOpEmptyTensor(unittest.TestCase): def test_normal_inplace_op_empty_tensor(self): test_shapes = [(200, 0), (0, 200)] - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) for test_shape in test_shapes: tensor = paddle.empty(shape=test_shape) @@ -190,7 +190,7 @@ def test_grad(): normal_grad = tensor_b.grad.numpy() self.assertTrue((normal_grad == 0).all()) - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) test_grad() @@ -215,7 +215,7 @@ def test_grad(): self.assertTrue((normal_grad.real == 0).all()) self.assertTrue((normal_grad.imag == 0).all()) - for place in get_places(string_format=True): + for place in get_devices(): paddle.set_device(place) test_grad() diff --git a/test/legacy_test/test_pad3d_op.py b/test/legacy_test/test_pad3d_op.py index 46c3ab42ab99f3..e1ed377e851841 100644 --- a/test/legacy_test/test_pad3d_op.py +++ b/test/legacy_test/test_pad3d_op.py @@ -15,7 +15,12 @@ import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16, get_places +from op_test import ( + OpTest, + convert_float_to_uint16, + get_places, + is_custom_device, +) import paddle import paddle.nn.functional as F @@ -221,7 +226,8 @@ def test_check_output(self): def create_test_fp16(parent): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestPad3dFp16(parent): def get_dtype(self): @@ -304,7 +310,8 @@ def test_check_grad_normal(self): # ----------------Pad3d complex64---------------- def create_test_complex64(parent): 
@unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestPad3dComplex64(parent): def get_dtype(self): @@ -344,7 +351,8 @@ def test_check_grad_normal(self): def create_test_complex128(parent): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestPad3dComplex128(parent): def get_dtype(self): diff --git a/test/legacy_test/test_pow.py b/test/legacy_test/test_pow.py index 087c748337bf67..b3f32797cb43d7 100755 --- a/test/legacy_test/test_pow.py +++ b/test/legacy_test/test_pow.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import get_places +from op_test import get_devices import paddle from paddle.static import Program, program_guard @@ -79,7 +79,7 @@ class TestPowerAPI(unittest.TestCase): """TestPowerAPI.""" def setUp(self): - self.places = get_places(string_format=True) + self.places = get_devices() def test_power(self): """test_power.""" @@ -227,7 +227,7 @@ class TestPowerAPI_ZeroSize(unittest.TestCase): """TestPowerAPI.""" def setUp(self): - self.places = get_places(string_format=True) + self.places = get_devices() def _test_power(self, shape): np.random.seed(7) diff --git a/test/legacy_test/test_pow_op.py b/test/legacy_test/test_pow_op.py index 9cab82ca7f9755..cd8d5200b6b258 100644 --- a/test/legacy_test/test_pow_op.py +++ b/test/legacy_test/test_pow_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest +from op_test import OpTest, get_places import paddle from paddle.framework import core @@ -39,9 +39,7 @@ def setUp(self): self.outputs = { 'Out': np.power(self.inputs['X'], self.attrs["factor"]) } - self.places = [core.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(core.CUDAPlace(0)) + self.places = get_places() def custom_setting(self): self.inputs = { diff --git a/test/legacy_test/test_psroi_pool_op.py b/test/legacy_test/test_psroi_pool_op.py index 1f954aa102ee05..aac28c59297ebe 100644 --- a/test/legacy_test/test_psroi_pool_op.py +++ b/test/legacy_test/test_psroi_pool_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np -from op_test import OpTest, get_places +from op_test import OpTest, get_devices, get_places import paddle @@ -228,7 +228,7 @@ def test_dytype_is_float64(): ) np.testing.assert_allclose(out, expect_out, rtol=1e-05) - places = get_places(string_format=True) + places = get_devices() for place in places: paddle.set_device(place) test_output_size_is_int() @@ -282,7 +282,7 @@ def test_dytype_is_float64(): np.testing.assert_allclose(out, expect_out, rtol=1e-05) paddle.disable_static() - places = get_places(string_format=True) + places = get_devices() for place in places: paddle.set_device(place) test_output_size_is_int() diff --git a/test/legacy_test/test_radam_op.py b/test/legacy_test/test_radam_op.py index 27124e841a58d1..23efcbf887ba25 100644 --- a/test/legacy_test/test_radam_op.py +++ b/test/legacy_test/test_radam_op.py @@ -16,7 +16,7 @@ from copy import deepcopy import numpy as np -from op_test import OpTest, get_device_place, get_places +from op_test import OpTest, get_device_place, get_devices, get_places import paddle from paddle import base @@ -471,7 +471,7 @@ def _test_radam_dygraph_place_amp(self, place, use_amp=False): optimizer.clear_grad() def test_main(self): - for place in get_places(string_format=True): + 
for place in get_devices(): use_amp_list = [True, False] for use_amp in use_amp_list: self._test_radam_dygraph_place_amp(place, use_amp) diff --git a/test/legacy_test/test_random_seed.py b/test/legacy_test/test_random_seed.py index 2af2bfff71551b..2ef5fdc7e4a23d 100644 --- a/test/legacy_test/test_random_seed.py +++ b/test/legacy_test/test_random_seed.py @@ -16,6 +16,7 @@ import unittest import numpy as np +from op_test import is_custom_device import paddle from paddle import base @@ -51,7 +52,10 @@ def test_generator_uniform_random_dygraph(self): x2_np = x2.numpy() x3_np = x3.numpy() - if not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(): + if ( + not (core.is_compiled_with_cuda() or is_custom_device()) + and not core.is_compiled_with_xpu() + ): np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) @@ -85,7 +89,7 @@ def test_generator_uniform_random_static(self): out2_res2 = np.array(out2[1]) if ( - not core.is_compiled_with_cuda() + not (core.is_compiled_with_cuda() or is_custom_device()) and not core.is_compiled_with_xpu() ): np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) @@ -107,7 +111,10 @@ def test_gen_dropout_dygraph(self): y_np = y.numpy() y1_np = y1.numpy() - if not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(): + if ( + not (core.is_compiled_with_cuda() or is_custom_device()) + and not core.is_compiled_with_xpu() + ): print(">>>>>>> dropout dygraph >>>>>>>") np.testing.assert_allclose(y_np, y1_np, rtol=1e-05) @@ -132,7 +139,10 @@ def test_gen_dropout_static(self): out1_np = np.array(out1[0]) out2_np = np.array(out2[0]) - if not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(): + if ( + not (core.is_compiled_with_cuda() or is_custom_device()) + and not core.is_compiled_with_xpu() + ): print(">>>>>>> dropout static >>>>>>>") np.testing.assert_allclose(out1_np, out2_np, rtol=1e-05) @@ -153,7 +163,10 @@ def test_generator_gaussian_random_dygraph(self): x2_np = x2.numpy() x3_np = x3.numpy() - if not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(): + if ( + not (core.is_compiled_with_cuda() or is_custom_device()) + and not core.is_compiled_with_xpu() + ): print(">>>>>>> gaussian random dygraph >>>>>>>") np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) @@ -188,7 +201,7 @@ def test_generator_gaussian_random_static(self): out2_res2 = np.array(out2[1]) if ( - not core.is_compiled_with_cuda() + not (core.is_compiled_with_cuda() or is_custom_device()) and not core.is_compiled_with_xpu() ): print(">>>>>>> gaussian random static >>>>>>>") @@ -213,7 +226,10 @@ def test_generator_randint_dygraph(self): x2_np = x2.numpy() x3_np = x3.numpy() - if not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(): + if ( + not (core.is_compiled_with_cuda() or is_custom_device()) + and not core.is_compiled_with_xpu() + ): print(">>>>>>> randint dygraph >>>>>>>") np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) @@ -248,7 +264,7 @@ def test_generator_uniform_random_static_1(self): out2_res2 = np.array(out2[1]) if ( - not core.is_compiled_with_cuda() + not (core.is_compiled_with_cuda() or is_custom_device()) and not core.is_compiled_with_xpu() ): np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) @@ -271,7 +287,10 @@ def test_generator_randint_dygraph_1(self): x1_np = x1.numpy() x2_np = x2.numpy() x3_np = x3.numpy() - if not 
core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(): + if ( + not (core.is_compiled_with_cuda() or is_custom_device()) + and not core.is_compiled_with_xpu() + ): np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) @@ -305,7 +324,7 @@ def test_generator_ranint_static(self): out2_res2 = np.array(out2[1]) if ( - not core.is_compiled_with_cuda() + not (core.is_compiled_with_cuda() or is_custom_device()) and not core.is_compiled_with_xpu() ): print(">>>>>>> randint static >>>>>>>") @@ -331,7 +350,10 @@ def test_generator_randperm_dygraph(self): x2_np = x2.numpy() x3_np = x3.numpy() - if not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu(): + if ( + not (core.is_compiled_with_cuda() or is_custom_device()) + and not core.is_compiled_with_xpu() + ): print(">>>>>>> randperm dygraph >>>>>>>") np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) @@ -366,7 +388,7 @@ def test_generator_randperm_static(self): out2_res2 = np.array(out2[1]) if ( - not core.is_compiled_with_cuda() + not (core.is_compiled_with_cuda() or is_custom_device()) and not core.is_compiled_with_xpu() ): print(">>>>>>> randperm static >>>>>>>") diff --git a/test/legacy_test/test_reduce_op.py b/test/legacy_test/test_reduce_op.py index 76b7a4a5b761a7..85e8b036d2b2fd 100644 --- a/test/legacy_test/test_reduce_op.py +++ b/test/legacy_test/test_reduce_op.py @@ -19,6 +19,7 @@ OpTest, convert_float_to_uint16, get_places, + is_custom_device, skip_check_grad_ci, ) from utils import dygraph_guard, static_guard @@ -192,7 +193,8 @@ def test_check_grad(self): def create_test_fp16_class(parent): @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestSumOpFp16(parent): def init_dtype(self): @@ -341,9 +343,7 @@ class TestSumAPIZeroDimKeepDim(unittest.TestCase): def setUp(self): np.random.seed(123) paddle.enable_static() - self.places = [paddle.CPUPlace()] - if paddle.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) + self.places = get_places() def test_static(self): for place in self.places: @@ -2365,9 +2365,7 @@ def setUp(self): "complex64", "complex128", ] - self.places = [base.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(base.CUDAPlace(0)) + self.places = get_places() def calculate_expected_result(self, x_np, axis, keepdim): expected_result = np.all(x_np, axis=axis, keepdims=keepdim) @@ -2454,9 +2452,7 @@ def setUp(self): "complex64", "complex128", ] - self.places = [base.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(base.CUDAPlace(0)) + self.places = get_places() def calculate_expected_result(self, x_np, axis, keepdim): expected_result = np.any(x_np, axis=axis, keepdims=keepdim) diff --git a/test/legacy_test/test_restrict_nonzero.py b/test/legacy_test/test_restrict_nonzero.py index a8d072710f0a7c..62a7607f193491 100644 --- a/test/legacy_test/test_restrict_nonzero.py +++ b/test/legacy_test/test_restrict_nonzero.py @@ -15,13 +15,15 @@ import unittest import numpy as np +from op_test import is_custom_device import paddle from paddle.base import core @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not (core.is_compiled_with_cuda() or is_custom_device()), + "core is not compiled with CUDA", ) class TestRestrictNonzero(unittest.TestCase): def test_restrict_nonzero(self): diff --git 
a/test/legacy_test/test_rmsprop_op.py b/test/legacy_test/test_rmsprop_op.py
index 56f682bffabc50..e814eb112ded27 100644
--- a/test/legacy_test/test_rmsprop_op.py
+++ b/test/legacy_test/test_rmsprop_op.py
@@ -16,7 +16,7 @@
 import numpy as np
 from op import Operator
-from op_test import get_device_place, get_places
+from op_test import get_device_place, get_devices, get_places
 
 import paddle
 from paddle import base
@@ -416,7 +416,7 @@ def _test_rms_op_dygraph_place_amp(self, place, use_amp=False):
         paddle.enable_static()
 
     def test_main(self):
-        for place in get_places(string_format=True):
+        for place in get_devices():
             use_amp_list = [True, False]
             for use_amp in use_amp_list:
                 self._test_rms_op_dygraph_place_amp(place, use_amp)
diff --git a/test/legacy_test/test_rrelu_op.py b/test/legacy_test/test_rrelu_op.py
index 97be548fcdf48f..e00ed4daba380a 100644
--- a/test/legacy_test/test_rrelu_op.py
+++ b/test/legacy_test/test_rrelu_op.py
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from op_test import OpTest, convert_float_to_uint16, get_device_place
 
 import paddle
 import paddle.nn.functional as F
@@ -50,13 +50,7 @@ def setUp(self):
         self.upper_0 = 0.25
         self.upper_1 = 0.33
 
-        self.places = [
-            (
-                base.CUDAPlace(0)
-                if core.is_compiled_with_cuda()
-                else base.CPUPlace()
-            )
-        ]
+        self.places = [get_device_place()]
 
     def check_static_result(self, place):
         with paddle.static.program_guard(
diff --git a/test/legacy_test/test_set_value_op.py b/test/legacy_test/test_set_value_op.py
index 2ff97d7ea7defc..c4ad490c8defb3 100644
--- a/test/legacy_test/test_set_value_op.py
+++ b/test/legacy_test/test_set_value_op.py
@@ -17,7 +17,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, get_places
+from op_test import OpTest, convert_float_to_uint16, get_devices
 
 import paddle
 from paddle.base import core
@@ -1277,7 +1277,7 @@ def _call_setitem_static_api(self, x):
         return x
 
     def test_api(self):
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
 
             static_out = self._run_static()
diff --git a/test/legacy_test/test_sign_op.py b/test/legacy_test/test_sign_op.py
index f664f70a3b9917..be6ef62b1c0da0 100644
--- a/test/legacy_test/test_sign_op.py
+++ b/test/legacy_test/test_sign_op.py
@@ -194,10 +194,7 @@ def run(place):
 
 class TestSignComplexAPI(TestSignAPI):
     def setUp(self):
-        self.place = []
-        self.place.append(base.CPUPlace())
-        if core.is_compiled_with_cuda():
-            self.place.append(base.CUDAPlace(0))
+        self.place = get_places()
 
     def test_dygraph(self):
         with base.dygraph.guard():
diff --git a/test/legacy_test/test_soft_margin_loss.py b/test/legacy_test/test_soft_margin_loss.py
index 2bb726b4bcf71c..2dc2d9f76ed600 100644
--- a/test/legacy_test/test_soft_margin_loss.py
+++ b/test/legacy_test/test_soft_margin_loss.py
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import get_places
+from op_test import get_devices, get_places
 
 import paddle
 
@@ -127,7 +127,7 @@ class TestSoftMarginLoss(unittest.TestCase):
     def test_SoftMarginLoss(self):
         input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
         types = [np.int32, np.int64, np.float32, np.float64]
-        places = get_places(string_format=True)
+        places = get_devices()
         reductions = ['sum', 'mean', 'none']
         for place in places:
             for reduction in reductions:
diff --git a/test/legacy_test/test_sum_op.py b/test/legacy_test/test_sum_op.py
index 6d8b4becc9b48e..0746cc46d022a9 100644
--- a/test/legacy_test/test_sum_op.py
+++ b/test/legacy_test/test_sum_op.py
@@ -889,9 +889,7 @@ class TestSum_BoolToInt64_ZeroSize(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.shape = [3, 0, 2]
-        self.places = [base.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            self.places.append(base.CUDAPlace(0))
+        self.places = get_places()
 
     def check_result(
         self, dygraph_result, expected_result, axis, keepdim, dtype, place
diff --git a/test/legacy_test/test_tensor_type_autocast.py b/test/legacy_test/test_tensor_type_autocast.py
index 865fc590bc159a..ee85c391cd415a 100644
--- a/test/legacy_test/test_tensor_type_autocast.py
+++ b/test/legacy_test/test_tensor_type_autocast.py
@@ -15,6 +15,7 @@
 import unittest
 
 import numpy as np
+from op_test import get_places
 
 import paddle
 
@@ -22,9 +23,7 @@ class TestAutocastBase(unittest.TestCase):
     def setUp(self):
         self.set_api_and_dtypes()
-        self.places = [paddle.CPUPlace()]
-        if paddle.core.is_compiled_with_cuda():
-            self.places.append(paddle.CUDAPlace(0))
+        self.places = get_places()
 
     def set_api_and_dtypes(self):
         pass
diff --git a/test/legacy_test/test_trace_op.py b/test/legacy_test/test_trace_op.py
index a902b346432792..e5a9228219c7d1 100644
--- a/test/legacy_test/test_trace_op.py
+++ b/test/legacy_test/test_trace_op.py
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from op_test import OpTest, convert_float_to_uint16, get_places
 
 import paddle
 from paddle import base, tensor
@@ -202,9 +202,7 @@ def test_case1(self):
 
 class TestTraceAPIZerodimCase(unittest.TestCase):
     def setUp(self):
-        self.places = [paddle.CPUPlace()]
-        if paddle.is_compiled_with_cuda():
-            self.places.append(paddle.CUDAPlace(0))
+        self.places = get_places()
         self.x = np.random.random([5, 0, 0, 0]).astype('float32')
 
     def test_dygraph(self):
diff --git a/test/legacy_test/test_transforms.py b/test/legacy_test/test_transforms.py
index a797c4eb7fe6a3..310df4f116104a 100644
--- a/test/legacy_test/test_transforms.py
+++ b/test/legacy_test/test_transforms.py
@@ -19,7 +19,7 @@
 
 import cv2
 import numpy as np
-from op_test import get_places
+from op_test import get_devices
 from PIL import Image
 
 import paddle
@@ -819,7 +819,7 @@ def test_color_jitter_sub_function(self):
         np_img_gray = (np.random.rand(28, 28, 1) * 255).astype('uint8')
         tensor_img_gray = F.to_tensor(np_img_gray)
 
-        places = get_places(string_format=True)
+        places = get_devices()
 
         def test_adjust_brightness(np_img, tensor_img):
             result_cv2 = np.array(F.adjust_brightness(np_img, 1.2))
@@ -956,7 +956,7 @@ def test_erase(self):
             np.testing.assert_equal(np.array(pil_result), expected)
 
         np_data = np.random.rand(3, 28, 28).astype('float32')
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             tensor_img = paddle.to_tensor(np_data)
             expected_tensor = tensor_img.clone()
diff --git a/test/legacy_test/test_uniform_random_inplace_op.py b/test/legacy_test/test_uniform_random_inplace_op.py
index 7424b5d982d452..5e560acdc7e9e5 100644
--- a/test/legacy_test/test_uniform_random_inplace_op.py
+++ b/test/legacy_test/test_uniform_random_inplace_op.py
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest, convert_uint16_to_float, get_places
+from op_test import OpTest, convert_uint16_to_float, get_devices
 
 import paddle
 from paddle.base import core
@@ -44,7 +44,7 @@ def test_fp64():
             tensor_fp64.uniform_()
             self.assertEqual(tensor_fp64.dtype, paddle.float64)
 
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             test_fp32()
             test_fp64()
@@ -215,7 +215,7 @@ def test_attr_error():
 class TestUniformRandomInplaceOpEmptyTensor(unittest.TestCase):
     def test_uniform_random_inplace_op_empty_tensor(self):
         test_shapes = [(200, 0), (0, 200)]
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             for test_shape in test_shapes:
                 tensor = paddle.empty(shape=test_shape)
@@ -241,7 +241,7 @@ def test_grad():
             uniform_grad = tensor_b.grad.numpy()
             self.assertTrue((uniform_grad == 0).all())
 
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             test_grad()
diff --git a/test/legacy_test/test_uniform_random_op.py b/test/legacy_test/test_uniform_random_op.py
index ce47ce69d32e56..43fe75fed5810d 100644
--- a/test/legacy_test/test_uniform_random_op.py
+++ b/test/legacy_test/test_uniform_random_op.py
@@ -16,7 +16,12 @@
 
 import numpy as np
 from op import Operator
-from op_test import OpTest, convert_uint16_to_float, get_places
+from op_test import (
+    OpTest,
+    convert_uint16_to_float,
+    get_places,
+    is_custom_device,
+)
 
 import paddle
 from paddle import base
@@ -187,7 +192,8 @@ def test_check_api(self):
 
 
 @unittest.skipIf(
-    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
+    not (core.is_compiled_with_cuda() or is_custom_device()),
+    "core is not compiled with CUDA",
 )
 class TestUniformRandomFP16Op(TestUniformRandomOp):
     def init_dtype(self):
diff --git a/test/legacy_test/test_zero_dim_no_backward_api.py b/test/legacy_test/test_zero_dim_no_backward_api.py
index f73d72ad4bcace..55d37af35e823e 100644
--- a/test/legacy_test/test_zero_dim_no_backward_api.py
+++ b/test/legacy_test/test_zero_dim_no_backward_api.py
@@ -21,7 +21,7 @@
 
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import get_places
+from op_test import get_devices
 
 import paddle
 
@@ -182,7 +182,7 @@ def test_one_hot_label(self):
         self.assertEqual(one_hot_label.numpy()[2], 1)
 
     def test_unique_consecutive(self):
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             x = paddle.rand([])
             y, inverse, counts = paddle.unique_consecutive(
@@ -199,7 +199,7 @@ def test_unique_consecutive(self):
             self.assertEqual(counts.shape, [1])
 
     def test_unique(self):
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
             x = paddle.rand([])
             y, index, inverse, counts = paddle.unique(
diff --git a/test/legacy_test/test_zero_dim_sundry_dygraph_api.py b/test/legacy_test/test_zero_dim_sundry_dygraph_api.py
index c0e238bf3fb5f8..bc958ca42bf242 100644
--- a/test/legacy_test/test_zero_dim_sundry_dygraph_api.py
+++ b/test/legacy_test/test_zero_dim_sundry_dygraph_api.py
@@ -21,7 +21,7 @@
 import unittest
 
 import numpy as np
-from op_test import get_device_place, get_places
+from op_test import get_device_place, get_devices
 
 import paddle
 import paddle.nn.functional as F
@@ -1691,7 +1691,7 @@ def test_lerp(self):
         self.assertEqual(y2.grad.shape, [])
 
     def test_repeat_interleave(self):
-        for place in get_places(string_format=True):
+        for place in get_devices():
             paddle.set_device(place)
 
             x = paddle.randn(())