Fix Windows CI bugs: skip the unit tests related to devices (#55889)
xuxinyi389 authored Aug 2, 2023
1 parent 5d26d79 commit db700d1
Showing 6 changed files with 58 additions and 0 deletions.
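Every file in this commit gets the same guard: the device-dependent test classes are decorated with unittest.skipIf so they only run when Paddle is compiled with CUDA and the GPU's compute capability is at least 7.0. As a reference, here is a minimal, self-contained sketch of that pattern, assuming a Paddle build; the helper _has_capable_gpu and the test class are illustrative and not part of the commit:

import unittest

import paddle
from paddle.fluid import core


def _has_capable_gpu():
    # True only when Paddle is built with CUDA and the GPU reports SM 7.0 or newer.
    if not core.is_compiled_with_cuda():
        return False
    # get_device_capability() returns (major, minor), e.g. (7, 5) for a Tesla T4.
    return paddle.device.cuda.get_device_capability()[0] >= 7


@unittest.skipIf(
    not _has_capable_gpu(),
    "run test when gpu's compute capability is at least 7.0.",
)
class TestNeedsCapableGpu(unittest.TestCase):
    def test_fp16_tensor(self):
        # Only reached on machines where float16 kernels are expected to work.
        x = paddle.ones([4, 4], dtype="float16")
        self.assertEqual(x.dtype, paddle.float16)


if __name__ == "__main__":
    unittest.main()

On a machine that fails either check (for example the Windows CI runners this commit targets), unittest reports the class as skipped instead of failing.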
21 changes: 21 additions & 0 deletions test/amp/test_amp_api.py
@@ -20,9 +20,15 @@
import paddle
import paddle.nn.functional as F
from paddle import nn
from paddle.fluid import core
from paddle.static import amp


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestAutoCast(AmpTestBase):
def setUp(self):
self._conv = paddle.nn.Conv2D(
@@ -56,6 +62,11 @@ def forward(self, x):
return out3


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestStaticDecorate(AmpTestBase):
def check_results(
self, use_amp, dtype, level, use_promote, expected_op_calls
@@ -127,6 +138,11 @@ def test_static_amp_OD(self):
paddle.disable_static()


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestGradScaler(AmpTestBase):
def test_amp_grad_scaler(self):
model = paddle.nn.Conv2D(3, 2, 3)
@@ -154,6 +170,11 @@ def test_amp_grad_scaler(self):
self.assertTrue('check_finite_and_unscale' not in op_list)


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestFp16Guard(AmpTestBase):
def test_fp16_gurad(self):
paddle.enable_static()
6 changes: 6 additions & 0 deletions test/amp/test_amp_decorate.py
@@ -16,6 +16,7 @@

import paddle
import paddle.nn.functional as F
from paddle.fluid import core


class ConvBNLayer(paddle.nn.Layer):
@@ -77,6 +78,11 @@ def forward(self, inputs):
return x


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestAMPDecorate(unittest.TestCase):
def check_results(self, fp32_layers=[], fp16_layers=[]):
for idx in range(len(fp32_layers)):
5 changes: 5 additions & 0 deletions test/amp/test_amp_list.py
@@ -19,6 +19,11 @@
from paddle.static.amp import AutoMixedPrecisionLists, fp16_lists


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestAMPList(unittest.TestCase):
def setUp(self):
self.default_black_list = [
5 changes: 5 additions & 0 deletions test/amp/test_amp_master_grad.py
@@ -35,6 +35,11 @@ def forward(self, x):
or not core.is_float16_supported(core.CUDAPlace(0)),
"core is not complied with CUDA and not support the float16",
)
@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestMasterGrad(unittest.TestCase):
def check_results(
self, fp32_grads, op_list, total_steps, accumulate_batchs_num
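Note that in test/amp/test_amp_master_grad.py the new decorator is stacked on top of the file's existing skipIf (its tail is visible in the context lines above); with stacked unittest.skipIf decorators the class is skipped as soon as either condition is true, so the float16-support check and the compute-capability check combine as an OR.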
16 changes: 16 additions & 0 deletions test/amp/test_amp_promote.py
@@ -18,9 +18,15 @@
from amp_base_models import AmpTestBase, build_conv_model

import paddle
from paddle.fluid import core
from paddle.static import amp


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestStaticAmpPromoteStats(AmpTestBase):
def check_promote_results(
self, use_amp, dtype, level, use_promote, expected_op_calls, debug_info
@@ -103,6 +109,11 @@ def test_static_amp_o2(self):
)


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestEagerAmpPromoteStats(AmpTestBase):
def check_promote_results(
self, dtype, level, use_promote, expected_op_calls, debug_info
@@ -172,6 +183,11 @@ def test_o2_promote_off(self):
)


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestEagerAmpPromoteSimple(AmpTestBase):
def setUp(self):
self._conv = paddle.nn.Conv2D(
5 changes: 5 additions & 0 deletions test/legacy_test/test_optimizer.py
@@ -1254,6 +1254,11 @@ def test_float32(self):
self.check_with_dtype('float32')


@unittest.skipIf(
not core.is_compiled_with_cuda()
or paddle.device.cuda.get_device_capability()[0] < 7.0,
"run test when gpu's compute capability is at least 7.0.",
)
class TestMasterWeightSaveForFP16(unittest.TestCase):
'''
For Amp-O2, some optimizer(Momentum, Adam ...) will create master weights for parameters to improve the accuracy.
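Because the same three-line condition is repeated in all six files, a natural follow-up (offered here only as a hedged sketch, not as part of commit db700d1) would be to define the decorator once in a shared test helper and reuse it:

# Hypothetical shared helper, e.g. placed in a common test-utility module; not in this commit.
import unittest

import paddle
from paddle.fluid import core

# Evaluated once at import time, exactly like the inline decorators in the diff.
skip_unless_capable_gpu = unittest.skipIf(
    not core.is_compiled_with_cuda()
    or paddle.device.cuda.get_device_capability()[0] < 7.0,
    "run test when gpu's compute capability is at least 7.0.",
)

# Usage, equivalent to the decorator repeated throughout the diff:
#
# @skip_unless_capable_gpu
# class TestAutoCast(AmpTestBase):
#     ...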
