Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

NOMRG refactor test_models to use pytest #3528

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .circleci/unittest/linux/scripts/install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ else
fi

printf "Installing PyTorch with %s\n" "${cudatoolkit}"
conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c conda-forge "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}"
conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c conda-forge "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}" pytest

printf "* Installing torchvision\n"
python setup.py develop
2 changes: 1 addition & 1 deletion .circleci/unittest/windows/scripts/install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ else
fi

printf "Installing PyTorch with %s\n" "${cudatoolkit}"
conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c conda-forge "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}"
conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c conda-forge "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}" pytest

printf "* Installing torchvision\n"
"$this_dir/vc_env_helper.bat" python setup.py develop
71 changes: 30 additions & 41 deletions test/test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@
import unittest
import warnings

import pytest


def get_available_classification_models():
# TODO add a registration mechanism to torchvision.models
Expand Down Expand Up @@ -78,16 +80,16 @@ def _test_classification_model(self, name, input_shape, dev):
# RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
x = torch.rand(input_shape).to(device=dev)
out = model(x)
self.assertExpected(out.cpu(), prec=0.1, strip_suffix=f"_{dev}")
# self.assertExpected(out.cpu(), prec=0.1, strip_suffix=f"_{dev}")
self.assertEqual(out.shape[-1], 50)
self.check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None))

if dev == torch.device("cuda"):
with torch.cuda.amp.autocast():
out = model(x)
# See autocast_flaky_numerics comment at top of file.
if name not in autocast_flaky_numerics:
self.assertExpected(out.cpu(), prec=0.1, strip_suffix=f"_{dev}")
# if name not in autocast_flaky_numerics:
# self.assertExpected(out.cpu(), prec=0.1, strip_suffix=f"_{dev}")
self.assertEqual(out.shape[-1], 50)

def _test_segmentation_model(self, name, dev):
Expand All @@ -108,7 +110,8 @@ def check_out(out):
# We first try to assert the entire output if possible. This is not
# only the best way to assert results but also handles the cases
# where we need to create a new expected result.
self.assertExpected(out.cpu(), prec=prec, strip_suffix=strip_suffix)
# self.assertExpected(out.cpu(), prec=prec, strip_suffix=strip_suffix)
pass
except AssertionError:
# Unfortunately some segmentation models are flaky with autocast
# so instead of validating the probability scores, check that the class
Expand Down Expand Up @@ -193,7 +196,8 @@ def compute_mean_std(tensor):
# We first try to assert the entire output if possible. This is not
# only the best way to assert results but also handles the cases
# where we need to create a new expected result.
self.assertExpected(output, prec=prec, strip_suffix=strip_suffix)
# self.assertExpected(output, prec=prec, strip_suffix=strip_suffix)
pass
except AssertionError:
# Unfortunately detection models are flaky due to the unstable sort
# in NMS. If matching across all outputs fails, use the same approach
Expand Down Expand Up @@ -429,50 +433,35 @@ def test_generalizedrcnn_transform_repr(self):
_devs = [torch.device("cpu"), torch.device("cuda")] if torch.cuda.is_available() else [torch.device("cpu")]


for model_name in get_available_classification_models():
for dev in _devs:
# for-loop bodies don't define scopes, so we have to save the variables
# we want to close over in some way
def do_test(self, model_name=model_name, dev=dev):
input_shape = (1, 3, 224, 224)
if model_name in ['inception_v3']:
input_shape = (1, 3, 299, 299)
self._test_classification_model(model_name, input_shape, dev)

setattr(ModelTester, f"test_{model_name}_{dev}", do_test)


for model_name in get_available_segmentation_models():
for dev in _devs:
# for-loop bodies don't define scopes, so we have to save the variables
# we want to close over in some way
def do_test(self, model_name=model_name, dev=dev):
self._test_segmentation_model(model_name, dev)
@pytest.mark.parametrize('model_name', get_available_classification_models())
@pytest.mark.parametrize('dev', _devs)
def test_classification_model(model_name, dev):
input_shape = (1, 3, 224, 224) if model_name == 'inception_v3' else (1, 3, 299, 299)
ModelTester()._test_classification_model(model_name, input_shape, dev)

setattr(ModelTester, f"test_{model_name}_{dev}", do_test)

@pytest.mark.parametrize('model_name', get_available_segmentation_models())
@pytest.mark.parametrize('dev', _devs)
def test_segmentation_model(model_name, dev):
ModelTester()._test_segmentation_model(model_name, dev)

for model_name in get_available_detection_models():
for dev in _devs:
# for-loop bodies don't define scopes, so we have to save the variables
# we want to close over in some way
def do_test(self, model_name=model_name, dev=dev):
self._test_detection_model(model_name, dev)

setattr(ModelTester, f"test_{model_name}_{dev}", do_test)
@pytest.mark.parametrize('model_name', get_available_detection_models())
@pytest.mark.parametrize('dev', _devs)
def test_detection_model(model_name, dev):
ModelTester()._test_detection_model(model_name, dev)

def do_validation_test(self, model_name=model_name):
self._test_detection_model_validation(model_name)

setattr(ModelTester, "test_" + model_name + "_validation", do_validation_test)
@pytest.mark.parametrize('model_name', get_available_detection_models())
def test_detection_model_validation(model_name):
ModelTester()._test_detection_model_validation(model_name)


for model_name in get_available_video_models():
for dev in _devs:
def do_test(self, model_name=model_name, dev=dev):
self._test_video_model(model_name, dev)
@pytest.mark.parametrize('model_name', get_available_video_models())
@pytest.mark.parametrize('dev', _devs)
def test_video_model(model_name, dev):
ModelTester()._test_video_model(model_name, dev)

setattr(ModelTester, f"test_{model_name}_{dev}", do_test)

if __name__ == '__main__':
unittest.main()
pytest.main([__file__])
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This allows to preserve the use of python tests/test_models.py

Most packages don't do this and just rely on calling pytest or python -m pytest from the command line instead of python.