Bug/sg 1247 reoarganize tests #1789

Merged
16 commits merged on Jan 26, 2024
18 changes: 14 additions & 4 deletions .circleci/config.yml
@@ -142,6 +142,7 @@ commands:

jobs:
build:
parallelism: 16 # Adjust based on your needs
environment:
CIRCLE_COMPARE_URL: << pipeline.project.git_url >>/compare/<< pipeline.git.base_revision >>..<<pipeline.git.revision>>
parameters:
@@ -210,14 +211,23 @@ jobs:
. venv/bin/activate
python3 -m pip install pytorch-quantization==2.1.2 --extra-index-url https://pypi.ngc.nvidia.com
python3 -m pip install onnx_graphsurgeon==0.3.27 --extra-index-url https://pypi.ngc.nvidia.com

- run:
name: run tests with coverage
name: run tests with coverage in parallel
no_output_timeout: 30m
command: |
. venv/bin/activate
coverage run --source=super_gradients -m unittest tests/deci_core_unit_test_suite_runner.py
coverage report
coverage html # open htmlcov/index.html in a browser
# Split test files across parallel containers
TEST_FILES=$(circleci tests glob "tests/unit_tests/*test*.py" | circleci tests split --split-by=timings)
# Run tests with coverage for the assigned subset of files
echo "Running tests on the following files: $TEST_FILES"
for file in $TEST_FILES; do
echo "Running $file"
coverage run --source=super_gradients -m unittest $file
done
# If needed, each container can generate a partial coverage report
coverage report -m


- store_artifacts:
path: htmlcov
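
A note on the coverage data from the new loop above (not part of this diff): each coverage run invocation starts a fresh data file unless --append or parallel mode is used, and each CircleCI container only measures its assigned subset of files, so any per-container report is partial by design. A minimal sketch of a follow-up merge step, assuming each container persists its data file into a shared coverage-data/ directory (that path and the workspace wiring are hypothetical, not part of this PR):

    # Hypothetical merge step, run once after all parallel containers finish.
    # Assumes each container saved its data as coverage-data/.coverage.<node-index>.
    . venv/bin/activate
    coverage combine coverage-data/   # merge the per-container data files into .coverage
    coverage report -m                # single combined, line-annotated report
    coverage html                     # open htmlcov/index.html in a browser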
2 changes: 0 additions & 2 deletions tests/unit_tests/__init__.py
@@ -9,7 +9,6 @@
from tests.unit_tests.train_after_test_test import CallTrainAfterTestTest
from tests.unit_tests.zero_weight_decay_on_bias_bn_test import ZeroWdForBnBiasTest
from tests.unit_tests.save_ckpt_test import SaveCkptListUnitTest
from tests.unit_tests.all_architectures_test import AllArchitecturesTest
from tests.unit_tests.average_meter_test import TestAverageMeter
from tests.unit_tests.repvgg_unit_test import TestRepVgg
from tests.unit_tests.test_without_train_test import TestWithoutTrainTest
@@ -35,7 +34,6 @@
"CrashTipTest",
"ZeroWdForBnBiasTest",
"SaveCkptListUnitTest",
"AllArchitecturesTest",
"TestAverageMeter",
"TestRepVgg",
"TestWithoutTrainTest",
35 changes: 0 additions & 35 deletions tests/unit_tests/all_architectures_test.py

This file was deleted.
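
Deleting a suite module also means dropping it from the tests/unit_tests/__init__.py registry shown above; otherwise importing the package, and with it every remaining unit test, fails on the missing module. A hypothetical local sanity check (not part of this diff) to confirm no stale references survive:

    # Hypothetical check: search the test tree for leftover references to the
    # deleted module or its test class.
    grep -rnE "all_architectures_test|AllArchitecturesTest" tests/ \
        && echo "stale references found" || echo "clean"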

6 changes: 3 additions & 3 deletions tests/unit_tests/loss_loggings_test.py
@@ -27,7 +27,7 @@ def __init__(self):

class LossLoggingsTest(unittest.TestCase):
def test_single_item_logging(self):
trainer = Trainer("test_single_item_logging", model_checkpoints_location="local")
trainer = Trainer("test_single_item_logging")
dataloader = classification_test_dataloader(batch_size=10)

model = models.get(Models.RESNET18, arch_params={"num_classes": 5})
@@ -51,7 +51,7 @@ def test_single_item_logging(self):
self.assertListEqual(trainer.loss_logging_items_names, ["CrossEntropyLoss"])

def test_multiple_unnamed_components_loss_logging(self):
trainer = Trainer("test_multiple_unnamed_components_loss_logging", model_checkpoints_location="local")
trainer = Trainer("test_multiple_unnamed_components_loss_logging")
dataloader = classification_test_dataloader(batch_size=10)

model = models.get(Models.RESNET18, arch_params={"num_classes": 5})
@@ -75,7 +75,7 @@ def test_multiple_unnamed_components_loss_logging(self):
self.assertListEqual(trainer.loss_logging_items_names, ["CriterionWithUnnamedComponents/loss_0", "CriterionWithUnnamedComponents/loss_1"])

def test_multiple_named_components_loss_logging(self):
trainer = Trainer("test_multiple_named_components_loss_logging", model_checkpoints_location="local")
trainer = Trainer("test_multiple_named_components_loss_logging")
dataloader = classification_test_dataloader(batch_size=10)

model = models.get(Models.RESNET18, arch_params={"num_classes": 5})
21 changes: 4 additions & 17 deletions tests/unit_tests/shelfnet_unit_test.py
@@ -1,7 +1,7 @@
import torch
import unittest

from super_gradients.training.models import ShelfNet18_LW, ShelfNet34_LW, ShelfNet50, ShelfNet101
from super_gradients.training.models import ShelfNet18_LW, ShelfNet34_LW


class TestShelfNet(unittest.TestCase):
@@ -11,27 +11,14 @@ def test_shelfnet_creation(self):
:return:
"""
dummy_input = torch.randn(1, 3, 512, 512)

shelfnet18_model = ShelfNet18_LW(num_classes=21)
# VALIDATES INNER CONV LIST WAS INITIALIZED CORRECTLY
self.assertTrue(shelfnet18_model.conv_out_list)

shelfnet34_model = ShelfNet34_LW(num_classes=21)
# VALIDATES INNER CONV LIST WAS INITIALIZED CORRECTLY
self.assertTrue(shelfnet34_model.conv_out_list)

shelfnet50_model = ShelfNet50(num_classes=21)
# VALIDATES INNER CONV LIST WAS INITIALIZED CORRECTLY
self.assertTrue(shelfnet50_model.conv_out_list)

shelfnet101_model = ShelfNet101(num_classes=21)
# VALIDATES INNER CONV LIST WAS INITIALIZED CORRECTLY
self.assertTrue(shelfnet101_model.conv_out_list)

for model in [shelfnet18_model, shelfnet34_model, shelfnet50_model, shelfnet101_model]:
# FIXME: FIX MODEL FORWARD TESTING FOR SHELFNET50 and 101
for model in [shelfnet18_model, shelfnet34_model]:
model.eval()
with torch.no_grad():
output = model(dummy_input, aux=False)
output = model(dummy_input)
self.assertIsNotNone(output)


63 changes: 0 additions & 63 deletions tests/unit_tests/train_logging_test.py

This file was deleted.