Commit
Update half precision test cases to support Pytorch v1.12 (#165)
* - Explicitly set device=cpu in the test cases to prevent mixed-device errors
    when running on a local machine where CUDA is available

  - Move the half-precision test case into gpu_test.py, since PyTorch v1.12
    does not support half precision on CPU

* Added PyTorch v1.12 to the test workflow matrix
mert-kurttutan authored Aug 31, 2022
1 parent c3216f6 commit c77c7e7
Showing 9 changed files with 52 additions and 38 deletions.
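For context on the half-precision change described in the commit message, the short sketch below is not part of this commit; it is written against a typical CPU-only PyTorch v1.12 install, and the tensor names and quoted error text are illustrative assumptions rather than output from this repository.

import torch

# Illustrative sketch, not from this commit: float16 matmul/addmm kernels are
# not implemented for CPU tensors in PyTorch v1.12, so a half-precision Linear
# layer can only be exercised reliably on a CUDA device.
model = torch.nn.Linear(2, 5).half()          # float16 weights, on CPU
x = torch.randn(10, 2, dtype=torch.float16)   # float16 input, on CPU

try:
    model(x)
except RuntimeError as err:
    # Typically something like: "addmm_impl_cpu_" not implemented for 'Half'
    print(f"half precision on CPU failed: {err}")

if torch.cuda.is_available():
    # The same forward pass succeeds once both module and input live on CUDA.
    out = model.to("cuda")(x.to("cuda"))
    print(out.shape)  # torch.Size([10, 5])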
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -12,7 +12,7 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ["3.7", "3.8", "3.9"]
-        pytorch-version: ["1.4.0", "1.5.1", "1.6.0", "1.7.1", "1.8", "1.9", "1.10", "1.11"]
+        pytorch-version: ["1.4.0", "1.5.1", "1.6.0", "1.7.1", "1.8", "1.9", "1.10", "1.11", "1.12"]
         exclude:
           - python-version: 3.7
             pytorch-version: 1.11
26 changes: 0 additions & 26 deletions tests/exceptions_test.py
@@ -47,32 +47,6 @@ def test_input_size_possible_exceptions() -> None:
         summary(test, input_size="hello")
 
 
-def test_input_size_half_precision() -> None:
-    test = torch.nn.Linear(2, 5).half()
-    with pytest.warns(
-        UserWarning,
-        match=(
-            "Half precision is not supported with input_size parameter, and "
-            "may output incorrect results. Try passing input_data directly."
-        ),
-    ):
-        summary(test, dtypes=[torch.float16], input_size=(10, 2), device="cpu")
-
-    with pytest.warns(
-        UserWarning,
-        match=(
-            "Half precision is not supported on cpu. Set the `device` field or "
-            "pass `input_data` using the correct device."
-        ),
-    ):
-        summary(
-            test,
-            dtypes=[torch.float16],
-            input_data=torch.randn((10, 2), dtype=torch.float16, device="cpu"),
-            device="cpu",
-        )
-
-
 def test_exception() -> None:
     input_size = (1, 1, 28, 28)
     summary(EdgeCaseModel(throw_error=False), input_size=input_size)
14 changes: 14 additions & 0 deletions tests/gpu_test.py
@@ -26,3 +26,17 @@ def test_single_layer_network_on_gpu_device() -> None:

         assert results.total_params == 15
         assert results.trainable_params == 15
+
+    @staticmethod
+    def test_input_size_half_precision() -> None:
+        # Run this test case on GPU, since half precision
+        # is not supported on CPU in PyTorch v1.12.
+        test = torch.nn.Linear(2, 5).half().to(torch.device("cuda"))
+        with pytest.warns(
+            UserWarning,
+            match=(
+                "Half precision is not supported with input_size parameter, and "
+                "may output incorrect results. Try passing input_data directly."
+            ),
+        ):
+            summary(test, dtypes=[torch.float16], input_size=(10, 2), device="cuda")
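As an aside (not part of this diff), GPU-only tests like the one above are often guarded with a skip marker so that CPU-only CI runners skip them cleanly rather than fail; the marker and test below are a hypothetical sketch, not code from torchinfo.

import pytest
import torch

# Hypothetical guard, not from this repository: skip when no CUDA device exists.
needs_cuda = pytest.mark.skipif(
    not torch.cuda.is_available(), reason="requires a CUDA-capable device"
)


@needs_cuda
def test_linear_half_precision_forward() -> None:
    model = torch.nn.Linear(2, 5).half().to("cuda")
    x = torch.randn(10, 2, dtype=torch.float16, device="cuda")
    assert model(x).shape == (10, 5)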
4 changes: 2 additions & 2 deletions tests/test_output/linear_model_half.out
@@ -1,7 +1,7 @@
 ==========================================================================================
 Layer (type:depth-idx) Output Shape Param #
 ==========================================================================================
-LinearModel -- --
+LinearModel [64, 1] --
 ├─Sequential: 1-1 [64, 1] --
 │ └─Linear: 2-1 [64, 128] 16,512
 │ └─ReLU: 2-2 [64, 128] --
@@ -22,7 +22,7 @@ Estimated Total Size (MB): 0.30
 ==========================================================================================
 Layer (type:depth-idx) Output Shape Param #
 ==========================================================================================
-LinearModel -- --
+LinearModel [64, 1] --
 ├─Sequential: 1-1 [64, 1] --
 │ └─Linear: 2-1 [64, 128] 16,512
 │ └─ReLU: 2-2 [64, 128] --
6 changes: 3 additions & 3 deletions tests/test_output/lstm_half.out
@@ -1,10 +1,10 @@
 ========================================================================================================================
 Layer (type (var_name)) Kernel Shape Output Shape Param # Mult-Adds
 ========================================================================================================================
-LSTMNet -- -- -- --
-├─Embedding (embedding) [300, 20] [1, 100, 300] 6,000 6,000
+LSTMNet (LSTMNet) -- [100, 20] -- --
+├─Embedding (embedding) -- [1, 100, 300] 6,000 6,000
 ├─LSTM (encoder) -- [1, 100, 512] 3,768,320 376,832,000
-├─Linear (decoder) [512, 20] [1, 100, 20] 10,260 10,260
+├─Linear (decoder) -- [1, 100, 20] 10,260 10,260
 ========================================================================================================================
 Total params: 3,784,580
 Trainable params: 3,784,580
2 changes: 1 addition & 1 deletion tests/test_output/single_input_half.out
@@ -1,7 +1,7 @@
 ==========================================================================================
 Layer (type:depth-idx) Output Shape Param #
 ==========================================================================================
-SingleInputNet -- --
+SingleInputNet [2, 10] --
 ├─Conv2d: 1-1 [2, 10, 24, 24] 260
 ├─Conv2d: 1-2 [2, 20, 8, 8] 5,020
 ├─Dropout2d: 1-3 [2, 20, 8, 8] --
15 changes: 15 additions & 0 deletions tests/test_output/single_layer_network_on_gpu_device.out
@@ -0,0 +1,15 @@
+==========================================================================================
+Layer (type:depth-idx) Output Shape Param #
+==========================================================================================
+Linear [1, 5] 15
+==========================================================================================
+Total params: 15
+Trainable params: 15
+Non-trainable params: 0
+Total mult-adds (M): 0.00
+==========================================================================================
+Input size (MB): 0.00
+Forward/backward pass size (MB): 0.00
+Params size (MB): 0.00
+Estimated Total Size (MB): 0.00
+==========================================================================================
16 changes: 12 additions & 4 deletions tests/torchinfo_test.py
@@ -291,8 +291,13 @@ def test_device() -> None:


 def test_pack_padded() -> None:
-    x = torch.ones([20, 128]).long()
+    # use explicit device=cpu
+    # see: https://github.com/pytorch/pytorch/issues/43227
+    device = torch.device("cpu")
+
+    x = torch.ones([20, 128]).long().to(device)
     # fmt: off
+
     y = torch.Tensor([
         13, 12, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10,
         10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9,
@@ -301,10 +306,10 @@ def test_pack_padded() -> None:
         6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
         6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
         5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
-    ]).long()
+    ]).long().to(device)
     # fmt: on
 
-    summary(PackPaddedLSTM(), input_data=x, lengths=y)
+    summary(PackPaddedLSTM(), input_data=x, lengths=y, device=device)
 
 
 def test_module_dict() -> None:
@@ -370,7 +375,10 @@ def test_namedtuple() -> None:
     model = NamedTuple()
     input_size = [(2, 1, 28, 28), (2, 1, 28, 28)]
     named_tuple = model.Point(*input_size)
-    summary(model, input_size=input_size, z=named_tuple)
+
+    # explicitly use cpu to prevent mixed device
+    # when cuda is available
+    summary(model, input_size=input_size, z=named_tuple, device=torch.device("cpu"))
 
 
 def test_return_dict() -> None:
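A brief aside on the PyTorch issue linked in the pack_padded test above (not part of the diff): pack_padded_sequence expects its lengths argument to be a 1D int64 tensor on the CPU, so pinning the data and the summary call to an explicit device avoids mixed-device surprises. The shapes and names below are illustrative assumptions.

import torch
from torch.nn.utils.rnn import pack_padded_sequence

# Illustrative only: `lengths` must live on the CPU even when the padded data
# is on another device (see pytorch/pytorch#43227).
x = torch.ones(20, 128, 300)                          # (seq_len, batch, features), padded
lengths = torch.full((128,), 20, dtype=torch.int64)   # one length per sequence, on CPU

packed = pack_padded_sequence(x, lengths, enforce_sorted=False)
print(packed.data.shape)                              # torch.Size([2560, 300])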
5 changes: 4 additions & 1 deletion tests/torchinfo_xl_test.py
@@ -33,8 +33,11 @@ def test_frozen_layers() -> None:


 def test_eval_order_doesnt_matter() -> None:
+    # prevent mixed device if cuda is available
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
     input_size = (1, 3, 224, 224)
-    input_tensor = torch.ones(input_size)
+    input_tensor = torch.ones(input_size).to(device)
 
     model1 = torchvision.models.resnet18(pretrained=True)
     model1.eval()
