Commit 65b86c5
set max torch for bnb
awaelchli committed Jun 30, 2024
1 parent 7524247 commit 65b86c5
Showing 1 changed file with 2 additions and 2 deletions.
tests/tests_fabric/plugins/precision/test_bitsandbytes.py (2 additions, 2 deletions)
@@ -93,7 +93,7 @@ def __init__(self):
     precision.convert_module(model)
 
 
-@RunIf(min_cuda_gpus=1)
+@RunIf(min_cuda_gpus=1, max_torch="2.4")
 @pytest.mark.skipif(not _BITSANDBYTES_AVAILABLE, reason="bitsandbytes unavailable")
 @pytest.mark.parametrize(
     ("args", "expected"),
@@ -232,7 +232,7 @@ def __init__(self):
     assert model.l.weight.dtype == expected
 
 
-@RunIf(min_cuda_gpus=1, min_torch="2.1")
+@RunIf(min_cuda_gpus=1, min_torch="2.1", max_torch="2.4")
 @pytest.mark.skipif(not _BITSANDBYTES_AVAILABLE, reason="bitsandbytes unavailable")
 def test_load_quantized_checkpoint(tmp_path):
     """Test that a checkpoint saved from a quantized model can be loaded back into a quantized model."""
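For background: RunIf is the conditional-skip marker used across the Lightning test suite, and this commit caps the two GPU bitsandbytes tests at max_torch="2.4". As a minimal sketch, a RunIf-style version gate could map onto a plain pytest.mark.skipif as below, assuming max_torch is an exclusive upper bound (skip when torch >= 2.4). The run_if helper here is hypothetical, not Lightning's actual implementation:

    # Hypothetical RunIf-style version gate, assuming max_torch is an
    # exclusive upper bound (skip the test when torch >= max_torch).
    from typing import Optional

    import pytest
    import torch
    from packaging.version import Version


    def run_if(min_torch: Optional[str] = None, max_torch: Optional[str] = None):
        """Return a skipif marker gating a test on the installed torch version."""
        current = Version(torch.__version__.split("+")[0])  # strip local tag, e.g. "+cu121"
        too_old = min_torch is not None and current < Version(min_torch)
        too_new = max_torch is not None and current >= Version(max_torch)
        return pytest.mark.skipif(
            too_old or too_new,
            reason=f"requires {min_torch or '0'} <= torch < {max_torch or 'any'}",
        )


    # Usage mirroring the change above: run only on torch >= 2.1 and < 2.4.
    @run_if(min_torch="2.1", max_torch="2.4")
    def test_load_quantized_checkpoint_example():
        ...

Under that assumed semantics, both tests keep running on supported torch versions and are skipped on torch 2.4 and newer, which matches the intent of the commit message.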
