@@ -913,7 +913,7 @@ def test_weight_only_quant_force_mixed_mm(self, device, dtype):
913913 if dtype == torch .bfloat16 and torch .cuda .get_device_capability () < (8 , 0 ):
914914 self .skipTest ("test requires SM capability of at least (8, 0)." )
915915 from torch ._inductor import config
916-    mixed_mm_key, mixed_mm_val = ("mixed_mm_choice", "triton") if TORCH_VERSION_AT_LEAST_2_4 else ("force_mixed_mm", True)
916+    mixed_mm_key, mixed_mm_val = ("mixed_mm_choice", "triton") if TORCH_VERSION_AT_LEAST_2_5 else ("force_mixed_mm", True)
917917
918918 with config .patch ({
919919 "epilogue_fusion" : True ,
@@ -943,7 +943,7 @@ def test_weight_only_quant_use_mixed_mm(self, device, dtype):
943943 self .skipTest ("test requires SM capability of at least (8, 0)." )
944944 torch .manual_seed (0 )
945945 from torch ._inductor import config
946-    mixed_mm_key, mixed_mm_val = ("mixed_mm_choice", "triton") if TORCH_VERSION_AT_LEAST_2_4 else ("force_mixed_mm", True)
946+    mixed_mm_key, mixed_mm_val = ("mixed_mm_choice", "triton") if TORCH_VERSION_AT_LEAST_2_5 else ("force_mixed_mm", True)
947947
948948 with config .patch ({
949949 "epilogue_fusion" : False ,
@@ -1222,7 +1222,7 @@ def test_autoquant_one_input(self, device, dtype, m, k, n):
12221222 (1 , 32 , 128 , 128 ),
12231223 (32 , 32 , 128 , 128 ),
12241224 ]))
1225-    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_4, "autoquant requires 2.4+.")
1225+    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_5, "autoquant requires 2.5+.")
12261226 def test_autoquant_compile (self , device , dtype , m1 , m2 , k , n ):
12271227 undo_recommended_configs ()
12281228 if device != "cuda" or not torch .cuda .is_available ():
@@ -1254,7 +1254,7 @@ def test_autoquant_compile(self, device, dtype, m1, m2, k, n):
12541254 self .assertTrue (sqnr >= 30 )
12551255
12561256 @parameterized .expand (COMMON_DEVICE_DTYPE )
1257-    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_4, "autoquant requires 2.4+.")
1257+    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_5, "autoquant requires 2.5+.")
12581258 def test_autoquant_manual (self , device , dtype ):
12591259 undo_recommended_configs ()
12601260 if device != "cuda" or not torch .cuda .is_available ():
@@ -1295,7 +1295,7 @@ def test_autoquant_manual(self, device, dtype):
12951295 (1 , 32 , 128 , 128 ),
12961296 (32 , 32 , 128 , 128 ),
12971297 ]))
1298-    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_4, "autoquant requires 2.4+.")
1298+    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_5, "autoquant requires 2.5+.")
12991299 def test_autoquant_kwargs (self , device , dtype , m1 , m2 , k , n ):
13001300 undo_recommended_configs ()
13011301 if device != "cuda" or not torch .cuda .is_available ():
@@ -1478,7 +1478,7 @@ def forward(self, x):
14781478
14791479class TestUtils (unittest .TestCase ):
14801480 @parameterized .expand (COMMON_DEVICE_DTYPE )
1481-    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_4, "autoquant requires 2.4+.")
1481+    @unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_5, "autoquant requires 2.5+.")
14821482 def test_get_model_size_autoquant (self , device , dtype ):
14831483 if device != "cuda" and dtype != torch .bfloat16 :
14841484 self .skipTest (f"autoquant currently does not support { device } " )
0 commit comments