@@ -120,9 +120,7 @@ def test_reproducible_batch_sampler():
     resumed_seen_batches = []
     for b in dataloader_:
         resumed_seen_batches.append(b)
-    # temporarily disable this while running on torch nightly
-    if "dev" not in torch.__version__:
-        assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[resume_epoch], resumed_seen_batches)])
+    assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[resume_epoch], resumed_seen_batches)])
 
 
 def _test_keep_random_state(with_numpy):
@@ -333,11 +331,9 @@ def _(engine):
 
 @pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX")
 def test_resume_random_dataloader_from_epoch():
-    # temporarily disable this while running on torch nightly
-    if "dev" not in torch.__version__:
-        _test_resume_random_dataloader_from_epoch("cpu", setup_sampler)
-        _test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="weighted")
-        _test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="distributed")
+    _test_resume_random_dataloader_from_epoch("cpu", setup_sampler)
+    _test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="weighted")
+    _test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="distributed")
 
 
 class AugmentedData:
@@ -445,11 +441,9 @@ def _(engine):
 
 @pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX")
 def test_resume_random_dataloader_from_iter():
-    # temporarily disable this while running on torch nightly
-    if "dev" not in torch.__version__:
-        _test_resume_random_dataloader_from_iter("cpu", setup_sampler)
-        _test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="weighted")
-        _test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="distributed")
+    _test_resume_random_dataloader_from_iter("cpu", setup_sampler)
+    _test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="weighted")
+    _test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="distributed")
 
 
 def _test_resume_random_data_iterator_from_epoch(device):
@@ -808,12 +802,10 @@ def write_data_grads_weights(e):
 def test_gradients_on_resume_cpu(dirname):
     with pytest.raises(AssertionError):
         _test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_iter=25)
-    # temporarily disable this while running on torch nightly
-    if "dev" not in torch.__version__:
-        _test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_iter=25)
-        # resume from epoch
-        _test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_epoch=3)
-        _test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_epoch=3)
+    _test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_iter=25)
+    # resume from epoch
+    _test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_epoch=3)
+    _test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_epoch=3)
 
 
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
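Note: each hunk above reverts the same temporary guard. PyTorch nightly wheels are versioned with a ".devYYYYMMDD" segment (e.g. "1.8.0.dev20201117+cpu"), so the removed check 'if "dev" not in torch.__version__:' restricted the strict reproducibility assertions to stable builds. A minimal sketch of that pattern for reference; the helper name is_torch_nightly is hypothetical, not part of this test suite:

    import torch

    def is_torch_nightly() -> bool:
        # Nightly wheel version strings contain a ".devYYYYMMDD" segment
        # (e.g. "1.8.0.dev20201117+cpu"); stable releases do not.
        return "dev" in torch.__version__

    # The reverted guard ran the strict assertions only on stable builds:
    if not is_torch_nightly():
        print("stable build: reproducibility assertions would run here")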