diff --git a/test/torchaudio_unittest/example/emformer_rnnt/test_librispeech_lightning.py b/test/torchaudio_unittest/example/emformer_rnnt/test_librispeech_lightning.py
index fef23dcce8..1ee8ec44e9 100644
--- a/test/torchaudio_unittest/example/emformer_rnnt/test_librispeech_lightning.py
+++ b/test/torchaudio_unittest/example/emformer_rnnt/test_librispeech_lightning.py
@@ -55,7 +55,6 @@ class TestLibriSpeechRNNTModule(TorchaudioTestCase):
     @classmethod
     def setUpClass(cls) -> None:
         super().setUpClass()
-        torch.random.manual_seed(31)
 
     @parameterized.expand(
         [
diff --git a/test/torchaudio_unittest/example/emformer_rnnt/test_mustc_lightning.py b/test/torchaudio_unittest/example/emformer_rnnt/test_mustc_lightning.py
index fdf0998154..c3e42606bb 100644
--- a/test/torchaudio_unittest/example/emformer_rnnt/test_mustc_lightning.py
+++ b/test/torchaudio_unittest/example/emformer_rnnt/test_mustc_lightning.py
@@ -49,7 +49,6 @@ class TestMuSTCRNNTModule(TorchaudioTestCase):
     @classmethod
     def setUpClass(cls) -> None:
         super().setUpClass()
-        torch.random.manual_seed(31)
 
     @parameterized.expand(
         [
diff --git a/test/torchaudio_unittest/example/emformer_rnnt/test_tedlium3_lightning.py b/test/torchaudio_unittest/example/emformer_rnnt/test_tedlium3_lightning.py
index 56454881d6..e1804dfcfd 100644
--- a/test/torchaudio_unittest/example/emformer_rnnt/test_tedlium3_lightning.py
+++ b/test/torchaudio_unittest/example/emformer_rnnt/test_tedlium3_lightning.py
@@ -53,7 +53,6 @@ class TestTEDLIUM3RNNTModule(TorchaudioTestCase):
     @classmethod
     def setUpClass(cls) -> None:
         super().setUpClass()
-        torch.random.manual_seed(31)
 
     @parameterized.expand(
         [
diff --git a/test/torchaudio_unittest/example/hubert/test_crop_audio_label.py b/test/torchaudio_unittest/example/hubert/test_crop_audio_label.py
index 1dd6e98b0e..706db0edfe 100644
--- a/test/torchaudio_unittest/example/hubert/test_crop_audio_label.py
+++ b/test/torchaudio_unittest/example/hubert/test_crop_audio_label.py
@@ -9,7 +9,6 @@ class TestCropAudioLabel(TorchaudioTestCase):
     @classmethod
     def setUpClass(cls) -> None:
         super().setUpClass()
-        torch.random.manual_seed(31)
 
     @parameterized.expand(
         [
diff --git a/test/torchaudio_unittest/functional/autograd_impl.py b/test/torchaudio_unittest/functional/autograd_impl.py
index 5e4694729d..8e51686283 100644
--- a/test/torchaudio_unittest/functional/autograd_impl.py
+++ b/test/torchaudio_unittest/functional/autograd_impl.py
@@ -232,28 +232,23 @@ def test_bandreject_biquad(self, central_freq, Q):
         self.assert_grad(F.bandreject_biquad, (x, sr, central_freq, Q))
 
     def test_deemph_biquad(self):
-        torch.random.manual_seed(2434)
         x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=1)
         self.assert_grad(F.deemph_biquad, (x, 44100))
 
     def test_flanger(self):
-        torch.random.manual_seed(2434)
         x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
         self.assert_grad(F.flanger, (x, 44100))
 
     def test_gain(self):
-        torch.random.manual_seed(2434)
         x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
         self.assert_grad(F.gain, (x, 1.1))
 
     def test_overdrive(self):
-        torch.random.manual_seed(2434)
         x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
         self.assert_grad(F.gain, (x,))
 
     @parameterized.expand([(True,), (False,)])
     def test_phaser(self, sinusoidal):
-        torch.random.manual_seed(2434)
         sr = 8000
         x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
         self.assert_grad(F.phaser, (x, sr, sinusoidal))
diff --git a/test/torchaudio_unittest/functional/batch_consistency_test.py b/test/torchaudio_unittest/functional/batch_consistency_test.py
index 355a5c610d..97188968eb 100644
--- a/test/torchaudio_unittest/functional/batch_consistency_test.py
+++ b/test/torchaudio_unittest/functional/batch_consistency_test.py
@@ -52,7 +52,6 @@ def test_griffinlim(self):
         momentum = 0.99
         n_iter = 32
         length = 1000
-        torch.random.manual_seed(0)
         batch = torch.rand(self.batch_size, 1, 201, 6)
         kwargs = {
             "window": window,
@@ -80,7 +79,6 @@ def test_griffinlim(self):
     def test_detect_pitch_frequency(self, sample_rate, n_channels):
         # Use different frequencies to ensure each item in the batch returns a
         # different answer.
-        torch.manual_seed(0)
         frequencies = torch.randint(100, 1000, [self.batch_size])
         waveforms = torch.stack(
             [
@@ -103,7 +101,6 @@ def test_detect_pitch_frequency(self, sample_rate, n_channels):
         ]
     )
     def test_amplitude_to_DB(self, top_db):
-        torch.manual_seed(0)
         spec = torch.rand(self.batch_size, 2, 100, 100) * 200
 
         amplitude_mult = 20.0
@@ -137,7 +134,6 @@ def test_amplitude_to_DB_itemwise_clamps(self):
         top_db = 20.0
 
         # Make a batch of noise
-        torch.manual_seed(0)
         spec = torch.rand([2, 2, 100, 100]) * 200
         # Make one item blow out the other
         spec[0] += 50
@@ -158,7 +154,6 @@ def test_amplitude_to_DB_not_channelwise_clamps(self):
         db_mult = math.log10(max(amin, ref))
         top_db = 40.0
 
-        torch.manual_seed(0)
         spec = torch.rand([1, 2, 100, 100]) * 200
         # Make one channel blow out the other
         spec[:, 0] += 50
@@ -173,7 +168,6 @@ def test_amplitude_to_DB_not_channelwise_clamps(self):
         assert (difference >= 1e-5).any()
 
     def test_contrast(self):
-        torch.random.manual_seed(0)
         waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
         kwargs = {
             "enhancement_amount": 80.0,
@@ -182,7 +176,6 @@ def test_contrast(self):
         self.assert_batch_consistency(func, inputs=(waveforms,))
 
     def test_dcshift(self):
-        torch.random.manual_seed(0)
         waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
         kwargs = {
             "shift": 0.5,
@@ -192,7 +185,6 @@ def test_dcshift(self):
         self.assert_batch_consistency(func, inputs=(waveforms,))
 
     def test_overdrive(self):
-        torch.random.manual_seed(0)
         waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
         kwargs = {
             "gain": 45,
@@ -215,7 +207,6 @@ def test_phaser(self):
         self.assert_batch_consistency(func, inputs=(batch,))
 
     def test_flanger(self):
-        torch.random.manual_seed(0)
         waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
         sample_rate = 44100
         kwargs = {
@@ -234,7 +225,6 @@
         name_func=_name_from_args,
     )
     def test_sliding_window_cmn(self, center, norm_vars):
-        torch.manual_seed(0)
         spectrogram = torch.rand(self.batch_size, 2, 1024, 1024) * 200
         kwargs = {
             "center": center,
@@ -281,7 +271,6 @@ def test_compute_kaldi_pitch(self):
 
     def test_lfilter(self):
         signal_length = 2048
-        torch.manual_seed(2434)
         x = torch.randn(self.batch_size, signal_length)
         a = torch.rand(self.batch_size, 3)
         b = torch.rand(self.batch_size, 3)
@@ -289,7 +278,6 @@ def test_filtfilt(self):
         signal_length = 2048
-        torch.manual_seed(2434)
         x = torch.randn(self.batch_size, signal_length)
         a = torch.rand(self.batch_size, 3)
         b = torch.rand(self.batch_size, 3)
@@ -319,7 +307,6 @@ def test_psd_with_mask(self):
         self.assert_batch_consistency(F.psd, (specgram, mask))
 
     def test_mvdr_weights_souden(self):
-        torch.random.manual_seed(2434)
         batch_size = 2
         channel = 4
         n_fft_bin = 10
@@ -332,7 +319,6 @@ def test_mvdr_weights_souden(self):
         self.assert_batch_consistency(func, (psd_noise, psd_speech))
 
     def test_mvdr_weights_souden_with_tensor(self):
-        torch.random.manual_seed(2434)
         batch_size = 2
         channel = 4
         n_fft_bin = 10
@@ -343,7 +329,6 @@ def test_mvdr_weights_souden_with_tensor(self):
         self.assert_batch_consistency(F.mvdr_weights_souden, (psd_noise, psd_speech, reference_channel))
 
     def test_mvdr_weights_rtf(self):
-        torch.random.manual_seed(2434)
         batch_size = 2
         channel = 4
         n_fft_bin = 129
@@ -356,7 +341,6 @@ def test_mvdr_weights_rtf(self):
         self.assert_batch_consistency(func, (rtf, psd_noise))
 
     def test_mvdr_weights_rtf_with_tensor(self):
-        torch.random.manual_seed(2434)
         batch_size = 2
         channel = 4
         n_fft_bin = 129
@@ -367,7 +351,6 @@ def test_mvdr_weights_rtf_with_tensor(self):
         self.assert_batch_consistency(F.mvdr_weights_rtf, (rtf, psd_noise, reference_channel))
 
     def test_rtf_evd(self):
-        torch.random.manual_seed(2434)
         batch_size = 2
         channel = 4
         n_fft_bin = 5
@@ -382,7 +365,6 @@ def test_rtf_evd(self):
         ]
     )
     def test_rtf_power(self, n_iter):
-        torch.random.manual_seed(2434)
         channel = 4
         batch_size = 2
         n_fft_bin = 10
@@ -402,7 +384,6 @@ def test_rtf_power(self, n_iter):
         ]
     )
     def test_rtf_power_with_tensor(self, n_iter):
-        torch.random.manual_seed(2434)
         channel = 4
         batch_size = 2
         n_fft_bin = 10
@@ -417,7 +398,6 @@ def test_rtf_power_with_tensor(self, n_iter):
         self.assert_batch_consistency(func, (psd_speech, psd_noise, reference_channel))
 
     def test_apply_beamforming(self):
-        torch.random.manual_seed(2434)
         sr = 8000
         n_fft = 400
         batch_size, num_channels = 2, 3
diff --git a/test/torchaudio_unittest/functional/functional_cpu_test.py b/test/torchaudio_unittest/functional/functional_cpu_test.py
index 2f76dad2f9..b4e3cc0938 100644
--- a/test/torchaudio_unittest/functional/functional_cpu_test.py
+++ b/test/torchaudio_unittest/functional/functional_cpu_test.py
@@ -35,7 +35,6 @@ def _smoke_test(self, format, compression, check_num_frames):
         The purpose of this test suite is to verify that apply_codec functionalities
         do not exhibit abnormal behaviors.
         """
-        torch.random.manual_seed(42)
         sample_rate = 8000
         num_frames = 3 * sample_rate
         num_channels = 2
diff --git a/test/torchaudio_unittest/functional/librosa_compatibility_test_impl.py b/test/torchaudio_unittest/functional/librosa_compatibility_test_impl.py
index 4b7f807f30..4de769f1be 100644
--- a/test/torchaudio_unittest/functional/librosa_compatibility_test_impl.py
+++ b/test/torchaudio_unittest/functional/librosa_compatibility_test_impl.py
@@ -131,7 +131,6 @@ def test_phase_vocoder(self, rate):
         hop_length = 256
         num_freq = 1025
         num_frames = 400
-        torch.random.manual_seed(42)
 
         # Due to cummulative sum, numerical error in using torch.float32 will
         # result in bottom right values of the stretched sectrogram to not
diff --git a/test/torchaudio_unittest/functional/torchscript_consistency_impl.py b/test/torchaudio_unittest/functional/torchscript_consistency_impl.py
index 871eb166d0..961f905a25 100644
--- a/test/torchaudio_unittest/functional/torchscript_consistency_impl.py
+++ b/test/torchaudio_unittest/functional/torchscript_consistency_impl.py
@@ -269,7 +269,6 @@ def test_lfilter(self):
         self._assert_consistency(F.lfilter, (waveform, a_coeffs, b_coeffs, True, True))
 
     def test_filtfilt(self):
-        torch.manual_seed(296)
         waveform = common_utils.get_whitenoise(sample_rate=8000)
         b_coeffs = torch.rand(4, device=waveform.device, dtype=waveform.dtype)
         a_coeffs = torch.rand(4, device=waveform.device, dtype=waveform.dtype)
@@ -531,7 +530,6 @@ def func(tensor):
         self._assert_consistency(func, (waveform,))
 
     def test_flanger(self):
-        torch.random.manual_seed(40)
         waveform = torch.rand(2, 100) - 0.5
 
         def func(tensor):
diff --git a/test/torchaudio_unittest/models/conformer/conformer_test_impl.py b/test/torchaudio_unittest/models/conformer/conformer_test_impl.py
index 9f40baf3e1..be1aa138b4 100644
--- a/test/torchaudio_unittest/models/conformer/conformer_test_impl.py
+++ b/test/torchaudio_unittest/models/conformer/conformer_test_impl.py
@@ -26,7 +26,6 @@ def _gen_inputs(self, input_dim, batch_size, num_frames):
 
     def setUp(self):
         super().setUp()
-        torch.random.manual_seed(31)
 
     def test_torchscript_consistency_forward(self):
         r"""Verify that scripting Conformer does not change the behavior of method `forward`."""
diff --git a/test/torchaudio_unittest/models/ctc_decoder_test.py b/test/torchaudio_unittest/models/ctc_decoder_test.py
index e6999126a4..55533bb107 100644
--- a/test/torchaudio_unittest/models/ctc_decoder_test.py
+++ b/test/torchaudio_unittest/models/ctc_decoder_test.py
@@ -9,7 +9,6 @@
     TorchaudioTestCase,
 )
 
-
 NUM_TOKENS = 8
 
 
@@ -38,7 +37,6 @@ def _get_decoder(self, tokens=None, use_lm=True, use_lexicon=True, **kwargs):
 
     def _get_emissions(self):
         B, T, N = 4, 15, NUM_TOKENS
-        torch.manual_seed(0)
         emissions = torch.rand(B, T, N)
 
         return emissions
diff --git a/test/torchaudio_unittest/models/rnnt/rnnt_test_impl.py b/test/torchaudio_unittest/models/rnnt/rnnt_test_impl.py
index 1c060c41f1..c12e039e37 100644
--- a/test/torchaudio_unittest/models/rnnt/rnnt_test_impl.py
+++ b/test/torchaudio_unittest/models/rnnt/rnnt_test_impl.py
@@ -54,7 +54,6 @@ def _get_transcriber_input(self):
         input_dim = input_config["input_dim"]
         right_context_length = input_config["right_context_length"]
 
-        torch.random.manual_seed(31)
         input = torch.rand(batch_size, max_input_length + right_context_length, input_dim).to(
             device=self.device, dtype=self.dtype
         )
@@ -68,7 +67,6 @@ def _get_transcriber_streaming_input(self):
         input_dim = input_config["input_dim"]
         right_context_length = input_config["right_context_length"]
 
-        torch.random.manual_seed(31)
         input = torch.rand(batch_size, segment_length + right_context_length, input_dim).to(
             device=self.device, dtype=self.dtype
         )
@@ -83,7 +81,6 @@ def _get_predictor_input(self):
         num_symbols = input_config["num_symbols"]
         max_target_length = input_config["max_target_length"]
 
-        torch.random.manual_seed(31)
         input = torch.randint(0, num_symbols, (batch_size, max_target_length)).to(device=self.device, dtype=torch.int32)
         lengths = torch.randint(1, max_target_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)
         return input, lengths
@@ -95,7 +92,6 @@ def _get_joiner_input(self):
         max_target_length = input_config["max_target_length"]
         input_dim = input_config["encoding_dim"]
 
-        torch.random.manual_seed(31)
         utterance_encodings = torch.rand(batch_size, joiner_max_input_length, input_dim).to(
             device=self.device, dtype=self.dtype
         )
diff --git a/test/torchaudio_unittest/models/rnnt_decoder/rnnt_decoder_test_impl.py b/test/torchaudio_unittest/models/rnnt_decoder/rnnt_decoder_test_impl.py
index ab0b0dfcd0..8560564596 100644
--- a/test/torchaudio_unittest/models/rnnt_decoder/rnnt_decoder_test_impl.py
+++ b/test/torchaudio_unittest/models/rnnt_decoder/rnnt_decoder_test_impl.py
@@ -46,8 +46,6 @@ def _get_model(self):
 
     def test_torchscript_consistency_forward(self):
         r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `forward`."""
-        torch.random.manual_seed(31)
-
         input_config = self._get_input_config()
         batch_size = input_config["batch_size"]
         max_input_length = input_config["max_input_length"]
@@ -74,8 +72,6 @@ def test_torchscript_consistency_forward(self):
 
     def test_torchscript_consistency_infer(self):
         r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `infer`."""
-        torch.random.manual_seed(31)
-
         input_config = self._get_input_config()
         segment_length = input_config["segment_length"]
         right_context_length = input_config["right_context_length"]
diff --git a/test/torchaudio_unittest/models/wav2vec2/fairseq_integration_test.py b/test/torchaudio_unittest/models/wav2vec2/fairseq_integration_test.py
index daeebb3841..9bc05cf2c1 100644
--- a/test/torchaudio_unittest/models/wav2vec2/fairseq_integration_test.py
+++ b/test/torchaudio_unittest/models/wav2vec2/fairseq_integration_test.py
@@ -134,7 +134,6 @@ def test_import_wave2vec2_pretraining_model(self, config, _):
         """Wav2vec2 pretraining models from fairseq can be imported and yields the same results"""
         batch_size, num_frames = 3, 1024
 
-        torch.manual_seed(0)
         original = self._get_model(config).eval()
         imported = import_fairseq_model(original).eval()
 
@@ -149,7 +148,6 @@ def test_import_hubert_pretraining_model(self, config, factory_func):
         """HuBERT pretraining models from fairseq can be imported and yields the same results"""
         batch_size, num_frames = 3, 1024
 
-        torch.manual_seed(0)
         original = self._get_model(config).eval()
         imported = import_fairseq_model(original).eval()
 
@@ -241,7 +239,6 @@ def test_recreate_finetuning_model(self, config, factory_func):
         reloaded.eval()
 
         # Without mask
-        torch.manual_seed(0)
         x = torch.randn(batch_size, num_frames)
         ref, _ = imported(x)
         hyp, _ = reloaded(x)
diff --git a/test/torchaudio_unittest/models/wav2vec2/huggingface_intergration_test.py b/test/torchaudio_unittest/models/wav2vec2/huggingface_intergration_test.py
index 72307c461f..3fd37639b0 100644
--- a/test/torchaudio_unittest/models/wav2vec2/huggingface_intergration_test.py
+++ b/test/torchaudio_unittest/models/wav2vec2/huggingface_intergration_test.py
@@ -89,7 +89,6 @@ def _get_model(self, config):
             raise ValueError(f'Unexpected arch: {config["architectures"]}')
 
     def _test_import_pretrain(self, original, imported, config):
-        torch.manual_seed(0)
         # FeatureExtractor
         x = torch.randn(3, 1024)
         ref = original.feature_extractor(x).transpose(1, 2)
@@ -173,7 +172,6 @@ def test_import_finetune(self, config, _):
         self._test_import_finetune(original, imported, config)
 
     def _test_recreate(self, imported, reloaded, config):
-        torch.manual_seed(0)
         # FeatureExtractor
         x = torch.randn(3, 1024)
         ref, _ = imported.feature_extractor(x, None)
diff --git a/test/torchaudio_unittest/models/wav2vec2/model_test.py b/test/torchaudio_unittest/models/wav2vec2/model_test.py
index 4a5642cde8..a4f07ded74 100644
--- a/test/torchaudio_unittest/models/wav2vec2/model_test.py
+++ b/test/torchaudio_unittest/models/wav2vec2/model_test.py
@@ -48,7 +48,6 @@ def _smoke_test(self, model, device, dtype):
         model = model.to(device=device, dtype=dtype)
         model = model.eval()
 
-        torch.manual_seed(0)
         batch_size, num_frames = 3, 1024
         waveforms = torch.randn(batch_size, num_frames, device=device, dtype=dtype)
 
@@ -84,7 +83,6 @@ def _feature_extractor_test(self, model):
         model.eval()
         num_layers = len(model.encoder.transformer.layers)
 
-        torch.manual_seed(0)
         waveforms = torch.randn(batch_size, num_frames)
         lengths = torch.randint(
             low=0,
@@ -119,7 +117,6 @@ def test_extract_feature(self, factory_func):
 
     def _test_batch_consistency(self, model):
         model.eval()
         batch_size, max_frames = 5, 5 * 1024
-        torch.manual_seed(0)
         waveforms = torch.randn(batch_size, max_frames)
         input_lengths = torch.tensor([i * 3200 for i in range(1, 6)])
@@ -148,7 +145,6 @@ def test_finetune_batch_consistency(self, factory_func):
 
     def _test_zero_length(self, model):
         model.eval()
-        torch.manual_seed(0)
         batch_size = 3
         waveforms = torch.randn(batch_size, 1024)
         input_lengths = torch.zeros(batch_size)
@@ -172,7 +168,6 @@ def _test_torchscript(self, model):
 
         batch_size, num_frames = 3, 1024
 
-        torch.manual_seed(0)
         waveforms = torch.randn(batch_size, num_frames)
         lengths = torch.randint(
             low=0,
@@ -220,7 +215,6 @@ def _test_quantize_smoke_test(self, model):
         # A lazy way to check that Modules are different
         assert str(quantized) != str(model), "Dynamic quantization did not modify the module."
 
-        torch.manual_seed(0)
         waveforms = torch.randn(batch_size, num_frames)
         lengths = torch.randint(
             low=0,
@@ -250,7 +244,6 @@ def _test_quantize_torchscript(self, model):
         # A lazy way to check that Modules are different
         assert str(quantized) != str(model), "Dynamic quantization did not modify the module."
 
-        torch.manual_seed(0)
         waveforms = torch.randn(batch_size, num_frames)
         lengths = torch.randint(
             low=0,
diff --git a/test/torchaudio_unittest/transforms/transforms_test_impl.py b/test/torchaudio_unittest/transforms/transforms_test_impl.py
index fd0ea65b44..9caeea4156 100644
--- a/test/torchaudio_unittest/transforms/transforms_test_impl.py
+++ b/test/torchaudio_unittest/transforms/transforms_test_impl.py
@@ -44,7 +44,6 @@ def test_InverseMelScale(self):
 
         # Run transform
         transform = T.InverseMelScale(n_stft, n_mels=n_mels, sample_rate=sample_rate).to(self.device, self.dtype)
-        torch.random.manual_seed(0)
         result = transform(input)
 
         # Compare
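Every hunk in this patch deletes the same kind of boilerplate: a per-test `torch.manual_seed` / `torch.random.manual_seed` call. The patch itself does not show where seeding moves to, so the following is only an illustrative sketch of the presumed follow-up, assuming the shared `TorchaudioTestCase` base class seeds the RNG once per test in `setUp`; the hook and the seed value shown here are assumptions, not part of this diff.

# Illustrative sketch -- not part of the patch above. Assumes the removed
# per-test seeding is centralized in the shared test base class.
import unittest

import torch


class TorchaudioTestCase(unittest.TestCase):
    def setUp(self):
        super().setUp()
        # Seeding once here makes torch.rand / torch.randn deterministic in
        # every test body without repeating the call per test. The constant
        # 2434 is an assumed placeholder, not the project's actual choice.
        torch.random.manual_seed(2434)

Centralizing the seed this way keeps individual tests reproducible while guaranteeing that no test silently depends on the RNG state left behind by an earlier test.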