Remove possible manual seeds from test files. #2436

Closed
wants to merge 8 commits into from
Changes from 5 commits
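The change repeated across the diffs below is the removal of explicit RNG seeding from test setup and test bodies. As a minimal before/after sketch of the setUpClass case (the class and base-class names come from the first hunk; the import path is an assumption, and the sketch is illustrative rather than a copy of the actual file):

# Before this PR: the test class pins the global RNG once for the whole class.
import torch

from torchaudio_unittest.common_utils import TorchaudioTestCase  # assumed import path


class TestLibriSpeechRNNTModule(TorchaudioTestCase):
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        torch.random.manual_seed(31)  # the call this PR removes

# After this PR: setUpClass only calls super().setUpClass(), so the class
# relies on whatever seeding, if any, the shared test harness performs.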

(file path not shown in this capture)
@@ -55,7 +55,6 @@ class TestLibriSpeechRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)

@parameterized.expand(
[

(file path not shown in this capture)
@@ -49,7 +49,6 @@ class TestMuSTCRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)

@parameterized.expand(
[

(file path not shown in this capture)
@@ -53,7 +53,6 @@ class TestTEDLIUM3RNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)

@parameterized.expand(
[

(file path not shown in this capture)
@@ -9,7 +9,6 @@ class TestCropAudioLabel(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)

@parameterized.expand(
[

5 changes: 0 additions & 5 deletions test/torchaudio_unittest/functional/autograd_impl.py
@@ -232,28 +232,23 @@ def test_bandreject_biquad(self, central_freq, Q):
self.assert_grad(F.bandreject_biquad, (x, sr, central_freq, Q))

def test_deemph_biquad(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=1)
self.assert_grad(F.deemph_biquad, (x, 44100))

def test_flanger(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
self.assert_grad(F.flanger, (x, 44100))

def test_gain(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
self.assert_grad(F.gain, (x, 1.1))

def test_overdrive(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
self.assert_grad(F.gain, (x,))

@parameterized.expand([(True,), (False,)])
def test_phaser(self, sinusoidal):
torch.random.manual_seed(2434)
sr = 8000
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
self.assert_grad(F.phaser, (x, sr, sinusoidal))

22 changes: 0 additions & 22 deletions test/torchaudio_unittest/functional/batch_consistency_test.py
@@ -32,12 +32,10 @@ def assert_batch_consistency(self, functional, inputs, atol=1e-8, rtol=1e-5, see
for i in range(1, len(inputs)):
self.assertEqual(inputs[i].size(0), n)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = [[ele[i].clone() for ele in inputs] for i in range(n)]
items_result = torch.stack([functional(*items_input[i]) for i in range(n)])

# Batch the input and run
torch.random.manual_seed(seed)

Review comment from a collaborator (see the sketch below):
These two seeds cannot be removed. This helper function is supposed to run functions on the same seed twice and compare the results.

batch_input = [ele.clone() for ele in inputs]
batch_result = functional(*batch_input)
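
For reference, a condensed standalone sketch of the pattern the reviewer describes. The real helper is a method on the test case, compares with self.assertEqual, and its default seed is truncated in the hunk header above, so the free-function form, the assert_close comparison, and the placeholder seed of 42 are adaptations:

import torch


def assert_batch_consistency(functional, inputs, atol=1e-8, rtol=1e-5, seed=42):
    n = inputs[0].size(0)

    # Item-wise pass: pin the RNG so any randomness inside `functional` is reproducible.
    torch.random.manual_seed(seed)
    items_input = [[ele[i].clone() for ele in inputs] for i in range(n)]
    items_result = torch.stack([functional(*items_input[i]) for i in range(n)])

    # Batched pass: re-seed so this run starts from the same RNG state as the
    # item-wise pass. Dropping either manual_seed call would let a stochastic
    # functional draw different random numbers in the two passes and make the
    # comparison meaningless, which is the reviewer's point.
    torch.random.manual_seed(seed)
    batch_input = [ele.clone() for ele in inputs]
    batch_result = functional(*batch_input)

    torch.testing.assert_close(batch_result, items_result, atol=atol, rtol=rtol)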

@@ -52,7 +50,6 @@ def test_griffinlim(self):
momentum = 0.99
n_iter = 32
length = 1000
torch.random.manual_seed(0)
batch = torch.rand(self.batch_size, 1, 201, 6)
kwargs = {
"window": window,
@@ -80,7 +77,6 @@ def test_griffinlim(self):
def test_detect_pitch_frequency(self, sample_rate, n_channels):
# Use different frequencies to ensure each item in the batch returns a
# different answer.
torch.manual_seed(0)
frequencies = torch.randint(100, 1000, [self.batch_size])
waveforms = torch.stack(
[
@@ -103,7 +99,6 @@ def test_detect_pitch_frequency(self, sample_rate, n_channels):
]
)
def test_amplitude_to_DB(self, top_db):
torch.manual_seed(0)
spec = torch.rand(self.batch_size, 2, 100, 100) * 200

amplitude_mult = 20.0
@@ -137,7 +132,6 @@ def test_amplitude_to_DB_itemwise_clamps(self):
top_db = 20.0

# Make a batch of noise
torch.manual_seed(0)
spec = torch.rand([2, 2, 100, 100]) * 200
# Make one item blow out the other
spec[0] += 50
@@ -158,7 +152,6 @@ def test_amplitude_to_DB_not_channelwise_clamps(self):
db_mult = math.log10(max(amin, ref))
top_db = 40.0

torch.manual_seed(0)
spec = torch.rand([1, 2, 100, 100]) * 200
# Make one channel blow out the other
spec[:, 0] += 50
@@ -173,7 +166,6 @@ def test_amplitude_to_DB_not_channelwise_clamps(self):
assert (difference >= 1e-5).any()

def test_contrast(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
kwargs = {
"enhancement_amount": 80.0,
@@ -182,7 +174,6 @@ def test_contrast(self):
self.assert_batch_consistency(func, inputs=(waveforms,))

def test_dcshift(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
kwargs = {
"shift": 0.5,
@@ -192,7 +183,6 @@ def test_dcshift(self):
self.assert_batch_consistency(func, inputs=(waveforms,))

def test_overdrive(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
kwargs = {
"gain": 45,
@@ -215,7 +205,6 @@ def test_phaser(self):
self.assert_batch_consistency(func, inputs=(batch,))

def test_flanger(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
sample_rate = 44100
kwargs = {
@@ -234,7 +223,6 @@ def test_flanger(self):
name_func=_name_from_args,
)
def test_sliding_window_cmn(self, center, norm_vars):
torch.manual_seed(0)
spectrogram = torch.rand(self.batch_size, 2, 1024, 1024) * 200
kwargs = {
"center": center,
@@ -281,15 +269,13 @@ def test_compute_kaldi_pitch(self):

def test_lfilter(self):
signal_length = 2048
torch.manual_seed(2434)
x = torch.randn(self.batch_size, signal_length)
a = torch.rand(self.batch_size, 3)
b = torch.rand(self.batch_size, 3)
self.assert_batch_consistency(F.lfilter, inputs=(x, a, b))

def test_filtfilt(self):
signal_length = 2048
torch.manual_seed(2434)
x = torch.randn(self.batch_size, signal_length)
a = torch.rand(self.batch_size, 3)
b = torch.rand(self.batch_size, 3)
@@ -319,7 +305,6 @@ def test_psd_with_mask(self):
self.assert_batch_consistency(F.psd, (specgram, mask))

def test_mvdr_weights_souden(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 10
@@ -332,7 +317,6 @@ def test_mvdr_weights_souden_with_tensor(self):
self.assert_batch_consistency(func, (psd_noise, psd_speech))

def test_mvdr_weights_souden_with_tensor(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 10
@@ -343,7 +327,6 @@ def test_mvdr_weights_rtf(self):
self.assert_batch_consistency(F.mvdr_weights_souden, (psd_noise, psd_speech, reference_channel))

def test_mvdr_weights_rtf(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 129
@@ -356,7 +339,6 @@ def test_mvdr_weights_rtf_with_tensor(self):
self.assert_batch_consistency(func, (rtf, psd_noise))

def test_mvdr_weights_rtf_with_tensor(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 129
@@ -367,7 +349,6 @@ def test_rtf_evd(self):
self.assert_batch_consistency(F.mvdr_weights_rtf, (rtf, psd_noise, reference_channel))

def test_rtf_evd(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 5
@@ -382,7 +363,6 @@ def test_rtf_power(self, n_iter):
]
)
def test_rtf_power(self, n_iter):
torch.random.manual_seed(2434)
channel = 4
batch_size = 2
n_fft_bin = 10
@@ -402,7 +382,6 @@ def test_rtf_power_with_tensor(self, n_iter):
]
)
def test_rtf_power_with_tensor(self, n_iter):
torch.random.manual_seed(2434)
channel = 4
batch_size = 2
n_fft_bin = 10
@@ -417,7 +396,6 @@ def test_apply_beamforming(self):
self.assert_batch_consistency(func, (psd_speech, psd_noise, reference_channel))

def test_apply_beamforming(self):
torch.random.manual_seed(2434)
sr = 8000
n_fft = 400
batch_size, num_channels = 2, 3

(file path not shown in this capture)
@@ -35,7 +35,6 @@ def _smoke_test(self, format, compression, check_num_frames):
The purpose of this test suite is to verify that apply_codec functionalities do not exhibit
abnormal behaviors.
"""
torch.random.manual_seed(42)
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2

(file path not shown in this capture)
@@ -131,7 +131,6 @@ def test_phase_vocoder(self, rate):
hop_length = 256
num_freq = 1025
num_frames = 400
torch.random.manual_seed(42)

# Due to cummulative sum, numerical error in using torch.float32 will
# result in bottom right values of the stretched sectrogram to not

(file path not shown in this capture)
@@ -269,7 +269,6 @@ def test_lfilter(self):
self._assert_consistency(F.lfilter, (waveform, a_coeffs, b_coeffs, True, True))

def test_filtfilt(self):
torch.manual_seed(296)
waveform = common_utils.get_whitenoise(sample_rate=8000)
b_coeffs = torch.rand(4, device=waveform.device, dtype=waveform.dtype)
a_coeffs = torch.rand(4, device=waveform.device, dtype=waveform.dtype)
@@ -531,7 +530,6 @@ def func(tensor):
self._assert_consistency(func, (waveform,))

def test_flanger(self):
torch.random.manual_seed(40)
waveform = torch.rand(2, 100) - 0.5

def func(tensor):

(file path not shown in this capture)
@@ -26,7 +26,6 @@ def _gen_inputs(self, input_dim, batch_size, num_frames):

def setUp(self):
super().setUp()
torch.random.manual_seed(31)

def test_torchscript_consistency_forward(self):
r"""Verify that scripting Conformer does not change the behavior of method `forward`."""

1 change: 0 additions & 1 deletion test/torchaudio_unittest/models/ctc_decoder_test.py
@@ -38,7 +38,6 @@ def _get_decoder(self, tokens=None, use_lm=True, use_lexicon=True, **kwargs):
def _get_emissions(self):
B, T, N = 4, 15, NUM_TOKENS

torch.manual_seed(0)
emissions = torch.rand(B, T, N)

return emissions

4 changes: 0 additions & 4 deletions test/torchaudio_unittest/models/rnnt/rnnt_test_impl.py
@@ -54,7 +54,6 @@ def _get_transcriber_input(self):
input_dim = input_config["input_dim"]
right_context_length = input_config["right_context_length"]

torch.random.manual_seed(31)
input = torch.rand(batch_size, max_input_length + right_context_length, input_dim).to(
device=self.device, dtype=self.dtype
)
@@ -68,7 +67,6 @@ def _get_transcriber_streaming_input(self):
input_dim = input_config["input_dim"]
right_context_length = input_config["right_context_length"]

torch.random.manual_seed(31)
input = torch.rand(batch_size, segment_length + right_context_length, input_dim).to(
device=self.device, dtype=self.dtype
)
@@ -83,7 +81,6 @@ def _get_predictor_input(self):
num_symbols = input_config["num_symbols"]
max_target_length = input_config["max_target_length"]

torch.random.manual_seed(31)
input = torch.randint(0, num_symbols, (batch_size, max_target_length)).to(device=self.device, dtype=torch.int32)
lengths = torch.randint(1, max_target_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)
return input, lengths
@@ -95,7 +92,6 @@ def _get_joiner_input(self):
max_target_length = input_config["max_target_length"]
input_dim = input_config["encoding_dim"]

torch.random.manual_seed(31)
utterance_encodings = torch.rand(batch_size, joiner_max_input_length, input_dim).to(
device=self.device, dtype=self.dtype
)

(file path not shown in this capture)
@@ -46,8 +46,6 @@ def _get_model(self):
def test_torchscript_consistency_forward(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `forward`."""

torch.random.manual_seed(31)

input_config = self._get_input_config()
batch_size = input_config["batch_size"]
max_input_length = input_config["max_input_length"]
@@ -74,8 +72,6 @@ def test_torchscript_consistency_infer(self):
def test_torchscript_consistency_infer(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `infer`."""

torch.random.manual_seed(31)

input_config = self._get_input_config()
segment_length = input_config["segment_length"]
right_context_length = input_config["right_context_length"]

(file path not shown in this capture)
@@ -134,7 +134,6 @@ def test_import_wave2vec2_pretraining_model(self, config, _):
"""Wav2vec2 pretraining models from fairseq can be imported and yields the same results"""
batch_size, num_frames = 3, 1024

torch.manual_seed(0)
original = self._get_model(config).eval()
imported = import_fairseq_model(original).eval()

@@ -149,7 +148,6 @@ def test_import_hubert_pretraining_model(self, config, factory_func):
"""HuBERT pretraining models from fairseq can be imported and yields the same results"""
batch_size, num_frames = 3, 1024

torch.manual_seed(0)
original = self._get_model(config).eval()
imported = import_fairseq_model(original).eval()

@@ -207,7 +205,6 @@ def test_import_finetuning_model(self, config, _):
imported = import_fairseq_model(original).eval()

# Without mask
torch.manual_seed(0)
x = torch.randn(batch_size, num_frames)
ref = original(x, torch.zeros_like(x))["encoder_out"].transpose(0, 1)
hyp, _ = imported(x)
@@ -241,7 +238,6 @@ def test_recreate_finetuning_model(self, config, factory_func):
reloaded.eval()

# Without mask
torch.manual_seed(0)
x = torch.randn(batch_size, num_frames)
ref, _ = imported(x)
hyp, _ = reloaded(x)

(file path not shown in this capture)
@@ -89,7 +89,6 @@ def _get_model(self, config):
raise ValueError(f'Unexpected arch: {config["architectures"]}')

def _test_import_pretrain(self, original, imported, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref = original.feature_extractor(x).transpose(1, 2)
@@ -173,7 +172,6 @@ def test_import_finetune(self, config, _):
self._test_import_finetune(original, imported, config)

def _test_recreate(self, imported, reloaded, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref, _ = imported.feature_extractor(x, None)