[VITS] Fix init test (huggingface#25945)
* [VITS] Fix init test

* add flaky decorator

* style

* max attempts

Co-authored-by: Yih-Dar <2521628+ydshieh@users.noreply.github.com>

* style

---------

Co-authored-by: Yih-Dar <2521628+ydshieh@users.noreply.github.com>
2 people authored and blbadger committed Nov 8, 2023
1 parent a49b103 commit 89a36a0
Showing 1 changed file with 34 additions and 18 deletions.
tests/models/vits/test_modeling_vits.py
@@ -24,6 +24,7 @@
 
 from transformers import PretrainedConfig, VitsConfig
 from transformers.testing_utils import (
+    is_flaky,
     is_torch_available,
     require_torch,
     slow,
@@ -80,6 +81,10 @@ def __init__(
         duration_predictor_filter_channels=16,
         prior_encoder_num_flows=2,
         upsample_initial_channel=16,
+        upsample_rates=[8, 2],
+        upsample_kernel_sizes=[16, 4],
+        resblock_kernel_sizes=[3, 7],
+        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
     ):
         self.parent = parent
         self.batch_size = batch_size
@@ -96,6 +101,10 @@ def __init__(
         self.duration_predictor_filter_channels = duration_predictor_filter_channels
         self.prior_encoder_num_flows = prior_encoder_num_flows
         self.upsample_initial_channel = upsample_initial_channel
+        self.upsample_rates = upsample_rates
+        self.upsample_kernel_sizes = upsample_kernel_sizes
+        self.resblock_kernel_sizes = resblock_kernel_sizes
+        self.resblock_dilation_sizes = resblock_dilation_sizes
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2)
@@ -126,6 +135,10 @@ def get_config(self):
             duration_predictor_filter_channels=self.duration_predictor_filter_channels,
             posterior_encoder_num_wavenet_layers=self.num_hidden_layers,
             upsample_initial_channel=self.upsample_initial_channel,
+            upsample_rates=self.upsample_rates,
+            upsample_kernel_sizes=self.upsample_kernel_sizes,
+            resblock_kernel_sizes=self.resblock_kernel_sizes,
+            resblock_dilation_sizes=self.resblock_dilation_sizes,
         )
 
     def create_and_check_model_forward(self, config, inputs_dict):
@@ -135,7 +148,7 @@ def create_and_check_model_forward(self, config, inputs_dict):
         attention_mask = inputs_dict["attention_mask"]
 
         result = model(input_ids, attention_mask=attention_mask)
-        self.parent.assertEqual(result.waveform.shape, (self.batch_size, 11008))
+        self.parent.assertEqual((self.batch_size, 624), result.waveform.shape)
 
 
 @require_torch
@@ -168,30 +181,33 @@ def test_model_forward(self):
     def test_determinism(self):
         pass
 
-    # TODO: Fix me (ydshieh)
-    @unittest.skip("currently failing")
+    @is_flaky(
+        max_attempts=3,
+        description="Weight initialisation for the VITS conv layers sometimes exceeds the kaiming normal range",
+    )
     def test_initialization(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
 
+        uniform_init_parms = [
+            "emb_rel_k",
+            "emb_rel_v",
+            "conv_1",
+            "conv_2",
+            "conv_pre",
+            "conv_post",
+            "conv_proj",
+            "conv_dds",
+            "project",
+            "wavenet.in_layers",
+            "wavenet.res_skip_layers",
+            "upsampler",
+            "resblocks",
+        ]
+
         configs_no_init = _config_zero_init(config)
         for model_class in self.all_model_classes:
             model = model_class(config=configs_no_init)
             for name, param in model.named_parameters():
-                uniform_init_parms = [
-                    "emb_rel_k",
-                    "emb_rel_v",
-                    "conv_1",
-                    "conv_2",
-                    "conv_pre",
-                    "conv_post",
-                    "conv_proj",
-                    "conv_dds",
-                    "project",
-                    "wavenet.in_layers",
-                    "wavenet.res_skip_layers",
-                    "upsampler",
-                    "resblocks",
-                ]
                 if param.requires_grad:
                     if any(x in name for x in uniform_init_parms):
                         self.assertTrue(
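Note: the last hunk is cut off at the self.assertTrue( context line, and the commit also hoists uniform_init_parms out of the per-parameter loop so the list is built once instead of once per parameter. For context, transformers init tests typically zero every initializer range in the config (_config_zero_init) and then check that only the deliberately uniform-initialised layers keep nonzero statistics. A hedged sketch of that check follows; the helper name and exact rounding are illustrative, not a verbatim copy of this file.

import torch


def param_properly_initialized(name, param, uniform_init_parms):
    """Check one parameter of a model built from a zero-init config."""
    # Round away floating-point noise before comparing.
    mean = ((param.data.mean() * 1e9).round() / 1e9).item()
    if any(key in name for key in uniform_init_parms):
        # Uniformly initialised weights should keep their mean within [-1, 1].
        return -1.0 <= mean <= 1.0
    # Everything else should be exactly zero (weights) or one (e.g. norm scales).
    return mean in (0.0, 1.0)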

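On the fix itself: rather than skipping test_initialization outright, the commit marks it flaky and retries. transformers.testing_utils.is_flaky re-runs a failing test a bounded number of times; the sketch below shows the general retry-decorator pattern such a helper follows, not the library's exact implementation.

import functools


def is_flaky_sketch(max_attempts=5, description=None):
    """Re-run a flaky test up to `max_attempts` times before failing."""

    def decorator(test_func):
        @functools.wraps(test_func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_attempts):
                try:
                    return test_func(*args, **kwargs)
                except Exception as err:
                    if attempt == max_attempts - 1:
                        raise  # out of retries: surface the last failure
                    print(f"Flaky failure {attempt + 1}/{max_attempts}: {description or err}")

        return wrapper

    return decorator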
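Why the expected waveform length in create_and_check_model_forward shrinks from 11008 to 624: the tester now pins a much smaller HiFi-GAN-style decoder. Assuming the VitsConfig default upsample_rates of [8, 8, 2, 2], the overall upsampling factor drops from 256 to 16 samples per spectrogram frame (the exact lengths also depend on the predicted durations, so the ratio is not exactly 16x). A quick check of the arithmetic:

from math import prod

default_rates = [8, 8, 2, 2]  # assumed VitsConfig default (released checkpoints)
tester_rates = [8, 2]         # the slimmed-down test config in this diff

print(prod(default_rates))  # 256 waveform samples per spectrogram frame
print(prod(tester_rates))   # 16 -> far shorter waveforms, faster CI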