forked from gemelo-ai/vocos
-
Notifications
You must be signed in to change notification settings - Fork 0
/
vocos.yaml
90 lines (81 loc) · 2.35 KB
/
vocos.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
# pytorch_lightning==1.8.6
# LightningCLI config for training a vocos mel-spectrogram vocoder.
# Indentation restored (the scraped copy had it stripped, which made the file invalid YAML).
seed_everything: 4444

data:
  class_path: vocos.dataset.VocosDataModule
  init_args:
    train_params:
      filelist_path: /home/husein/ssd3/train-vocoder.txt
      sampling_rate: 24000
      num_samples: 16384
      batch_size: 64
      num_workers: 8
    val_params:
      filelist_path: /home/husein/ssd3/test-vocoder.txt
      sampling_rate: 24000
      num_samples: 48384
      batch_size: 16
      num_workers: 8

model:
  class_path: vocos.experiment.VocosExp
  init_args:
    sample_rate: 24000
    # Written as 5.0e-4 (not 5e-4): YAML 1.1 loaders such as PyYAML require a
    # mantissa dot to resolve scientific notation as a float; bare 5e-4 loads as a string.
    initial_learning_rate: 5.0e-4
    mel_loss_coeff: 45
    mrd_loss_coeff: 0.1
    num_warmup_steps: 0  # Optimizers warmup steps
    pretrain_mel_steps: 0  # 0 means GAN objective from the first iteration

    # automatic evaluation
    evaluate_utmos: true
    evaluate_pesq: true
    # NOTE(review): key is spelled "periodicty" (sic) upstream in vocos — do not
    # correct the spelling here or jsonargparse will reject it as an unknown key.
    evaluate_periodicty: true

    feature_extractor:
      class_path: vocos.feature_extractors.MelSpectrogramFeatures
      init_args:
        sample_rate: 24000
        n_fft: 1024
        hop_length: 256
        n_mels: 100
        padding: center

    backbone:
      class_path: vocos.models.VocosBackbone
      init_args:
        input_channels: 100
        dim: 512
        intermediate_dim: 1536
        num_layers: 8

    head:
      class_path: vocos.heads.ISTFTHead
      init_args:
        dim: 512
        n_fft: 1024
        hop_length: 256
        padding: center

trainer:
  logger:
    class_path: pytorch_lightning.loggers.WandbLogger
    init_args:
      name: malaysian_vocos_mel
      project: malaysian_vocos_mel
  callbacks:
    - class_path: pytorch_lightning.callbacks.LearningRateMonitor
    - class_path: pytorch_lightning.callbacks.ModelSummary
      init_args:
        max_depth: 2
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        monitor: step
        filename: model_{epoch}_{step}
        save_top_k: 3
        save_last: true
        every_n_train_steps: 200
        dirpath: malaysian_vocos_mel
    - class_path: vocos.helpers.GradNormCallback

  # Lightning calculates max_steps across all optimizer steps (rather than number of batches)
  # This equals to 1M steps per generator and 1M per discriminator
  max_steps: 2000000

  # You might want to limit val batches when evaluating all the metrics, as they are time-consuming
  limit_val_batches: 10
  accelerator: gpu
  strategy: ddp
  # NOTE(review): `gpus:` is deprecated in PL 1.8 in favour of `devices:`, but is
  # still accepted by 1.8.6 (the version this config pins) — kept for compatibility.
  gpus: 2
  log_every_n_steps: 10