defaults:
  - dset: debug
  - hydra/job_logging: colorlog
  - hydra/hydra_logging: colorlog
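# Any option below can be overridden from the command line through Hydra,
# e.g. (assuming the training entry point is train.py):
#   python train.py lr=1e-3 epochs=200 lr_sched=plateau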
# Dataset related
sample_rate: 8000
segment: 4
stride: 1 # in seconds, how much to stride between training examples
pad: true # if training sample is too short, pad it
cv_maxlen: 8
validfull: 1 # use full-length samples at validation
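# With the values above, each training example should be a 4 s window
# (4 * 8000 = 32000 samples), and consecutive windows start 1 s apart.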
# Logging and printing; does not impact training
num_prints: 5
device: cuda
num_workers: 5
verbose: 0
show: 0 # just show the model and its size and exit
# Checkpointing, by default automatically load last checkpoint
checkpoint: True
continue_from: '' # Pass only the name of the exp, e.g. `exp_dset=wham`
# this arg is ignored when naming the exp!
continue_best: false
restart: False # Ignore existing checkpoints
checkpoint_file: checkpoint.th
history_file: history.json
samples_dir: samples
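# These paths are relative; since Hydra changes the working directory to
# hydra.run.dir (see below), they presumably end up inside each experiment's
# output folder.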
# Other stuff
seed: 2036
dummy: # set this to run the same exp twice under a different name
# Evaluation stuff
pesq: false # compute pesq?
eval_every: 10
keep_last: 0
# Optimization related
optim: adam
lr: 5e-4
beta2: 0.999
stft_loss: False
stft_sc_factor: .5
stft_mag_factor: .5
epochs: 100
batch_size: 4
max_norm: 5
# learning rate scheduling
lr_sched: step # can be either step or plateau
step:
  step_size: 2
  gamma: 0.98
plateau:
  factor: 0.5
  patience: 5
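# With the step schedule, the learning rate is presumably multiplied by
# gamma every step_size epochs (e.g. 5e-4 * 0.98 after epochs 2, 4, ...);
# the plateau schedule scales it by `factor` after `patience` epochs
# without improvement.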
# Models
model: swave # either demucs or dwave
swave:
  N: 128
  L: 8
  H: 128
  R: 6
  C: 2
  input_normalize: False
# Experiment launching, distributed
ddp: false
ddp_backend: nccl
rendezvous_file: ./rendezvous
# Internal config, don't set manually
rank:
world_size:
# Hydra config
hydra:
  run:
    dir: ./outputs/exp_${hydra.job.override_dirname}
  job:
    config:
      # configuration for the ${hydra.job.override_dirname} runtime variable
      override_dirname:
        kv_sep: '='
        item_sep: ','
        # Remove all paths, as the / in them would mess up things
        # Remove params that would not impact the training itself
        # Remove all slurm and submit params.
        # This is ugly I know...
        exclude_keys: [
          'hydra.job_logging.handlers.file.filename',
          'dset.train', 'dset.valid', 'dset.test', 'dset.mix_json', 'dset.mix_dir',
          'num_prints', 'continue_from',
          'device', 'num_workers', 'print_freq', 'restart', 'verbose',
          'log', 'ddp', 'ddp_backend', 'rendezvous_file', 'rank', 'world_size']
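        # So a run launched with, say, `lr=1e-3 swave.R=8` should end up in a
        # directory like ./outputs/exp_lr=1e-3,swave.R=8 (the excluded keys
        # above are dropped from the name).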
  job_logging:
    handlers:
      file:
        class: logging.FileHandler
        mode: w
        formatter: colorlog
        filename: trainer.log
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stderr
  hydra_logging:
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stderr