Fix loading sampler state dict. #421
Merged (3 commits, Aug 6, 2022)
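
This change makes resuming from a batch-level checkpoint rely on the dataloader sampler's own state dict instead of manual batch skipping. The checkpoints written by save_checkpoint_with_global_batch_idx already store the sampler state (sampler=train_dl.sampler, visible in the diffs below), so the cur_batch_idx bookkeeping, which re-enumerated the dataloader and skipped the first N batches on resume, is removed. In addition, scan_pessimistic_batches_for_oom is now skipped when params.start_batch > 0, i.e. when resuming mid-epoch.

The sketch below illustrates the resume-via-sampler-state idea in isolation; ResumableSampler and its fields are illustrative stand-ins, not the lhotse/icefall API.

from typing import Dict, Iterator


class ResumableSampler:
    """Toy sampler that can serialize and restore its own position."""

    def __init__(self, num_batches: int) -> None:
        self.num_batches = num_batches
        self.cur_batch = 0  # index of the next batch to yield

    def __iter__(self) -> Iterator[int]:
        while self.cur_batch < self.num_batches:
            batch = self.cur_batch
            self.cur_batch += 1
            yield batch

    def state_dict(self) -> Dict[str, int]:
        return {"cur_batch": self.cur_batch}

    def load_state_dict(self, state: Dict[str, int]) -> None:
        self.cur_batch = state["cur_batch"]


# Simulate writing a checkpoint after batch 2 and resuming in a new process.
sampler = ResumableSampler(num_batches=5)
checkpoint = {}
for batch in sampler:
    if batch == 2:
        checkpoint["sampler"] = sampler.state_dict()
        break

resumed = ResumableSampler(num_batches=5)
resumed.load_state_dict(checkpoint["sampler"])
print(list(resumed))  # [3, 4]: continues where it stopped, no manual skipping

With a sampler like this restored from the checkpoint, the old "if batch_idx < cur_batch_idx: continue" fast-forward loop is redundant, which is what the per-recipe diffs below delete.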
egs/librispeech/ASR/pruned_transducer_stateless/train.py (26 changes: 8 additions & 18 deletions)

@@ -457,9 +457,6 @@ def load_checkpoint_if_available(
    if "cur_epoch" in saved_params:
        params["start_epoch"] = saved_params["cur_epoch"]

-    if "cur_batch_idx" in saved_params:
-        params["cur_batch_idx"] = saved_params["cur_batch_idx"]
-
    return saved_params


@@ -665,13 +662,7 @@ def maybe_log_weights(tag: str):
                global_step=params.batch_idx_train,
            )

-    cur_batch_idx = params.get("cur_batch_idx", 0)
-
    for batch_idx, batch in enumerate(train_dl):
-        if batch_idx < cur_batch_idx:
-            continue
-        cur_batch_idx = batch_idx
-
        params.batch_idx_train += 1
        batch_size = len(batch["supervisions"]["text"])

@@ -719,7 +710,6 @@ def maybe_log_weights(tag: str):
            params.batch_idx_train > 0
            and params.batch_idx_train % params.save_every_n == 0
        ):
-            params.cur_batch_idx = batch_idx
            save_checkpoint_with_global_batch_idx(
                out_dir=params.exp_dir,
                global_batch_idx=params.batch_idx_train,
@@ -729,7 +719,6 @@ def maybe_log_weights(tag: str):
                sampler=train_dl.sampler,
                rank=rank,
            )
-            del params.cur_batch_idx
            remove_checkpoints(
                out_dir=params.exp_dir,
                topk=params.keep_last_k,
@@ -884,13 +873,14 @@ def remove_short_and_long_utt(c: Cut):
    valid_cuts += librispeech.dev_other_cuts()
    valid_dl = librispeech.valid_dataloaders(valid_cuts)

-    scan_pessimistic_batches_for_oom(
-        model=model,
-        train_dl=train_dl,
-        optimizer=optimizer,
-        sp=sp,
-        params=params,
-    )
+    if params.start_batch <= 0:
+        scan_pessimistic_batches_for_oom(
+            model=model,
+            train_dl=train_dl,
+            optimizer=optimizer,
+            sp=sp,
+            params=params,
+        )

    for epoch in range(params.start_epoch, params.num_epochs):
        fix_random_seed(params.seed + epoch)
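
A brief, hedged note on the new guard above (the reasoning is not spelled out in the diff): scan_pessimistic_batches_for_oom does a dry forward/backward pass over the heaviest batches drawn via the training dataloader, so when params.start_batch > 0, i.e. the run resumes from a checkpoint whose sampler state has just been restored, the scan is skipped rather than risk disturbing that state or repeating work the original run already survived. A minimal sketch of the guard's shape, with illustrative names:

from typing import Callable


def maybe_scan_for_oom(start_batch: int, run_scan: Callable[[], None]) -> None:
    # start_batch > 0 means training resumes from a batch-level checkpoint,
    # so the expensive dry run is skipped.
    if start_batch <= 0:
        run_scan()


maybe_scan_for_oom(start_batch=0, run_scan=lambda: print("fresh start: scanning"))
maybe_scan_for_oom(start_batch=42, run_scan=lambda: print("resuming: not printed"))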
egs/librispeech/ASR/pruned_transducer_stateless2/train.py (13 changes: 1 addition & 12 deletions)

@@ -503,9 +503,6 @@ def load_checkpoint_if_available(
    if "cur_epoch" in saved_params:
        params["start_epoch"] = saved_params["cur_epoch"]

-    if "cur_batch_idx" in saved_params:
-        params["cur_batch_idx"] = saved_params["cur_batch_idx"]
-
    return saved_params


@@ -715,13 +712,7 @@ def train_one_epoch(

    tot_loss = MetricsTracker()

-    cur_batch_idx = params.get("cur_batch_idx", 0)
-
    for batch_idx, batch in enumerate(train_dl):
-        if batch_idx < cur_batch_idx:
-            continue
-        cur_batch_idx = batch_idx
-
        params.batch_idx_train += 1
        batch_size = len(batch["supervisions"]["text"])

@@ -756,7 +747,6 @@ def train_one_epoch(
            params.batch_idx_train > 0
            and params.batch_idx_train % params.save_every_n == 0
        ):
-            params.cur_batch_idx = batch_idx
            save_checkpoint_with_global_batch_idx(
                out_dir=params.exp_dir,
                global_batch_idx=params.batch_idx_train,
@@ -768,7 +758,6 @@ def train_one_epoch(
                scaler=scaler,
                rank=rank,
            )
-            del params.cur_batch_idx
            remove_checkpoints(
                out_dir=params.exp_dir,
                topk=params.keep_last_k,
@@ -935,7 +924,7 @@ def remove_short_and_long_utt(c: Cut):
    valid_cuts += librispeech.dev_other_cuts()
    valid_dl = librispeech.valid_dataloaders(valid_cuts)

-    if not params.print_diagnostics:
+    if params.start_batch <= 0 and not params.print_diagnostics:
        scan_pessimistic_batches_for_oom(
            model=model,
            train_dl=train_dl,
egs/librispeech/ASR/pruned_transducer_stateless3/train.py (17 changes: 9 additions & 8 deletions)

@@ -1040,14 +1040,15 @@ def run(rank, world_size, args):
    # It's time consuming to include `giga_train_dl` here
    # for dl in [train_dl, giga_train_dl]:
    for dl in [train_dl]:
-        scan_pessimistic_batches_for_oom(
-            model=model,
-            train_dl=dl,
-            optimizer=optimizer,
-            sp=sp,
-            params=params,
-            warmup=0.0 if params.start_epoch == 0 else 1.0,
-        )
+        if params.start_batch <= 0:
+            scan_pessimistic_batches_for_oom(
+                model=model,
+                train_dl=dl,
+                optimizer=optimizer,
+                sp=sp,
+                params=params,
+                warmup=0.0 if params.start_epoch == 0 else 1.0,
+            )

    scaler = GradScaler(enabled=params.use_fp16)
    if checkpoints and "grad_scaler" in checkpoints:
egs/librispeech/ASR/pruned_transducer_stateless4/train.py (13 changes: 1 addition & 12 deletions)

@@ -525,9 +525,6 @@ def load_checkpoint_if_available(
    if "cur_epoch" in saved_params:
        params["start_epoch"] = saved_params["cur_epoch"]

-    if "cur_batch_idx" in saved_params:
-        params["cur_batch_idx"] = saved_params["cur_batch_idx"]
-
    return saved_params


@@ -748,13 +745,7 @@ def train_one_epoch(

    tot_loss = MetricsTracker()

-    cur_batch_idx = params.get("cur_batch_idx", 0)
-
    for batch_idx, batch in enumerate(train_dl):
-        if batch_idx < cur_batch_idx:
-            continue
-        cur_batch_idx = batch_idx
-
        params.batch_idx_train += 1
        batch_size = len(batch["supervisions"]["text"])

@@ -796,7 +787,6 @@ def train_one_epoch(
            params.batch_idx_train > 0
            and params.batch_idx_train % params.save_every_n == 0
        ):
-            params.cur_batch_idx = batch_idx
            save_checkpoint_with_global_batch_idx(
                out_dir=params.exp_dir,
                global_batch_idx=params.batch_idx_train,
@@ -809,7 +799,6 @@ def train_one_epoch(
                scaler=scaler,
                rank=rank,
            )
-            del params.cur_batch_idx
            remove_checkpoints(
                out_dir=params.exp_dir,
                topk=params.keep_last_k,
@@ -984,7 +973,7 @@ def remove_short_and_long_utt(c: Cut):
    valid_cuts += librispeech.dev_other_cuts()
    valid_dl = librispeech.valid_dataloaders(valid_cuts)

-    if not params.print_diagnostics:
+    if params.start_batch <= 0 and not params.print_diagnostics:
        scan_pessimistic_batches_for_oom(
            model=model,
            train_dl=train_dl,
egs/librispeech/ASR/pruned_transducer_stateless5/train.py (13 changes: 1 addition & 12 deletions)

@@ -512,9 +512,6 @@ def load_checkpoint_if_available(
    if "cur_epoch" in saved_params:
        params["start_epoch"] = saved_params["cur_epoch"]

-    if "cur_batch_idx" in saved_params:
-        params["cur_batch_idx"] = saved_params["cur_batch_idx"]
-
    return saved_params


@@ -735,13 +732,7 @@ def train_one_epoch(

    tot_loss = MetricsTracker()

-    cur_batch_idx = params.get("cur_batch_idx", 0)
-
    for batch_idx, batch in enumerate(train_dl):
-        if batch_idx < cur_batch_idx:
-            continue
-        cur_batch_idx = batch_idx
-
        params.batch_idx_train += 1
        batch_size = len(batch["supervisions"]["text"])

@@ -787,7 +778,6 @@ def train_one_epoch(
            params.batch_idx_train > 0
            and params.batch_idx_train % params.save_every_n == 0
        ):
-            params.cur_batch_idx = batch_idx
            save_checkpoint_with_global_batch_idx(
                out_dir=params.exp_dir,
                global_batch_idx=params.batch_idx_train,
@@ -800,7 +790,6 @@ def train_one_epoch(
                scaler=scaler,
                rank=rank,
            )
-            del params.cur_batch_idx
            remove_checkpoints(
                out_dir=params.exp_dir,
                topk=params.keep_last_k,
@@ -973,7 +962,7 @@ def remove_short_and_long_utt(c: Cut):
    valid_cuts += librispeech.dev_other_cuts()
    valid_dl = librispeech.valid_dataloaders(valid_cuts)

-    if not params.print_diagnostics:
+    if params.start_batch <= 0 and not params.print_diagnostics:
        scan_pessimistic_batches_for_oom(
            model=model,
            train_dl=train_dl,
egs/librispeech/ASR/pruned_transducer_stateless6/train.py (13 changes: 1 addition & 12 deletions)

@@ -507,9 +507,6 @@ def load_checkpoint_if_available(
    if "cur_epoch" in saved_params:
        params["start_epoch"] = saved_params["cur_epoch"]

-    if "cur_batch_idx" in saved_params:
-        params["cur_batch_idx"] = saved_params["cur_batch_idx"]
-
    return saved_params


@@ -754,13 +751,7 @@ def train_one_epoch(

    tot_loss = MetricsTracker()

-    cur_batch_idx = params.get("cur_batch_idx", 0)
-
    for batch_idx, batch in enumerate(train_dl):
-        if batch_idx < cur_batch_idx:
-            continue
-        cur_batch_idx = batch_idx
-
        params.batch_idx_train += 1
        batch_size = len(batch["supervisions"]["text"])

@@ -802,7 +793,6 @@ def train_one_epoch(
            params.batch_idx_train > 0
            and params.batch_idx_train % params.save_every_n == 0
        ):
-            params.cur_batch_idx = batch_idx
            save_checkpoint_with_global_batch_idx(
                out_dir=params.exp_dir,
                global_batch_idx=params.batch_idx_train,
@@ -815,7 +805,6 @@ def train_one_epoch(
                scaler=scaler,
                rank=rank,
            )
-            del params.cur_batch_idx
            remove_checkpoints(
                out_dir=params.exp_dir,
                topk=params.keep_last_k,
@@ -990,7 +979,7 @@ def remove_short_and_long_utt(c: Cut):
    valid_cuts += librispeech.dev_other_cuts()
    valid_dl = librispeech.valid_dataloaders(valid_cuts)

-    if not params.print_diagnostics:
+    if params.start_batch <= 0 and not params.print_diagnostics:
        scan_pessimistic_batches_for_oom(
            model=model,
            train_dl=train_dl,