Update no trainer examples for QA and Semantic Segmentation (#18474)
* swag_no_trainer updated with gather_for_metrics

* Removed unused variable samples_seen

* Updated examples with gather_for_metrics
kiansierra authored Aug 4, 2022
1 parent d2704c4 commit 0bf1e1a
Showing 3 changed files with 17 additions and 26 deletions.
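In short: the no-trainer evaluation loops previously called accelerator.gather and then trimmed duplicated samples by hand; this commit switches them to accelerator.gather_for_metrics, which does the trimming itself. A minimal sketch of the resulting pattern, assuming accelerate >= 0.12.0 (where gather_for_metrics was introduced) and toy stand-ins for the real model and data:

```python
# Minimal sketch of the evaluation pattern this commit adopts (not the
# repo's exact code); gather_for_metrics requires accelerate >= 0.12.0.
import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator()

# Toy stand-ins for the real model and eval set.
model = torch.nn.Linear(4, 3)
dataset = TensorDataset(torch.randn(10, 4), torch.randint(0, 3, (10,)))
eval_dataloader = DataLoader(dataset, batch_size=4)
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

model.eval()
all_predictions, all_references = [], []
for inputs, labels in eval_dataloader:
    with torch.no_grad():
        logits = model(inputs)
    predictions = logits.argmax(dim=-1)
    # gather_for_metrics gathers across processes and, on the last batch,
    # drops the duplicate samples the distributed sampler adds for padding,
    # so no manual samples_seen bookkeeping is needed.
    predictions, references = accelerator.gather_for_metrics((predictions, labels))
    all_predictions.append(predictions.cpu())
    all_references.append(references.cpu())
```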
examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py
@@ -698,7 +698,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
step = 0
# create a numpy array and fill it with -100.
logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32)
- # Now that we have created the array, we populate it with the outputs gathered using accelerator.gather
+ # Now that we have created the array, we populate it with the outputs gathered using accelerator.gather_for_metrics
for i, output_logit in enumerate(start_or_end_logits):  # populate columns
# Fill it by copying each whole tensor into the newly created array,
# then advance the step after every iteration
@@ -876,11 +876,11 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)

- all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy())
- all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy())
- all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy())
- all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy())
- all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy())
+ all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy())
+ all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy())
+ all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy())
+ all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
+ all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor

@@ -936,11 +936,11 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)

- all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy())
- all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy())
- all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy())
- all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy())
- all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy())
+ all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy())
+ all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy())
+ all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy())
+ all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
+ all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor

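The hunks above pair gather_for_metrics with accelerator.pad_across_processes: each process can produce logits with a different sequence length, and tensors must share a shape before being gathered. A hedged sketch of that pad-then-gather step (start_logits here is a random placeholder for the model output):

```python
# Sketch of the pad-then-gather step from the QA loops above; in the real
# scripts, start_logits comes from the model inside the eval loop.
import torch
from accelerate import Accelerator

accelerator = Accelerator()

all_start_logits = []
start_logits = torch.randn(8, 128)  # (batch, seq_len); seq_len may differ per process
# Pad along the sequence dimension so every process has the same shape,
# using -100 so padded positions are ignored by the post-processing.
start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
# When looping over a prepared dataloader, gather_for_metrics also trims
# duplicated last-batch samples before metrics are computed.
all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
```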
10 changes: 5 additions & 5 deletions examples/pytorch/question-answering/run_qa_no_trainer.py
@@ -715,7 +715,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
step = 0
# create a numpy array and fill it with -100.
logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)
- # Now that we have created the array, we populate it with the outputs gathered using accelerator.gather
+ # Now that we have created the array, we populate it with the outputs gathered using accelerator.gather_for_metrics
for i, output_logit in enumerate(start_or_end_logits):  # populate columns
# Fill it by copying each whole tensor into the newly created array,
# then advance the step after every iteration
@@ -901,8 +901,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

- all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())
- all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())
+ all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
+ all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())

max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor

@@ -940,8 +940,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

- all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())
- all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())
+ all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
+ all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())

max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor
# concatenate the numpy array
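Both QA scripts then feed the gathered arrays into create_and_fill_np_array, which the comments above describe. A reconstruction sketch of that helper, with dataset_len standing in for len(dataset) (not the repo's verbatim code):

```python
# Sketch of how the gathered logits are assembled into one array.
import numpy as np

def create_and_fill_np_array(start_or_end_logits, dataset_len, max_len):
    # Create the output array and fill it with -100 (the padding value
    # used by pad_across_processes above).
    logits_concat = np.full((dataset_len, max_len), -100, dtype=np.float64)
    step = 0
    for output_logit in start_or_end_logits:  # one (batch, seq_len) block per eval step
        batch_size, cols = output_logit.shape
        # Copy each whole block into place, then advance the step.
        if step + batch_size < dataset_len:
            logits_concat[step : step + batch_size, :cols] = output_logit
        else:
            # The final gathered block may overshoot; keep only what fits.
            logits_concat[step:, :cols] = output_logit[: dataset_len - step]
        step += batch_size
    return logits_concat
```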
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
@@ -605,7 +605,6 @@ def preprocess_val(example_batch):

logger.info("***** Running evaluation *****")
model.eval()
- samples_seen = 0
for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)):
with torch.no_grad():
outputs = model(**batch)
@@ -615,15 +614,7 @@ def preprocess_val(example_batch):
)
predictions = upsampled_logits.argmax(dim=1)

- predictions, references = accelerator.gather((predictions, batch["labels"]))
-
- # If we are in a multiprocess environment, the last batch has duplicates
- if accelerator.num_processes > 1:
-     if step == len(eval_dataloader) - 1:
-         predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
-         references = references[: len(eval_dataloader.dataset) - samples_seen]
-     else:
-         samples_seen += references.shape[0]
+ predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))

metric.add_batch(
predictions=predictions,
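The removed branch existed because the distributed sampler pads the last batch so every process sees the same number of samples, forcing the script to track samples_seen and slice off the extras. gather_for_metrics performs that truncation internally at the end of a prepared dataloader; a toy sketch (my example, not from the commit) that checks the invariant:

```python
# Sketch verifying that gather_for_metrics makes the removed samples_seen
# logic redundant: the gathered total matches the true dataset size even
# when the last batch is padded in a multi-process run.
import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator()
dataset = TensorDataset(torch.arange(10))  # 10 samples; 10 % batch_size != 0
eval_dataloader = accelerator.prepare(DataLoader(dataset, batch_size=4))

seen = 0
for (batch,) in eval_dataloader:
    gathered = accelerator.gather_for_metrics(batch)
    seen += gathered.shape[0]
assert seen == len(dataset)  # no duplicates counted from last-batch padding
```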
