Update no trainer examples for QA and Semantic Segmentation #18474
Merged · 5 commits · Aug 4, 2022
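This PR replaces `accelerator.gather` with `accelerator.gather_for_metrics` in the no-trainer question answering and semantic segmentation examples. When the evaluation set does not divide evenly across processes, the distributed sampler duplicates a few samples so that every process receives a full final batch; `gather_for_metrics` drops those duplicates automatically, which lets the scripts delete their manual `samples_seen` bookkeeping. Judging by the variables in the first diff (`start_top_log_probs`, `cls_logits`), the three files touched appear to be `run_qa_beam_search_no_trainer.py` (its file header was lost in extraction), `run_qa_no_trainer.py`, and `run_semantic_segmentation_no_trainer.py`.

Below is a minimal sketch of the evaluation pattern the examples converge on. It is illustrative, not code from this PR, and assumes `accelerator`, `model`, `eval_dataloader`, and `metric` are already set up:

```python
import torch

model.eval()
for batch in eval_dataloader:
    with torch.no_grad():
        outputs = model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    # gather_for_metrics gathers across processes and also drops the samples
    # that were duplicated to pad the last uneven batch, so no manual
    # samples_seen bookkeeping is needed.
    predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
    metric.add_batch(predictions=predictions, references=references)
```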
examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py (file header inferred from the diff contents; lost in extraction)
@@ -698,7 +698,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
         step = 0
         # create a numpy array and fill it with -100.
         logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32)
-        # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather
+        # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather_for_metrics
         for i, output_logit in enumerate(start_or_end_logits):  # populate columns
             # We have to fill it such that we have to take the whole tensor and replace it on the newly created array
             # And after every iteration we have to change the step
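The body of this loop is collapsed in the diff view. As a rough, hypothetical reconstruction (the actual code may differ): each gathered batch of logits, possibly with its own padded sequence length, is copied into the preallocated `-100` array block by block, with `step` advancing by the batch size and the final block clamped so the duplicated samples from the last uneven batch cannot overflow the array:

```python
for i, output_logit in enumerate(start_or_end_logits):  # populate columns
    batch_size = output_logit.shape[0]
    cols = output_logit.shape[1]  # this batch's (padded) sequence length
    if step + batch_size < len(dataset):
        # copy the whole batch into the next block of rows
        logits_concat[step : step + batch_size, :cols] = output_logit
    else:
        # last block: keep only as many rows as the dataset actually has
        logits_concat[step:, :cols] = output_logit[: len(dataset) - step]
    step += batch_size
```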
@@ -876,11 +876,11 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
             end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
             cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)
 
-            all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy())
-            all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy())
-            all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy())
-            all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy())
-            all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy())
+            all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy())
+            all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy())
+            all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy())
+            all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
+            all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())
 
     max_len = max([x.shape[1] for x in all_end_top_log_probs])  # Get the max_length of the tensor
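A note on why `pad_across_processes` comes right before the gather: a cross-process gather concatenates along the batch dimension and requires all other dimensions to match, but each process may have padded its batch to a different sequence length. Right-padding everything to a common length with the `-100` sentinel (the same value the numpy buffer is pre-filled with) satisfies that shape contract. Roughly, and only as an illustration of the shapes involved:

```python
# Suppose process 0 holds logits of shape (8, 371) and process 1 holds (8, 384).
# pad_across_processes right-pads both to (8, 384) with -100 ...
start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100)
# ... so the gather can stack them into (16, 384) on every process,
# minus any duplicated samples that gather_for_metrics drops.
gathered = accelerator.gather_for_metrics(start_top_log_probs)
```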

@@ -936,11 +936,11 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
             end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
             cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)
 
-            all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy())
-            all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy())
-            all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy())
-            all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy())
-            all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy())
+            all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy())
+            all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy())
+            all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy())
+            all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
+            all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())
 
     max_len = max([x.shape[1] for x in all_end_top_log_probs])  # Get the max_length of the tensor
examples/pytorch/question-answering/run_qa_no_trainer.py (5 additions, 5 deletions)
@@ -715,7 +715,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
         step = 0
         # create a numpy array and fill it with -100.
         logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)
-        # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather
+        # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather_for_metrics
         for i, output_logit in enumerate(start_or_end_logits):  # populate columns
             # We have to fill it such that we have to take the whole tensor and replace it on the newly created array
             # And after every iteration we have to change the step
@@ -901,8 +901,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
             start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
             end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
 
-            all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())
-            all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())
+            all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
+            all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())
 
     max_len = max([x.shape[1] for x in all_start_logits])  # Get the max_length of the tensor

@@ -940,8 +940,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
             start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
             end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
 
-            all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())
-            all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())
+            all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
+            all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())
 
     max_len = max([x.shape[1] for x in all_start_logits])  # Get the max_length of the tensor
     # concatenate the numpy array
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py (file header inferred from the PR title and diff contents; lost in extraction)
@@ -605,7 +605,6 @@ def preprocess_val(example_batch):
 
     logger.info("***** Running evaluation *****")
     model.eval()
-    samples_seen = 0
     for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)):
         with torch.no_grad():
             outputs = model(**batch)
@@ -615,15 +614,7 @@ def preprocess_val(example_batch):
         )
         predictions = upsampled_logits.argmax(dim=1)
 
-        predictions, references = accelerator.gather((predictions, batch["labels"]))
-
-        # If we are in a multiprocess environment, the last batch has duplicates
-        if accelerator.num_processes > 1:
-            if step == len(eval_dataloader) - 1:
-                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
-                references = references[: len(eval_dataloader.dataset) - samples_seen]
-            else:
-                samples_seen += references.shape[0]
+        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
 
         metric.add_batch(
             predictions=predictions,
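This hunk is the heart of the change: the deleted lines gathered with `accelerator.gather` and then trimmed, by hand, the samples the distributed sampler duplicates to fill the last batch, tracking `samples_seen` across steps. `gather_for_metrics` performs that truncation internally (it was added to Accelerate around v0.12, so the examples need a reasonably recent release). Conceptually, the single new call replaces the whole deleted pattern:

```python
# Before: gather, then manually slice off the duplicated tail of the
# final batch (simplified from the deleted lines above).
predictions, references = accelerator.gather((predictions, batch["labels"]))
if accelerator.num_processes > 1 and step == len(eval_dataloader) - 1:
    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
    references = references[: len(eval_dataloader.dataset) - samples_seen]

# After: one call gathers and truncates.
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
```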