fix logit-to-multi-hot conversion in example (#26936)
* fix logit-to-multi-hot conversion

* add comments

* typo
ranchlai authored Oct 23, 2023
1 parent 093848d · commit f71c9cc
Showing 1 changed file with 5 additions and 2 deletions.
7 changes: 5 additions & 2 deletions examples/pytorch/text-classification/run_classification.py
@@ -655,7 +655,7 @@ def compute_metrics(p: EvalPrediction):
             preds = np.squeeze(preds)
             result = metric.compute(predictions=preds, references=p.label_ids)
         elif is_multi_label:
-            preds = np.array([np.where(p > 0.5, 1, 0) for p in preds])
+            preds = np.array([np.where(p > 0, 1, 0) for p in preds])  # convert logits to multi-hot encoding
             # Micro F1 is commonly used in multi-label classification
             result = metric.compute(predictions=preds, references=p.label_ids, average="micro")
         else:
@@ -721,7 +721,10 @@ def compute_metrics(p: EvalPrediction):
         if is_regression:
             predictions = np.squeeze(predictions)
         elif is_multi_label:
-            predictions = np.array([np.where(p > 0.5, 1, 0) for p in predictions])
+            # Convert logits to multi-hot encoding. We compare the logits to 0 instead of 0.5, because the sigmoid is not applied.
+            # You can also pass `preprocess_logits_for_metrics=lambda logits, labels: nn.functional.sigmoid(logits)` to the Trainer
+            # and set p > 0.5 below (less efficient in this case).
+            predictions = np.array([np.where(p > 0, 1, 0) for p in predictions])
         else:
             predictions = np.argmax(predictions, axis=1)
         output_predict_file = os.path.join(training_args.output_dir, "predict_results.txt")
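Why the threshold changes: sigmoid is monotonic and sigmoid(0) == 0.5, so comparing raw logits to 0 selects exactly the same labels as applying sigmoid first and comparing the resulting probabilities to 0.5. Below is a minimal standalone sketch verifying that equivalence; it is not part of the commit, and the `logits` values are made up for illustration.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Hypothetical raw model outputs for 2 examples x 3 labels (no sigmoid applied).
logits = np.array([[-1.2, 0.3, 2.5],
                   [0.0, -0.7, 1.1]])

# Threshold logits at 0 (the committed approach) vs. probabilities at 0.5.
multi_hot_from_logits = np.where(logits > 0, 1, 0)
multi_hot_from_probs = np.where(sigmoid(logits) > 0.5, 1, 0)

# Both strict thresholds agree everywhere, including at logit == 0,
# since sigmoid(0) == 0.5 fails "> 0.5" just as 0 fails "> 0".
assert np.array_equal(multi_hot_from_logits, multi_hot_from_probs)
print(multi_hot_from_logits)
# [[0 1 1]
#  [0 0 1]]

Comparing logits directly to 0 also skips an elementwise sigmoid pass over the whole prediction matrix, which is the efficiency point the committed comment makes about the `preprocess_logits_for_metrics` alternative.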