
Commit

Add correct/incorrect to summary card for classification/non-binary evaluations
manivoxel51 committed Jan 14, 2025
1 parent a42cd44 commit c0fd4b7
Showing 2 changed files with 35 additions and 0 deletions.
@@ -210,6 +210,8 @@ export default function Evaluation(props: EvaluationProps) {
  const isBinaryClassification =
    evaluationType === "classification" && evaluationMethod === "binary";
  const showTpFpFn = isObjectDetection || isBinaryClassification;
  const isNoneBinaryClassification =
    isClassification && evaluationMethod !== "binary";
  const infoRows = [
    {
      id: "evaluation_key",
@@ -465,6 +467,24 @@ export default function Evaluation(props: EvaluationProps) {
        : false,
      hide: !showTpFpFn,
    },
    {
      id: true,
      property: "Correct",
      value: evaluationMetrics.num_correct,
      compareValue: compareEvaluationMetrics.num_correct,
      lesserIsBetter: false,
      filterable: true,
      hide: !isNoneBinaryClassification,
    },
    {
      id: false,
      property: "Incorrect",
      value: evaluationMetrics.num_incorrect,
      compareValue: compareEvaluationMetrics.num_incorrect,
      lesserIsBetter: false,
      filterable: true,
      hide: !isNoneBinaryClassification,
    },
  ];

  const perClassPerformance = {};
15 changes: 15 additions & 0 deletions plugins/panels/model_evaluation/__init__.py
@@ -325,6 +325,11 @@ def get_mask_targets(self, dataset, gt_field):

        return None

    def get_correct_incorrect(self, results):
        correct = np.count_nonzero(results.ypred == results.ytrue)
        incorrect = np.count_nonzero(results.ypred != results.ytrue)
        return correct, incorrect

    def load_evaluation(self, ctx):
        view_state = ctx.panel.get_state("view") or {}
        eval_key = view_state.get("key")
@@ -362,6 +367,16 @@ def load_evaluation(self, ctx):
            )
            metrics["mAP"] = self.get_map(results)
            metrics["mAR"] = self.get_mar(results)

            if (
                info.config.type == "classification"
                and info.config.method != "binary"
            ):
                (
                    metrics["num_correct"],
                    metrics["num_incorrect"],
                ) = self.get_correct_incorrect(results)

            evaluation_data = {
                "metrics": metrics,
                "info": serialized_info,
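For context, a minimal sketch of what the new get_correct_incorrect helper computes; the label arrays below are made up for illustration, and the only assumption is that results.ypred and results.ytrue are aligned NumPy arrays of predicted and ground-truth labels for the evaluated samples:

import numpy as np

# Hypothetical prediction/ground-truth pairs for a 4-sample classification run
ytrue = np.array(["cat", "dog", "bird", "dog"])
ypred = np.array(["cat", "cat", "bird", "dog"])

# Element-wise comparison yields boolean arrays; count_nonzero tallies them
correct = np.count_nonzero(ypred == ytrue)    # 3
incorrect = np.count_nonzero(ypred != ytrue)  # 1

These two counts are what the panel stores as metrics["num_correct"] and metrics["num_incorrect"] and what the summary card displays as the "Correct" and "Incorrect" rows for non-binary classification evaluations.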
