-
Notifications
You must be signed in to change notification settings - Fork 5
/
evaluation.py
122 lines (83 loc) · 3.69 KB
/
evaluation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
# Taken from https://github.com/carpenterlab/unet4nuclei/blob/master/unet4nuclei/utils/evaluation.py
# Aug 06 / 2018
import numpy as np
import pandas as pd
def intersection_over_union(ground_truth, prediction):
    """Compute the IoU matrix between labeled objects of two segmentation masks.

    Parameters
    ----------
    ground_truth : ndarray of integer labels, 0 = background.
    prediction : ndarray of integer labels, 0 = background.

    Returns
    -------
    IOU : 2D array of shape (n_true_objects, n_pred_objects) holding the
        intersection-over-union of every ground-truth/predicted object pair,
        with the background label excluded from both axes.
    """
    # Bin by the actual label values (explicit bin edges) rather than
    # `bins=<count>` equal-width bins: equal-width bins miscount whenever
    # label ids are non-contiguous (e.g. 0, 1, 5). For contiguous 0..n-1
    # labels the result is identical to the original equal-width binning.
    true_labels = np.unique(ground_truth)
    pred_labels = np.unique(prediction)
    true_edges = np.append(true_labels, true_labels[-1] + 1)
    pred_edges = np.append(pred_labels, pred_labels[-1] + 1)

    # Intersection: joint histogram of per-pixel label co-occurrence
    intersection = np.histogram2d(ground_truth.flatten(), prediction.flatten(),
                                  bins=(true_edges, pred_edges))[0]

    # Per-object pixel areas
    area_true = np.histogram(ground_truth, bins=true_edges)[0]
    area_pred = np.histogram(prediction, bins=pred_edges)[0]

    # Union via inclusion-exclusion, broadcast to a (true, pred) grid
    union = np.expand_dims(area_true, -1) + np.expand_dims(area_pred, 0) - intersection

    # Exclude the background row/column (assumes label 0 is present and first)
    intersection = intersection[1:, 1:]
    union = union[1:, 1:]

    # Guard against division by zero for empty union cells
    union[union == 0] = 1e-9
    IOU = intersection / union
    return IOU
def measures_at(threshold, IOU):
    """Compute detection metrics at a single IoU threshold.

    Parameters
    ----------
    threshold : float
        IoU cutoff above which a ground-truth/prediction pair is a match.
    IOU : 2D array (true objects x predicted objects) of IoU values with the
        background already excluded (as produced by intersection_over_union).

    Returns
    -------
    (f1, TP, FP, FN, official_score, precision, recall)
    """
    matches = IOU > threshold

    # A true object matched by exactly one prediction is a true positive; a
    # true object matched by nothing is a false negative; a predicted object
    # matching nothing is a false positive. (A true object matched by more
    # than one prediction counts as neither TP nor FN — upstream convention.)
    true_positives = np.sum(matches, axis=1) == 1
    false_positives = np.sum(matches, axis=0) == 0
    false_negatives = np.sum(matches, axis=1) == 0
    # NOTE: the original asserted np.less_equal(<bool array>, 1), which is
    # vacuously true for booleans (and stripped under -O); removed.

    TP, FP, FN = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)

    # The 1e-9 terms avoid division by zero when an image has no objects
    # or no detections.
    f1 = 2*TP / (2*TP + FP + FN + 1e-9)
    official_score = TP / (TP + FP + FN + 1e-9)  # DSB-2018 per-threshold score
    precision = TP / (TP + FP + 1e-9)
    recall = TP / (TP + FN + 1e-9)
    return f1, TP, FP, FN, official_score, precision, recall
# Compute Average Precision for all IoU thresholds
def compute_af1_results(ground_truth, prediction, results, image_name):
    """Append one metrics row per IoU threshold (0.50..0.95) for one image."""
    iou_matrix = intersection_over_union(ground_truth, prediction)

    # Mean best overlap per predicted object; zero when there are no true objects
    jaccard = np.max(iou_matrix, axis=0).mean() if iou_matrix.shape[0] > 0 else 0.0

    # F1 and related scores at thresholds 0.50, 0.55, ..., 0.95
    for threshold in np.arange(0.5, 1.0, 0.05):
        f1, tp, fp, fn, score, precision, recall = measures_at(threshold, iou_matrix)
        results.loc[len(results)] = {
            "Image": image_name,
            "Threshold": threshold,
            "F1": f1,
            "Jaccard": jaccard,
            "TP": tp,
            "FP": fp,
            "FN": fn,
            "Official_Score": score,
            "Precision": precision,
            "Recall": recall,
        }
    return results
# Count number of False Negatives at 0.7 IoU
def get_false_negatives(ground_truth, prediction, results, image_name, threshold=0.7):
    """Append one (Area, False_Negative) row per ground-truth object.

    For every non-background object in `ground_truth`, records its pixel area
    and whether it was missed (no prediction overlapping above `threshold`).
    `image_name` is accepted for interface symmetry with the other helpers
    but is not recorded in the output.
    """
    IOU = intersection_over_union(ground_truth, prediction)
    true_labels = np.unique(ground_truth)
    if len(true_labels) <= 1:
        # Background only: nothing to evaluate
        return results

    # Per-object pixel areas, binned by the actual label values so that
    # non-contiguous label ids are counted correctly (equal-width `bins=<count>`
    # histogramming miscounts them); drop the background bin.
    edges = np.append(true_labels, true_labels[-1] + 1)
    area_true = np.histogram(ground_truth, bins=edges)[0][1:]

    # Identify False Negatives: true objects with no match above threshold
    # (IOU rows already exclude background, so they align with area_true)
    matches = IOU > threshold
    false_negatives = np.sum(matches, axis=1) == 0
    data = np.asarray([
        area_true.copy(),
        np.array(false_negatives, dtype=np.int32)
    ])
    results = pd.concat([results, pd.DataFrame(data=data.T, columns=["Area", "False_Negative"])])
    return results
# Count the number of splits and merges
def get_splits_and_merges(ground_truth, prediction, results, image_name):
    """Append one row counting split and merged objects for one image.

    A *merge* is a predicted object overlapping (IoU > 0.1) more than one
    ground-truth object; a *split* is a ground-truth object overlapped by
    more than one prediction.
    """
    IOU = intersection_over_union(ground_truth, prediction)
    # 0.1 is a deliberately loose threshold: enough to say "touches the object"
    matches = IOU > 0.1
    merges = np.sum(matches, axis=0) > 1
    splits = np.sum(matches, axis=1) > 1
    r = {"Image_Name": image_name, "Merges": np.sum(merges), "Splits": np.sum(splits)}
    # Append at index len(results): the original used len(results)+1, which
    # skipped index 0 and was inconsistent with compute_af1_results.
    results.loc[len(results)] = r
    return results