evaluation_method.py
#!/usr/bin/env python
import argparse
import os
import sys
from time import time

import numpy as np
import pandas as pd

from lvos.evaluation import LVOSEvaluation as LVOSEvaluation_SP
from lvos.evaluation_mp import LVOSEvaluation as LVOSEvaluation_MP
from lvos.evaluation_mt import LVOSEvaluation as LVOSEvaluation_MT

default_lvos_path = None

time_start = time()
parser = argparse.ArgumentParser()
parser.add_argument(
    "--lvos_path",
    type=str,
    help="Path to the LVOS folder containing the JPEGImages, Annotations, "
    "ImageSets, and Annotations_unsupervised folders",
    required=False,
    default=default_lvos_path,
)
parser.add_argument(
    "--set", type=str, help="Subset on which to evaluate the results", default="valid"
)
parser.add_argument(
    "--mp_nums",
    type=int,
    default=64,
    help="Number of worker processes (or threads)",
)
parser.add_argument(
    "--m_class",
    type=str,
    default="mp",
    help="Use multiprocessing ('mp') or multithreading ('mt').",
    choices=["mp", "mt"],
)
parser.add_argument(
    "--use_cache",
    action="store_true",
    help="Whether to use cached files to speed up evaluation. Recommended to save time.",
    default=False,
)
parser.add_argument(
    "--task",
    type=str,
    help="Task for which to evaluate the results",
    default="semi-supervised",
    choices=["semi-supervised", "unsupervised_multiple", "unsupervised_single"],
)
parser.add_argument(
    "--results_path",
    type=str,
    help="Path to the folder containing the sequence folders",
    required=True,
)
args, _ = parser.parse_known_args()
# Select the evaluation backend: single process, multiprocessing, or multithreading.
if args.mp_nums <= 1:
    args.mp_nums = 1
    LVOSEvaluation = LVOSEvaluation_SP
    print("Evaluating with a single process.")
else:
    if args.m_class == "mp":
        LVOSEvaluation = LVOSEvaluation_MP
        print(f"Evaluating with multiprocessing using {args.mp_nums} processes.")
    else:
        LVOSEvaluation = LVOSEvaluation_MT
        print(f"Evaluating with multithreading using {args.mp_nums} threads.")
csv_name_global = f"global_results-{args.set}.csv"
csv_name_per_sequence = f"per-sequence_results-{args.set}.csv"

# Check whether the method has been evaluated before; if so, read the saved
# results, otherwise compute them.
csv_name_global_path = os.path.join(args.results_path, csv_name_global)
csv_name_per_sequence_path = os.path.join(args.results_path, csv_name_per_sequence)
if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path):
    print("Using precomputed results...")
    table_g = pd.read_csv(csv_name_global_path)
    table_seq = pd.read_csv(csv_name_per_sequence_path)
else:
print(f"Evaluating sequences for the {args.task} task...")
# Create dataset and evaluate
if args.mp_nums <= 1:
dataset_eval = LVOSEvaluation(
lvos_root=args.lvos_path,
task=args.task,
gt_set=args.set,
use_cache=args.use_cache,
)
else:
dataset_eval = LVOSEvaluation(
lvos_root=args.lvos_path,
task=args.task,
gt_set=args.set,
mp_procs=args.mp_nums,
use_cache=args.use_cache,
)
metrics_res, metrics_res_seen, metrics_res_unseen = dataset_eval.evaluate(
args.results_path
)
J, F, V = metrics_res["J"], metrics_res["F"], metrics_res["V"]
J_seen, F_seen, V_seen = (
metrics_res_seen["J"],
metrics_res_seen["F"],
metrics_res_seen["V"],
)
J_unseen, F_unseen, V_unseen = (
metrics_res_unseen["J"],
metrics_res_unseen["F"],
metrics_res_unseen["V"],
)
    # Generate dataframe for the general results
    if args.task in ("semi-supervised", "unsupervised_multiple", "unsupervised_single"):
        g_measures = [
            "J&F-Mean",
            "J-Mean",
            "J-seen-Mean",
            "J-unseen-Mean",
            "F-Mean",
            "F-seen-Mean",
            "F-unseen-Mean",
            "V-Mean",
            "V-seen-Mean",
            "V-unseen-Mean",
        ]
        # J&F-Mean is the average of the four per-split means:
        # seen J, seen F, unseen J, and unseen F.
        final_mean = (
            (np.mean(J_seen["M"]) + np.mean(F_seen["M"]))
            + (np.mean(J_unseen["M"]) + np.mean(F_unseen["M"]))
        ) / 4.0
        g_res = np.array(
            [
                final_mean,
                (np.mean(J_seen["M"]) + np.mean(J_unseen["M"])) / 2,
                np.mean(J_seen["M"]),
                np.mean(J_unseen["M"]),
                (np.mean(F_seen["M"]) + np.mean(F_unseen["M"])) / 2,
                np.mean(F_seen["M"]),
                np.mean(F_unseen["M"]),
                (np.mean(V_seen["M"]) + np.mean(V_unseen["M"])) / 2,
                np.mean(V_seen["M"]),
                np.mean(V_unseen["M"]),
            ]
        )
    else:
        raise NotImplementedError("Unknown task.")
    g_res = np.reshape(g_res, [1, len(g_res)])
    table_g = pd.DataFrame(data=g_res, columns=g_measures)
    with open(csv_name_global_path, "w") as f:
        table_g.to_csv(f, index=False, float_format="%.3f")
    print(f"Global results saved in {csv_name_global_path}")

    # Generate a dataframe for the per-sequence results
    seq_names = list(J["M_per_object"].keys())
    seq_measures = ["Sequence", "J-Mean", "F-Mean", "V-Mean"]
    J_per_object = [J["M_per_object"][x] for x in seq_names]
    F_per_object = [F["M_per_object"][x] for x in seq_names]
    V_per_object = [V["M_per_object"][x] for x in seq_names]
    table_seq = pd.DataFrame(
        data=list(zip(seq_names, J_per_object, F_per_object, V_per_object)),
        columns=seq_measures,
    )
    with open(csv_name_per_sequence_path, "w") as f:
        table_seq.to_csv(f, index=False, float_format="%.3f")
    print(f"Per-sequence results saved in {csv_name_per_sequence_path}")
# Print the results
sys.stdout.write(
    f"--------------------------- Global results for {args.set} ---------------------------\n"
)
print(table_g.to_string(index=False))
sys.stdout.write(f"\n---------- Per sequence results for {args.set} ----------\n")
print(table_seq.to_string(index=False))

total_time = time() - time_start
sys.stdout.write(f"\nTotal time: {total_time:.2f}s\n")