[Enhance]: Support automatically recording benchmark metrics to Excel and fix total_epochs #4693

Merged 1 commit on Mar 1, 2021
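The new --excel and --ncol options write the gathered metrics into an existing spreadsheet in addition to printing them. A possible invocation is sketched below; it assumes root and benchmark_json are the existing positional arguments of the script (they are referenced but not defined in this diff), and the workbook name and column index are placeholders:

python .dev_scripts/gather_benchmark_metric.py work_dirs/ benchmark.json --excel benchmark.xlsx --ncol 5

The updated workbook is saved next to the input with an _o suffix (e.g. benchmark_o.xlsx) rather than overwriting the original file.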
63 changes: 62 additions & 1 deletion .dev_scripts/gather_benchmark_metric.py
@@ -5,6 +5,16 @@
import mmcv
from gather_models import get_final_results

try:
    import xlrd
except ImportError:
    xlrd = None
try:
    import xlutils
    from xlutils.copy import copy
except ImportError:
    xlutils = None


def parse_args():
    parser = argparse.ArgumentParser(
@@ -19,6 +29,10 @@ def parse_args():
        '--out', type=str, help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    parser.add_argument(
        '--excel', type=str, help='input path of excel to be recorded')
    parser.add_argument(
        '--ncol', type=int, help='index of the column to be modified or appended')

    args = parser.parse_args()
    return args
@@ -27,6 +41,26 @@ def parse_args():
if __name__ == '__main__':
    args = parse_args()

    if args.excel:
        assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
                          'at the same time'
        if xlrd is None:
            raise RuntimeError(
                'xlrd is not installed, '
                'please use “pip install xlrd==1.2.0” to install')
        if xlutils is None:
            raise RuntimeError(
                'xlutils is not installed, '
                'please use “pip install xlutils==2.0.0” to install')
        readbook = xlrd.open_workbook(args.excel)
        sheet = readbook.sheet_by_name('Sheet1')
        sheet_info = {}
        total_nrows = sheet.nrows
        # map the value in column 0 (the config name) to its row index,
        # skipping the first 3 rows
        for i in range(3, sheet.nrows):
            sheet_info[sheet.row_values(i)[0]] = i
        xlrw = copy(readbook)
        table = xlrw.get_sheet(0)

    root_path = args.root
    metrics_out = args.out
    benchmark_json_path = args.benchmark_json
@@ -40,7 +74,7 @@
        if osp.exists(result_path):
            # 1 read config
            cfg = mmcv.Config.fromfile(config)
-            total_epochs = cfg.total_epochs
+            total_epochs = cfg.runner.max_epochs
            final_results = cfg.evaluation.metric
            if not isinstance(final_results, list):
                final_results = [final_results]
@@ -64,7 +98,30 @@
                if model_performance is None:
                    print(f'log file error: {log_json_path}')
                    continue
                for performance in model_performance:
                    if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
                        metric = round(model_performance[performance] * 100, 1)
                        model_performance[performance] = metric
                result_dict[config] = model_performance

                # update and append excel content
                if args.excel:
                    if 'AR@1000' in model_performance:
                        metrics = f'{model_performance["AR@1000"]}(AR@1000)'
                    elif 'segm_mAP' in model_performance:
                        metrics = f'{model_performance["bbox_mAP"]}/' \
                                  f'{model_performance["segm_mAP"]}'
                    else:
                        metrics = f'{model_performance["bbox_mAP"]}'

                    row_num = sheet_info.get(config, None)
                    if row_num:
                        table.write(row_num, args.ncol, metrics)
                    else:
                        table.write(total_nrows, 0, config)
                        table.write(total_nrows, args.ncol, metrics)
                        total_nrows += 1

            else:
                print(f'{config} not exist: {ckpt_path}')
        else:
@@ -79,3 +136,7 @@
        for config_name, metrics in result_dict.items():
            print(config_name, metrics)
        print('===================================')
    if args.excel:
        filename, suffix = osp.splitext(args.excel)
        xlrw.save(f'{filename}_o{suffix}')
        print(f'>>> Output {filename}_o{suffix}')
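For reference, a minimal standalone sketch of the xlrd + xlutils read-modify-write pattern used above; the workbook path, sheet name, three-row header offset, example key and column index mirror the assumptions baked into this script and are placeholders, not requirements of the libraries:

import xlrd                    # pin xlrd==1.2.0: the 2.x series dropped .xlsx support
from xlutils.copy import copy  # xlutils==2.0.0; produces a writable xlwt-backed copy

# read the existing workbook (xlrd sheets are read-only)
readbook = xlrd.open_workbook('benchmark.xlsx')
sheet = readbook.sheet_by_name('Sheet1')

# map the value in column 0 (the config name) to its row, skipping 3 header rows
row_of = {sheet.row_values(i)[0]: i for i in range(3, sheet.nrows)}

# write one cell into a writable copy and save it under a new name
writebook = copy(readbook)
table = writebook.get_sheet(0)
row = row_of.get('some_config.py', sheet.nrows)   # append after the last row if absent
table.write(row, 4, '40.2/36.5')                  # (row, column, value)
writebook.save('benchmark_o.xlsx')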