Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix missing gt_visib_fracts and max_num_estimates_per_image error #158

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion bop_toolkit_lib/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
meshlab_server_path = r"/path/to/meshlabserver.exe"

# Number of workers for the parallel evaluation of pose errors.
num_workers = 10
num_workers = 20

# use torch to calculate the errors
use_gpu = False
25 changes: 21 additions & 4 deletions bop_toolkit_lib/dataset_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ def get_model_params(datasets_path, dataset_name, model_type=None):
"hopev2": list(range(1, 29)),
"hot3d": list(range(1, 34)),
"handal": list(range(1, 41)),
"robi": list(range(1, 8)),
}[dataset_name]

# ID's of objects with ambiguous views evaluated using the ADI pose error
Expand All @@ -110,6 +111,7 @@ def get_model_params(datasets_path, dataset_name, model_type=None):
"hopev2": [],
"hot3d": [1, 2, 3, 5, 22, 24, 25, 29, 30, 32],
"handal": [26, 35, 36, 37, 38, 39, 40],
"robi": [1, 2, 4, 7],
}[dataset_name]

# T-LESS includes two types of object models, CAD and reconstructed.
Expand Down Expand Up @@ -175,9 +177,9 @@ def get_split_params(datasets_path, dataset_name, split, split_type=None):
depth_ext = ".tif"

p["im_modalities"] = ["rgb", "depth"]
# for Classic datasets, test modality is implicit...
# for Classic datasets, test modality is implicit...
p["eval_modality"] = None
# ...and only one set of annotation is present in the dataset
# ...and only one set of annotation is present in the dataset
# (e.g. scene_gt.json instead of scene_gt_rgb.json, scene_gt_gray1.json etc.)
modalities_have_separate_annotations = False
exts = None # has to be set if modalities_have_separate_annotations is True
Expand Down Expand Up @@ -371,7 +373,7 @@ def get_split_params(datasets_path, dataset_name, split, split_type=None):
p["depth_range"] = None # Not calculated yet.
p["azimuth_range"] = None # Not calculated yet.
p["elev_range"] = None # Not calculated yet.

# HOPEV2.
elif dataset_name == "hopev2":
p["scene_ids"] = {
Expand Down Expand Up @@ -440,13 +442,28 @@ def hot3d_eval_modality(scene_id):

supported_error_types = ["ad", "add", "adi", "mssd", "mspd"]

elif dataset_name[:4] == "robi":
rgb_ext = ".png"
# easy to make a mistake with hard-coded scene IDs; use get_present_scene_ids instead
base_path = join(datasets_path, dataset_name)
split_path = join(base_path, split)
if split_type is not None:
split_path += "_" + split_type
p["split_path"] = split_path
p["scene_ids"] = get_present_scene_ids(dp_split=p)
p["im_size"] = {
"realsense": (1280, 720),
"ensenso": (1280, 1024),
}.get(split_type, (1280, 1024))
if split_type in ["realsense", "ensenso"]:
rgb_ext = ".bmp"
else:
raise ValueError("Unknown BOP dataset ({}).".format(dataset_name))

base_path = join(datasets_path, dataset_name)
split_path = join(base_path, split)
if split_type is not None:
if split_type == "pbr":
if split_type == "pbr" and dataset_name not in ["robi"]:
p["scene_ids"] = list(range(50))
split_path += "_" + split_type

Expand Down
10 changes: 5 additions & 5 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
scipy==1.5.1
scipy
kiwisolver==1.3.1
matplotlib==2.2.4
matplotlib
imageio==2.5.0
pypng==0.0.19
Cython==0.29.24
Cython
PyOpenGL==3.1.0
triangle>=20190115.2
glumpy==1.1.0
glumpy
opencv-python>=4.3.0.36
Pillow>=8.2.0
git+https://github.com/MartinSmeyer/cocoapi.git@v1.0#subdirectory=PythonAPI
vispy>=0.6.5
webdataset>=0.1.62
numpy
numpy
6 changes: 4 additions & 2 deletions scripts/eval_calc_errors_gpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@
),
"num_workers": config.num_workers, # Number of parallel workers for the calculation of errors.
"eval_mode": "localization", # Options: 'localization', 'detection'.
"max_num_estimates_per_image": 100, # Maximum number of estimates per image. Only used for detection tasks.
}
################################################################################

Expand Down Expand Up @@ -330,7 +331,7 @@
est_per_object[obj_id]["gt_visib_fract"].append(
scene_gt_info[im_id][gt_id]["visib_fract"]
)
# BOP24 datasets do not have "cam_K" keys but "cam_model"
# BOP24 datasets do not have "cam_K" keys but "cam_model"
# TODO: handle the case of H3 dataset which are pinhole but have "cam_model" key (like HANDAL)
if "cam_K" in scene_camera[im_id]:
est_per_object[obj_id]["cam_K"].append(
Expand Down Expand Up @@ -398,10 +399,11 @@
"obj_id": obj_id,
"est_id": est_id,
"score": score,
"gt_visib_fract": gt_visib_fract,
"gt_visib_fracts": {},
"errors": {},
}
scene_errs[key_name]["errors"][gt_id] = [errors[i]]
scene_errs[key_name]["gt_visib_fracts"][gt_id] = [gt_visib_fract]

scene_errs = [v for k, v in scene_errs.items()]
del est_per_object
Expand Down