Fix hidden points in skeleton comparison in quality checks #7191

Merged (8 commits, Nov 28, 2023)
New file (changelog entry):

@@ -0,0 +1,4 @@
+### Fixed
+
+- Incorrect handling of the hidden points in skeletons in quality comparisons
Review comment (Contributor):

> AFAICS, the first change in quality_reports.py isn't related to skeletons, so it probably deserves another entry.

(https://github.com/opencv/cvat/pull/7191)
cvat/apps/quality_control/quality_reports.py (15 additions, 8 deletions):
@@ -648,6 +648,7 @@ def _match_segments(
             for a, _ in itertools.zip_longest(a_segms, range(max_anns), fillvalue=None)
         ]
     )
+    distances[~np.isfinite(distances)] = 1
     distances[distances > 1 - dist_thresh] = 1
 
     if a_segms and b_segms:
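A note on this first hunk (the change the reviewer comment above says is unrelated to skeletons): replacing non-finite entries with 1, the maximum distance, keeps NaN or inf values produced by a degenerate comparison from leaking into the matching step. A minimal standalone sketch of the clamping behavior, with a made-up matrix and threshold:

```python
import numpy as np

# Hypothetical distance matrix; NaN/inf stand in for degenerate comparisons
# (e.g., a similarity function that divided by zero).
distances = np.array([[0.2, np.nan], [np.inf, 0.7]])
dist_thresh = 0.5

distances[~np.isfinite(distances)] = 1  # undefined distance -> "no match"
distances[distances > 1 - dist_thresh] = 1  # too dissimilar -> "no match"

print(distances)  # [[0.2 1. ]
                  #  [1.  1. ]]
```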
@@ -686,7 +687,7 @@ def _match_segments(
     return matches, mispred, a_unmatched, b_unmatched
 
 
-def _OKS(a, b, sigma=0.1, bbox=None, scale=None, visibility=None):
+def _OKS(a, b, sigma=0.1, bbox=None, scale=None, visibility_a=None, visibility_b=None):
     """
     Object Keypoint Similarity metric.
     https://cocodataset.org/#keypoints-eval
@@ -697,20 +698,25 @@ def _OKS(a, b, sigma=0.1, bbox=None, scale=None, visibility=None):
     if len(p1) != len(p2):
         return 0
 
-    if visibility is None:
-        visibility = np.ones(len(p1))
+    if visibility_a is None:
+        visibility_a = np.full(len(p1), True)
     else:
-        visibility = np.asarray(visibility, dtype=float)
+        visibility_a = np.asarray(visibility_a, dtype=bool)
+
+    if visibility_b is None:
+        visibility_b = np.full(len(p2), True)
+    else:
+        visibility_b = np.asarray(visibility_b, dtype=bool)
 
     if not scale:
         if bbox is None:
             bbox = dm.ops.mean_bbox([a, b])
         scale = bbox[2] * bbox[3]
 
     dists = np.linalg.norm(p1 - p2, axis=1)
-    return np.sum(visibility * np.exp(-(dists**2) / (2 * scale * (2 * sigma) ** 2))) / np.sum(
-        visibility
-    )
+    return np.sum(
+        visibility_a * visibility_b * np.exp(-(dists**2) / (2 * scale * (2 * sigma) ** 2))
+    ) / np.sum(visibility_a | visibility_b, dtype=float)
 
 
 @define(kw_only=True)
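In effect, the updated metric is `sum_i(v_a[i] * v_b[i] * exp(-d_i**2 / (2 * s * (2 * sigma)**2))) / sum_i(v_a[i] | v_b[i])`: a keypoint contributes similarity only when it is visible in both annotations, while the denominator counts keypoints visible in at least one, so a point hidden on one side now lowers the score instead of being compared as if it were visible. A self-contained sketch of this behavior with toy points and scale (mirroring the diff, not calling the CVAT code):

```python
import numpy as np

def oks(p1, p2, visibility_a, visibility_b, scale, sigma=0.1):
    # Points hidden in either annotation contribute nothing to the numerator;
    # the denominator counts points visible in at least one annotation.
    visibility_a = np.asarray(visibility_a, dtype=bool)
    visibility_b = np.asarray(visibility_b, dtype=bool)
    dists = np.linalg.norm(np.asarray(p1, dtype=float) - np.asarray(p2, dtype=float), axis=1)
    return np.sum(
        visibility_a * visibility_b * np.exp(-(dists**2) / (2 * scale * (2 * sigma) ** 2))
    ) / np.sum(visibility_a | visibility_b, dtype=float)

# Three keypoints; the last one is hidden in the second annotation.
p1 = [(0, 0), (10, 0), (0, 10)]
p2 = [(0, 0), (10, 1), (0, 10)]
print(oks(p1, p2, [True] * 3, [True, True, False], scale=100.0))  # ~0.63
```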
@@ -727,7 +733,8 @@ def distance(self, a: dm.Points, b: dm.Points) -> float:
             b,
             sigma=self.sigma,
             bbox=bbox,
-            visibility=[v == dm.Points.Visibility.visible for v in a.visibility],
+            visibility_a=[v == dm.Points.Visibility.visible for v in a.visibility],
+            visibility_b=[v == dm.Points.Visibility.visible for v in b.visibility],
         )
 
 
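A hedged usage sketch of what the comparator now passes down (assuming datumaro's `Points.Visibility` enum as used in the diff; the annotation values are made up):

```python
import datumaro as dm

a = dm.Points([0, 0, 10, 0], visibility=[dm.Points.Visibility.visible] * 2)
b = dm.Points(
    [0, 0, 10, 0],
    visibility=[dm.Points.Visibility.visible, dm.Points.Visibility.hidden],
)

# Mirrors the two list comprehensions above: each annotation keeps its own mask.
visibility_a = [v == dm.Points.Visibility.visible for v in a.visibility]
visibility_b = [v == dm.Points.Visibility.visible for v in b.visibility]
print(visibility_a, visibility_b)  # [True, True] [True, False]
```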
tests/python/README.md (1 addition, 1 deletion):
@@ -85,7 +85,7 @@ for i, color in enumerate(colormap):
 To backup DB and data volume, please use commands below.
 
 ```console
-docker exec test_cvat_server_1 python manage.py dumpdata --indent 2 --natural-foreign --exclude=auth.permission --exclude=contenttypes > shared/assets/cvat_db/data.json
+docker exec test_cvat_server_1 python manage.py dumpdata --indent 2 --natural-foreign --exclude=auth.permission --exclude=contenttypes --exclude=django_rq > shared/assets/cvat_db/data.json
 docker exec test_cvat_server_1 tar -cjv /home/django/data > shared/assets/cvat_db/cvat_data.tar.bz2
 ```
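The newly added `--exclude=django_rq` flag presumably keeps transient queue-related records out of the fixture dump. For completeness, a hypothetical restore of such a dump using Django's standard `loaddata` reading from stdin (container name and path taken from the backup command above; the test suite's actual restore procedure may differ):

```console
cat shared/assets/cvat_db/data.json | docker exec -i test_cvat_server_1 python manage.py loaddata --format=json -
```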
tests/python/rest_api/test_quality_control.py (18 additions, 10 deletions):
@@ -104,8 +104,13 @@ def _test_list_reports_403(self, user, task_id, **kwargs):
     def test_can_list_quality_reports(self, admin_user, quality_reports):
         parent_report = next(r for r in quality_reports if r["task_id"])
         task_id = parent_report["task_id"]
-        reports = [parent_report] + [
-            r for r in quality_reports if r["parent_id"] == parent_report["id"]
+
+        reports = [
+            r
+            for r in quality_reports
+            if r["task_id"] == task_id
+            or r["parent_id"]
+            and quality_reports[r["parent_id"]]["task_id"] == task_id
         ]
 
         self._test_list_reports_200(admin_user, task_id, expected_data=reports)
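For context, a toy version of the fixture shape this comprehension assumes (ids and fields invented for illustration): task-level reports carry a `task_id`, job-level reports reference their parent task report via `parent_id`, and `quality_reports` is indexable by report id, as `quality_reports[r["parent_id"]]` requires:

```python
# Toy stand-in for the quality_reports fixture, keyed by report id.
quality_reports = {
    1: {"id": 1, "task_id": 42, "parent_id": None},  # task-level report
    2: {"id": 2, "task_id": None, "parent_id": 1},   # job-level child report
    3: {"id": 3, "task_id": 7, "parent_id": None},   # report for another task
}
task_id = 42

reports = [
    r
    for r in quality_reports.values()
    if r["task_id"] == task_id
    or r["parent_id"]
    and quality_reports[r["parent_id"]]["task_id"] == task_id
]
print([r["id"] for r in reports])  # [1, 2]
```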
@@ -680,16 +685,17 @@ def _get_field_samples(self, field: str) -> Tuple[Any, List[Dict[str, Any]]]:
             return job_id, job_conflicts
         elif field == "task_id":
             # This field is not included in the response
-            task_report = next(r for r in self.report_samples if r["task_id"])
-            task_reports = {task_report["id"]} | {
-                r["id"] for r in self.report_samples if r["parent_id"] == task_report["id"]
+            task_reports = [r for r in self.report_samples if r["task_id"]]
+            task_report_ids = {r["id"] for r in task_reports}
+            task_report_ids |= {
+                r["id"] for r in self.report_samples if r["parent_id"] in task_report_ids
             }
             task_conflicts = [
                 c
                 for c in self.samples
-                if self._get_field(c, self._map_field("report_id")) in task_reports
+                if self._get_field(c, self._map_field("report_id")) in task_report_ids
             ]
-            return task_report["task_id"], task_conflicts
+            return task_reports[0]["task_id"], task_conflicts
         elif field == "org_id":
             org_id = self.task_samples[
                 next(
@@ -1088,7 +1094,7 @@ def test_report_summary(self, task_id, tasks, jobs, quality_reports):
         assert summary["frame_share"] == summary["frame_count"] / task["size"]
 
     def test_unmodified_task_produces_the_same_metrics(self, admin_user, quality_reports):
-        old_report = next(r for r in quality_reports if r["task_id"])
+        old_report = max((r for r in quality_reports if r["task_id"]), key=lambda r: r["id"])
         task_id = old_report["task_id"]
 
         new_report = self.create_quality_report(admin_user, task_id)
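The switch from `next(...)` to `max(..., key=lambda r: r["id"])` here, and in the two tests below, presumably selects the most recent task-level report rather than an arbitrary one, which matters once the fixture contains several reports for the same task.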
@@ -1121,7 +1127,9 @@ def test_modified_task_produces_different_metrics(
     ):
         gt_job = next(j for j in jobs if j["type"] == "ground_truth")
         task_id = gt_job["task_id"]
-        old_report = next(r for r in quality_reports if r["task_id"] == task_id)
+        old_report = max(
+            (r for r in quality_reports if r["task_id"] == task_id), key=lambda r: r["id"]
+        )
         job_labels = [
             l
             for l in labels
@@ -1171,7 +1179,7 @@ def test_settings_affect_metrics(
     def test_settings_affect_metrics(
         self, admin_user, quality_reports, quality_settings, task_id, parameter
     ):
-        old_report = next(r for r in quality_reports if r["task_id"])
+        old_report = max((r for r in quality_reports if r["task_id"]), key=lambda r: r["id"])
         task_id = old_report["task_id"]
 
         settings = deepcopy(next(s for s in quality_settings if s["task_id"] == task_id))