From 32906949dc16db1246a9bdcba5d57ceb04d3e3ab Mon Sep 17 00:00:00 2001
From: Thibault N
Date: Fri, 3 Dec 2021 11:00:17 +0100
Subject: [PATCH] fix crop in abs pos

---
 aloscene/bounding_boxes_2d.py | 44 +++++++++++++++++++++--------------
 aloscene/points_2d.py         |  9 +++++--
 unittest/test_boxes.py        | 23 ++++++++++++++++--
 unittest/test_points2d.py     | 24 +++++++++++++++++++
 4 files changed, 78 insertions(+), 22 deletions(-)
 create mode 100644 unittest/test_points2d.py

diff --git a/aloscene/bounding_boxes_2d.py b/aloscene/bounding_boxes_2d.py
index 5e146f38..174bb518 100644
--- a/aloscene/bounding_boxes_2d.py
+++ b/aloscene/bounding_boxes_2d.py
@@ -185,7 +185,10 @@ def xyxy(self) -> BoundingBoxes2D:
         if tensor.boxes_format == "xcyc":
             labels = tensor.drop_children()
             # Convert from xcyc to xyxy
-            n_tensor = torch.cat([tensor[:, :2] - (tensor[:, 2:] / 2), tensor[:, :2] + (tensor[:, 2:] / 2)], dim=1,)
+            n_tensor = torch.cat(
+                [tensor[:, :2] - (tensor[:, 2:] / 2), tensor[:, :2] + (tensor[:, 2:] / 2)],
+                dim=1,
+            )
             n_tensor.boxes_format = "xyxy"
             n_tensor.set_children(labels)
             return n_tensor
@@ -195,7 +198,10 @@ def xyxy(self) -> BoundingBoxes2D:
             labels = tensor.drop_children()
             tensor.rename_(None)
             # Convert from yxyx to xyxy
-            n_tensor = torch.cat([tensor[:, :2].flip([1]), tensor[:, 2:].flip([1])], dim=1,)
+            n_tensor = torch.cat(
+                [tensor[:, :2].flip([1]), tensor[:, 2:].flip([1])],
+                dim=1,
+            )
             tensor.reset_names()
             n_tensor.reset_names()
             n_tensor.boxes_format = "xyxy"
@@ -236,7 +242,10 @@ def yxyx(self) -> BoundingBoxes2D:
             labels = tensor.drop_children()
             tensor.rename_(None)
             # Convert from xyxy to yxyx
-            yxyx_boxes = torch.cat([tensor[:, :2].flip([1]), tensor[:, 2:].flip([1])], dim=1,)
+            yxyx_boxes = torch.cat(
+                [tensor[:, :2].flip([1]), tensor[:, 2:].flip([1])],
+                dim=1,
+            )
             yxyx_boxes.reset_names()
             tensor.reset_names()
             yxyx_boxes.boxes_format = "yxyx"
@@ -480,7 +489,6 @@ def get_view(self, frame: Tensor = None, size: tuple = None, labels_set: str = N
                 color = (0, 1, 0)
             cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), color, 3)

-
         # Return the view to display
         return View(frame, **kwargs)

@@ -694,7 +702,10 @@ def _crop(self, H_crop: tuple, W_crop: tuple, **kwargs):

         # Put back the instance into the same state as before
         if absolute:
-            cropped_boxes = cropped_boxes.abs_pos(frame_size)
+            n_frame_size = ((H_crop[1] - H_crop[0]) * frame_size[0], (W_crop[1] - W_crop[0]) * frame_size[1])
+            cropped_boxes = cropped_boxes.abs_pos(n_frame_size)
+        else:
+            cropped_boxes.frame_size = None

         cropped_boxes = cropped_boxes.get_with_format(boxes_format)

@@ -715,7 +726,6 @@ def fit_to_padded_size(self):
         offset_y = (self.padded_size[0][0], self.padded_size[0][1])
         offset_x = (self.padded_size[1][0], self.padded_size[1][1])

-
         if not self.absolute:
             boxes = self.abs_pos((100, 100)).xcyc()
             h_shift = boxes.frame_size[0] * offset_y[0]
@@ -771,24 +781,22 @@ def _pad(self, offset_y: tuple, offset_x: tuple, pad_boxes: bool = True, **kwarg
             padded_size = n_boxes.padded_size

             prev_padded_size = (
-                (
-                    (padded_size[0][0] * pr_frame_size[0]),
-                    (padded_size[0][1] * pr_frame_size[0])
-                ),
-                (
-                    (padded_size[1][0] * pr_frame_size[1]),
-                    (padded_size[1][1] * pr_frame_size[1])
-                )
+                ((padded_size[0][0] * pr_frame_size[0]), (padded_size[0][1] * pr_frame_size[0])),
+                ((padded_size[1][0] * pr_frame_size[1]), (padded_size[1][1] * pr_frame_size[1])),
             )

             n_padded_size = (
                 (
-                    prev_padded_size[0][0] + offset_y[0] * (prev_padded_size[0][0] + prev_padded_size[0][1] + pr_frame_size[0]),
-                    prev_padded_size[0][1] + offset_y[1] * (prev_padded_size[0][0] + prev_padded_size[0][1] + pr_frame_size[0]),
+                    prev_padded_size[0][0]
+                    + offset_y[0] * (prev_padded_size[0][0] + prev_padded_size[0][1] + pr_frame_size[0]),
+                    prev_padded_size[0][1]
+                    + offset_y[1] * (prev_padded_size[0][0] + prev_padded_size[0][1] + pr_frame_size[0]),
                 ),
                 (
-                    prev_padded_size[1][0] + offset_x[0] * (prev_padded_size[1][0] + prev_padded_size[1][1] + pr_frame_size[1]),
-                    prev_padded_size[1][1] + offset_x[1] * (prev_padded_size[1][0] + prev_padded_size[1][1] + pr_frame_size[1]),
+                    prev_padded_size[1][0]
+                    + offset_x[0] * (prev_padded_size[1][0] + prev_padded_size[1][1] + pr_frame_size[1]),
+                    prev_padded_size[1][1]
+                    + offset_x[1] * (prev_padded_size[1][0] + prev_padded_size[1][1] + pr_frame_size[1]),
                 ),
             )

diff --git a/aloscene/points_2d.py b/aloscene/points_2d.py
index c2409b6b..ce9f5f1f 100644
--- a/aloscene/points_2d.py
+++ b/aloscene/points_2d.py
@@ -444,9 +444,10 @@ def _crop(self, H_crop: tuple, W_crop: tuple, **kwargs) -> Points2D:

         absolute = self.absolute
         frame_size = self.frame_size
+        points_format = self.points_format

-        # Get a new set of bbox
+        # Get a new set of points
         n_points = self.abs_pos((100, 100)).xy()

         # Retrieve crop coordinates
@@ -467,7 +468,11 @@ def _crop(self, H_crop: tuple, W_crop: tuple, **kwargs) -> Points2D:

         # Put back the instance into the same state as before
         if absolute:
-            cropped_points = cropped_points.abs_pos(frame_size)
+            n_frame_size = ((H_crop[1] - H_crop[0]) * frame_size[0], (W_crop[1] - W_crop[0]) * frame_size[1])
+            cropped_points = cropped_points.abs_pos(n_frame_size)
+        else:
+            cropped_points.frame_size = None
+        cropped_points = cropped_points.get_with_format(points_format)

         return cropped_points

diff --git a/unittest/test_boxes.py b/unittest/test_boxes.py
index c50c0bc7..891107a1 100644
--- a/unittest/test_boxes.py
+++ b/unittest/test_boxes.py
@@ -1,8 +1,8 @@
 # from aloscene.renderer import View
 from alodataset import WaymoDataset  # , Split
 import aloscene
-
 import torch
+import numpy as np

 waymo_dataset = WaymoDataset(sample=True)
 TEST_FRAME = 0
@@ -375,6 +375,24 @@ def test_boxes_slice():
     assert len(n_frame.boxes2d["gt_boxes_2d"].shape) == 2


+def test_crop_abs():
+    image = np.zeros((3, 843, 1500))
+    boxes = [[298, 105, 50, 50], [1250, 105, 50, 50], [298, 705, 50, 50], [1250, 705, 50, 50]]
+    frame = aloscene.Frame(image)
+    labels = aloscene.Labels([0, 0, 0, 0], labels_names=["boxes"])
+    boxes = aloscene.BoundingBoxes2D(
+        boxes, boxes_format="xcyc", frame_size=(frame.H, frame.W), absolute=True, labels=labels
+    )
+    frame.append_boxes2d(boxes)
+    # frame.get_view().render()
+    frame = frame.crop(H_crop=(0.0, 0.5), W_crop=(0.0, 0.5))
+    # frame.get_view().render()
+    # frame.get_view().render()
+    assert torch.allclose(frame.boxes2d[0].as_tensor(), boxes[0].as_tensor())
+    assert np.allclose(frame.boxes2d.frame_size[0], frame.HW[0])
+    assert np.allclose(frame.boxes2d.frame_size[1], frame.HW[1])
+
+
 if __name__ == "__main__":
     test_boxes_from_dt()
     test_boxes_rel_xcyc()
@@ -383,5 +401,6 @@ def test_boxes_slice():
     test_boxes_abs_xcyc()
     test_boxes_abs_yxyx()
     test_boxes_abs_xyxy()
-    # test_padded_boxes() # Outdated
+    # test_padded_boxes()  # Outdated
     test_boxes_slice()
+    test_crop_abs()
diff --git a/unittest/test_points2d.py b/unittest/test_points2d.py
new file mode 100644
index 00000000..fc4a078c
--- /dev/null
+++ b/unittest/test_points2d.py
@@ -0,0 +1,24 @@
+# from aloscene.renderer import View
+from alodataset import WaymoDataset  # , Split
+import aloscene
+import torch
+import numpy as np
+
+
+def test_crop_abs():
+    image = np.zeros((3, 843, 1500))
+    corners = [[298, 105], [1250, 105], [298, 705], [1250, 705]]
+    frame = aloscene.Frame(image)
+    labels = aloscene.Labels([0, 0, 0, 0], labels_names=["corners"])
+    corners = aloscene.Points2D(
+        corners, points_format="xy", frame_size=(frame.H, frame.W), absolute=True, labels=labels
+    )
+    frame.append_points2d(corners)
+    frame = frame.crop(H_crop=(0.0, 0.5), W_crop=(0.0, 0.5))
+    assert torch.allclose(frame.points2d[0].as_tensor(), corners[0].as_tensor())
+    assert np.allclose(frame.points2d.frame_size[0], frame.HW[0])
+    assert np.allclose(frame.points2d.frame_size[1], frame.HW[1])
+
+
+if __name__ == "__main__":
+    test_crop_abs()
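Note for reviewers (illustration only, not part of the patch): the heart of the fix is
that cropping a frame whose annotations are stored in absolute coordinates must also
rescale the stored frame_size by the crop ratios, instead of keeping the original size.
A minimal standalone sketch of that computation follows; the helper name
crop_frame_size is hypothetical and not part of the aloscene API.

    # Illustrative helper mirroring the n_frame_size computation added in _crop
    # (hypothetical name, not aloscene API).
    def crop_frame_size(frame_size, H_crop, W_crop):
        """Frame size after a crop given as relative (start, end) ranges.

        frame_size: (H, W) in pixels.
        H_crop, W_crop: (start, end) ratios in [0, 1].
        """
        return (
            (H_crop[1] - H_crop[0]) * frame_size[0],  # new height in pixels
            (W_crop[1] - W_crop[0]) * frame_size[1],  # new width in pixels
        )

    # Matches the new unit tests: an 843x1500 frame cropped to its top-left
    # quadrant keeps the boxes' absolute coordinates unchanged but must now
    # report a (421.5, 750.0) frame size (previously it kept (843, 1500)).
    assert crop_frame_size((843, 1500), (0.0, 0.5), (0.0, 0.5)) == (421.5, 750.0)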