Ros2 gem #295

Merged
merged 10 commits on Nov 23, 2022
165 changes: 105 additions & 60 deletions projects/opendr_ws/src/perception/scripts/object_detection_2d_gem.py
@@ -18,28 +18,27 @@
import torch
import message_filters
import cv2
import time
import numpy as np
from vision_msgs.msg import Detection2DArray
from sensor_msgs.msg import Image as ROS_Image
from opendr_bridge import ROSBridge
from opendr.perception.object_detection_2d import GemLearner
from opendr.perception.object_detection_2d import draw
from opendr.perception.object_detection_2d import draw_bounding_boxes
from opendr.engine.data import Image


class GemNode:

def __init__(self,
input_color_topic="/camera/color/image_raw",
input_infra_topic="/camera/infra/image_raw",
output_color_topic="/opendr/color_detection_annotated",
output_infra_topic="/opendr/infra_detection_annotated",
detection_annotations_topic="/opendr/detections",
device="cuda",
pts_color=None,
pts_infra=None,
):
def __init__(
self,
input_color_topic="/camera/color/image_raw",
input_infra_topic="/camera/infra/image_raw",
output_color_topic="/opendr/color_detection_annotated",
output_infra_topic="/opendr/infra_detection_annotated",
detection_annotations_topic="/opendr/detections",
device="cuda",
pts_color=None,
pts_infra=None,
):
"""
Creates a ROS Node for object detection with GEM
:param input_color_topic: Topic from which we are reading the input color image
@@ -66,7 +65,7 @@ def __init__(self,
opendr/perception/object_detection2d/utils module.
:type pts_infra: {list, numpy.ndarray}
"""
rospy.init_node('gem', anonymous=True)
rospy.init_node("gem", anonymous=True)
if output_color_topic is not None:
self.rgb_publisher = rospy.Publisher(output_color_topic, ROS_Image, queue_size=10)
else:
@@ -81,37 +80,91 @@
else:
self.detection_publisher = None
if pts_infra is None:
pts_infra = np.array([[478, 248], [465, 338], [458, 325], [468, 256],
[341, 240], [335, 310], [324, 321], [311, 383],
[434, 365], [135, 384], [67, 257], [167, 206],
[124, 131], [364, 276], [424, 269], [277, 131],
[41, 310], [202, 320], [188, 318], [188, 308],
[196, 241], [499, 317], [311, 164], [220, 216],
[435, 352], [213, 363], [390, 364], [212, 368],
[390, 370], [467, 324], [415, 364]])
pts_infra = np.array(
[
[478, 248],
[465, 338],
[458, 325],
[468, 256],
[341, 240],
[335, 310],
[324, 321],
[311, 383],
[434, 365],
[135, 384],
[67, 257],
[167, 206],
[124, 131],
[364, 276],
[424, 269],
[277, 131],
[41, 310],
[202, 320],
[188, 318],
[188, 308],
[196, 241],
[499, 317],
[311, 164],
[220, 216],
[435, 352],
[213, 363],
[390, 364],
[212, 368],
[390, 370],
[467, 324],
[415, 364],
]
)
rospy.logwarn(
'\nUsing default calibration values for pts_infra!' +
'\nThese are probably incorrect.' +
'\nThe correct values for pts_infra can be found by running get_color_infra_alignment.py.' +
'\nThis file is located in the opendr/perception/object_detection2d/utils module.'
"\nUsing default calibration values for pts_infra!"
+ "\nThese are probably incorrect."
+ "\nThe correct values for pts_infra can be found by running get_color_infra_alignment.py."
+ "\nThis file is located in the opendr/perception/object_detection2d/utils module."
)
if pts_color is None:
pts_color = np.array([[910, 397], [889, 572], [874, 552], [891, 411],
[635, 385], [619, 525], [603, 544], [576, 682],
[810, 619], [216, 688], [90, 423], [281, 310],
[193, 163], [684, 449], [806, 431], [504, 170],
[24, 538], [353, 552], [323, 550], [323, 529],
[344, 387], [961, 533], [570, 233], [392, 336],
[831, 610], [378, 638], [742, 630], [378, 648],
[742, 640], [895, 550], [787, 630]])
pts_color = np.array(
[
[910, 397],
[889, 572],
[874, 552],
[891, 411],
[635, 385],
[619, 525],
[603, 544],
[576, 682],
[810, 619],
[216, 688],
[90, 423],
[281, 310],
[193, 163],
[684, 449],
[806, 431],
[504, 170],
[24, 538],
[353, 552],
[323, 550],
[323, 529],
[344, 387],
[961, 533],
[570, 233],
[392, 336],
[831, 610],
[378, 638],
[742, 630],
[378, 648],
[742, 640],
[895, 550],
[787, 630],
]
)
rospy.logwarn(
'\nUsing default calibration values for pts_color!' +
'\nThese are probably incorrect.' +
'\nThe correct values for pts_color can be found by running get_color_infra_alignment.py.' +
'\nThis file is located in the opendr/perception/object_detection2d/utils module.'
"\nUsing default calibration values for pts_color!"
+ "\nThese are probably incorrect."
+ "\nThe correct values for pts_color can be found by running get_color_infra_alignment.py."
+ "\nThis file is located in the opendr/perception/object_detection2d/utils module."
)
# Object classes
self.classes = ['N/A', 'chair', 'cycle', 'bin', 'laptop', 'drill', 'rocker']
self.classes = ["N/A", "chair", "cycle", "bin", "laptop", "drill", "rocker"]

# Estimating Homography matrix for aligning infra with RGB
self.h, status = cv2.findHomography(pts_infra, pts_color)
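# The estimated homography maps infrared pixel coordinates into the color image plane;
# it is applied with cv2.warpPerspective() in callback() below so that detections are
# drawn in a common geometry for both modalities.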
@@ -121,11 +174,12 @@ def __init__(self,
# Initialize the detection estimation
model_backbone = "resnet50"

self.gem_learner = GemLearner(backbone=model_backbone,
num_classes=7,
device=device,
)
self.gem_learner.fusion_method = 'sc_avg'
self.gem_learner = GemLearner(
backbone=model_backbone,
num_classes=7,
device=device,
)
self.gem_learner.fusion_method = "sc_avg"
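# "sc_avg" presumably selects GEM's scalar-averaged fusion of the color and infrared branches.
# The call below fetches the pretrained GEM weights into the current working directory.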
self.gem_learner.download(path=".", verbose=True)

# Subscribers
@@ -153,20 +207,11 @@ def callback(self, msg_rgb, msg_ir):
"""
# Convert images to OpenDR standard
image_rgb = self.bridge.from_ros_image(msg_rgb).opencv()
image_ir_raw = self.bridge.from_ros_image(msg_ir, 'bgr8').opencv()
image_ir_raw = self.bridge.from_ros_image(msg_ir, "bgr8").opencv()
image_ir = cv2.warpPerspective(image_ir_raw, self.h, (image_rgb.shape[1], image_rgb.shape[0]))

# Perform inference on images
start = time.time()
boxes, w_sensor1, _ = self.gem_learner.infer(image_rgb, image_ir)
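# infer() returns the detections together with fusion weights; w_sensor1 is presumably the
# weight assigned to the color modality and is no longer needed now that draw_bounding_boxes()
# replaces the old draw() helper.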
end = time.time()

# Calculate fps
fps = 1 / (end - start)
self.fps_list.append(fps)
if len(self.fps_list) > 10:
del self.fps_list[0]
mean_fps = sum(self.fps_list) / len(self.fps_list)

# Annotate image and publish results:
if self.detection_publisher is not None:
@@ -176,25 +221,25 @@ def callback(self, msg_rgb, msg_ir):
# e.g., opendr_detection = self.bridge.from_ros_bounding_box_list(ros_detection)

if self.rgb_publisher is not None:
plot_rgb = draw(image_rgb, boxes, w_sensor1, mean_fps)
plot_rgb = draw_bounding_boxes(image_rgb, boxes, class_names=self.classes)
message = self.bridge.to_ros_image(Image(np.uint8(plot_rgb)))
self.rgb_publisher.publish(message)
if self.ir_publisher is not None:
plot_ir = draw(image_ir, boxes, w_sensor1, mean_fps)
plot_ir = draw_bounding_boxes(image_ir, boxes, class_names=self.classes)
message = self.bridge.to_ros_image(Image(np.uint8(plot_ir)))
self.ir_publisher.publish(message)


if __name__ == '__main__':
if __name__ == "__main__":
# Select the device for running inference
try:
if torch.cuda.is_available():
print("GPU found.")
device = 'cuda'
device = "cuda"
else:
print("GPU not found. Using CPU instead.")
device = 'cpu'
device = "cpu"
except:
device = 'cpu'
device = "cpu"
detection_estimation_node = GemNode(device=device)
detection_estimation_node.listen()
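
For reference, a minimal sketch of how the node could be launched, assuming the catkin workspace has been built and sourced, that the ROS package containing this script is named perception (as the file path suggests), and that a camera driver is already publishing the /camera/color/image_raw and /camera/infra/image_raw topics:

roscore
rosrun perception object_detection_2d_gem.py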