From 1939f63e2d77ad7de68fc8eaa7eedb10c10ed55f Mon Sep 17 00:00:00 2001 From: aselimc Date: Tue, 31 Jan 2023 15:31:41 +0100 Subject: [PATCH 01/13] feat(efficientLPS): Re-implemented the ROS2 nodes for EfficientLPS and PCL2 publisher --- .../src/opendr_bridge/opendr_bridge/bridge.py | 75 +++++++- ...anoptic_segmentation_efficient_lps_node.py | 174 ++++++++++++++++++ .../point_cloud_2_publisher_node.py | 105 +++++++++++ .../src/opendr_perception/setup.py | 4 +- 4 files changed, 356 insertions(+), 2 deletions(-) create mode 100644 projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py create mode 100644 projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py diff --git a/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py b/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py index ef5ac06698..1e1d8a1b46 100644 --- a/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py +++ b/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import struct import numpy as np from opendr.engine.data import Image, PointCloud, Timeseries from opendr.engine.target import ( @@ -20,7 +21,10 @@ ) from cv_bridge import CvBridge from std_msgs.msg import String, ColorRGBA, Header -from sensor_msgs.msg import Image as ImageMsg, PointCloud as PointCloudMsg, ChannelFloat32 as ChannelFloat32Msg +from sensor_msgs.msg import ( + Image as ImageMsg, PointCloud as PointCloudMsg, ChannelFloat32 as ChannelFloat32Msg, + PointField as PointFieldMsg, PointCloud2 as PointCloud2Msg +) from vision_msgs.msg import ( Detection2DArray, Detection2D, BoundingBox2D, ObjectHypothesisWithPose, Detection3D, Detection3DArray, BoundingBox3D as BoundingBox3DMsg, @@ -33,6 +37,7 @@ Point ) from opendr_interface.msg import OpenDRPose2D, OpenDRPose2DKeypoint, OpenDRPose3D, OpenDRPose3DKeypoint +from sensor_msgs import point_cloud2 as pc2 class ROS2Bridge: @@ -362,6 +367,74 @@ def to_ros_point_cloud(self, point_cloud, time_stamp): return ros_point_cloud + def from_ros_point_cloud2(self, point_cloud: PointCloud2Msg): + """ + Converts a ROS PointCloud2 message into an OpenDR PointCloud + :param point_cloud: ROS PointCloud2 to be converted + :type point_cloud: sensor_msgs.msg.PointCloud2 + :return: OpenDR PointCloud + :rtype: engine.data.PointCloud + """ + + points = pc2.read_points_list(point_cloud, field_names=[f.name for f in point_cloud.fields]) + result = PointCloud(points) + + return result + + def to_ros_point_cloud2(self, point_cloud: PointCloud, timestamp: str, channels: str=None): + + """ + Converts an OpenDR PointCloud message into a ROS PointCloud2 + :param: OpenDR PointCloud + :type: engine.data.PointCloud + :param: channels to be included in the PointCloud2 message. 
Available channels names are ["rgb", "rgba"] + :type: str + :return message: ROS PointCloud2 + :rtype message: sensor_msgs.msg.PointCloud2 + """ + + header = Header() + header.stamp = timestamp + header.frame_id = "base_link" + + channel_count = point_cloud.data.shape[-1] - 3 + + fields = [PointFieldMsg(name="x", offset=0, datatype=PointFieldMsg.FLOAT32, count=1), + PointFieldMsg(name="y", offset=4, datatype=PointFieldMsg.FLOAT32, count=1), + PointFieldMsg(name="z", offset=8, datatype=PointFieldMsg.FLOAT32, count=1)] + if channels == 'rgb' or channels == 'rgba': + fields.append(PointFieldMsg(name="rgba", offset=12, datatype=PointFieldMsg.UINT32, count=1)) + else: + for i in range(channel_count): + fields.append(PointFieldMsg(name="channel_" + str(i), offset=12 + i * 4, datatype=PointFieldMsg.FLOAT32, count=1)) + + points = [] + + for point in point_cloud.data: + pt = [point[0], point[1], point[2]] + for channel in range(channel_count): + if channels == 'rgb' or channels == 'rgba': + r = int(point[3]) + g = int(point[4]) + b = int(point[5]) + + if channels == 'rgb': + a = 255 + else: + a = int(point[6]) + + rgba = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0] + pt.append(rgba) + break + + else: + pt.append(point[3 + channel]) + points.append(pt) + + ros_point_cloud2 = pc2.create_cloud(header, fields, points) + + return ros_point_cloud2 + def from_ros_boxes_3d(self, ros_boxes_3d): """ Converts a ROS2 Detection3DArray message into an OpenDR BoundingBox3D object. diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py new file mode 100644 index 0000000000..d887ed37ae --- /dev/null +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +# Copyright 2020-2022 OpenDR European Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from pathlib import Path +import argparse +from typing import Optional, List + +import numpy as np +import rclpy +from rclpy.node import Node +import matplotlib +from sensor_msgs.msg import PointCloud2 as ROS_PointCloud2 + +from opendr_ros2_bridge import ROS2Bridge +from opendr.perception.panoptic_segmentation import EfficientLpsLearner + +# Avoid having a matplotlib GUI in a separate thread in the visualize() function +matplotlib.use('Agg') + + +class EfficientLpsNode(Node): + + def __init__(self, + input_pcl_topic: str, + checkpoint: str, + output_rgb_visualization_topic: Optional[str] = None + ): + """ + Initialize the EfficientLPS ROS node and create an instance of the respective learner class. + :param input_pcl_topic: The name of the input point cloud topic. + :type input_pcl_topic: str + :param checkpoint: The path to the checkpoint file or the name of the pre-trained model. 
+ :type checkpoint: str + :param output_rgb_visualization_topic: The name of the output RGB visualization topic. + :type output_rgb_visualization_topic: str + """ + super().__init__('opendr_efficient_lps_node') + + self.input_pcl_topic = input_pcl_topic + self.checkpoint = checkpoint + self.output_rgb_visualization_topic = output_rgb_visualization_topic + + + # Initialize all ROS2 related things + self._bridge = ROS2Bridge() + self._visualization_publisher = None + + # Initialize the panoptic segmentation network + config_file = Path(sys.modules[ + EfficientLpsLearner.__module__].__file__).parent / 'configs' / 'singlegpu_semantickitti.py' + self._learner = EfficientLpsLearner(str(config_file)) + + # Other + self._tmp_folder = Path(__file__).parent.parent / 'tmp' / 'efficientlps' + self._tmp_folder.mkdir(exist_ok=True, parents=True) + + def _init_learner(self) -> bool: + """ + The model can be initialized via + 1. downloading pre-trained weights for SemanticKITTI. + 2. passing a path to an existing checkpoint file. + This has not been done in the __init__() function since logging is available only once the node is registered. + """ + if self.checkpoint in ['semantickitti']: + file_path = EfficientLpsLearner.download(str(self._tmp_folder), + trained_on=self.checkpoint) + self.checkpoint = file_path + + if self._learner.load(self.checkpoint): + self.get_logger().info('Successfully loaded the checkpoint.') + return True + else: + self.get_logger().error('Failed to load the checkpoint.') + return False + + def _init_publisher(self): + """ + Set up the publishers as requested by the user. + """ + if self.output_rgb_visualization_topic is not None: + self._visualization_publisher = self.create_publisher(ROS_PointCloud2, + self.output_rgb_visualization_topic, + 10) + def _init_subscriber(self): + """ + Subscribe to all relevant topics. + """ + self.pointcloud2_subscriber = self.create_subscription(ROS_PointCloud2, + self.input_pcl_topic, + self.callback, + 1) + + def listen(self): + """ + Start the node and begin processing input data. The order of the function calls ensures that the node does not + try to process input images without being in a trained state. + """ + if self._init_learner(): + self._init_publisher() + self._init_subscriber() + self.get_logger().info('EfficientLPS node started!') + rclpy.spin(self) + + # Destroy the node explicitly + # (optional - otherwise it will be done automatically + # when the garbage collector destroys the node object) + self.destroy_node() + rclpy.shutdown() + + def callback(self, data: ROS_PointCloud2): + """ + Predict the panoptic segmentation map from the input point cloud and publish the results. 
+ :param data: PointCloud2 data message + :type data: sensor_msgs.msg.PointCloud2 + """ + + pointcloud = self._bridge.from_ros_point_cloud2(data) + + try: + prediction = self._learner.infer(pointcloud) + + except Exception as e: + self.get_logger().error('Failed to perform inference: {}'.format(e)) + return + + try: + # The output topics are only published if there is at least one subscriber + if self._visualization_publisher is not None and self._visualization_publisher.get_subscription_count() > 0: + pointcloud_visualization = EfficientLpsLearner.visualize(pointcloud, + prediction, + return_pointcloud=True, + return_pointcloud_type="panoptic") + ros_pointcloud2_msg = self._bridge.to_ros_point_cloud2(pointcloud_visualization, + self.get_clock().now().to_msg(), + channels='rgb') + self._visualization_publisher.publish(ros_pointcloud2_msg) + + except Exception as e: + self.get_logger().error('Failed to publish the results: {}'.format(e)) + + +def main(args=None): + rclpy.init(args=args) + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-i', '--input_point_cloud_2_topic', type=str, default='/opendr/dataset_point_cloud2', + help='Point Cloud 2 topic provided by either a \ + point_cloud_2_publisher_node or any other 3D Point Cloud 2 Node') + parser.add_argument('-c', '--checkpoint', type=str, default='semantickitti', + help='Download pretrained models [semantickitti] or load from the provided path') + parser.add_argument('-o', '--output_rgb_visualization_topic', type=str, default="/opendr/panoptic", + help='Publish the rgb visualization on this topic') + + args = parser.parse_args() + efficient_lps_node = EfficientLpsNode(args.input_pcl_topic, + args.checkpoint, + args.output_rgb_visualization_topic) + efficient_lps_node.listen() + + +if __name__ == '__main__': + main() diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py new file mode 100644 index 0000000000..6025ddc18d --- /dev/null +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3.6 +# Copyright 2020-2022 OpenDR European Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import argparse +import time + +import rclpy +from rclpy.node import Node +from sensor_msgs.msg import PointCloud2 as ROS_PointCloud2 +from opendr_ros2_bridge import ROS2Bridge + +from opendr.perception.panoptic_segmentation import SemanticKittiDataset + + +class PointCloud2DatasetNode(Node): + + def __init__(self, + path: str = './datasets/semantickitti', + split: str = 'valid', + output_point_cloud_2_topic: str = "/opendr/dataset_point_cloud2" + ): + super().__init__("opendr_point_cloud_2_dataset_node") + + """ + Creates a ROS Node for publishing a PointCloud2 message from a DatasetIterator + :param dataset: DatasetIterator from which we are reading the point cloud + :type dataset: DatasetIterator + :param output_point_cloud_2_topic: Topic to which we are publishing the point cloud + :type output_point_cloud_2_topic: str + """ + + self.path = path + self.split = split + self._ros2_bridge = ROS2Bridge() + + if output_point_cloud_2_topic is not None: + self.output_point_cloud_2_publisher = self.create_publisher( + output_point_cloud_2_topic, ROS_PointCloud2, queue_size=10 + ) + + def start(self): + """ + Starts the ROS Node + """ + if self._init_dataset(): + self.get_logger().info("Starting point cloud 2 dataset node") + i = 0 + print("Starting point cloud 2 publisher") + while rclpy.ok(): + print("Publishing point cloud 2 message") + point_cloud = self.dataset[i % len(self.dataset)][0] + self.get_logger().info("Publishing point_cloud_2 [" + str(i) + "]") + message = self._ros2_bridge.to_ros_point_cloud2(point_cloud, + self.get_clock().now().to_msg(), + ROS_PointCloud2) + self.point_cloud_2_publisher.publish(message) + i += 1 + + time.sleep(0.1) + + def _init_dataset(self): + try: + self.dataset = SemanticKittiDataset(path=self.path, split=self.split) + return True + except FileNotFoundError: + self.get_logger().error("Dataset not found. 
Please download the dataset and extract it or enter available path.") + return False + +def main(args=None): + rclpy.init(args=args) + + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-d', '--dataset_path', type=str, default='./datasets/semantickitti', + help='listen to pointclouds on this topic') + parser.add_argument('-s', '--split', type=str, default='valid', + help='split of the dataset to use (train, valid, test)') + parser.add_argument('-o', '--output_point_cloud_2_topic', type=str, default='/opendr/dataset_point_cloud2', + help='topic for the output point cloud') + args = parser.parse_args() + + dataset_node = PointCloud2DatasetNode(args.dataset_path, args.split, args.output_point_cloud_2_topic) + + dataset_node.start() + + # Destroy the node explicitly + # (optional - otherwise it will be done automatically + # when the garbage collector destroys the node object) + dataset_node.destroy_node() + rclpy.shutdown() + +if __name__ == "__main__": + main() diff --git a/projects/opendr_ws_2/src/opendr_perception/setup.py b/projects/opendr_ws_2/src/opendr_perception/setup.py index 1f34005136..14644423e1 100644 --- a/projects/opendr_ws_2/src/opendr_perception/setup.py +++ b/projects/opendr_ws_2/src/opendr_perception/setup.py @@ -32,10 +32,12 @@ 'object_tracking_2d_siamrpn = opendr_perception.object_tracking_2d_siamrpn_node:main', 'face_detection_retinaface = opendr_perception.face_detection_retinaface_node:main', 'semantic_segmentation_bisenet = opendr_perception.semantic_segmentation_bisenet_node:main', - 'panoptic_segmentation = opendr_perception.panoptic_segmentation_efficient_ps_node:main', + 'panoptic_segmentation_efficient_ps = opendr_perception.panoptic_segmentation_efficient_ps_node:main', + 'panoptic_segmentation_efficient_lps = opendr_perception.panoptic_segmentation_efficient_lps_node:main', 'face_recognition = opendr_perception.face_recognition_node:main', 'fall_detection = opendr_perception.fall_detection_node:main', 'point_cloud_dataset = opendr_perception.point_cloud_dataset_node:main', + 'point_cloud_2_publisher = opendr_perception.point_cloud_2_publisher_node:main', 'image_dataset = opendr_perception.image_dataset_node:main', 'object_detection_3d_voxel = opendr_perception.object_detection_3d_voxel_node:main', 'object_tracking_3d_ab3dmot = opendr_perception.object_tracking_3d_ab3dmot_node:main', From 8360f6967fbf672c11fb6bd7547fabfe6397d1e6 Mon Sep 17 00:00:00 2001 From: aselimc Date: Tue, 31 Jan 2023 15:37:34 +0100 Subject: [PATCH 02/13] fix(efficientLPS): Small argument variable naming fix --- .../panoptic_segmentation_efficient_lps_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py index d887ed37ae..f116f69f7e 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py @@ -164,7 +164,7 @@ def main(args=None): help='Publish the rgb visualization on this topic') args = parser.parse_args() - efficient_lps_node = EfficientLpsNode(args.input_pcl_topic, + efficient_lps_node = EfficientLpsNode(args.input_point_cloud_2_topic, args.checkpoint, args.output_rgb_visualization_topic) efficient_lps_node.listen() From 
5e6bfd05f933f30419b56ab9afd7b2a8f7bb3202 Mon Sep 17 00:00:00 2001 From: aselimc Date: Thu, 2 Feb 2023 14:12:37 +0100 Subject: [PATCH 03/13] docs(efficientLPS): Doc update for ROS2 node list --- projects/opendr_ws_2/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/projects/opendr_ws_2/README.md b/projects/opendr_ws_2/README.md index 4379f69595..ad23ad474e 100755 --- a/projects/opendr_ws_2/README.md +++ b/projects/opendr_ws_2/README.md @@ -67,7 +67,7 @@ Currently, apart from tools, opendr_ws_2 contains the following ROS2 nodes (cate 6. [2D Object Detection](src/opendr_perception/README.md#2d-object-detection-ros2-nodes) 7. [2D Single Object Tracking](src/opendr_perception/README.md#2d-single-object-tracking-ros2-node) 8. [2D Object Tracking](src/opendr_perception/README.md#2d-object-tracking-ros2-nodes) -9. [Panoptic Segmentation](src/opendr_perception/README.md#panoptic-segmentation-ros2-node) +9. [Vision Based Panoptic Segmentation](src/opendr_perception/README.md#vision-based-panoptic-segmentation-ros-node) 10. [Semantic Segmentation](src/opendr_perception/README.md#semantic-segmentation-ros2-node) 11. [Image-based Facial Emotion Estimation](src/opendr_perception/README.md#image-based-facial-emotion-estimation-ros2-node) 12. [Landmark-based Facial Expression Recognition](src/opendr_perception/README.md#landmark-based-facial-expression-recognition-ros2-node) @@ -84,5 +84,6 @@ Currently, apart from tools, opendr_ws_2 contains the following ROS2 nodes (cate ## Point cloud input 1. [3D Object Detection Voxel](src/opendr_perception/README.md#3d-object-detection-voxel-ros2-node) 2. [3D Object Tracking AB3DMOT](src/opendr_perception/README.md#3d-object-tracking-ab3dmot-ros2-node) +3. [LiDAR Based Panoptic Segmentation](src/opendr_perception/README.md#lidar-based-panoptic-segmentation-ros-node) ## Biosignal input 1. [Heart Anomaly Detection](src/opendr_perception/README.md#heart-anomaly-detection-ros2-node) From e797eb63d096d6e7a317d7058001380e73a255e1 Mon Sep 17 00:00:00 2001 From: aselimc Date: Fri, 3 Feb 2023 15:24:44 +0100 Subject: [PATCH 04/13] feat(efficientLPS): ROS2 Node added test data support and updated docs --- .../src/opendr_perception/README.md | 68 ++++++++++++++++--- .../point_cloud_2_publisher_node.py | 5 ++ 2 files changed, 65 insertions(+), 8 deletions(-) diff --git a/projects/opendr_ws_2/src/opendr_perception/README.md b/projects/opendr_ws_2/src/opendr_perception/README.md index 1fce5f935d..9d324f45e6 100755 --- a/projects/opendr_ws_2/src/opendr_perception/README.md +++ b/projects/opendr_ws_2/src/opendr_perception/README.md @@ -392,9 +392,11 @@ Make sure to change the default input topic of the tracking node if you are not ### Panoptic Segmentation ROS2 Node -You can find the panoptic segmentation ROS2 node python script [here](./opendr_perception/panoptic_segmentation_efficient_ps_node.py) to inspect the code and modify it as you wish to fit your needs. +A ROS node for performing panoptic segmentation on a specified RGB image stream using the [EfficientPS](../../../../src/opendr/perception/panoptic_segmentation/README.md#efficientps-efficient-panoptic-segmentation) network. + +You can find the vision based panoptic segmentation (EfficientPS) ROS node python script [here](./opendr_perception/panoptic_segmentation_efficient_ps_node.py) to inspect the code and modify it as you wish to fit your needs. 
The node makes use of the toolkit's [panoptic segmentation tool](../../../../src/opendr/perception/panoptic_segmentation/efficient_ps/efficient_ps_learner.py) whose documentation can be found [here](../../../../docs/reference/efficient-ps.md) -and additional information about Efficient PS [here](../../../../src/opendr/perception/panoptic_segmentation/README.md). +and additional information about EfficientPS [here](../../../../src/opendr/perception/panoptic_segmentation/README.md). #### Instructions for basic usage: @@ -403,16 +405,16 @@ and additional information about Efficient PS [here](../../../../src/opendr/perc 2. You are then ready to start the panoptic segmentation node: ```shell - ros2 run opendr_perception panoptic_segmentation_efficient_ps + rosrun opendr_perception panoptic_segmentation_efficient_ps_node.py ``` The following optional arguments are available: - - `-h or --help`: show a help message and exit - - `-i or --input_rgb_image_topic INPUT_RGB_IMAGE_TOPIC` : listen to RGB images on this topic (default=`/image_raw`) - - `-oh --output_heatmap_topic OUTPUT_HEATMAP_TOPIC`: publish the semantic and instance maps on this topic as `OUTPUT_HEATMAP_TOPIC/semantic` and `OUTPUT_HEATMAP_TOPIC/instance`, `None` to stop the node from publishing on this topic (default=`/opendr/panoptic`) - - `-ov --output_rgb_image_topic OUTPUT_RGB_IMAGE_TOPIC`: publish the panoptic segmentation map as an RGB image on this topic or a more detailed overview if using the `--detailed_visualization` flag, `None` to stop the node from publishing on this topic (default=`opendr/panoptic/rgb_visualization`) - - `--detailed_visualization`: generate a combined overview of the input RGB image and the semantic, instance, and panoptic segmentation maps and publish it on `OUTPUT_RGB_IMAGE_TOPIC` (default=deactivated) + - `-h, --help`: show a help message and exit + - `-i or --input_rgb_image_topic INPUT_RGB_IMAGE_TOPIC` : listen to RGB images on this topic (default=`/usb_cam/image_raw`) - `--checkpoint CHECKPOINT` : download pretrained models [cityscapes, kitti] or load from the provided path (default=`cityscapes`) + - `-oh or --output_heatmap_topic OUTPUT_RGB_IMAGE_TOPIC`: publish the semantic and instance maps on this topic as `OUTPUT_HEATMAP_TOPIC/semantic` and `OUTPUT_HEATMAP_TOPIC/instance` (default=`/opendr/panoptic`) + - `-ov or --output_rgb_image_topic OUTPUT_RGB_IMAGE_TOPIC`: publish the panoptic segmentation map as an RGB image on `VISUALIZATION_TOPIC` or a more detailed overview if using the `--detailed_visualization` flag (default=`/opendr/panoptic/rgb_visualization`) + - `--detailed_visualization`: generate a combined overview of the input RGB image and the semantic, instance, and panoptic segmentation maps and publish it on `OUTPUT_RGB_IMAGE_TOPIC` (default=deactivated) 3. Default output topics: - Output images: `/opendr/panoptic/semantic`, `/opendr/panoptic/instance`, `/opendr/panoptic/rgb_visualization` @@ -802,6 +804,37 @@ whose documentation can be found [here](../../../../docs/reference/object-tracki For viewing the output, refer to the [notes above.](#notes) + +### LiDAR Based Panoptic Segmentation ROS Node +A ROS node for performing panoptic segmentation on a specified pointcloud stream using the [EfficientLPS](../../../../src/opendr/perception/panoptic_segmentation/README.md#efficientlps-efficient-lidar-panoptic-segmentation) network. + +You can find the lidar based panoptic segmentation ROS node python script [here](./opendr_perception/panoptic_segmentation_efficient_lps_node.py). 
You can further also find the point cloud 2 publisher ROS node python script [here](./opendr_perception/point_cloud_2_publisher_node.py), and more explanation [here](#point-cloud-2-publisher-ros-node).You can inspect the codes and make changes as you wish to fit your needs. +The EfficientLPS node makes use of the toolkit's [panoptic segmentation tool](../../../../src/opendr/perception/panoptic_segmentation/efficient_lps/efficient_lps_learner.py) whose documentation can be found [here](../../../../docs/reference/efficient-lps.md) +and additional information about EfficientLPS [here](../../../../src/opendr/perception/panoptic_segmentation/README.md). + +#### Instructions for basic usage: + +1. First one needs to download SemanticKITTI dataset into POINTCLOUD_LOCATION as it is described in the [Panoptic Segmentation Datasets](../../../../src/opendr/perception/panoptic_segmentation/datasets/README.md). Then, once the SPLIT type is specified (train, test or "valid", default "valid"), the point **Point Cloud 2 Publisher** can be started using the following line: + +- ```shell + ros2 run opendr_perception point_cloud_2_publisher_node.py -d POINTCLOUD_LOCATION -s SPLIT + ``` +2. After starting the **PointCloud2 Publisher**, one can start **EfficientLPS Node** using the following line: + +- ```shell + ros2 run opendr_perception panoptic_segmentation_efficient_lps_node.py /opendr/dataset_point_cloud2 + ``` + + The following optional arguments are available: + - `-h, --help`: show a help message and exit + - `-i or --input_point_cloud_2_topic INPUT_POINTCLOUD2_TOPIC` : Point Cloud 2 topic provided by either a point_cloud_2_publisher_node or any other 3D Point Cloud 2 Node (default=`/opendr/dataset_point_cloud2`) + - `-c or --checkpoint CHECKPOINT` : download pretrained models [semantickitti] or load from the provided path (default=`semantickitti`) + - `-o or --output_rgb_visualization_topic OUTPUT_RGB_VISUALIZATION_TOPIC`: publish the RGB visualization on this topic on `OUTPUT_RGB_VISUALIZATION_TOPIC` (default=`/opendr/panoptic`) + +3. Default output topics: + - Detection messages: `/opendr/panoptic` + + ---- ## Biosignal input @@ -883,3 +916,22 @@ The following optional arguments are available: - `-f or --fps FPS`: data fps (default=`10`) - `-d or --dataset_path DATASET_PATH`: path to a dataset, if it does not exist, nano KITTI dataset will be downloaded there (default=`/KITTI/opendr_nano_kitti`) - `-ks or --kitti_subsets_path KITTI_SUBSETS_PATH`: path to KITTI subsets, used only if a KITTI dataset is downloaded (default=`../../src/opendr/perception/object_detection_3d/datasets/nano_kitti_subsets`) + +### Point Cloud 2 Publisher ROS Node + +The point cloud 2 dataset publisher, publishes point cloud 2 messages from pre-downloaded dataset SemanticKITTI. It is currently being used by the ROS node [LiDAR Based Panoptic Segmentation ROS Node](#lidar-based-panoptic-segmentation-ros-node). + +You can create an instance of this node with any `DatasetIterator` object that returns `(PointCloud, Target)` as elements, +to use alongside other nodes and datasets. +You can inspect [the node](./opendr_perception/point_cloud_2_publisher_node.py) and modify it to your needs for other point cloud datasets. 
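For orientation, the publisher can also be constructed directly from Python rather than via `ros2 run`; a minimal sketch, assuming the workspace is built and sourced and a SemanticKITTI copy exists at the given path (the import path follows the `opendr_perception` entry points declared in `setup.py`):

```python
import rclpy
from opendr_perception.point_cloud_2_publisher_node import PointCloud2DatasetNode

rclpy.init()
# Constructor arguments mirror the node's defaults; point `path` at your SemanticKITTI copy.
node = PointCloud2DatasetNode(path='./datasets/semantickitti',
                              split='valid',
                              output_point_cloud_2_topic='/opendr/dataset_point_cloud2')
node.start()  # loads the dataset and publishes one PointCloud2 message roughly every 0.1 s
node.destroy_node()
rclpy.shutdown()
```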
+ +To get a point cloud from a dataset on the disk, you can start a `point_cloud_2_publisher_node.py` node as: +```shell +ros2 run opendr_perception point_cloud_2_publisher_node.py +``` +The following optional arguments are available: + - `-h or --help`: show a help message and exit + - `-d or --dataset_path DATASET_PATH`: path of the SemanticKITTI dataset to publish the point cloud 2 message (default=`./datasets/semantickitti`) + - `-s or --split SPLIT`: split of the dataset to use, only (train, valid, test) are available (default=`valid`) + - `-o or --output_point_cloud_2_topic OUTPUT_POINT_CLOUD_2_TOPIC`: topic name to publish the data (default=`/opendr/dataset_point_cloud2`) + - `-t or --test_data`: Add this argument if you want to only test this node with the test data available in our server \ No newline at end of file diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py index 6025ddc18d..0e051c7659 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py @@ -23,6 +23,7 @@ from opendr_ros2_bridge import ROS2Bridge from opendr.perception.panoptic_segmentation import SemanticKittiDataset +from opendr.perception.panoptic_segmentation import EfficientLpsLearner class PointCloud2DatasetNode(Node): @@ -89,8 +90,12 @@ def main(args=None): help='split of the dataset to use (train, valid, test)') parser.add_argument('-o', '--output_point_cloud_2_topic', type=str, default='/opendr/dataset_point_cloud2', help='topic for the output point cloud') + parser.add_argument('-t', '--test_data', action='store_true', + help='Use uploaded test data on the FTP server') args = parser.parse_args() + if args.test_data: + args.dataset_path = EfficientLpsLearner.download(args.dataset_path, mode="test_data", prepare_data=True) dataset_node = PointCloud2DatasetNode(args.dataset_path, args.split, args.output_point_cloud_2_topic) dataset_node.start() From 37758a079972a8500543c62759e3cf6ff5dceaf6 Mon Sep 17 00:00:00 2001 From: aselimc Date: Fri, 3 Feb 2023 15:36:50 +0100 Subject: [PATCH 05/13] docs(efficientLPS): Fix small mistakes in the docs --- projects/opendr_ws_2/README.md | 4 ++-- projects/opendr_ws_2/src/opendr_perception/README.md | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/projects/opendr_ws_2/README.md b/projects/opendr_ws_2/README.md index ad23ad474e..ea4e845297 100755 --- a/projects/opendr_ws_2/README.md +++ b/projects/opendr_ws_2/README.md @@ -67,7 +67,7 @@ Currently, apart from tools, opendr_ws_2 contains the following ROS2 nodes (cate 6. [2D Object Detection](src/opendr_perception/README.md#2d-object-detection-ros2-nodes) 7. [2D Single Object Tracking](src/opendr_perception/README.md#2d-single-object-tracking-ros2-node) 8. [2D Object Tracking](src/opendr_perception/README.md#2d-object-tracking-ros2-nodes) -9. [Vision Based Panoptic Segmentation](src/opendr_perception/README.md#vision-based-panoptic-segmentation-ros-node) +9. [Vision Based Panoptic Segmentation](src/opendr_perception/README.md#vision-based-panoptic-segmentation-ros2-node) 10. [Semantic Segmentation](src/opendr_perception/README.md#semantic-segmentation-ros2-node) 11. [Image-based Facial Emotion Estimation](src/opendr_perception/README.md#image-based-facial-emotion-estimation-ros2-node) 12. 
[Landmark-based Facial Expression Recognition](src/opendr_perception/README.md#landmark-based-facial-expression-recognition-ros2-node) @@ -84,6 +84,6 @@ Currently, apart from tools, opendr_ws_2 contains the following ROS2 nodes (cate ## Point cloud input 1. [3D Object Detection Voxel](src/opendr_perception/README.md#3d-object-detection-voxel-ros2-node) 2. [3D Object Tracking AB3DMOT](src/opendr_perception/README.md#3d-object-tracking-ab3dmot-ros2-node) -3. [LiDAR Based Panoptic Segmentation](src/opendr_perception/README.md#lidar-based-panoptic-segmentation-ros-node) +3. [LiDAR Based Panoptic Segmentation](src/opendr_perception/README.md#lidar-based-panoptic-segmentation-ros2-node) ## Biosignal input 1. [Heart Anomaly Detection](src/opendr_perception/README.md#heart-anomaly-detection-ros2-node) diff --git a/projects/opendr_ws_2/src/opendr_perception/README.md b/projects/opendr_ws_2/src/opendr_perception/README.md index 9d324f45e6..f278bfcddc 100755 --- a/projects/opendr_ws_2/src/opendr_perception/README.md +++ b/projects/opendr_ws_2/src/opendr_perception/README.md @@ -390,7 +390,7 @@ whose documentation can be found here: [Deep Sort docs](../../../../docs/referen An [image dataset node](#image-dataset-ros2-node) is also provided to be used along these nodes. Make sure to change the default input topic of the tracking node if you are not using the USB cam node. -### Panoptic Segmentation ROS2 Node +### Vision Based Panoptic Segmentation ROS2 Node A ROS node for performing panoptic segmentation on a specified RGB image stream using the [EfficientPS](../../../../src/opendr/perception/panoptic_segmentation/README.md#efficientps-efficient-panoptic-segmentation) network. @@ -405,7 +405,7 @@ and additional information about EfficientPS [here](../../../../src/opendr/perce 2. You are then ready to start the panoptic segmentation node: ```shell - rosrun opendr_perception panoptic_segmentation_efficient_ps_node.py + ros2 run opendr_perception panoptic_segmentation_efficient_ps_node.py ``` The following optional arguments are available: @@ -805,7 +805,7 @@ whose documentation can be found [here](../../../../docs/reference/object-tracki For viewing the output, refer to the [notes above.](#notes) -### LiDAR Based Panoptic Segmentation ROS Node +### LiDAR Based Panoptic Segmentation ROS2 Node A ROS node for performing panoptic segmentation on a specified pointcloud stream using the [EfficientLPS](../../../../src/opendr/perception/panoptic_segmentation/README.md#efficientlps-efficient-lidar-panoptic-segmentation) network. You can find the lidar based panoptic segmentation ROS node python script [here](./opendr_perception/panoptic_segmentation_efficient_lps_node.py). You can further also find the point cloud 2 publisher ROS node python script [here](./opendr_perception/point_cloud_2_publisher_node.py), and more explanation [here](#point-cloud-2-publisher-ros-node).You can inspect the codes and make changes as you wish to fit your needs. 
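The node also exposes a small Python interface; a minimal sketch of constructing and running it directly, using the positional arguments defined in `EfficientLpsNode.__init__` (input PointCloud2 topic, checkpoint, output visualization topic) and assuming the OpenDR toolkit and the built workspace are sourced:

```python
import rclpy
from opendr_perception.panoptic_segmentation_efficient_lps_node import EfficientLpsNode

rclpy.init()
# The values below are the node's own argument-parser defaults.
node = EfficientLpsNode('/opendr/dataset_point_cloud2',  # input PointCloud2 topic
                        'semantickitti',                 # pretrained checkpoint to download, or a local path
                        '/opendr/panoptic')              # output visualization topic
node.listen()  # loads the checkpoint, subscribes to the input topic and spins
```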
From 2c717b818595877bb95952c5658d519376bd6218 Mon Sep 17 00:00:00 2001 From: aselimc Date: Fri, 3 Feb 2023 15:40:35 +0100 Subject: [PATCH 06/13] docs(efficientLPS): Fix small typos in the docs --- projects/opendr_ws_2/src/opendr_perception/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/projects/opendr_ws_2/src/opendr_perception/README.md b/projects/opendr_ws_2/src/opendr_perception/README.md index f278bfcddc..a8da962b4f 100755 --- a/projects/opendr_ws_2/src/opendr_perception/README.md +++ b/projects/opendr_ws_2/src/opendr_perception/README.md @@ -405,7 +405,7 @@ and additional information about EfficientPS [here](../../../../src/opendr/perce 2. You are then ready to start the panoptic segmentation node: ```shell - ros2 run opendr_perception panoptic_segmentation_efficient_ps_node.py + ros2 run opendr_perception panoptic_segmentation_efficient_ps ``` The following optional arguments are available: @@ -817,12 +817,12 @@ and additional information about EfficientLPS [here](../../../../src/opendr/perc 1. First one needs to download SemanticKITTI dataset into POINTCLOUD_LOCATION as it is described in the [Panoptic Segmentation Datasets](../../../../src/opendr/perception/panoptic_segmentation/datasets/README.md). Then, once the SPLIT type is specified (train, test or "valid", default "valid"), the point **Point Cloud 2 Publisher** can be started using the following line: - ```shell - ros2 run opendr_perception point_cloud_2_publisher_node.py -d POINTCLOUD_LOCATION -s SPLIT + ros2 run opendr_perception point_cloud_2_publisher -d POINTCLOUD_LOCATION -s SPLIT ``` 2. After starting the **PointCloud2 Publisher**, one can start **EfficientLPS Node** using the following line: - ```shell - ros2 run opendr_perception panoptic_segmentation_efficient_lps_node.py /opendr/dataset_point_cloud2 + ros2 run opendr_perception panoptic_segmentation_efficient_lps /opendr/dataset_point_cloud2 ``` The following optional arguments are available: @@ -917,7 +917,7 @@ The following optional arguments are available: - `-d or --dataset_path DATASET_PATH`: path to a dataset, if it does not exist, nano KITTI dataset will be downloaded there (default=`/KITTI/opendr_nano_kitti`) - `-ks or --kitti_subsets_path KITTI_SUBSETS_PATH`: path to KITTI subsets, used only if a KITTI dataset is downloaded (default=`../../src/opendr/perception/object_detection_3d/datasets/nano_kitti_subsets`) -### Point Cloud 2 Publisher ROS Node +### Point Cloud 2 Publisher ROS2 Node The point cloud 2 dataset publisher, publishes point cloud 2 messages from pre-downloaded dataset SemanticKITTI. It is currently being used by the ROS node [LiDAR Based Panoptic Segmentation ROS Node](#lidar-based-panoptic-segmentation-ros-node). 
@@ -927,7 +927,7 @@ You can inspect [the node](./opendr_perception/point_cloud_2_publisher_node.py) To get a point cloud from a dataset on the disk, you can start a `point_cloud_2_publisher_node.py` node as: ```shell -ros2 run opendr_perception point_cloud_2_publisher_node.py +ros2 run opendr_perception point_cloud_2_publisher ``` The following optional arguments are available: - `-h or --help`: show a help message and exit From 46d55052d3b8dcd1b72485b1b318b787a249bb1b Mon Sep 17 00:00:00 2001 From: aselimc Date: Fri, 3 Feb 2023 16:00:54 +0100 Subject: [PATCH 07/13] style(efficientLPS): Fix pep8 --- .../src/opendr_bridge/opendr_bridge/bridge.py | 5 +++- ...anoptic_segmentation_efficient_lps_node.py | 23 +++++++++---------- .../point_cloud_2_publisher_node.py | 5 ++-- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py b/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py index 1e1d8a1b46..cd919c245e 100644 --- a/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py +++ b/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py @@ -406,7 +406,10 @@ def to_ros_point_cloud2(self, point_cloud: PointCloud, timestamp: str, channels: fields.append(PointFieldMsg(name="rgba", offset=12, datatype=PointFieldMsg.UINT32, count=1)) else: for i in range(channel_count): - fields.append(PointFieldMsg(name="channel_" + str(i), offset=12 + i * 4, datatype=PointFieldMsg.FLOAT32, count=1)) + fields.append(PointFieldMsg(name="channel_" + str(i), + offset=12 + i * 4, + datatype=PointFieldMsg.FLOAT32, + count=1)) points = [] diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py index f116f69f7e..cdbf524062 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py @@ -18,7 +18,6 @@ import argparse from typing import Optional, List -import numpy as np import rclpy from rclpy.node import Node import matplotlib @@ -52,7 +51,6 @@ def __init__(self, self.input_pcl_topic = input_pcl_topic self.checkpoint = checkpoint self.output_rgb_visualization_topic = output_rgb_visualization_topic - # Initialize all ROS2 related things self._bridge = ROS2Bridge() @@ -76,7 +74,7 @@ def _init_learner(self) -> bool: """ if self.checkpoint in ['semantickitti']: file_path = EfficientLpsLearner.download(str(self._tmp_folder), - trained_on=self.checkpoint) + trained_on=self.checkpoint) self.checkpoint = file_path if self._learner.load(self.checkpoint): @@ -92,17 +90,18 @@ def _init_publisher(self): """ if self.output_rgb_visualization_topic is not None: self._visualization_publisher = self.create_publisher(ROS_PointCloud2, - self.output_rgb_visualization_topic, + self.output_rgb_visualization_topic, 10) + def _init_subscriber(self): """ Subscribe to all relevant topics. """ - self.pointcloud2_subscriber = self.create_subscription(ROS_PointCloud2, + self.pointcloud2_subscriber = self.create_subscription(ROS_PointCloud2, self.input_pcl_topic, self.callback, 1) - + def listen(self): """ Start the node and begin processing input data. 
The order of the function calls ensures that the node does not @@ -126,22 +125,22 @@ def callback(self, data: ROS_PointCloud2): :param data: PointCloud2 data message :type data: sensor_msgs.msg.PointCloud2 """ - + pointcloud = self._bridge.from_ros_point_cloud2(data) - try: + try: prediction = self._learner.infer(pointcloud) except Exception as e: self.get_logger().error('Failed to perform inference: {}'.format(e)) return - + try: # The output topics are only published if there is at least one subscriber if self._visualization_publisher is not None and self._visualization_publisher.get_subscription_count() > 0: pointcloud_visualization = EfficientLpsLearner.visualize(pointcloud, - prediction, - return_pointcloud=True, + prediction, + return_pointcloud=True, return_pointcloud_type="panoptic") ros_pointcloud2_msg = self._bridge.to_ros_point_cloud2(pointcloud_visualization, self.get_clock().now().to_msg(), @@ -164,7 +163,7 @@ def main(args=None): help='Publish the rgb visualization on this topic') args = parser.parse_args() - efficient_lps_node = EfficientLpsNode(args.input_point_cloud_2_topic, + efficient_lps_node = EfficientLpsNode(args.input_point_cloud_2_topic, args.checkpoint, args.output_rgb_visualization_topic) efficient_lps_node.listen() diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py index 0e051c7659..ed9101f478 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py @@ -58,13 +58,13 @@ def start(self): """ if self._init_dataset(): self.get_logger().info("Starting point cloud 2 dataset node") - i = 0 + i = 0 print("Starting point cloud 2 publisher") while rclpy.ok(): print("Publishing point cloud 2 message") point_cloud = self.dataset[i % len(self.dataset)][0] self.get_logger().info("Publishing point_cloud_2 [" + str(i) + "]") - message = self._ros2_bridge.to_ros_point_cloud2(point_cloud, + message = self._ros2_bridge.to_ros_point_cloud2(point_cloud, self.get_clock().now().to_msg(), ROS_PointCloud2) self.point_cloud_2_publisher.publish(message) @@ -80,6 +80,7 @@ def _init_dataset(self): self.get_logger().error("Dataset not found. 
Please download the dataset and extract it or enter available path.") return False + def main(args=None): rclpy.init(args=args) From 87fb99cf8b12fc240f4be0b1c36af879c2b8316c Mon Sep 17 00:00:00 2001 From: aselimc Date: Fri, 3 Feb 2023 16:03:26 +0100 Subject: [PATCH 08/13] style(efficientLPS): Fix pep8 --- .../panoptic_segmentation_efficient_lps_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py index cdbf524062..31bbe8a414 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py @@ -16,7 +16,7 @@ import sys from pathlib import Path import argparse -from typing import Optional, List +from typing import Optional import rclpy from rclpy.node import Node From a315376a35e14b71c61ba90faf4a92dd3e774ed7 Mon Sep 17 00:00:00 2001 From: aselimc Date: Tue, 7 Feb 2023 12:33:11 +0100 Subject: [PATCH 09/13] docs(efficientLPS): Fix license dates --- .../panoptic_segmentation_efficient_lps_node.py | 2 +- .../opendr_perception/point_cloud_2_publisher_node.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py index 31bbe8a414..a2716924e2 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright 2020-2022 OpenDR European Project +# Copyright 2020-2023 OpenDR European Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py index ed9101f478..2561794847 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py @@ -1,5 +1,5 @@ -#!/usr/bin/env python3.6 -# Copyright 2020-2022 OpenDR European Project +#!/usr/bin/env python +# Copyright 2020-2023 OpenDR European Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From 489e2eccf24b097516606e7e2e3528e5eb10eeb5 Mon Sep 17 00:00:00 2001 From: aselimc Date: Tue, 7 Feb 2023 12:53:24 +0100 Subject: [PATCH 10/13] refactor(efficientLPS): Rename the output topic of EfficientLPS ROS2 node --- .../src/opendr_perception/README.md | 2 +- ...panoptic_segmentation_efficient_lps_node.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/projects/opendr_ws_2/src/opendr_perception/README.md b/projects/opendr_ws_2/src/opendr_perception/README.md index a8da962b4f..3198ac631f 100755 --- a/projects/opendr_ws_2/src/opendr_perception/README.md +++ b/projects/opendr_ws_2/src/opendr_perception/README.md @@ -829,7 +829,7 @@ and additional information about EfficientLPS [here](../../../../src/opendr/perc - `-h, --help`: show a help message and exit - `-i or --input_point_cloud_2_topic INPUT_POINTCLOUD2_TOPIC` : Point Cloud 2 topic provided by either a point_cloud_2_publisher_node or any other 3D Point Cloud 2 Node (default=`/opendr/dataset_point_cloud2`) - `-c or --checkpoint CHECKPOINT` : download pretrained models [semantickitti] or load from the provided path (default=`semantickitti`) - - `-o or --output_rgb_visualization_topic OUTPUT_RGB_VISUALIZATION_TOPIC`: publish the RGB visualization on this topic on `OUTPUT_RGB_VISUALIZATION_TOPIC` (default=`/opendr/panoptic`) + - `-o or --output_heatmap_pointcloud_topic OUTPUT_HEATMAP_POINTCLOUD_TOPIC`: publish the 3D heatmap pointcloud on `OUTPUT_HEATMAP_POINTCLOUD_TOPIC` (default=`/opendr/panoptic`) 3. Default output topics: - Detection messages: `/opendr/panoptic` diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py index a2716924e2..d36770d9bf 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py @@ -35,7 +35,7 @@ class EfficientLpsNode(Node): def __init__(self, input_pcl_topic: str, checkpoint: str, - output_rgb_visualization_topic: Optional[str] = None + output_heatmap_pointcloud_topic: Optional[str] = None ): """ Initialize the EfficientLPS ROS node and create an instance of the respective learner class. @@ -43,14 +43,14 @@ def __init__(self, :type input_pcl_topic: str :param checkpoint: The path to the checkpoint file or the name of the pre-trained model. :type checkpoint: str - :param output_rgb_visualization_topic: The name of the output RGB visualization topic. - :type output_rgb_visualization_topic: str + :param output_heatmap_pointcloud_topic: topic for the output 3D heatmap point cloud + :type output_heatmap_pointcloud_topic: Optional[str] """ super().__init__('opendr_efficient_lps_node') self.input_pcl_topic = input_pcl_topic self.checkpoint = checkpoint - self.output_rgb_visualization_topic = output_rgb_visualization_topic + self.output_heatmap_pointcloud_topic = output_heatmap_pointcloud_topic # Initialize all ROS2 related things self._bridge = ROS2Bridge() @@ -88,9 +88,9 @@ def _init_publisher(self): """ Set up the publishers as requested by the user. 
""" - if self.output_rgb_visualization_topic is not None: + if self.output_heatmap_pointcloud_topic is not None: self._visualization_publisher = self.create_publisher(ROS_PointCloud2, - self.output_rgb_visualization_topic, + self.output_heatmap_pointcloud_topic, 10) def _init_subscriber(self): @@ -159,13 +159,13 @@ def main(args=None): point_cloud_2_publisher_node or any other 3D Point Cloud 2 Node') parser.add_argument('-c', '--checkpoint', type=str, default='semantickitti', help='Download pretrained models [semantickitti] or load from the provided path') - parser.add_argument('-o', '--output_rgb_visualization_topic', type=str, default="/opendr/panoptic", - help='Publish the rgb visualization on this topic') + parser.add_argument('-o', '--output_heatmap_pointcloud_topic', type=str, default="/opendr/panoptic", + help='Publish the output 3D heatmap point cloud on this topic') args = parser.parse_args() efficient_lps_node = EfficientLpsNode(args.input_point_cloud_2_topic, args.checkpoint, - args.output_rgb_visualization_topic) + args.output_heatmap_pointcloud_topic) efficient_lps_node.listen() From 2adb0a69349dbc5945d55c4fef70c103186e5949 Mon Sep 17 00:00:00 2001 From: aselimc Date: Tue, 14 Feb 2023 14:31:10 +0100 Subject: [PATCH 11/13] feat(efficientLPS): Small function/library call fixes on ROS2 nodes --- .../src/opendr_bridge/opendr_bridge/bridge.py | 2 +- .../panoptic_segmentation_efficient_lps_node.py | 2 +- .../opendr_perception/point_cloud_2_publisher_node.py | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py b/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py index cd919c245e..155132eb30 100644 --- a/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py +++ b/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py @@ -37,7 +37,7 @@ Point ) from opendr_interface.msg import OpenDRPose2D, OpenDRPose2DKeypoint, OpenDRPose3D, OpenDRPose3DKeypoint -from sensor_msgs import point_cloud2 as pc2 +from sensor_msgs_py import point_cloud2 as pc2 class ROS2Bridge: diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py index d36770d9bf..06e9300f91 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py @@ -23,7 +23,7 @@ import matplotlib from sensor_msgs.msg import PointCloud2 as ROS_PointCloud2 -from opendr_ros2_bridge import ROS2Bridge +from opendr_bridge import ROS2Bridge from opendr.perception.panoptic_segmentation import EfficientLpsLearner # Avoid having a matplotlib GUI in a separate thread in the visualize() function diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py index 2561794847..611b216622 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py @@ -20,7 +20,7 @@ import rclpy from rclpy.node import Node from sensor_msgs.msg import PointCloud2 as ROS_PointCloud2 -from opendr_ros2_bridge import ROS2Bridge +from opendr_bridge import 
ROS2Bridge from opendr.perception.panoptic_segmentation import SemanticKittiDataset from opendr.perception.panoptic_segmentation import EfficientLpsLearner @@ -48,9 +48,9 @@ def __init__(self, self._ros2_bridge = ROS2Bridge() if output_point_cloud_2_topic is not None: - self.output_point_cloud_2_publisher = self.create_publisher( - output_point_cloud_2_topic, ROS_PointCloud2, queue_size=10 - ) + self.output_point_cloud_2_publisher = self.create_publisher(ROS_PointCloud2, + output_point_cloud_2_topic, + 10) def start(self): """ @@ -67,7 +67,7 @@ def start(self): message = self._ros2_bridge.to_ros_point_cloud2(point_cloud, self.get_clock().now().to_msg(), ROS_PointCloud2) - self.point_cloud_2_publisher.publish(message) + self.output_point_cloud_2_publisher.publish(message) i += 1 time.sleep(0.1) From a83380ed0ff96a25cad546a0c7c9be95060c2450 Mon Sep 17 00:00:00 2001 From: tsampazk <27914645+tsampazk@users.noreply.github.com> Date: Wed, 15 Feb 2023 12:08:30 +0200 Subject: [PATCH 12/13] Added missing dependency for new efficient lps node --- bin/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/install.sh b/bin/install.sh index ddced59961..1561e9e918 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -48,7 +48,7 @@ fi # ROS2 package dependencies if [[ ${ROS_DISTRO} == "foxy" || ${ROS_DISTRO} == "humble" ]]; then echo "Installing ROS2 dependencies" - sudo apt-get -y install python3-lark ros-$ROS_DISTRO-usb-cam ros-$ROS_DISTRO-webots-ros2 python3-colcon-common-extensions ros-$ROS_DISTRO-vision-msgs + sudo apt-get -y install python3-lark ros-$ROS_DISTRO-usb-cam ros-$ROS_DISTRO-webots-ros2 python3-colcon-common-extensions ros-$ROS_DISTRO-vision-msgs ros-$ROS_DISTRO-sensor-msgs-py LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/ros/$ROS_DISTRO/lib/controller cd $OPENDR_HOME/projects/opendr_ws_2/ git clone --depth 1 --branch ros2 https://github.com/ros-drivers/audio_common src/audio_common From da944f9bda1c2e0de3af63c2480f1de36e93dbb9 Mon Sep 17 00:00:00 2001 From: aselimc Date: Thu, 16 Feb 2023 11:21:27 +0100 Subject: [PATCH 13/13] feat(Continual-SLAM): Make pointcloud2 publisher publish a proper error message --- .../point_cloud_2_publisher_node.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py index 611b216622..48bb3f4fce 100644 --- a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py +++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py @@ -58,19 +58,19 @@ def start(self): """ if self._init_dataset(): self.get_logger().info("Starting point cloud 2 dataset node") - i = 0 - print("Starting point cloud 2 publisher") - while rclpy.ok(): - print("Publishing point cloud 2 message") - point_cloud = self.dataset[i % len(self.dataset)][0] - self.get_logger().info("Publishing point_cloud_2 [" + str(i) + "]") - message = self._ros2_bridge.to_ros_point_cloud2(point_cloud, - self.get_clock().now().to_msg(), - ROS_PointCloud2) - self.output_point_cloud_2_publisher.publish(message) - i += 1 - - time.sleep(0.1) + i = 0 + print("Starting point cloud 2 publisher") + while rclpy.ok(): + print("Publishing point cloud 2 message") + point_cloud = self.dataset[i % len(self.dataset)][0] + self.get_logger().info("Publishing point_cloud_2 [" + str(i) + "]") + message = 
self._ros2_bridge.to_ros_point_cloud2(point_cloud, + self.get_clock().now().to_msg(), + ROS_PointCloud2) + self.output_point_cloud_2_publisher.publish(message) + i += 1 + + time.sleep(0.1) def _init_dataset(self): try:
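For reference, the colour handling added to `bridge.py` in the first patch packs each point's `b`, `g`, `r`, `a` bytes into a single 32-bit unsigned integer (native byte order) for the `rgba` field of the `PointCloud2` message; a self-contained round-trip sketch of that packing:

```python
import struct

# Pack four 8-bit channels into one uint32, as to_ros_point_cloud2 does,
# then unpack them again to confirm the channels survive the round trip.
r, g, b, a = 200, 120, 40, 255
rgba = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
b2, g2, r2, a2 = struct.unpack('BBBB', struct.pack('I', rgba))
assert (r2, g2, b2, a2) == (r, g, b, a)
```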