diff --git a/bin/install.sh b/bin/install.sh index ddced59961..1561e9e918 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -48,7 +48,7 @@ fi # ROS2 package dependencies if [[ ${ROS_DISTRO} == "foxy" || ${ROS_DISTRO} == "humble" ]]; then echo "Installing ROS2 dependencies" - sudo apt-get -y install python3-lark ros-$ROS_DISTRO-usb-cam ros-$ROS_DISTRO-webots-ros2 python3-colcon-common-extensions ros-$ROS_DISTRO-vision-msgs + sudo apt-get -y install python3-lark ros-$ROS_DISTRO-usb-cam ros-$ROS_DISTRO-webots-ros2 python3-colcon-common-extensions ros-$ROS_DISTRO-vision-msgs ros-$ROS_DISTRO-sensor-msgs-py LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/ros/$ROS_DISTRO/lib/controller cd $OPENDR_HOME/projects/opendr_ws_2/ git clone --depth 1 --branch ros2 https://github.com/ros-drivers/audio_common src/audio_common diff --git a/projects/opendr_ws_2/README.md b/projects/opendr_ws_2/README.md index 2d66a446cb..968619fb75 100644 --- a/projects/opendr_ws_2/README.md +++ b/projects/opendr_ws_2/README.md @@ -67,7 +67,7 @@ Currently, apart from tools, opendr_ws_2 contains the following ROS2 nodes (cate 6. [2D Object Detection](src/opendr_perception/README.md#2d-object-detection-ros2-nodes) 7. [2D Single Object Tracking](src/opendr_perception/README.md#2d-single-object-tracking-ros2-node) 8. [2D Object Tracking](src/opendr_perception/README.md#2d-object-tracking-ros2-nodes) -9. [Panoptic Segmentation](src/opendr_perception/README.md#panoptic-segmentation-ros2-node) +9. [Vision Based Panoptic Segmentation](src/opendr_perception/README.md#vision-based-panoptic-segmentation-ros2-node) 10. [Semantic Segmentation](src/opendr_perception/README.md#semantic-segmentation-ros2-node) 11. [Binary High Resolution](src/opendr_perception/README.md#binary-high-resolution-ros2-node) 12. [Image-based Facial Emotion Estimation](src/opendr_perception/README.md#image-based-facial-emotion-estimation-ros2-node) @@ -86,5 +86,6 @@ Currently, apart from tools, opendr_ws_2 contains the following ROS2 nodes (cate ## Point cloud input 1. [3D Object Detection Voxel](src/opendr_perception/README.md#3d-object-detection-voxel-ros2-node) 2. [3D Object Tracking AB3DMOT](src/opendr_perception/README.md#3d-object-tracking-ab3dmot-ros2-node) +3. [LiDAR Based Panoptic Segmentation](src/opendr_perception/README.md#lidar-based-panoptic-segmentation-ros2-node) ## Biosignal input 1. [Heart Anomaly Detection](src/opendr_perception/README.md#heart-anomaly-detection-ros2-node) diff --git a/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py b/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py index ef5ac06698..155132eb30 100644 --- a/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py +++ b/projects/opendr_ws_2/src/opendr_bridge/opendr_bridge/bridge.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import struct
 import numpy as np
 from opendr.engine.data import Image, PointCloud, Timeseries
 from opendr.engine.target import (
@@ -20,7 +21,10 @@
 )
 from cv_bridge import CvBridge
 from std_msgs.msg import String, ColorRGBA, Header
-from sensor_msgs.msg import Image as ImageMsg, PointCloud as PointCloudMsg, ChannelFloat32 as ChannelFloat32Msg
+from sensor_msgs.msg import (
+    Image as ImageMsg, PointCloud as PointCloudMsg, ChannelFloat32 as ChannelFloat32Msg,
+    PointField as PointFieldMsg, PointCloud2 as PointCloud2Msg
+)
 from vision_msgs.msg import (
     Detection2DArray, Detection2D, BoundingBox2D, ObjectHypothesisWithPose,
     Detection3D, Detection3DArray, BoundingBox3D as BoundingBox3DMsg,
@@ -33,6 +37,7 @@
     Point
 )
 from opendr_interface.msg import OpenDRPose2D, OpenDRPose2DKeypoint, OpenDRPose3D, OpenDRPose3DKeypoint
+from sensor_msgs_py import point_cloud2 as pc2
 
 
 class ROS2Bridge:
@@ -362,6 +367,77 @@ def to_ros_point_cloud(self, point_cloud, time_stamp):
 
         return ros_point_cloud
 
+    def from_ros_point_cloud2(self, point_cloud: PointCloud2Msg):
+        """
+        Converts a ROS2 PointCloud2 message into an OpenDR PointCloud
+        :param point_cloud: ROS2 PointCloud2 to be converted
+        :type point_cloud: sensor_msgs.msg.PointCloud2
+        :return: OpenDR PointCloud
+        :rtype: engine.data.PointCloud
+        """
+        points = pc2.read_points_list(point_cloud, field_names=[f.name for f in point_cloud.fields])
+        result = PointCloud(points)
+
+        return result
+
+    def to_ros_point_cloud2(self, point_cloud: PointCloud, timestamp, channels: str = None):
+        """
+        Converts an OpenDR PointCloud into a ROS2 PointCloud2 message
+        :param point_cloud: OpenDR PointCloud to be converted
+        :type point_cloud: engine.data.PointCloud
+        :param timestamp: timestamp used in the message header
+        :type timestamp: builtin_interfaces.msg.Time
+        :param channels: channels to include besides x, y, z; available channel names are ["rgb", "rgba"],
+            any other value packs the remaining columns as generic FLOAT32 channels
+        :type channels: str
+        :return: ROS2 PointCloud2
+        :rtype: sensor_msgs.msg.PointCloud2
+        """
+        header = Header()
+        header.stamp = timestamp
+        header.frame_id = "base_link"
+
+        channel_count = point_cloud.data.shape[-1] - 3
+
+        fields = [PointFieldMsg(name="x", offset=0, datatype=PointFieldMsg.FLOAT32, count=1),
+                  PointFieldMsg(name="y", offset=4, datatype=PointFieldMsg.FLOAT32, count=1),
+                  PointFieldMsg(name="z", offset=8, datatype=PointFieldMsg.FLOAT32, count=1)]
+        if channels == 'rgb' or channels == 'rgba':
+            fields.append(PointFieldMsg(name="rgba", offset=12, datatype=PointFieldMsg.UINT32, count=1))
+        else:
+            for i in range(channel_count):
+                fields.append(PointFieldMsg(name="channel_" + str(i),
+                                            offset=12 + i * 4,
+                                            datatype=PointFieldMsg.FLOAT32,
+                                            count=1))
+
+        points = []
+
+        for point in point_cloud.data:
+            pt = [point[0], point[1], point[2]]
+            for channel in range(channel_count):
+                if channels == 'rgb' or channels == 'rgba':
+                    r = int(point[3])
+                    g = int(point[4])
+                    b = int(point[5])
+
+                    if channels == 'rgb':
+                        a = 255
+                    else:
+                        a = int(point[6])
+
+                    # Pack the four 8-bit color channels into a single UINT32 value
+                    rgba = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
+                    pt.append(rgba)
+                    break
+                else:
+                    pt.append(point[3 + channel])
+            points.append(pt)
+
+        ros_point_cloud2 = pc2.create_cloud(header, fields, points)
+
+        return ros_point_cloud2
+
     def from_ros_boxes_3d(self, ros_boxes_3d):
         """
         Converts a ROS2 Detection3DArray message into an OpenDR BoundingBox3D object.
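A note on the color packing in `to_ros_point_cloud2` above: PointCloud2 color data travels as a single 32-bit unsigned integer per point, with the four 8-bit channels packed in `b, g, r, a` byte order. The following standalone sketch (illustrative values, not part of the patch) shows the same `struct` round-trip in isolation:

```python
import struct

# Illustrative channel values in [0, 255].
r, g, b, a = 255, 128, 0, 255

# Pack four unsigned bytes in b, g, r, a order, then reinterpret the
# 4-byte buffer as one unsigned 32-bit integer (native endianness),
# exactly as done in to_ros_point_cloud2 above.
rgba = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]

# Unpacking reverses the operation and recovers the individual channels.
b2, g2, r2, a2 = struct.unpack('BBBB', struct.pack('I', rgba))
assert (r2, g2, b2, a2) == (r, g, b, a)
```

The `b, g, r, a` byte order matches the packed `rgba` field layout commonly expected by point cloud viewers such as RViz.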
diff --git a/projects/opendr_ws_2/src/opendr_perception/README.md b/projects/opendr_ws_2/src/opendr_perception/README.md
index e0fd21d13a..6a36be68f0 100755
--- a/projects/opendr_ws_2/src/opendr_perception/README.md
+++ b/projects/opendr_ws_2/src/opendr_perception/README.md
@@ -390,11 +390,13 @@ whose documentation can be found here: [Deep Sort docs](../../../../docs/referen
 An [image dataset node](#image-dataset-ros2-node) is also provided to be used along these nodes.
 Make sure to change the default input topic of the tracking node if you are not using the USB cam node.
 
-### Panoptic Segmentation ROS2 Node
+### Vision Based Panoptic Segmentation ROS2 Node
 
-You can find the panoptic segmentation ROS2 node python script [here](./opendr_perception/panoptic_segmentation_efficient_ps_node.py) to inspect the code and modify it as you wish to fit your needs.
+A ROS2 node for performing panoptic segmentation on a specified RGB image stream using the [EfficientPS](../../../../src/opendr/perception/panoptic_segmentation/README.md#efficientps-efficient-panoptic-segmentation) network.
+
+You can find the vision-based panoptic segmentation (EfficientPS) ROS2 node python script [here](./opendr_perception/panoptic_segmentation_efficient_ps_node.py) to inspect the code and modify it as you wish to fit your needs.
 The node makes use of the toolkit's [panoptic segmentation tool](../../../../src/opendr/perception/panoptic_segmentation/efficient_ps/efficient_ps_learner.py) whose documentation can be found [here](../../../../docs/reference/efficient-ps.md)
-and additional information about Efficient PS [here](../../../../src/opendr/perception/panoptic_segmentation/README.md).
+and additional information about EfficientPS [here](../../../../src/opendr/perception/panoptic_segmentation/README.md).
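For orientation, a minimal consumer of this node's output could look like the sketch below. This is illustrative only and not part of the patch: it assumes the default heatmap topic listed in the instructions that follow, and that the semantic map arrives as a `sensor_msgs/Image`, as the default output topics suggest.

```python
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image


class PanopticListener(Node):
    """Toy subscriber for the EfficientPS semantic map (illustrative only)."""

    def __init__(self):
        super().__init__('panoptic_listener')
        # '/opendr/panoptic/semantic' is the node's default semantic output topic.
        self.create_subscription(Image, '/opendr/panoptic/semantic', self.on_semantic, 10)

    def on_semantic(self, msg: Image):
        # Log the size and encoding of each incoming semantic map.
        self.get_logger().info(
            'Received semantic map: {}x{} ({})'.format(msg.width, msg.height, msg.encoding))


def main():
    rclpy.init()
    node = PanopticListener()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
```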
 #### Instructions for basic usage:
@@ -407,12 +409,12 @@ and additional information about Efficient PS [here](../../../../src/opendr/perc
    ```
    The following optional arguments are available:
-   - `-h or --help`: show a help message and exit
-   - `-i or --input_rgb_image_topic INPUT_RGB_IMAGE_TOPIC` : listen to RGB images on this topic (default=`/image_raw`)
-   - `-oh --output_heatmap_topic OUTPUT_HEATMAP_TOPIC`: publish the semantic and instance maps on this topic as `OUTPUT_HEATMAP_TOPIC/semantic` and `OUTPUT_HEATMAP_TOPIC/instance`, `None` to stop the node from publishing on this topic (default=`/opendr/panoptic`)
-   - `-ov --output_rgb_image_topic OUTPUT_RGB_IMAGE_TOPIC`: publish the panoptic segmentation map as an RGB image on this topic or a more detailed overview if using the `--detailed_visualization` flag, `None` to stop the node from publishing on this topic (default=`opendr/panoptic/rgb_visualization`)
-   - `--detailed_visualization`: generate a combined overview of the input RGB image and the semantic, instance, and panoptic segmentation maps and publish it on `OUTPUT_RGB_IMAGE_TOPIC` (default=deactivated)
+   - `-h, --help`: show a help message and exit
+   - `-i or --input_rgb_image_topic INPUT_RGB_IMAGE_TOPIC`: listen to RGB images on this topic (default=`/usb_cam/image_raw`)
+   - `--checkpoint CHECKPOINT`: download pretrained models [cityscapes, kitti] or load from the provided path (default=`cityscapes`)
+   - `-oh or --output_heatmap_topic OUTPUT_HEATMAP_TOPIC`: publish the semantic and instance maps on this topic as `OUTPUT_HEATMAP_TOPIC/semantic` and `OUTPUT_HEATMAP_TOPIC/instance` (default=`/opendr/panoptic`)
+   - `-ov or --output_rgb_image_topic OUTPUT_RGB_IMAGE_TOPIC`: publish the panoptic segmentation map as an RGB image on this topic, or a more detailed overview if using the `--detailed_visualization` flag (default=`/opendr/panoptic/rgb_visualization`)
+   - `--detailed_visualization`: generate a combined overview of the input RGB image and the semantic, instance, and panoptic segmentation maps and publish it on `OUTPUT_RGB_IMAGE_TOPIC` (default=deactivated)
 
 3. Default output topics:
   - Output images: `/opendr/panoptic/semantic`, `/opendr/panoptic/instance`, `/opendr/panoptic/rgb_visualization`
@@ -855,6 +857,37 @@ whose documentation can be found [here](../../../../docs/reference/object-tracki
 
 For viewing the output, refer to the [notes above.](#notes)
 
+
+### LiDAR Based Panoptic Segmentation ROS2 Node
+A ROS2 node for performing panoptic segmentation on a specified point cloud stream using the [EfficientLPS](../../../../src/opendr/perception/panoptic_segmentation/README.md#efficientlps-efficient-lidar-panoptic-segmentation) network.
+
+You can find the LiDAR-based panoptic segmentation ROS2 node python script [here](./opendr_perception/panoptic_segmentation_efficient_lps_node.py), as well as the point cloud 2 publisher ROS2 node python script [here](./opendr_perception/point_cloud_2_publisher_node.py), which is explained further [here](#point-cloud-2-publisher-ros2-node). You can inspect the code and modify it as you wish to fit your needs.
+The EfficientLPS node makes use of the toolkit's [panoptic segmentation tool](../../../../src/opendr/perception/panoptic_segmentation/efficient_lps/efficient_lps_learner.py) whose documentation can be found [here](../../../../docs/reference/efficient-lps.md)
+and additional information about EfficientLPS [here](../../../../src/opendr/perception/panoptic_segmentation/README.md).
+
+#### Instructions for basic usage:
+
+1. First, download the SemanticKITTI dataset into `POINTCLOUD_LOCATION` as described in the [Panoptic Segmentation Datasets](../../../../src/opendr/perception/panoptic_segmentation/datasets/README.md) readme. Then, once the `SPLIT` type is specified (`train`, `test`, or `valid`, default=`valid`), the **Point Cloud 2 Publisher** can be started using the following line:
+
+- ```shell
+  ros2 run opendr_perception point_cloud_2_publisher -d POINTCLOUD_LOCATION -s SPLIT
+  ```
+2. After starting the **Point Cloud 2 Publisher**, one can start the **EfficientLPS node** using the following line:
+
+- ```shell
+  ros2 run opendr_perception panoptic_segmentation_efficient_lps
+  ```
+
+  The following optional arguments are available:
+   - `-h, --help`: show a help message and exit
+   - `-i or --input_point_cloud_2_topic INPUT_POINTCLOUD2_TOPIC`: Point Cloud 2 topic provided by either a point_cloud_2_publisher_node or any other 3D Point Cloud 2 node (default=`/opendr/dataset_point_cloud2`)
+   - `-c or --checkpoint CHECKPOINT`: download pretrained models [semantickitti] or load from the provided path (default=`semantickitti`)
+   - `-o or --output_heatmap_pointcloud_topic OUTPUT_HEATMAP_POINTCLOUD_TOPIC`: publish the 3D heatmap point cloud on `OUTPUT_HEATMAP_POINTCLOUD_TOPIC` (default=`/opendr/panoptic`)
+
+3. Default output topics:
+   - Output 3D heatmap point cloud: `/opendr/panoptic`
+
+
 ----
 ## Biosignal input
@@ -936,3 +969,22 @@ The following optional arguments are available:
   - `-f or --fps FPS`: data fps (default=`10`)
   - `-d or --dataset_path DATASET_PATH`: path to a dataset, if it does not exist, nano KITTI dataset will be downloaded there (default=`/KITTI/opendr_nano_kitti`)
   - `-ks or --kitti_subsets_path KITTI_SUBSETS_PATH`: path to KITTI subsets, used only if a KITTI dataset is downloaded (default=`../../src/opendr/perception/object_detection_3d/datasets/nano_kitti_subsets`)
+
+### Point Cloud 2 Publisher ROS2 Node
+
+The point cloud 2 dataset publisher publishes PointCloud2 messages from the pre-downloaded SemanticKITTI dataset. It is currently used by the [LiDAR Based Panoptic Segmentation ROS2 Node](#lidar-based-panoptic-segmentation-ros2-node).
+
+You can create an instance of this node with any `DatasetIterator` object that returns `(PointCloud, Target)` as elements,
+to use alongside other nodes and datasets; a minimal sketch of such a dataset follows below.
+You can inspect [the node](./opendr_perception/point_cloud_2_publisher_node.py) and modify it to your needs for other point cloud datasets.
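As noted above, any `DatasetIterator` can feed this publisher. Below is a minimal, hypothetical sketch of such a dataset that yields random point clouds; it assumes the `opendr.engine.datasets.DatasetIterator` base class used by the toolkit's other dataset nodes, and the class name is illustrative:

```python
import numpy as np

from opendr.engine.data import PointCloud
from opendr.engine.datasets import DatasetIterator  # assumed import path


class RandomPointCloudDataset(DatasetIterator):
    """Hypothetical stand-in dataset yielding random x, y, z + intensity points."""

    def __init__(self, num_samples: int = 100, points_per_cloud: int = 2048):
        super().__init__()
        self.num_samples = num_samples
        self.points_per_cloud = points_per_cloud

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        # Random x, y, z coordinates inside a 20 m cube plus one intensity channel.
        xyz = (np.random.rand(self.points_per_cloud, 3) - 0.5) * 20.0
        intensity = np.random.rand(self.points_per_cloud, 1)
        data = np.hstack([xyz, intensity]).astype(np.float32)
        # No ground truth here; a real dataset would return a Target instead of None.
        return PointCloud(data), None
```

Swapping `SemanticKittiDataset` for such a class in the node's `_init_dataset()` would let the same publisher loop serve any point cloud source.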
+
+To get a point cloud from a dataset on the disk, you can start a `point_cloud_2_publisher_node.py` node as:
+```shell
+ros2 run opendr_perception point_cloud_2_publisher
+```
+The following optional arguments are available:
+  - `-h or --help`: show a help message and exit
+  - `-d or --dataset_path DATASET_PATH`: path of the SemanticKITTI dataset used to publish the PointCloud2 messages (default=`./datasets/semantickitti`)
+  - `-s or --split SPLIT`: split of the dataset to use; only `train`, `valid` and `test` are available (default=`valid`)
+  - `-o or --output_point_cloud_2_topic OUTPUT_POINT_CLOUD_2_TOPIC`: topic name to publish the data (default=`/opendr/dataset_point_cloud2`)
+  - `-t or --test_data`: add this argument if you only want to test this node with the test data available on our server
\ No newline at end of file
diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py
new file mode 100644
index 0000000000..06e9300f91
--- /dev/null
+++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/panoptic_segmentation_efficient_lps_node.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+# Copyright 2020-2023 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+from pathlib import Path
+import argparse
+from typing import Optional
+
+import rclpy
+from rclpy.node import Node
+import matplotlib
+from sensor_msgs.msg import PointCloud2 as ROS_PointCloud2
+
+from opendr_bridge import ROS2Bridge
+from opendr.perception.panoptic_segmentation import EfficientLpsLearner
+
+# Avoid having a matplotlib GUI in a separate thread in the visualize() function
+matplotlib.use('Agg')
+
+
+class EfficientLpsNode(Node):
+
+    def __init__(self,
+                 input_pcl_topic: str,
+                 checkpoint: str,
+                 output_heatmap_pointcloud_topic: Optional[str] = None
+                 ):
+        """
+        Initialize the EfficientLPS ROS node and create an instance of the respective learner class.
+        :param input_pcl_topic: The name of the input point cloud topic.
+        :type input_pcl_topic: str
+        :param checkpoint: The path to the checkpoint file or the name of the pre-trained model.
+        :type checkpoint: str
+        :param output_heatmap_pointcloud_topic: topic for the output 3D heatmap point cloud
+        :type output_heatmap_pointcloud_topic: Optional[str]
+        """
+        super().__init__('opendr_efficient_lps_node')
+
+        self.input_pcl_topic = input_pcl_topic
+        self.checkpoint = checkpoint
+        self.output_heatmap_pointcloud_topic = output_heatmap_pointcloud_topic
+
+        # Initialize all ROS2 related things
+        self._bridge = ROS2Bridge()
+        self._visualization_publisher = None
+
+        # Initialize the panoptic segmentation network
+        config_file = Path(sys.modules[
+            EfficientLpsLearner.__module__].__file__).parent / 'configs' / 'singlegpu_semantickitti.py'
+        self._learner = EfficientLpsLearner(str(config_file))
+
+        # Other
+        self._tmp_folder = Path(__file__).parent.parent / 'tmp' / 'efficientlps'
+        self._tmp_folder.mkdir(exist_ok=True, parents=True)
+
+    def _init_learner(self) -> bool:
+        """
+        The model can be initialized via
+        1. downloading pre-trained weights for SemanticKITTI.
+        2. passing a path to an existing checkpoint file.
+        This has not been done in the __init__() function since logging is available only once the node is registered.
+        """
+        if self.checkpoint in ['semantickitti']:
+            file_path = EfficientLpsLearner.download(str(self._tmp_folder),
+                                                     trained_on=self.checkpoint)
+            self.checkpoint = file_path
+
+        if self._learner.load(self.checkpoint):
+            self.get_logger().info('Successfully loaded the checkpoint.')
+            return True
+        else:
+            self.get_logger().error('Failed to load the checkpoint.')
+            return False
+
+    def _init_publisher(self):
+        """
+        Set up the publishers as requested by the user.
+        """
+        if self.output_heatmap_pointcloud_topic is not None:
+            self._visualization_publisher = self.create_publisher(ROS_PointCloud2,
+                                                                  self.output_heatmap_pointcloud_topic,
+                                                                  10)
+
+    def _init_subscriber(self):
+        """
+        Subscribe to all relevant topics.
+        """
+        self.pointcloud2_subscriber = self.create_subscription(ROS_PointCloud2,
+                                                               self.input_pcl_topic,
+                                                               self.callback,
+                                                               1)
+
+    def listen(self):
+        """
+        Start the node and begin processing input data. The order of the function calls ensures that the node does not
+        try to process input point clouds without being in a trained state.
+        """
+        if self._init_learner():
+            self._init_publisher()
+            self._init_subscriber()
+            self.get_logger().info('EfficientLPS node started!')
+            rclpy.spin(self)
+
+            # Destroy the node explicitly
+            # (optional - otherwise it will be done automatically
+            # when the garbage collector destroys the node object)
+            self.destroy_node()
+            rclpy.shutdown()
+
+    def callback(self, data: ROS_PointCloud2):
+        """
+        Predict the panoptic segmentation map from the input point cloud and publish the results.
+        :param data: PointCloud2 data message
+        :type data: sensor_msgs.msg.PointCloud2
+        """
+        pointcloud = self._bridge.from_ros_point_cloud2(data)
+
+        try:
+            prediction = self._learner.infer(pointcloud)
+        except Exception as e:
+            self.get_logger().error('Failed to perform inference: {}'.format(e))
+            return
+
+        try:
+            # The output topics are only published if there is at least one subscriber
+            if self._visualization_publisher is not None and self._visualization_publisher.get_subscription_count() > 0:
+                pointcloud_visualization = EfficientLpsLearner.visualize(pointcloud,
+                                                                         prediction,
+                                                                         return_pointcloud=True,
+                                                                         return_pointcloud_type="panoptic")
+                ros_pointcloud2_msg = self._bridge.to_ros_point_cloud2(pointcloud_visualization,
+                                                                       self.get_clock().now().to_msg(),
+                                                                       channels='rgb')
+                self._visualization_publisher.publish(ros_pointcloud2_msg)
+        except Exception as e:
+            self.get_logger().error('Failed to publish the results: {}'.format(e))
+
+
+def main(args=None):
+    rclpy.init(args=args)
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('-i', '--input_point_cloud_2_topic', type=str, default='/opendr/dataset_point_cloud2',
+                        help='Point Cloud 2 topic provided by either a point_cloud_2_publisher_node '
+                             'or any other 3D Point Cloud 2 node')
+    parser.add_argument('-c', '--checkpoint', type=str, default='semantickitti',
+                        help='Download pretrained models [semantickitti] or load from the provided path')
+    parser.add_argument('-o', '--output_heatmap_pointcloud_topic', type=str, default="/opendr/panoptic",
+                        help='Publish the output 3D heatmap point cloud on this topic')
+
+    args = parser.parse_args()
+    efficient_lps_node = EfficientLpsNode(args.input_point_cloud_2_topic,
+                                          args.checkpoint,
+                                          args.output_heatmap_pointcloud_topic)
+    efficient_lps_node.listen()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py
new file mode 100644
index 0000000000..48bb3f4fce
--- /dev/null
+++ b/projects/opendr_ws_2/src/opendr_perception/opendr_perception/point_cloud_2_publisher_node.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# Copyright 2020-2023 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import time
+
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import PointCloud2 as ROS_PointCloud2
+from opendr_bridge import ROS2Bridge
+
+from opendr.perception.panoptic_segmentation import SemanticKittiDataset
+from opendr.perception.panoptic_segmentation import EfficientLpsLearner
+
+
+class PointCloud2DatasetNode(Node):
+
+    def __init__(self,
+                 path: str = './datasets/semantickitti',
+                 split: str = 'valid',
+                 output_point_cloud_2_topic: str = "/opendr/dataset_point_cloud2"
+                 ):
+        """
+        Creates a ROS2 node for publishing PointCloud2 messages read from the SemanticKITTI dataset
+        :param path: Path to the SemanticKITTI dataset from which the point clouds are read
+        :type path: str
+        :param split: Split of the dataset to use (train, valid or test)
+        :type split: str
+        :param output_point_cloud_2_topic: Topic to which we are publishing the point clouds
+        :type output_point_cloud_2_topic: str
+        """
+        super().__init__("opendr_point_cloud_2_dataset_node")
+
+        self.path = path
+        self.split = split
+        self._ros2_bridge = ROS2Bridge()
+
+        if output_point_cloud_2_topic is not None:
+            self.output_point_cloud_2_publisher = self.create_publisher(ROS_PointCloud2,
+                                                                        output_point_cloud_2_topic,
+                                                                        10)
+
+    def start(self):
+        """
+        Starts the ROS2 node
+        """
+        if self._init_dataset():
+            self.get_logger().info("Starting point cloud 2 dataset node")
+            i = 0
+            while rclpy.ok():
+                point_cloud = self.dataset[i % len(self.dataset)][0]
+                self.get_logger().info("Publishing point_cloud_2 [" + str(i) + "]")
+                message = self._ros2_bridge.to_ros_point_cloud2(point_cloud,
+                                                                self.get_clock().now().to_msg())
+                self.output_point_cloud_2_publisher.publish(message)
+                i += 1
+
+                time.sleep(0.1)
+
+    def _init_dataset(self):
+        try:
+            self.dataset = SemanticKittiDataset(path=self.path, split=self.split)
+            return True
+        except FileNotFoundError:
+            self.get_logger().error("Dataset not found. Please download the dataset "
+                                    "and extract it, or provide a valid path.")
+            return False
+
+
+def main(args=None):
+    rclpy.init(args=args)
+
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('-d', '--dataset_path', type=str, default='./datasets/semantickitti',
+                        help='path to the SemanticKITTI dataset from which the point clouds are published')
+    parser.add_argument('-s', '--split', type=str, default='valid',
+                        help='split of the dataset to use (train, valid, test)')
+    parser.add_argument('-o', '--output_point_cloud_2_topic', type=str, default='/opendr/dataset_point_cloud2',
+                        help='topic for the output point cloud')
+    parser.add_argument('-t', '--test_data', action='store_true',
+                        help='Use uploaded test data on the FTP server')
+    args = parser.parse_args()
+
+    if args.test_data:
+        args.dataset_path = EfficientLpsLearner.download(args.dataset_path, mode="test_data", prepare_data=True)
+    dataset_node = PointCloud2DatasetNode(args.dataset_path, args.split, args.output_point_cloud_2_topic)
+
+    dataset_node.start()
+
+    # Destroy the node explicitly
+    # (optional - otherwise it will be done automatically
+    # when the garbage collector destroys the node object)
+    dataset_node.destroy_node()
+    rclpy.shutdown()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/projects/opendr_ws_2/src/opendr_perception/setup.py b/projects/opendr_ws_2/src/opendr_perception/setup.py
index bfdd8bfddd..a2a21291ee 100644
--- a/projects/opendr_ws_2/src/opendr_perception/setup.py
+++ b/projects/opendr_ws_2/src/opendr_perception/setup.py
@@ -32,10 +32,12 @@
             'object_tracking_2d_siamrpn = opendr_perception.object_tracking_2d_siamrpn_node:main',
             'face_detection_retinaface = opendr_perception.face_detection_retinaface_node:main',
             'semantic_segmentation_bisenet = opendr_perception.semantic_segmentation_bisenet_node:main',
-            'panoptic_segmentation = opendr_perception.panoptic_segmentation_efficient_ps_node:main',
+            'panoptic_segmentation_efficient_ps = opendr_perception.panoptic_segmentation_efficient_ps_node:main',
+            'panoptic_segmentation_efficient_lps = opendr_perception.panoptic_segmentation_efficient_lps_node:main',
             'face_recognition = opendr_perception.face_recognition_node:main',
             'fall_detection = opendr_perception.fall_detection_node:main',
             'point_cloud_dataset = opendr_perception.point_cloud_dataset_node:main',
+            'point_cloud_2_publisher = opendr_perception.point_cloud_2_publisher_node:main',
             'image_dataset = opendr_perception.image_dataset_node:main',
             'object_detection_3d_voxel = opendr_perception.object_detection_3d_voxel_node:main',
             'object_tracking_3d_ab3dmot = opendr_perception.object_tracking_3d_ab3dmot_node:main',
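Finally, the two new bridge methods can be exercised outside of any node. The sketch below is illustrative and not part of the patch; it assumes a sourced ROS2 environment with the OpenDR toolkit and `sensor_msgs_py` installed. It converts a small OpenDR `PointCloud` to a `PointCloud2` message and reads the points back:

```python
import numpy as np
from builtin_interfaces.msg import Time
from sensor_msgs_py import point_cloud2 as pc2

from opendr.engine.data import PointCloud
from opendr_bridge import ROS2Bridge

bridge = ROS2Bridge()

# Four points: x, y, z plus one extra float channel (e.g. intensity).
data = np.array([[0.0, 0.0, 0.0, 0.5],
                 [1.0, 0.0, 0.0, 0.1],
                 [0.0, 1.0, 0.0, 0.9],
                 [0.0, 0.0, 1.0, 0.3]], dtype=np.float32)

# With channels=None, the extra column is packed as a generic
# FLOAT32 field named "channel_0" (see to_ros_point_cloud2 above).
msg = bridge.to_ros_point_cloud2(PointCloud(data), Time())

print([f.name for f in msg.fields])  # ['x', 'y', 'z', 'channel_0']
for p in pc2.read_points_list(msg):
    print(p.x, p.y, p.z, p.channel_0)
```

Since `read_points_list` returns named tuples keyed by the field names, the generic `channel_N` fields remain accessible by name on the consumer side.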