chore: sync upstream #674

Merged on Jul 24, 2023 (26 commits)

Commits
d0685fc
feat(radar object tracking): add new object tracking package for rada…
YoshiRi Jul 20, 2023
a0914e7
feat(utils): add function to visualize drivable lanes (#4282)
satoshi-ota Jul 20, 2023
1ccf8b7
feat(yabloc_pose_initializer): make yabloc independent of dnn model b…
KYabuuchi Jul 20, 2023
5eca537
feat(avoidance): output slow down wall marker (#4341)
satoshi-ota Jul 20, 2023
a28bb2a
fix(avoidance): guard umconfortable stop (#4342)
satoshi-ota Jul 20, 2023
969cb78
fix(behavior_velocity_planner): expend the ego_lane_with_next_lane ar…
beyzanurkaya Jul 20, 2023
2c58e5e
refactor(object_merger): read parameters from autoware_launch (#4339)
ismetatabay Jul 20, 2023
fc5ec80
refactor(image_projection_based_fusion): convert input topic names to…
ismetatabay Jul 20, 2023
d70c718
fix(utils): fix unstable drivable area generation (#4293)
satoshi-ota Jul 21, 2023
902adb6
feat(tier4_perception_launch): add radar faraway detection (#4330)
scepter914 Jul 21, 2023
f42ca17
feat(avoidance): flexible avoidance path generation (#4298)
satoshi-ota Jul 21, 2023
9daf414
refactor(lane_change): use a single function for isEndOfLane (#4338)
purewater0901 Jul 21, 2023
af60ad5
fix(avoidance): overwrite backward path velocity (#4354)
satoshi-ota Jul 21, 2023
0e07aa8
chore(workflows): update build-and-test-differential to use self host…
xmfcx Jul 21, 2023
5183938
chore(tensorrt_common): fix spell-check (#4343)
kminoda Jul 21, 2023
9dad706
refactor(utils): rename debug utils files (#4355)
satoshi-ota Jul 21, 2023
6752bdb
fix(out_of_lane): prevent crash when current footprint is empty (#4351)
maxime-clem Jul 21, 2023
14bb892
ci: add label checks before running self-hosted runner (#4368)
mitsudome-r Jul 22, 2023
24cd57f
fix(roi_cluster_fusion): fuse unknown roi (#4123)
badai-nguyen Jul 22, 2023
365f16e
fix(start_planner): fix geometric pull out start pose (#4362)
kosuke55 Jul 22, 2023
cbaaa69
refactor(avoidance): parameterize magic number (#4116)
satoshi-ota Jul 22, 2023
01cea10
feat(behavior_velocity_planner): enable auto mode without rtc_auto_mo…
kyoichi-sugahara Jul 22, 2023
10d2ad5
fix(perception_utils): fix `-Werror=reorder` (#4369)
satoshi-ota Jul 23, 2023
bb3a3c3
feat(obstacle_avoidance_planner): make MPT's output not std::optional…
takayuki5168 Jul 23, 2023
ff8c637
refactor: use tier4_autoware_utils::intersect (#4313)
takayuki5168 Jul 23, 2023
8e6c02e
feat(path_smoother): make EB's output not std::optional (#4371)
takayuki5168 Jul 23, 2023
Files changed (changes from 6 commits)
1 change: 0 additions & 1 deletion localization/yabloc/yabloc_pose_initializer/CMakeLists.txt
@@ -50,7 +50,6 @@ target_link_libraries(${TARGET} ${PCL_LIBRARIES} Sophus::Sophus)
# Semantic segmentation
install(PROGRAMS
src/semantic_segmentation/semantic_segmentation_core.py
src/semantic_segmentation/semantic_segmentation_node.py
src/semantic_segmentation/semantic_segmentation_server.py
DESTINATION lib/${PROJECT_NAME}
)
14 changes: 14 additions & 0 deletions localization/yabloc/yabloc_pose_initializer/README.md
@@ -5,6 +5,20 @@ This package contains some nodes related to initial pose estimation.
- [camera_pose_initializer](#camera_pose_initializer)
- [semantic_segmentation_server](#semantic_segmentation_server)

Ideally, this package downloads a pre-trained semantic segmentation model during the build and loads it at runtime for initialization.
However, to handle cases where network connectivity is not available at build time, **the default behavior is not to download the model during build.**
Even if the model is not downloaded, initialization will still complete, but the accuracy may be compromised.

<!-- cspell: ignore DDOWNLOAD_ARTIFACTS -->

To download the model, please specify `--cmake-args -DDOWNLOAD_ARTIFACTS=ON` to the build command.

```bash
colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release -DDOWNLOAD_ARTIFACTS=ON --packages-select yabloc_pose_initializer
```

For detailed information about the downloaded contents, please consult the `download.cmake` file in this package.

## Note

This package makes use of external code. The trained files are provided by apollo. The trained files are automatically downloaded when you build.

@@ -70,7 +70,8 @@ class CameraPoseInitializer : public rclcpp::Node
PoseCovStamped create_rectified_initial_pose(
const Eigen::Vector3f & pos, double yaw_angle_rad, const PoseCovStamped & src_msg);

bool estimate_pose(const Eigen::Vector3f & position, double & yaw_angle_rad, double yaw_std_rad);
std::optional<double> estimate_pose(
const Eigen::Vector3f & position, const double yaw_angle_rad, const double yaw_std_rad);
};
} // namespace yabloc


This file was deleted.

@@ -20,10 +20,6 @@
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl_conversions/pcl_conversions.h>

namespace yabloc
{
CameraPoseInitializer::CameraPoseInitializer()
@@ -74,7 +70,7 @@ cv::Mat bitwise_and_3ch(const cv::Mat src1, const cv::Mat src2)
return merged;
}

int count_nonzero(cv::Mat image_3ch)
int count_non_zero(cv::Mat image_3ch)
{
std::vector<cv::Mat> images;
cv::split(image_3ch, images);
@@ -85,20 +81,20 @@ int count_nonzero(cv::Mat image_3ch)
return count;
}

bool CameraPoseInitializer::estimate_pose(
const Eigen::Vector3f & position, double & yaw_angle_rad, double yaw_std_rad)
std::optional<double> CameraPoseInitializer::estimate_pose(
const Eigen::Vector3f & position, const double yaw_angle_rad, const double yaw_std_rad)
{
if (!projector_module_->define_project_func()) {
return false;
return std::nullopt;
}
if (!lane_image_) {
RCLCPP_WARN_STREAM(get_logger(), "vector map is not ready ");
return false;
return std::nullopt;
}
// TODO(KYabuuchi) check latest_image_msg's time stamp, too
if (!latest_image_msg_.has_value()) {
RCLCPP_WARN_STREAM(get_logger(), "source image is not ready");
return false;
return std::nullopt;
}

Image segmented_image;
@@ -110,10 +106,22 @@ bool CameraPoseInitializer::estimate_pose(
using namespace std::literals::chrono_literals;
std::future_status status = result_future.wait_for(1000ms);
if (status == std::future_status::ready) {
segmented_image = result_future.get()->dst_image;
const auto response = result_future.get();
if (response->success) {
segmented_image = response->dst_image;
} else {
RCLCPP_WARN_STREAM(get_logger(), "segmentation service failed unexpectedly");
// NOTE: Even if the segmentation service fails, the function will still return the
// yaw_angle_rad as it is and complete the initialization. The service fails
// when the DNN model is not downloaded. Ideally, initialization should rely on
// segmentation, but this implementation allows initialization even in cases where network
// connectivity is not available.
return yaw_angle_rad;
}
} else {
RCLCPP_ERROR_STREAM(get_logger(), "segmentation service exited unexpectedly");
return false;
RCLCPP_ERROR_STREAM(
get_logger(), "segmentation service did not return within the expected time");
return std::nullopt;
}
}

@@ -149,30 +157,18 @@ bool CameraPoseInitializer::estimate_pose(
if (lane_angle_rad) {
gain = 2 + std::cos((lane_angle_rad.value() - angle_rad) / 2.0);
}
const float score = gain * count_nonzero(dst);

// DEBUG:
constexpr bool imshow = false;
if (imshow) {
cv::Mat show_image;
cv::hconcat(std::vector<cv::Mat>{rotated_image, vector_map_image, dst}, show_image);
cv::imshow("and operator", show_image);
cv::waitKey(50);
}
// If count_non_zero() returns 0 for every candidate, the orientation is decided by the gain alone
const float score = gain * (1 + count_non_zero(dst));

scores.push_back(score);
angles_rad.push_back(angle_rad);
}

{
size_t max_index =
std::distance(scores.begin(), std::max_element(scores.begin(), scores.end()));
yaw_angle_rad = angles_rad.at(max_index);
}

marker_module_->publish_marker(scores, angles_rad, position);

return true;
const size_t max_index =
std::distance(scores.begin(), std::max_element(scores.begin(), scores.end()));
return angles_rad.at(max_index);
}

void CameraPoseInitializer::on_map(const HADMapBin & msg)
@@ -193,8 +189,6 @@ void CameraPoseInitializer::on_service(
{
RCLCPP_INFO_STREAM(get_logger(), "CameraPoseInitializer on_service");

response->success = false;

const auto query_pos_with_cov = request->pose_with_covariance;
const auto query_pos = request->pose_with_covariance.pose.pose.position;
const auto orientation = request->pose_with_covariance.pose.pose.orientation;
@@ -203,12 +197,14 @@ void CameraPoseInitializer::on_service(
RCLCPP_INFO_STREAM(get_logger(), "Given initial position " << pos_vec3f.transpose());

// Estimate orientation
const auto header = request->pose_with_covariance.header;
double yaw_angle_rad = 2 * std::atan2(orientation.z, orientation.w);
if (estimate_pose(pos_vec3f, yaw_angle_rad, yaw_std_rad)) {
const double initial_yaw_angle_rad = 2 * std::atan2(orientation.z, orientation.w);
const auto yaw_angle_rad_opt = estimate_pose(pos_vec3f, initial_yaw_angle_rad, yaw_std_rad);
if (yaw_angle_rad_opt.has_value()) {
response->success = true;
response->pose_with_covariance =
create_rectified_initial_pose(pos_vec3f, yaw_angle_rad, query_pos_with_cov);
create_rectified_initial_pose(pos_vec3f, yaw_angle_rad_opt.value(), query_pos_with_cov);
} else {
response->success = false;
}
}
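
A side note on the core refactor in this file: `estimate_pose` now returns `std::optional<double>` instead of writing through a reference and returning `bool`. A minimal standalone sketch of the two styles, using hypothetical function names rather than the package's actual API:

```cpp
#include <cmath>
#include <optional>

// Before-style API: a success flag plus an in/out parameter (hypothetical simplification).
bool estimate_yaw_old(double measurement, double & yaw_rad)
{
  if (std::isnan(measurement)) {
    return false;  // caller has to remember to check the flag before using yaw_rad
  }
  yaw_rad = measurement;
  return true;
}

// After-style API: the value and the "no estimate" case travel together in one type.
std::optional<double> estimate_yaw_new(double measurement)
{
  if (std::isnan(measurement)) {
    return std::nullopt;
  }
  return measurement;
}

int main()
{
  double yaw = 0.0;
  if (estimate_yaw_old(1.57, yaw)) {
    // use yaw
  }

  if (const auto yaw_opt = estimate_yaw_new(1.57)) {
    // use *yaw_opt; an unset optional cannot be read by accident
  }
  return 0;
}
```

Returning an optional also makes the new fallback path natural: when the segmentation service reports failure, the function can simply return the caller-supplied `yaw_angle_rad` unchanged.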

@@ -32,7 +32,7 @@ void MarkerModule::publish_marker(
auto minmax = std::minmax_element(scores.begin(), scores.end());
auto normalize = [minmax](int score) -> float {
return static_cast<float>(score - *minmax.first) /
static_cast<float>(*minmax.second - *minmax.first);
std::max(1e-4f, static_cast<float>(*minmax.second - *minmax.first));
};

MarkerArray array;
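
A note on the normalization fix above: clamping the denominator with `std::max(1e-4f, ...)` keeps the result finite when every score is identical, which can now happen when the segmentation model is missing. A small self-contained sketch of the guarded pattern, with made-up scores:

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
  // If the DNN model is missing, all candidate scores can collapse to the same value.
  const std::vector<int> scores{3, 3, 3};

  const auto minmax = std::minmax_element(scores.begin(), scores.end());
  const auto normalize = [minmax](int score) -> float {
    // Clamping the denominator turns the degenerate case into 0 instead of NaN or inf.
    return static_cast<float>(score - *minmax.first) /
           std::max(1e-4f, static_cast<float>(*minmax.second - *minmax.first));
  };

  for (const int s : scores) {
    std::printf("%f\n", normalize(s));  // prints 0.000000 for each entry
  }
  return 0;
}
```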

This file was deleted.

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

from cv_bridge import CvBridge
@@ -23,6 +24,12 @@
from sensor_msgs.msg import Image
from yabloc_pose_initializer.srv import SemanticSegmentation

# cspell: ignore DDOWNLOAD
ERROR_MESSAGE = """\
The yabloc_pose_initializer is not working correctly because the DNN model has not been downloaded correctly.
To download models, "-DDOWNLOAD_ARTIFACTS=ON" is required at build time.
Please see the README of yabloc_pose_initializer for more information."""


class SemanticSegmentationServer(Node):
def __init__(self):
@@ -31,26 +38,43 @@ def __init__(self):
model_path = self.declare_parameter("model_path", "").value

self.get_logger().info("model path: " + model_path)
self.dnn_ = core.SemanticSegmentationCore(model_path)
self.bridge_ = CvBridge()

if os.path.exists(model_path):
self.dnn_ = core.SemanticSegmentationCore(model_path)
else:
self.dnn_ = None
self.__print_error_message()

self.srv = self.create_service(
SemanticSegmentation, "semantic_segmentation_srv", self.on_service
)

def __print_error_message(self):
messages = ERROR_MESSAGE.split("\n")
for message in messages:
self.get_logger().error(message)

def on_service(self, request, response):
response.dst_image = self.__inference(request.src_image)
if self.dnn_:
response.dst_image = self.__inference(request.src_image)
response.success = True
else:
self.__print_error_message()
response.success = False
response.dst_image = request.src_image

return response

def __inference(self, msg: Image):
stamp = msg.header.stamp
self.get_logger().info("Subscribed image: " + str(stamp))

src_image = self.bridge_.imgmsg_to_cv2(msg)
mask = self.dnn_.inference(src_image)

mask = self.dnn_.inference(src_image)
dst_msg = self.bridge_.cv2_to_imgmsg(mask)
dst_msg.encoding = "bgr8"

return dst_msg


@@ -1,3 +1,4 @@
sensor_msgs/Image src_image
---
bool success
sensor_msgs/Image dst_image
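
The added `success` field lets a caller distinguish a genuine segmentation mask from the pass-through fallback in which `dst_image` merely echoes `src_image`. Below is a hedged client sketch: the generated header path and the demo node are assumptions (only the service type, the `semantic_segmentation_srv` name, and the request/response fields come from this diff), so treat it as an illustration rather than the package's actual client code.

```cpp
#include <chrono>
#include <memory>

#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <yabloc_pose_initializer/srv/semantic_segmentation.hpp>  // assumed generated header path

using SemanticSegmentation = yabloc_pose_initializer::srv::SemanticSegmentation;

// Illustrative only: send one image and branch on the new `success` flag.
sensor_msgs::msg::Image segment_or_fallback(
  const rclcpp::Node::SharedPtr & node,
  const rclcpp::Client<SemanticSegmentation>::SharedPtr & client,
  const sensor_msgs::msg::Image & src_image)
{
  auto request = std::make_shared<SemanticSegmentation::Request>();
  request->src_image = src_image;

  auto future = client->async_send_request(request);
  if (rclcpp::spin_until_future_complete(node, future, std::chrono::seconds(1)) !=
      rclcpp::FutureReturnCode::SUCCESS) {
    RCLCPP_ERROR(node->get_logger(), "segmentation service did not respond in time");
    return src_image;
  }

  const auto response = future.get();
  if (!response->success) {
    // The server echoed src_image (e.g. the DNN model was not downloaded); degrade gracefully.
    RCLCPP_WARN(node->get_logger(), "segmentation unavailable, using the raw image");
    return src_image;
  }
  return response->dst_image;  // a genuine segmentation mask
}

int main(int argc, char ** argv)
{
  rclcpp::init(argc, argv);
  auto node = std::make_shared<rclcpp::Node>("segmentation_client_demo");
  auto client = node->create_client<SemanticSegmentation>("semantic_segmentation_srv");
  // In real use, wait for the service and pass an actual camera image here.
  (void)segment_or_fallback(node, client, sensor_msgs::msg::Image{});
  rclcpp::shutdown();
  return 0;
}
```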