Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Release Candidate March 2018 #55

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ bool VisualReprojectionError<CameraType, DistortionType>::Evaluate(
if (projection_failed && J_keypoint_wrt_intrinsics_ptr != nullptr) {
J_keypoint_wrt_intrinsics_ptr->setZero();
}
if (projection_failed && J_keypoint_wrt_intrinsics_ptr != nullptr) {
if (projection_failed && J_keypoint_wrt_distortion_ptr != nullptr) {
J_keypoint_wrt_distortion_ptr->setZero();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include <map-resources/resource-typedefs.h>
#include <opencv2/core/mat.hpp>
#include <opencv2/core/types.hpp>
#include <resources-common/point-cloud.h>

namespace dense_reconstruction {
namespace stereo {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <resources-common/point-cloud.h>

#include "dense-reconstruction/stereo-camera-utils.h"

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,5 +24,6 @@
<depend>maplab_common</depend>
<depend>opencv3_catkin</depend>
<depend>posegraph</depend>
<depend>resources_common</depend>
<depend>vi_map</depend>
</package>
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <Eigen/Dense>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <resources-common/point-cloud.h>

namespace dense_reconstruction {
namespace stereo {
Expand Down Expand Up @@ -128,7 +129,7 @@ void convertDisparityMapToPointCloud(
pointcloud->colors.push_back(b);
}
}
CHECK_LE(pointcloud->size(), 3 * max_size);
CHECK_LE(pointcloud->size(), static_cast<size_t>(3 * max_size));
}

// Convert disparity map to a depth map in the target camera frame.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ void computeDepthForStereoCamerasOfMission(
const aslam::Transformation& T_C2_C1, const vi_map::MissionId& mission_id,
const backend::ResourceType& depth_resource_type, vi_map::VIMap* vi_map) {
CHECK_NOTNULL(vi_map);
CHECK_GT(kSupportedDepthTypes.count(depth_resource_type), 0)
CHECK_GT(kSupportedDepthTypes.count(depth_resource_type), 0u)
<< "This depth type is not supported! type: "
<< backend::ResourceTypeNames[static_cast<int>(depth_resource_type)];

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,8 @@

#include <Eigen/Core>
#include <aslam/cameras/camera.h>
#include <aslam/common/memory.h>
#include <aslam/common/occupancy-grid.h>
#include <aslam/frames/visual-nframe.h>
#include <aslam/matcher/match.h>
#include <aslam/tracker/feature-tracker.h>
#include <gflags/gflags.h>
#include <opencv2/video/tracking.hpp>
#include <aslam/frames/visual-frame.h>
#include <opencv2/features2d/features2d.hpp>

#include "feature-tracking/feature-tracking-types.h"

Expand All @@ -21,37 +16,23 @@ namespace feature_tracking {
class FeatureDetectorExtractor {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
explicit FeatureDetectorExtractor(const aslam::Camera& camera);
explicit FeatureDetectorExtractor(
const aslam::Camera& camera,
const FeatureTrackingExtractorSettings& extractor_settings,
const FeatureTrackingDetectorSettings& detector_settings);

void detectAndExtractFeatures(aslam::VisualFrame* frame) const;
cv::Ptr<cv::DescriptorExtractor> getExtractorPtr() const;

private:
void initialize();

/// \brief A simple non-maximum suppression algorithm that erases keypoints
/// in a specified radius around a queried keypoint if their response
/// is below ratio_threshold times the response of the queried
/// keypoint.
/// @param[in] radius: Radius around queried keypoint that is searched for
/// other keypoints to potentially suppress them.
/// @param[in] ratio_threshold: Suppress keypoints if their response is lower
/// than this threshold times the response of
/// the queried keypoint. A lower threshold should
/// make the suppression more robust. However,
/// this will result in more keypoints remaining
/// close to each other.
/// @param[out] keypoints: A subset of the keypoints will be erased according
/// to above criteria.
const aslam::Camera& camera_;
const FeatureTrackingExtractorSettings extractor_settings_;
const FeatureTrackingDetectorSettings detector_settings_;

cv::Ptr<cv::FeatureDetector> detector_;
cv::Ptr<cv::DescriptorExtractor> extractor_;

public:
// Descriptor extractor settings are stored in this struct.
const SweFeatureTrackingExtractorSettings extractor_settings_;
// Feature detector settings are stored in this struct.
const SweFeatureTrackingDetectorSettings detector_settings_;
};

} // namespace feature_tracking
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@ struct SimpleBriskFeatureTrackingSettings {
const double matching_image_space_distance_threshold_px;
};

struct SweFeatureTrackingExtractorSettings {
struct FeatureTrackingExtractorSettings {
enum class DescriptorType { kOcvFreak, kBrisk };
SweFeatureTrackingExtractorSettings();
FeatureTrackingExtractorSettings();
DescriptorType convertStringToDescriptorType(
const std::string& descriptor_string);
/// Type of descriptor used by SWE.
Expand All @@ -36,8 +36,8 @@ struct SweFeatureTrackingExtractorSettings {
float freak_pattern_scale;
};

struct SweFeatureTrackingDetectorSettings {
SweFeatureTrackingDetectorSettings();
struct FeatureTrackingDetectorSettings {
FeatureTrackingDetectorSettings();

// Settings for the non-maximum suppression algorithm.
bool detector_use_nonmaxsuppression;
Expand Down Expand Up @@ -80,6 +80,15 @@ struct SweFeatureTrackingDetectorSettings {
size_t min_tracking_distance_to_image_border_px;

double keypoint_uncertainty_px;

// Settings for gridded detector to ensure a certain distribution of keypoints
// across the image.
bool gridded_detector_use_gridded;
double gridded_detector_cell_num_features_scaler;
size_t gridded_detector_cell_num_features;
size_t gridded_detector_num_grid_cols;
size_t gridded_detector_num_grid_rows;
size_t gridded_detector_num_threads_per_image;
};

} // namespace feature_tracking
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,218 @@
#ifndef FEATURE_TRACKING_GRIDDED_DETECTOR_H_
#define FEATURE_TRACKING_GRIDDED_DETECTOR_H_
#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <functional>
#include <mutex>
#include <vector>

#include <aslam/frames/visual-frame.h>
#include <aslam/tracker/tracking-helpers.h>
#include <maplab-common/parallel-process.h>
#include <maplab-common/threading-helpers.h>
#include <opencv2/core/version.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>

namespace feature_tracking {

// TODO(magehrig): Local non-maximum suppression only on octave level for
// improved scale invariance.
// Suppresses keypoints that lie within `radius` pixels of a sufficiently
// stronger keypoint: a keypoint is erased if its response is below
// `ratio_threshold` times the response of any keypoint inside its
// neighborhood circle. A per-row lookup table over the y-sorted keypoints
// restricts each neighborhood query to the candidate rows.
//
// TODO(magehrig): Local non-maximum suppression only on octave level for
// improved scale invariance.
//
// @param[in] image_height     Height of the image the keypoints belong to.
// @param[in] radius           Neighborhood radius in pixels that is searched
//                             for keypoints to potentially suppress.
// @param[in] ratio_threshold  In (0, 1]. Suppress a keypoint if its response
//                             is lower than this threshold times the response
//                             of a nearby keypoint. A lower threshold makes
//                             the suppression more robust but leaves more
//                             keypoints close to each other.
// @param[in,out] keypoints    Keypoint set; suppressed entries are erased
//                             in place (relative order preserved).
inline void localNonMaximumSuppression(
    size_t image_height, const float radius, const float ratio_threshold,
    std::vector<cv::KeyPoint>* keypoints) {
  CHECK_NOTNULL(keypoints);
  CHECK_GT(radius, 0.0f);
  CHECK_GT(ratio_threshold, 0.0f);
  CHECK_LE(ratio_threshold, 1.0f);
  CHECK_GT(image_height, 0u);

  if (keypoints->empty()) {
    return;
  }

  const float radius_sq = radius * radius;
  const size_t num_keypoints = keypoints->size();

  // Lightweight view of a keypoint that remembers its index in *keypoints so
  // suppression flags can be mapped back after sorting.
  struct KeyPointData {
    KeyPointData(const cv::KeyPoint& keypoint, const size_t _keypoint_index)
        : coordinates{keypoint.pt.x, keypoint.pt.y},  // NOLINT
          response(keypoint.response),
          keypoint_index(_keypoint_index) {}
    std::array<float, 2> coordinates;
    float response;
    size_t keypoint_index;
  };

  typedef std::vector<KeyPointData>::const_iterator KeyPointDataIterator;

  std::function<bool(const KeyPointData&, KeyPointDataIterator)>  // NOLINT
      IsInsideCircle = [radius_sq](
                           const KeyPointData& keypoint_1,
                           KeyPointDataIterator keypoint_iterator_2) -> bool {
    const float x_diff =
        keypoint_1.coordinates[0] - keypoint_iterator_2->coordinates[0];
    const float y_diff =
        keypoint_1.coordinates[1] - keypoint_iterator_2->coordinates[1];
    return (x_diff * x_diff + y_diff * y_diff) < radius_sq;
  };

  std::function<size_t(const int, const int, const int)> Clamp =
      [](  // NOLINT
          const int lower, const int upper, const int in) -> size_t {
    return std::min<int>(std::max<int>(in, lower), upper);
  };

  std::vector<KeyPointData> keypoint_data_vector;
  keypoint_data_vector.reserve(num_keypoints);
  for (size_t i = 0u; i < num_keypoints; ++i) {
    keypoint_data_vector.emplace_back((*keypoints)[i], i);
  }

  // Create LUT of keypoints in y axis: corner_row_LUT[y] is the number of
  // keypoints with y-coordinate strictly below row y.
  std::sort(
      keypoint_data_vector.begin(), keypoint_data_vector.end(),
      [](const KeyPointData& lhs, const KeyPointData& rhs) -> bool {
        return lhs.coordinates[1] < rhs.coordinates[1];
      });

  std::vector<size_t> corner_row_LUT;
  corner_row_LUT.reserve(image_height);

  size_t num_kpts_below_y = 0u;
  for (size_t y = 0u; y < image_height; ++y) {
    while (num_kpts_below_y < num_keypoints &&
           y > keypoint_data_vector[num_kpts_below_y].coordinates[1]) {
      ++num_kpts_below_y;
    }
    corner_row_LUT.push_back(num_kpts_below_y);
  }
  CHECK_EQ(num_kpts_below_y, keypoint_data_vector.size());

  // Flags (indexed by the ORIGINAL keypoint index) marking keypoints to
  // reject.
  std::vector<bool> erase_keypoints(num_keypoints, false);

  for (size_t i = 0u; i < num_keypoints; ++i) {
    const KeyPointData& current_keypoint_data = keypoint_data_vector[i];
    // Rows that could contain neighbors within `radius` of this keypoint.
    const size_t y_top = Clamp(
        0, static_cast<int>(image_height - 1),
        std::floor(current_keypoint_data.coordinates[1] - radius));
    const size_t y_bottom = Clamp(
        0, static_cast<int>(image_height - 1),
        std::ceil(current_keypoint_data.coordinates[1] + radius));
    CHECK_LT(y_top, image_height);
    CHECK_LE(y_bottom, image_height);

    CHECK_LT(corner_row_LUT[y_top], keypoint_data_vector.size());
    CHECK_LE(corner_row_LUT[y_bottom], keypoint_data_vector.size());
    KeyPointDataIterator nearest_corners_begin =
        keypoint_data_vector.begin() + corner_row_LUT[y_top];
    KeyPointDataIterator nearest_corners_end =
        keypoint_data_vector.begin() + corner_row_LUT[y_bottom];

    for (KeyPointDataIterator it = nearest_corners_begin;
         it != nearest_corners_end; ++it) {
      // Skip the keypoint itself, keypoints that are already suppressed, and
      // keypoints outside the neighborhood circle.
      if (it->keypoint_index == current_keypoint_data.keypoint_index ||
          erase_keypoints[it->keypoint_index] ||
          !IsInsideCircle(current_keypoint_data, it)) {
        continue;
      }
      const float response_threshold =
          ratio_threshold * current_keypoint_data.response;
      if (response_threshold > it->response) {
        erase_keypoints[it->keypoint_index] = true;
      }
    }
  }

  // Remove the flagged non-maximum keypoints with a stable compaction loop.
  // NOTE: the previous implementation used std::remove_if with a stateful
  // predicate ("*it_erase++"), which relies on unspecified predicate
  // invocation order and copy behavior; an explicit loop is well-defined.
  size_t write_index = 0u;
  for (size_t read_index = 0u; read_index < num_keypoints; ++read_index) {
    if (!erase_keypoints[read_index]) {
      if (write_index != read_index) {
        (*keypoints)[write_index] = (*keypoints)[read_index];
      }
      ++write_index;
    }
  }
  keypoints->resize(write_index);
}

// Detects keypoints independently in each cell of a regular grid laid over
// the image (processed in parallel) to enforce a spatially uniform keypoint
// distribution. Cell-local keypoint coordinates are shifted back into the
// full-image frame, optionally filtered with local non-maximum suppression,
// and finally reduced to the `max_feature_count` strongest responses.
//
// @param[in] detector           Detector applied to every grid cell.
// @param[in] image              Full input image.
// @param[in] detection_mask     Optional detection mask (may be empty); it is
//                               cropped per cell alongside the image.
// @param[in] detector_use_nonmaxsuppression           Whether to run
//                               localNonMaximumSuppression per cell.
// @param[in] detector_nonmaxsuppression_radius        See
//                               localNonMaximumSuppression.
// @param[in] detector_nonmaxsuppression_ratio_threshold  See
//                               localNonMaximumSuppression.
// @param[in] orb_detector_number_features  Capacity hint for the output.
// @param[in] max_feature_count  Upper bound on the number of keypoints kept.
// @param[in] gridded_detector_cell_num_features  Capacity hint per cell.
// @param[in] gridded_detector_num_grid_cols      Number of grid columns.
// @param[in] gridded_detector_num_grid_rows      Number of grid rows.
// @param[in] gridded_detector_num_threads_per_image  Thread count; 0 selects
//                               a default of (num cells / 2).
// @param[out] keypoints         Detected keypoints in full-image coordinates;
//                               cleared first. Left empty if the image is
//                               empty or max_feature_count is smaller than
//                               the number of grid cells.
inline void detectKeypointsGridded(
    const cv::Ptr<cv::FeatureDetector>& detector, const cv::Mat& image,
    const cv::Mat& detection_mask, const bool detector_use_nonmaxsuppression,
    const float detector_nonmaxsuppression_radius,
    const float detector_nonmaxsuppression_ratio_threshold,
    const size_t orb_detector_number_features, const size_t max_feature_count,
    const size_t gridded_detector_cell_num_features,
    const size_t gridded_detector_num_grid_cols,
    const size_t gridded_detector_num_grid_rows,
    const size_t gridded_detector_num_threads_per_image,
    std::vector<cv::KeyPoint>* keypoints) {
  CHECK_NOTNULL(keypoints)->clear();
  CHECK_GT(gridded_detector_num_grid_cols, 0u);
  CHECK_GT(gridded_detector_num_grid_rows, 0u);
  CHECK_GT(gridded_detector_cell_num_features, 0u);

  // Gridded detection only makes sense if at least one keypoint per grid
  // cell can be retained.
  if (image.empty() ||
      max_feature_count <
          gridded_detector_num_grid_rows * gridded_detector_num_grid_cols) {
    return;
  }
  keypoints->reserve(orb_detector_number_features);

  std::mutex m_keypoints;
  auto detectFeaturesOfGridCells = [&](const std::vector<size_t>& range) {
    for (const size_t cell_idx : range) {
      // Convert the linear cell index into (row, col) grid coordinates.
      const size_t celly = cell_idx / gridded_detector_num_grid_cols;
      const size_t cellx = cell_idx % gridded_detector_num_grid_cols;

      // Integer division distributes any leftover pixels across the cells.
      const cv::Range row_range(
          static_cast<int>(
              (celly * image.rows) / gridded_detector_num_grid_rows),
          static_cast<int>(
              ((celly + 1u) * image.rows) / gridded_detector_num_grid_rows));
      const cv::Range col_range(
          static_cast<int>(
              (cellx * image.cols) / gridded_detector_num_grid_cols),
          static_cast<int>(
              ((cellx + 1u) * image.cols) / gridded_detector_num_grid_cols));

      const cv::Mat sub_image = image(row_range, col_range);
      cv::Mat sub_mask;
      if (!detection_mask.empty()) {
        sub_mask = detection_mask(row_range, col_range);
      }

      std::vector<cv::KeyPoint> sub_keypoints;
      sub_keypoints.reserve(gridded_detector_cell_num_features);
      detector->detect(sub_image, sub_keypoints, sub_mask);

      // Shift cell-local coordinates back into the full-image frame.
      for (cv::KeyPoint& sub_keypoint : sub_keypoints) {
        sub_keypoint.pt.x += static_cast<float>(col_range.start);
        sub_keypoint.pt.y += static_cast<float>(row_range.start);
      }

      if (detector_use_nonmaxsuppression) {
        localNonMaximumSuppression(
            image.rows, detector_nonmaxsuppression_radius,
            detector_nonmaxsuppression_ratio_threshold, &sub_keypoints);
      }

      std::unique_lock<std::mutex> lock(m_keypoints);
      keypoints->insert(
          keypoints->end(), sub_keypoints.begin(), sub_keypoints.end());
    }
  };

  // Fall back to one thread per two grid cells if no explicit thread count
  // is configured.
  size_t num_threads = gridded_detector_num_threads_per_image;
  if (num_threads == 0u) {
    num_threads =
        gridded_detector_num_grid_cols * gridded_detector_num_grid_rows / 2;
  }
  CHECK_GT(num_threads, 0u);
  common::ParallelProcess(
      gridded_detector_num_grid_cols * gridded_detector_num_grid_rows,
      detectFeaturesOfGridCells, /*kAlwaysParallelize=*/true, num_threads);
  // Keep only the strongest responses.
  cv::KeyPointsFilter::retainBest(*keypoints, max_feature_count);
}

} // namespace feature_tracking

#endif // FEATURE_TRACKING_GRIDDED_DETECTOR_H_
Loading