refactor(tensorrt_classifier)!: fix namespace and directory structure #8009

Merged
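This PR renames the package from tensorrt_classifier to autoware_tensorrt_classifier and moves its headers and namespace under the autoware prefix. A minimal migration sketch for downstream C++ code follows; the consumer class is hypothetical, but the include path, namespace, and member type match the diff below.

// Hypothetical consumer, for illustration only.
// Before this PR:
//   #include <tensorrt_classifier/tensorrt_classifier.hpp>
//   std::unique_ptr<tensorrt_classifier::TrtClassifier> classifier_;
// After this PR:
#include <autoware/tensorrt_classifier/tensorrt_classifier.hpp>

#include <memory>

struct ExampleConsumer  // hypothetical stand-in for a node such as CNNClassifier
{
  std::unique_ptr<autoware::tensorrt_classifier::TrtClassifier> classifier_;
};

Dependent packages likewise replace <depend>tensorrt_classifier</depend> with <depend>autoware_tensorrt_classifier</depend> in package.xml, as the traffic_light_classifier change below shows.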
2 changes: 1 addition & 1 deletion .github/CODEOWNERS
@@ -55,7 +55,7 @@
control/autoware_pid_longitudinal_controller/** mamoru.sobue@tier4.jp takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp
control/autoware_pure_pursuit/** takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp
control/autoware_shift_decider/** takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp
control/autoware_smart_mpc_trajectory_follower/** kosuke.takeuchi@tier4.jp masayuki.aino@proxima-ai-tech.com takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp

[GitHub Actions / spell-check-differential] Check warning on line 58 in .github/CODEOWNERS: unknown words "masayuki" and "aino".
control/autoware_trajectory_follower_base/** takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp
control/autoware_trajectory_follower_node/** takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp
control/autoware_vehicle_cmd_gate/** takamasa.horibe@tier4.jp tomoya.kimura@tier4.jp
@@ -136,7 +136,7 @@
perception/autoware_raindrop_cluster_filter/** dai.nguyen@tier4.jp yoshi.ri@tier4.jp yukihiro.saito@tier4.jp
perception/shape_estimation/** yoshi.ri@tier4.jp yukihiro.saito@tier4.jp
perception/simple_object_merger/** satoshi.tanaka@tier4.jp shunsuke.miura@tier4.jp yoshi.ri@tier4.jp
-perception/tensorrt_classifier/** kotaro.uetake@tier4.jp shunsuke.miura@tier4.jp
+perception/autoware_tensorrt_classifier/** kotaro.uetake@tier4.jp shunsuke.miura@tier4.jp
perception/tensorrt_yolox/** dan.umeda@tier4.jp manato.hirabayashi@tier4.jp
perception/traffic_light_arbiter/** kenzo.lobos@tier4.jp shunsuke.miura@tier4.jp
perception/traffic_light_classifier/** shunsuke.miura@tier4.jp tao.zhong@tier4.jp yukihiro.saito@tier4.jp
@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.5)
-project(tensorrt_classifier)
+project(autoware_tensorrt_classifier)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -O3 -Wno-write-strings -fopenmp -Wall")

@@ -34,8 +34,8 @@
* DEALINGS IN THE SOFTWARE.
*/

-#ifndef TENSORRT_CLASSIFIER__CALIBRATOR_HPP_
-#define TENSORRT_CLASSIFIER__CALIBRATOR_HPP_
+#ifndef AUTOWARE__TENSORRT_CLASSIFIER__CALIBRATOR_HPP_
+#define AUTOWARE__TENSORRT_CLASSIFIER__CALIBRATOR_HPP_
#include "cuda_utils/cuda_check_error.hpp"
#include "cuda_utils/cuda_unique_ptr.hpp"

@@ -52,7 +52,7 @@
#include <string>
#include <vector>

-namespace tensorrt_classifier
+namespace autoware::tensorrt_classifier
{
class ImageStream
{
@@ -524,6 +524,6 @@ class Int8MinMaxCalibrator : public nvinfer1::IInt8MinMaxCalibrator
// std for preprocessing
std::vector<float> m_std;
};
-} // namespace tensorrt_classifier
+} // namespace autoware::tensorrt_classifier

-#endif // TENSORRT_CLASSIFIER__CALIBRATOR_HPP_
+#endif // AUTOWARE__TENSORRT_CLASSIFIER__CALIBRATOR_HPP_
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#ifndef TENSORRT_CLASSIFIER__PREPROCESS_H_
-#define TENSORRT_CLASSIFIER__PREPROCESS_H_
+#ifndef AUTOWARE__TENSORRT_CLASSIFIER__PREPROCESS_H_
+#define AUTOWARE__TENSORRT_CLASSIFIER__PREPROCESS_H_

#include <cublas_v2.h>
#include <cuda.h>
@@ -178,4 +178,4 @@ extern void multi_scale_resize_bilinear_letterbox_nhwc_to_nchw32_batch_gpu(
float * dst, unsigned char * src, int d_w, int d_h, int d_c, Roi * d_roi, int s_w, int s_h,
int s_c, int batch, float norm, cudaStream_t stream);

-#endif // TENSORRT_CLASSIFIER__PREPROCESS_H_
+#endif // AUTOWARE__TENSORRT_CLASSIFIER__PREPROCESS_H_
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#ifndef TENSORRT_CLASSIFIER__TENSORRT_CLASSIFIER_HPP_
-#define TENSORRT_CLASSIFIER__TENSORRT_CLASSIFIER_HPP_
+#ifndef AUTOWARE__TENSORRT_CLASSIFIER__TENSORRT_CLASSIFIER_HPP_
+#define AUTOWARE__TENSORRT_CLASSIFIER__TENSORRT_CLASSIFIER_HPP_

#include <cuda_utils/cuda_check_error.hpp>
#include <cuda_utils/cuda_unique_ptr.hpp>
@@ -25,7 +25,7 @@
#include <string>
#include <vector>

-namespace tensorrt_classifier
+namespace autoware::tensorrt_classifier
{
using cuda_utils::CudaUniquePtr;
using cuda_utils::CudaUniquePtrHost;
@@ -129,6 +129,6 @@ class TrtClassifier
int batch_size_;
CudaUniquePtrHost<float[]> out_prob_h_;
};
-} // namespace tensorrt_classifier
+} // namespace autoware::tensorrt_classifier

-#endif // TENSORRT_CLASSIFIER__TENSORRT_CLASSIFIER_HPP_
+#endif // AUTOWARE__TENSORRT_CLASSIFIER__TENSORRT_CLASSIFIER_HPP_
@@ -1,11 +1,10 @@
<?xml version="1.0"?>
<package format="3">
-<name>tensorrt_classifier</name>
+<name>autoware_tensorrt_classifier</name>
<version>0.0.1</version>
<description>tensorrt classifier wrapper</description>

<author email="dan.umeda@tier4.jp">Dan Umeda</author>
<author email="mingyu.li@tier4.jp">Mingyu Li</author>
<maintainer email="kotaro.uetake@tier4.jp">Kotaro Uetake</maintainer>
<maintainer email="shunsuke.miura@tier4.jp">Shunsuke Miura</maintainer>

@@ -11,9 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <autoware/tensorrt_classifier/preprocess.h>
#include <stdio.h>
#include <stdlib.h>
-#include <tensorrt_classifier/preprocess.h>

#include <algorithm>

@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include <tensorrt_classifier/calibrator.hpp>
-#include <tensorrt_classifier/tensorrt_classifier.hpp>
+#include <autoware/tensorrt_classifier/calibrator.hpp>
+#include <autoware/tensorrt_classifier/tensorrt_classifier.hpp>

+#include <autoware/tensorrt_classifier/preprocess.h>
#include <omp.h>
-#include <tensorrt_classifier/preprocess.h>

#include <algorithm>
#include <functional>
@@ -95,7 +95,7 @@
return fileList;
}

-namespace tensorrt_classifier
+namespace autoware::tensorrt_classifier
{
TrtClassifier::TrtClassifier(
const std::string & model_path, const std::string & precision,
@@ -120,37 +120,38 @@
if (calibration_image_list_path != "") {
calibration_images = loadImageList(calibration_image_list_path, "");
}
-tensorrt_classifier::ImageStream stream(max_batch_size, input_dims, calibration_images);
+autoware::tensorrt_classifier::ImageStream stream(
+  max_batch_size, input_dims, calibration_images);
fs::path calibration_table{model_path};
std::string calibName = "";
std::string ext = "";
if (build_config.calib_type_str == "Entropy") {
ext = "EntropyV2-";
} else if (
build_config.calib_type_str == "Legacy" || build_config.calib_type_str == "Percentile") {
ext = "Legacy-";
} else {
ext = "MinMax-";
}
ext += "calibration.table";
calibration_table.replace_extension(ext);
fs::path histogram_table{model_path};
ext = "histogram.table";
histogram_table.replace_extension(ext);

std::unique_ptr<nvinfer1::IInt8Calibrator> calibrator;
if (build_config.calib_type_str == "Entropy") {
-calibrator.reset(
-  new tensorrt_classifier::Int8EntropyCalibrator(stream, calibration_table, mean_, std_));
+calibrator.reset(new autoware::tensorrt_classifier::Int8EntropyCalibrator(
+  stream, calibration_table, mean_, std_));
} else if (
build_config.calib_type_str == "Legacy" || build_config.calib_type_str == "Percentile") {
double quantile = 0.999999;
double cutoff = 0.999999;
-calibrator.reset(new tensorrt_classifier::Int8LegacyCalibrator(
+calibrator.reset(new autoware::tensorrt_classifier::Int8LegacyCalibrator(
stream, calibration_table, histogram_table, mean_, std_, true, quantile, cutoff));
} else {
-calibrator.reset(
-  new tensorrt_classifier::Int8MinMaxCalibrator(stream, calibration_table, mean_, std_));
+calibrator.reset(new autoware::tensorrt_classifier::Int8MinMaxCalibrator(
+  stream, calibration_table, mean_, std_));

[CodeScene Delta Analysis / CodeScene Cloud Delta Analysis (main)] Check warning on line 154 in perception/autoware_tensorrt_classifier/src/tensorrt_classifier.cpp: ❌ Getting worse: Complex Method. TrtClassifier::TrtClassifier already has high cyclomatic complexity, and it now grows from 83 to 84 lines of code. The function has many conditional statements (if, for, while), which lowers code health; avoid adding more conditionals and code to it without refactoring. (A possible extraction is sketched after this file's diff.)
}
trt_common_ = std::make_unique<tensorrt_common::TrtCommon>(
model_path, precision, std::move(calibrator), batch_config, max_workspace_size, build_config);
@@ -384,4 +385,4 @@
}
return true;
}
-} // namespace tensorrt_classifier
+} // namespace autoware::tensorrt_classifier
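On the CodeScene warning above about TrtClassifier::TrtClassifier's growing cyclomatic complexity: a possible follow-up, not part of this PR, would be to pull the calibrator selection out of the constructor into a small factory helper. The sketch below assumes the same includes and aliases as tensorrt_classifier.cpp (nvinfer1, fs, the Int8*Calibrator classes); the helper name and signature are hypothetical.

namespace autoware::tensorrt_classifier
{
// Hypothetical factory: keeps the calibrator branching out of the constructor.
std::unique_ptr<nvinfer1::IInt8Calibrator> makeInt8Calibrator(
  const std::string & calib_type, ImageStream & stream, const fs::path & calibration_table,
  const fs::path & histogram_table, const std::vector<float> & norm_mean,
  const std::vector<float> & norm_std)
{
  if (calib_type == "Entropy") {
    return std::make_unique<Int8EntropyCalibrator>(stream, calibration_table, norm_mean, norm_std);
  }
  if (calib_type == "Legacy" || calib_type == "Percentile") {
    const double quantile = 0.999999;
    const double cutoff = 0.999999;
    return std::make_unique<Int8LegacyCalibrator>(
      stream, calibration_table, histogram_table, norm_mean, norm_std, true, quantile, cutoff);
  }
  return std::make_unique<Int8MinMaxCalibrator>(stream, calibration_table, norm_mean, norm_std);
}
}  // namespace autoware::tensorrt_classifier

The constructor would then call this helper once with build_config.calib_type_str and pass the result to tensorrt_common::TrtCommon unchanged.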
2 changes: 1 addition & 1 deletion perception/traffic_light_classifier/package.xml
@@ -14,6 +14,7 @@

<build_depend>autoware_cmake</build_depend>

+<depend>autoware_tensorrt_classifier</depend>
<depend>cuda_utils</depend>
<depend>cv_bridge</depend>
<depend>image_transport</depend>
@@ -22,7 +23,6 @@
<depend>rclcpp</depend>
<depend>rclcpp_components</depend>
<depend>sensor_msgs</depend>
-<depend>tensorrt_classifier</depend>
<depend>tensorrt_common</depend>
<depend>tier4_perception_msgs</depend>

@@ -56,7 +56,7 @@ CNNClassifier::CNNClassifier(rclcpp::Node * node_ptr) : node_ptr_(node_ptr)
batch_size_ = input_dim.d[0];

tensorrt_common::BatchConfig batch_config{batch_size_, batch_size_, batch_size_};
-classifier_ = std::make_unique<tensorrt_classifier::TrtClassifier>(
+classifier_ = std::make_unique<autoware::tensorrt_classifier::TrtClassifier>(
model_file_path, precision, batch_config, mean_, std_);
if (node_ptr_->declare_parameter("build_only", false)) {
RCLCPP_INFO(node_ptr_->get_logger(), "TensorRT engine is built and shutdown node.");
@@ -17,14 +17,14 @@

#include "classifier_interface.hpp"

+#include <autoware/tensorrt_classifier/tensorrt_classifier.hpp>
#include <cuda_utils/cuda_unique_ptr.hpp>
#include <cuda_utils/stream_unique_ptr.hpp>
#include <image_transport/image_transport.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <rclcpp/rclcpp.hpp>
-#include <tensorrt_classifier/tensorrt_classifier.hpp>
#include <tensorrt_common/tensorrt_common.hpp>

#include <tier4_perception_msgs/msg/traffic_light_element.hpp>
@@ -111,7 +111,7 @@ class CNNClassifier : public ClassifierInterface

rclcpp::Node * node_ptr_;
int batch_size_;
-std::unique_ptr<tensorrt_classifier::TrtClassifier> classifier_;
+std::unique_ptr<autoware::tensorrt_classifier::TrtClassifier> classifier_;
image_transport::Publisher image_pub_;
std::vector<std::string> labels_;
std::vector<float> mean_;