forked from open-mmlab/mmdetection3d
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
apply cmake-format and refactor cmake scripts of building onnx and tensorrt ops (
open-mmlab#99) * apply cmake-format and refactor cmake scripts of building onnx and tensorrt ops * add static target * suppress 'CMAKE_CUDA_ARCHITECTURES' warning when cmake with version 3.18 or later is used * fix typo
- Loading branch information
Showing 8 changed files with 144 additions and 121 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,43 +1,43 @@ | ||
cmake_minimum_required (VERSION 3.10) | ||
project (mmdeploy_backend_ops) | ||
cmake_minimum_required(VERSION 3.10) | ||
project(mmdeploy_backend_ops) | ||
|
||
# ONNXRUNTIME config | ||
|
||
# enable onnxruntime | ||
option(BUILD_ONNXRUNTIME_OPS "enable ONNXRUNTIME ops" OFF) | ||
# ONNXRUNTIME search path | ||
if (BUILD_ONNXRUNTIME_OPS) | ||
if (NOT DEFINED ONNXRUNTIME_DIR) | ||
set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR}) | ||
endif() | ||
if (NOT ONNXRUNTIME_DIR) | ||
if(BUILD_ONNXRUNTIME_OPS) | ||
if(NOT DEFINED ONNXRUNTIME_DIR) | ||
set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR}) | ||
endif() | ||
if(NOT ONNXRUNTIME_DIR) | ||
message(ERROR " ONNXRUNTIME_DIR is not found.") | ||
endif() | ||
endif() | ||
endif() | ||
|
||
# TensorRT config | ||
|
||
# enable tensorrt | ||
option(BUILD_TENSORRT_OPS "enable TensorRT ops" OFF) | ||
# TensorRT search path | ||
if (BUILD_TENSORRT_OPS) | ||
if (NOT DEFINED TENSORRT_DIR) | ||
set(TENSORRT_DIR $ENV{TENSORRT_DIR}) | ||
endif() | ||
if(BUILD_TENSORRT_OPS) | ||
if(NOT DEFINED TENSORRT_DIR) | ||
set(TENSORRT_DIR $ENV{TENSORRT_DIR}) | ||
endif() | ||
endif() | ||
|
||
# NCNN config | ||
|
||
# enable ncnn | ||
option(BUILD_NCNN_OPS "enable NCNN ops" OFF) | ||
# NCNN search path | ||
if (BUILD_NCNN_OPS) | ||
if (NOT DEFINED NCNN_DIR) | ||
set(NCNN_DIR $ENV{NCNN_DIR}) | ||
endif() | ||
if (NOT NCNN_DIR) | ||
if(BUILD_NCNN_OPS) | ||
if(NOT DEFINED NCNN_DIR) | ||
set(NCNN_DIR $ENV{NCNN_DIR}) | ||
endif() | ||
if(NOT NCNN_DIR) | ||
message(ERROR " NCNN_DIR is not found.") | ||
endif() | ||
endif() | ||
endif() | ||
|
||
add_subdirectory (backend_ops) | ||
add_subdirectory(backend_ops) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,15 +1,14 @@ | ||
|
||
find_package(Protobuf) | ||
|
||
if(PROTOBUF_FOUND) | ||
protobuf_generate_cpp(ONNX_PROTO_SRCS ONNX_PROTO_HDRS ${NCNN_DIR}/tools/onnx/onnx.proto) | ||
add_executable(onnx2ncnn onnx2ncnn.cpp ${ONNX_PROTO_SRCS} ${ONNX_PROTO_HDRS}) | ||
target_include_directories(onnx2ncnn | ||
PRIVATE | ||
${PROTOBUF_INCLUDE_DIR} | ||
${CMAKE_CURRENT_BINARY_DIR}) | ||
target_link_libraries(onnx2ncnn PRIVATE ${PROTOBUF_LIBRARIES}) | ||
protobuf_generate_cpp(ONNX_PROTO_SRCS ONNX_PROTO_HDRS | ||
${NCNN_DIR}/tools/onnx/onnx.proto) | ||
add_executable(onnx2ncnn onnx2ncnn.cpp ${ONNX_PROTO_SRCS} ${ONNX_PROTO_HDRS}) | ||
target_include_directories(onnx2ncnn PRIVATE ${PROTOBUF_INCLUDE_DIR} | ||
${CMAKE_CURRENT_BINARY_DIR}) | ||
target_link_libraries(onnx2ncnn PRIVATE ${PROTOBUF_LIBRARIES}) | ||
|
||
else() | ||
message(FATAL_ERROR "Protobuf not found, onnx model convert tool won't be built") | ||
message( | ||
FATAL_ERROR "Protobuf not found, onnx model convert tool won't be built") | ||
endif() |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,18 +1,5 @@ | ||
# add plugin source | ||
set(PLUGIN_LISTS constantofshape | ||
expand | ||
gather | ||
shape | ||
tensorslice | ||
topk) | ||
|
||
foreach(PLUGIN_ITER ${PLUGIN_LISTS}) | ||
file(GLOB PLUGIN_OPS_SRCS ${PLUGIN_ITER}/*.cpp) | ||
file(GLOB PLUGIN_OPS_HEADS ${PLUGIN_ITER}/*.h) | ||
set(BACKEND_OPS_SRCS ${BACKEND_OPS_SRCS} ${PLUGIN_OPS_SRCS} ${PLUGIN_OPS_HEADS}) | ||
endforeach(PLUGIN_ITER) | ||
|
||
set(BACKEND_OPS_SRCS ${BACKEND_OPS_SRCS} ncnn_ops_register.cpp) | ||
file(GLOB_RECURSE BACKEND_OPS_SRCS *.cpp) | ||
|
||
add_library(${SHARED_TARGET} SHARED ${BACKEND_OPS_SRCS}) | ||
target_link_libraries(${SHARED_TARGET} ncnn) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,72 +1,112 @@ | ||
set(TARGET_NAME mmlab_tensorrt_ops) | ||
set(SHARED_TARGET ${TARGET_NAME}) | ||
set(STATIC_TARGET ${TARGET_NAME}_static) | ||
|
||
# to suppress 'CMAKE_CUDA_ARCHITECTURES' warning when cmake version is >= 18.0 | ||
cmake_policy(SET CMP0104 OLD) | ||
# cuda | ||
FIND_PACKAGE(CUDA REQUIRED) | ||
INCLUDE_DIRECTORIES(/usr/local/cuda/include) | ||
find_package(CUDA REQUIRED) | ||
include_directories(${CUDA_INCLUDE_DIRS}) | ||
enable_language(CUDA) | ||
|
||
set(CMAKE_POSITION_INDEPENDENT_CODE ON) | ||
|
||
if(MSVC) | ||
set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc.exe) | ||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=/wd4819,/wd4828") | ||
if(HAVE_CXX_FLAG_UTF_8) | ||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=/utf-8") | ||
endif() | ||
else() | ||
set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc) | ||
# Explicitly set the cuda host compiler. Because the default host compiler # | ||
# selected by cmake maybe wrong. | ||
set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER}) | ||
set(CUDA_NVCC_FLAGS | ||
"${CUDA_NVCC_FLAGS} -Xcompiler=-fPIC,-Wall,-fvisibility=hidden") | ||
endif() | ||
|
||
# set virtual compute architecture and real ones | ||
set(_NVCC_FLAGS) | ||
set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_52,code=sm_52") | ||
if(CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "8") | ||
set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_60,code=sm_60") | ||
set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_61,code=sm_61") | ||
endif() | ||
if(CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "9") | ||
set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_70,code=sm_70") | ||
endif() | ||
if(CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "10") | ||
set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_72,code=sm_72") | ||
set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75") | ||
endif() | ||
if(CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "11") | ||
set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80") | ||
set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_86,code=sm_86") | ||
endif() | ||
|
||
set(CUDA_NVCC_FLAGS_DEBUG "-g -O0") | ||
set(CUDA_NVCC_FLAGS_RELEASE "-O3") | ||
set(CUDA_NVCC_FLAGS "-std=c++11 ${CUDA_NVCC_FLAGS}") | ||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} ${CUDA_NVCC_FLAGS} ${_NVCC_FLAGS}") | ||
|
||
# tensorrt | ||
find_path(TENSORRT_INCLUDE_DIR NvInfer.h | ||
HINTS ${TENSORRT_DIR} ${CUDA_TOOLKIT_ROOT_DIR} | ||
PATH_SUFFIXES include) | ||
if (TENSORRT_INCLUDE_DIR) | ||
MESSAGE(STATUS " Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}") | ||
find_path( | ||
TENSORRT_INCLUDE_DIR NvInfer.h | ||
HINTS ${TENSORRT_DIR} ${CUDA_TOOLKIT_ROOT_DIR} | ||
PATH_SUFFIXES include) | ||
if(TENSORRT_INCLUDE_DIR) | ||
message(STATUS " Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}") | ||
else() | ||
MESSAGE(ERROR " Cannot found TensorRT headers") | ||
message(ERROR " Cannot found TensorRT headers") | ||
endif() | ||
|
||
find_library(TENSORRT_LIBRARY_INFER nvinfer | ||
HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR} | ||
PATH_SUFFIXES lib lib64 lib/x64) | ||
find_library(TENSORRT_LIBRARY_PARSERS nvparsers | ||
HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR} | ||
PATH_SUFFIXES lib lib64 lib/x64) | ||
find_library(TENSORRT_LIBRARY_INFER_PLUGIN nvinfer_plugin | ||
HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR} | ||
PATH_SUFFIXES lib lib64 lib/x64) | ||
find_library( | ||
TENSORRT_LIBRARY_INFER nvinfer | ||
HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR} | ||
PATH_SUFFIXES lib lib64 lib/x64) | ||
find_library( | ||
TENSORRT_LIBRARY_INFER_PLUGIN nvinfer_plugin | ||
HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR} | ||
PATH_SUFFIXES lib lib64 lib/x64) | ||
set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} | ||
${TENSORRT_LIBRARY_PARSERS} | ||
${TENSORRT_LIBRARY_INFER_PLUGIN} | ||
) | ||
if (TENSORRT_LIBRARY_INFER AND TENSORRT_LIBRARY_PARSERS AND TENSORRT_LIBRARY_INFER_PLUGIN) | ||
MESSAGE(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}") | ||
${TENSORRT_LIBRARY_INFER_PLUGIN}) | ||
if(TENSORRT_LIBRARY_INFER | ||
AND TENSORRT_LIBRARY_INFER_PLUGIN) | ||
message(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}") | ||
else() | ||
MESSAGE(ERROR " Cannot found TensorRT libs") | ||
message(FATAL_ERROR " Cannot found TensorRT libs") | ||
endif() | ||
find_package_handle_standard_args( | ||
TENSORRT DEFAULT_MSG TENSORRT_INCLUDE_DIR TENSORRT_LIBRARY) | ||
find_package_handle_standard_args(TENSORRT DEFAULT_MSG TENSORRT_INCLUDE_DIR | ||
TENSORRT_LIBRARY) | ||
if(NOT TENSORRT_FOUND) | ||
message(ERROR " Cannot find TensorRT library.") | ||
message(ERROR " Cannot find TensorRT library.") | ||
endif() | ||
INCLUDE_DIRECTORIES(${TENSORRT_INCLUDE_DIR}) | ||
include_directories(${TENSORRT_INCLUDE_DIR}) | ||
|
||
# cudnn | ||
include_directories(${CUDNN_DIR}/include) | ||
link_directories(${CUDNN_DIR}/lib64) | ||
|
||
# cub | ||
if (NOT DEFINED CUB_ROOT_DIR) | ||
add_definitions(-DTHRUST_IGNORE_DEPRECATED_CPP_DIALECT) | ||
if(NOT DEFINED CUB_ROOT_DIR) | ||
set(CUB_ROOT_DIR "${PROJECT_SOURCE_DIR}/third_party/cub") | ||
endif() | ||
INCLUDE_DIRECTORIES(${CUB_ROOT_DIR}) | ||
include_directories(${CUB_ROOT_DIR}) | ||
|
||
# add plugin source | ||
set(PLUGIN_LISTS scatternd | ||
nms | ||
roi_align | ||
batched_nms | ||
instance_norm | ||
modulated_deform_conv | ||
multi_level_roi_align | ||
grid_sampler) | ||
# include_directories(${CMAKE_CURRENT_SOURCE_DIR}/common) | ||
file(GLOB_RECURSE BACKEND_OPS_SRCS *.cpp *.cu) | ||
|
||
foreach(PLUGIN_ITER ${PLUGIN_LISTS}) | ||
file(GLOB PLUGIN_OPS_SRCS ${PLUGIN_ITER}/*.cpp ${PLUGIN_ITER}/*.cu) | ||
file(GLOB PLUGIN_OPS_HEADS ${PLUGIN_ITER}/*.h ${PLUGIN_ITER}/*.hpp ${PLUGIN_ITER}/*.cuh) | ||
set(BACKEND_OPS_SRCS ${BACKEND_OPS_SRCS} ${PLUGIN_OPS_SRCS} ${PLUGIN_OPS_HEADS}) | ||
endforeach(PLUGIN_ITER) | ||
|
||
list(APPEND BACKEND_OPS_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/common_impl/trt_cuda_helper.cu") | ||
|
||
set(INFER_PLUGIN_LIB ${TENSORRT_LIBRARY} cublas cudnn) | ||
set(INFER_PLUGIN_LIB ${TENSORRT_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} cudnn) | ||
|
||
cuda_add_library(${SHARED_TARGET} MODULE ${BACKEND_OPS_SRCS}) | ||
target_link_libraries(${SHARED_TARGET} ${INFER_PLUGIN_LIB}) | ||
target_include_directories(${SHARED_TARGET} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common) | ||
target_include_directories(${SHARED_TARGET} | ||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common) | ||
|
||
cuda_add_library(${STATIC_TARGET} STATIC ${BACKEND_OPS_SRCS}) | ||
target_link_libraries(${STATIC_TARGET} ${INFER_PLUGIN_LIB}) | ||
target_include_directories(${STATIC_TARGET} | ||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common) |