Fix linking to torch deploy and run server (pytorch#227)
Summary:
Pull Request resolved: pytorch/torchrec#227

Properly link to torch deploy and run server

todo:
- update installation readme with instructions from the following quip doc: https://fb.quip.com/35YWAWdgW6aW

Reviewed By: zyan0

Differential Revision: D35697855

fbshipit-source-id: 794d43f572cb7ba78eb4686deb8d792a2ea6f522
s4ayub authored and facebook-github-bot committed Apr 20, 2022
1 parent 6417bef commit ba62d19
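For context, the pattern this commit wires up on the server side — an explicit torch::deploy environment pointing at the externed Python packages, passed into the interpreter manager before the torch.package archive is loaded — looks roughly like the sketch below. The paths and interpreter count are hypothetical placeholders; the calls mirror those visible in the server.cpp diff, plus Package::loadPickle for unpickling the model, assuming the torch::deploy API as of this revision.

#include <torch/csrc/deploy/deploy.h>
#include <torch/csrc/deploy/path_environment.h>

#include <memory>

int main() {
  // Directory containing the Python packages that the torch.package archive
  // declares as extern (hypothetical path).
  auto env = std::make_shared<torch::deploy::PathEnvironment>(
      "/tmp/python_packages");

  // Spin up a small pool of interpreters that all share the same environment.
  auto manager = std::make_shared<torch::deploy::InterpreterManager>(4, env);

  // Load the torch.package archive and unpickle the model stored inside it.
  torch::deploy::Package package =
      manager->loadPackage("/tmp/model_package.zip");
  torch::deploy::ReplicatedObj model =
      package.loadPickle("model", "model.pkl");
  return 0;
}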
Showing 2 changed files with 47 additions and 13 deletions.
48 changes: 36 additions & 12 deletions torchrec/inference/CMakeLists.txt
@@ -19,20 +19,49 @@ if(NOT DEFINED TORCH_DEPLOY_LIB_PATH)
)
endif()

set(CMAKE_C_FLAGS "-Wl,-export-dynamic")
set(CMAKE_EXE_LINKER_FLAGS "-Wl,-export-dynamic")
# abi and other flags

if(DEFINED GLIBCXX_USE_CXX11_ABI)
if(${GLIBCXX_USE_CXX11_ABI} EQUAL 1)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=1")
set(TORCH_CXX_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=1")
endif()
endif()

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")

# dependencies

find_package(Torch REQUIRED)
find_package(folly REQUIRED)
find_package(gflags REQUIRED)

include_directories(${Torch_INCLUDE_DIRS})
include_directories(${folly_INCLUDE_DIRS})
include_directories(${PYTORCH_FMT_INCLUDE_PATH})

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall ${TORCH_CXX_FLAGS}")
set(CMAKE_CXX_STANDARD 17)

# torch deploy library

add_library(torch_deploy_internal STATIC
${DEPLOY_INTERPRETER_PATH}/libtorch_deployinterpreter.o
${DEPLOY_SRC_PATH}/deploy.cpp
${DEPLOY_SRC_PATH}/deploy/loader.cpp
${DEPLOY_SRC_PATH}/path_environment.cpp
${DEPLOY_SRC_PATH}/elf_file.cpp)

# For python builtins. caffe2_interface_library properly
# makes use of the --whole-archive option.
target_link_libraries(torch_deploy_internal PRIVATE
crypt pthread dl util m z ffi lzma readline nsl ncursesw panelw
)
target_link_libraries(torch_deploy_internal
PUBLIC shm torch ${PYTORCH_LIB_FMT}
)
caffe2_interface_library(torch_deploy_internal torch_deploy)

# inference library

# for our own header files
@@ -46,13 +75,9 @@ add_library(inference SHARED
src/ResultSplit.cpp
)

# whole archive is needed to link against the static library
target_link_libraries(inference "-Wl,--whole-archive" ${TORCH_DEPLOY_LIB_PATH})
target_link_libraries(
inference
"-Wl,--no-whole-archive"
"${TORCH_LIBRARIES}"
${FOLLY_LIBRARIES}
# -rdynamic is needed to link against the static library
target_link_libraries(inference "-Wl,--no-as-needed -rdynamic"
dl torch_deploy "${TORCH_LIBRARIES}" ${FOLLY_LIBRARIES}
)

# for generated protobuf
@@ -79,8 +104,7 @@ target_link_libraries(pred_grpc_proto
# server

add_executable(server server.cpp)
target_link_libraries(inference "-Wl,--whole-archive" ${TORCH_DEPLOY_LIB_PATH})
target_link_libraries(server "-Wl,--no-whole-archive"
target_link_libraries(server
inference
pred_grpc_proto
"${TORCH_LIBRARIES}"
12 changes: 11 additions & 1 deletion torchrec/inference/server.cpp
@@ -13,6 +13,7 @@
#include <folly/futures/Future.h>
#include <folly/io/IOBuf.h>
#include <torch/csrc/deploy/deploy.h>
#include <torch/csrc/deploy/path_environment.h>
#include <torch/torch.h>

#include <glog/logging.h>
@@ -43,6 +44,11 @@ DEFINE_int32(max_batch_size, 2048, "");
DEFINE_string(server_address, "0.0.0.0", "");
DEFINE_string(server_port, "50051", "");

DEFINE_string(
python_packages_path,
"",
"Used to load the packages that you 'extern' with torch.package");

namespace {

std::unique_ptr<torchrec::PredictionRequest> toTorchRecRequest(
@@ -201,8 +207,12 @@ int main(int argc, char* argv[]) {
std::vector<torchrec::BatchQueueCb> batchQueueCbs;
std::unordered_map<std::string, std::string> batchingMetadataMap;

std::shared_ptr<torch::deploy::Environment> env =
std::make_shared<torch::deploy::PathEnvironment>(
FLAGS_python_packages_path);

auto manager = std::make_shared<torch::deploy::InterpreterManager>(
FLAGS_n_gpu * FLAGS_n_interp_per_gpu);
FLAGS_n_gpu * FLAGS_n_interp_per_gpu, env);
{
torch::deploy::Package package = manager->loadPackage(FLAGS_package_path);
auto I = package.acquireSession();

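With the PathEnvironment in place, the packages externed at packaging time can be supplied to the server at startup through the new flag. A hypothetical invocation (placeholder paths; flag names as defined or referenced in server.cpp) would look something like:

./server --package_path=/tmp/model_package.zip --python_packages_path=/tmp/python_packages --server_port=50051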