cherry-pick #36653

Merged 2 commits on Oct 25, 2021
6 changes: 3 additions & 3 deletions cmake/external/lite.cmake
@@ -50,7 +50,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
set(LITE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/lite)

if(NOT LITE_GIT_TAG)
- set(LITE_GIT_TAG 4ab64daecc11fbf74fffdc6a4733f388472e7d5d)
+ set(LITE_GIT_TAG 62fc737d4a553bca738f96b0402b28f26a8d2d4f)
endif()

if(NOT CUDA_ARCH_NAME)
@@ -197,9 +197,9 @@ set(LITE_SHARED_LIB ${LITE_BINARY_DIR}/${LITE_OUTPUT_BIN_DIR}/cxx/lib/libpaddle_
if (LITE_WITH_NNADAPTER)
set(LITE_NNADAPTER_LIB ${LITE_BINARY_DIR}/${LITE_OUTPUT_BIN_DIR}/cxx/lib/libnnadapter.so)
if (NNADAPTER_WITH_HUAWEI_ASCEND_NPU)
- external_lite_libs(lite_nnadapter ${LITE_BINARY_DIR}/${LITE_OUTPUT_BIN_DIR}/cxx/lib/libnnadapter.so ${LITE_BINARY_DIR}/${LITE_OUTPUT_BIN_DIR}/cxx/lib/libnnadapter_driver_huawei_ascend_npu.so)
+ external_lite_libs(lite_nnadapter ${LITE_BINARY_DIR}/${LITE_OUTPUT_BIN_DIR}/cxx/lib/libnnadapter.so ${LITE_BINARY_DIR}/${LITE_OUTPUT_BIN_DIR}/cxx/lib/libhuawei_ascend_npu.so)
set(LITE_DEPS lite_full_static lite_nnadapter)
- set(LITE_NNADAPTER_NPU_LIB ${LITE_BINARY_DIR}/${LITE_OUTPUT_BIN_DIR}/cxx/lib/libnnadapter_driver_huawei_ascend_npu.so)
+ set(LITE_NNADAPTER_NPU_LIB ${LITE_BINARY_DIR}/${LITE_OUTPUT_BIN_DIR}/cxx/lib/libhuawei_ascend_npu.so)
endif()
else()
set(LITE_DEPS lite_full_static)
7 changes: 6 additions & 1 deletion paddle/fluid/framework/ir/fc_fuse_pass.cc
@@ -51,7 +51,12 @@ FCFusePass::FCFusePass() {
.IsTensor()
.End()
.AddAttr("axis")
- .IsNumGE(1)
+ .IsNumMatch<int>([](int axis) -> bool {
+   if (axis == -1 || axis >= 1) {
+     return true;
+   }
+   return false;
+ })
.End();

AddOpCompat(OpCompat("relu"))
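For context: the old .IsNumGE(1) check rejected axis == -1, the value Paddle's elementwise ops use to mean "align from the trailing dimension", so graphs using it failed the compatibility check and the fc fusion was skipped. A minimal standalone sketch of the new predicate's behavior (plain C++, independent of the OpCompat API; the helper name is illustrative):

#include <cassert>

// Mirrors the lambda added above: accept axis == -1 or any axis >= 1;
// reject 0 and all other negative values.
static bool AxisOk(int axis) { return axis == -1 || axis >= 1; }

int main() {
  assert(AxisOk(-1));   // newly accepted by this change
  assert(AxisOk(1));    // accepted before and after
  assert(!AxisOk(0));   // still rejected
  assert(!AxisOk(-2));  // still rejected
  return 0;
}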
1 change: 1 addition & 0 deletions paddle/fluid/inference/analysis/argument.h
@@ -238,6 +238,7 @@ struct Argument {
DECL_ARGUMENT_FIELD(xpu_autotune_file, XpuAutotuneFile, std::string);
DECL_ARGUMENT_FIELD(xpu_precision, XpuPrecision, std::string);
DECL_ARGUMENT_FIELD(xpu_adaptive_seqlen, XpuAdaptiveSeqlen, bool);
+ DECL_ARGUMENT_FIELD(xpu_device_id, XpuDeviceId, int);

DECL_ARGUMENT_FIELD(use_nnadapter, UseNNAdapter, bool);
DECL_ARGUMENT_FIELD(nnadapter_model_cache_dir, NNAdapterModelCacheDir,
1 change: 1 addition & 0 deletions paddle/fluid/inference/analysis/ir_pass_manager.cc
@@ -202,6 +202,7 @@ void IRPassManager::CreatePasses(Argument *argument,
new std::string(argument->xpu_autotune_file()));
pass->Set("precision", new std::string(argument->xpu_precision()));
pass->Set("adaptive_seqlen", new bool(argument->xpu_adaptive_seqlen()));
pass->Set("xpu_device_id", new int(argument->xpu_device_id()));
// NNAdapter Related
pass->Set("use_nnadapter", new bool(argument->use_nnadapter()));
pass->Set("nnadapter_model_cache_dir",
2 changes: 2 additions & 0 deletions paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
@@ -243,6 +243,7 @@ void LiteSubgraphPass::SetUpEngine(
bool use_gpu = Get<bool>("use_gpu");
bool enable_int8 = Get<bool>("enable_int8");
bool use_xpu = Get<bool>("use_xpu");
+ int xpu_device_id = Get<int>("xpu_device_id");
int xpu_l3_workspace_size = Get<int>("xpu_l3_workspace_size");
int cpu_math_library_num_threads = Get<int>("cpu_math_library_num_threads");
bool locked = Get<bool>("locked");
@@ -305,6 +306,7 @@ void LiteSubgraphPass::SetUpEngine(
};
config.cpu_math_library_num_threads = cpu_math_library_num_threads;
config.xpu_l3_workspace_size = xpu_l3_workspace_size;
+ config.device_id = xpu_device_id;
config.locked = locked;
config.autotune = autotune;
config.autotune_file = autotune_file;
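Together with the ir_pass_manager.cc hunk above, these additions thread the XPU device id through the pass attribute store: CreatePasses() stores a heap-allocated int under the key "xpu_device_id", and SetUpEngine() reads it back with Get<int> and copies it into the engine config. A minimal mock of that Set/Get handoff, assuming only that the store owns its values (an illustrative stand-in, not Paddle's actual Pass class):

#include <cassert>
#include <map>
#include <memory>
#include <string>

// Illustrative stand-in for a pass's string-keyed attribute store.
class MockPass {
 public:
  template <typename T>
  void Set(const std::string &key, T *value) {  // takes ownership of value
    attrs_[key] = std::shared_ptr<void>(value);
  }
  template <typename T>
  const T &Get(const std::string &key) const {
    return *static_cast<const T *>(attrs_.at(key).get());
  }

 private:
  std::map<std::string, std::shared_ptr<void>> attrs_;
};

int main() {
  MockPass pass;
  pass.Set("xpu_device_id", new int(1));        // producer side (CreatePasses)
  assert(pass.Get<int>("xpu_device_id") == 1);  // consumer side (SetUpEngine)
  return 0;
}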
1 change: 1 addition & 0 deletions paddle/fluid/inference/api/analysis_predictor.cc
@@ -619,6 +619,7 @@ void AnalysisPredictor::PrepareArgument() {
argument_.SetXpuAutotuneFile(config_.xpu_autotune_file_);
argument_.SetXpuPrecision(config_.xpu_precision_);
argument_.SetXpuAdaptiveSeqlen(config_.xpu_adaptive_seqlen_);
+ argument_.SetXpuDeviceId(config_.xpu_device_id_);
// NNAdapter related
argument_.SetUseNNAdapter(config_.NNAdapter().use_nnadapter);
argument_.SetNNAdapterDeviceNames(
1 change: 1 addition & 0 deletions paddle/fluid/inference/lite/engine.cc
@@ -67,6 +67,7 @@ paddle::lite_api::PaddlePredictor* EngineManager::Create(
lite_cxx_config.set_xpu_conv_autotune(cfg.autotune, cfg.autotune_file);
lite_cxx_config.set_xpu_multi_encoder_method(cfg.precision,
cfg.adaptive_seqlen);
+ lite_cxx_config.set_xpu_dev_per_thread(cfg.device_id);
#endif

#ifdef LITE_SUBGRAPH_WITH_NPU
3 changes: 3 additions & 0 deletions paddle/fluid/inference/lite/engine.h
@@ -39,6 +39,9 @@ struct EngineConfig {
std::vector<std::string> neglected_passes;
lite_api::LiteModelType model_type{lite_api::LiteModelType::kProtobuf};
bool model_from_memory{true};
+ // TODO(wilber): now only works for xpu, lite gpu can support device_id or
+ // not?
+ int device_id = 0;

// for xpu
size_t xpu_l3_workspace_size;
3 changes: 3 additions & 0 deletions paddle/fluid/inference/utils/io_utils.cc
@@ -197,6 +197,9 @@ void SerializeShapeRangeInfo(
void DeserializeShapeRangeInfo(
const std::string &path, paddle::inference::proto::ShapeRangeInfos *info) {
int fd = open(path.c_str(), O_RDONLY);
+ if (fd == -1) {
+   PADDLE_THROW(platform::errors::NotFound("File [%s] is not found.", path));
+ }
google::protobuf::io::FileInputStream *is =
new google::protobuf::io::FileInputStream(fd);
google::protobuf::TextFormat::Parse(is, info);
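Before this guard, a missing file handed fd == -1 straight to FileInputStream, and because the return value of TextFormat::Parse is ignored the failure was effectively silent; now callers get a catchable exception, which the new unit test below relies on. The same pattern in portable C++, sketched with std::runtime_error standing in for PADDLE_THROW (the function name is hypothetical):

#include <fcntl.h>

#include <stdexcept>
#include <string>

// Open a file read-only, throwing instead of returning an invalid fd.
int OpenOrThrow(const std::string &path) {
  int fd = open(path.c_str(), O_RDONLY);
  if (fd == -1) {
    throw std::runtime_error("File [" + path + "] is not found.");
  }
  return fd;
}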
3 changes: 3 additions & 0 deletions paddle/fluid/inference/utils/io_utils_tester.cc
@@ -118,4 +118,7 @@ TEST(shape_info_io, read_and_write) {
std::vector<std::string> names{"test1"};
paddle::inference::UpdateShapeRangeInfo(path, min_shape, max_shape, opt_shape,
names);
+ ASSERT_THROW(paddle::inference::DeserializeShapeRangeInfo(
+                  "no_exists_file", &min_shape, &max_shape, &opt_shape);
+              , paddle::platform::EnforceNotMet);
}
4 changes: 2 additions & 2 deletions paddle/scripts/paddle_build.sh
@@ -223,7 +223,7 @@ function cmake_base() {
-DWITH_GLOO=${gloo_flag}
-DWITH_LITE=${WITH_LITE:-OFF}
-DWITH_XPU=${WITH_XPU:-OFF}
- -DLITE_GIT_TAG=_release/v2.10
+ -DLITE_GIT_TAG=release/v2.10
-DWITH_UNITY_BUILD=${WITH_UNITY_BUILD:-OFF}
-DWITH_XPU_BKCL=${WITH_XPU_BKCL:-OFF}
-DWITH_ARM=${WITH_ARM:-OFF}
@@ -266,7 +266,7 @@ EOF
-DWITH_PSCORE=${distibuted_flag} \
-DWITH_PSLIB=${WITH_PSLIB:-OFF} \
-DWITH_GLOO=${gloo_flag} \
- -DLITE_GIT_TAG=_release/v2.10 \
+ -DLITE_GIT_TAG=release/v2.10 \
-DWITH_XPU=${WITH_XPU:-OFF} \
-DXPU_SDK_ROOT=${XPU_SDK_ROOT:-""} \
-DWITH_LITE=${WITH_LITE:-OFF} \
4 changes: 2 additions & 2 deletions tools/check_api_approvals.sh
@@ -41,13 +41,13 @@ function add_failed(){
api_params_diff=`python ${PADDLE_ROOT}/tools/check_api_compatible.py ${PADDLE_ROOT}/paddle/fluid/API_DEV.spec ${PADDLE_ROOT}/paddle/fluid/API_PR.spec`
api_spec_diff=`python ${PADDLE_ROOT}/tools/diff_api.py ${PADDLE_ROOT}/paddle/fluid/API_DEV.spec.api ${PADDLE_ROOT}/paddle/fluid/API_PR.spec.api`
if [ "$api_spec_diff" != "" -o "${api_params_diff}" != "" ]; then
echo_line="You must have one RD (XiaoguangHu01 or lanxianghit) approval for API change.\n"
echo_line="You must have one RD (XiaoguangHu01, lanxianghit or Superjomn) approval for API change.\n"
echo_line="${echo_line} and one TPM approval for API change: \n"
echo_line="${echo_line} jzhang533/ZhangJun, dingjiaweiww/DingJiaWei, Heeenrrry/LiKunLun, TCChenlong/ChenLong for general APIs\n"
echo_line="${echo_line} PangHua/XiangHui for distributed related APIs\n"
echo_line="${echo_line} twismon/WangYunKai, CheQiXiao/CheQiXiao for inference related APIs.\n"

- check_approval 1 46782768 47554610
+ check_approval 1 46782768 47554610 328693
check_approval 1 29231 23093488 28379894 11935832 2682285 12050047 50894398
fi
