diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 4e2b9fbe28597..a66ce609ba896 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -278,15 +278,8 @@ bool AnalysisPredictor::CreateExecutor() {
 #endif
   } else if (config_.NNAdapter().use_nnadapter) {
     if (config_.lite_engine_enabled()) {
-#ifdef LITE_SUBGRAPH_WITH_NNADAPTER
-      // Currently, Paddle-Lite's NNAdapter user interface only supports the
-      // transfer
-      // of Host data pointers. If it is currently used as a subgraph, execution
-      // efficiency will be sacrificed, so it is temporarily set to cpu place.
-      // And, the current lite engine of xpu must execute all parts of the
-      // model.
       place_ = paddle::platform::CPUPlace();
-#else
+#ifndef LITE_SUBGRAPH_WITH_NNADAPTER
       PADDLE_THROW(
           platform::errors::Unavailable("You tried to use an NNAdapter lite "
                                         "engine, but Paddle was not compiled "
diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc
index 513f3669a19ce..6ed67e6c79259 100644
--- a/paddle/fluid/inference/api/analysis_predictor_tester.cc
+++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc
@@ -61,6 +61,24 @@ TEST(AnalysisPredictor, analysis_off) {
   ASSERT_TRUE(predictor->Run(inputs, &outputs));
 }
 
+TEST(AnalysisPredictor, lite_nn_adapter_npu) {
+  AnalysisConfig config;
+  config.SetModel(FLAGS_dirname);
+  config.EnableLiteEngine();
+  config.NNAdapter()
+      .Disable()
+      .Enable()
+      .SetDeviceNames({"huawei_ascend_npu"})
+      .SetContextProperties("HUAWEI_ASCEND_NPU_SELECTED_DEVICE_IDS=0")
+      .SetModelCacheDir("cache_dirr")
+      .SetSubgraphPartitionConfigPath("")
+      .SetModelCacheBuffers("c1", {'c'});
+#ifndef LITE_SUBGRAPH_WITH_NNADAPTER
+  EXPECT_THROW(CreatePaddlePredictor(config),
+               paddle::platform::EnforceNotMet);
+#endif
+}
+
 TEST(AnalysisPredictor, analysis_on) {
   AnalysisConfig config;
   config.SetModel(FLAGS_dirname);
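For reference, a minimal caller-side sketch of the configuration path the new test exercises. The model directory is a placeholder, and it assumes a Paddle build with LITE_SUBGRAPH_WITH_NNADAPTER; in any other build, CreatePaddlePredictor throws platform::errors::Unavailable, which is exactly what the test asserts via EXPECT_THROW. All method names below come from the diff itself.

// Sketch only: enabling the NNAdapter lite engine through AnalysisConfig.
// "my_model_dir" is a placeholder path, not a name from this patch.
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("my_model_dir");  // placeholder model directory
  config.EnableLiteEngine();        // route supported subgraphs via Paddle-Lite
  config.NNAdapter()
      .Enable()
      .SetDeviceNames({"huawei_ascend_npu"})
      .SetContextProperties("HUAWEI_ASCEND_NPU_SELECTED_DEVICE_IDS=0");
  // Per the first hunk, place_ is always CPUPlace on this path; without
  // LITE_SUBGRAPH_WITH_NNADAPTER the next call throws Unavailable.
  auto predictor = paddle::CreatePaddlePredictor(config);
  // ... set inputs and call predictor->Run(...) as in the other tests.
  return 0;
}

Note the design choice the first hunk makes explicit: the CPU-place assignment is now unconditional, so only the error path remains guarded, replacing the old #ifdef/#else pair with a single #ifndef.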