[Inference] Add config.Summary api #34122

Merged · 3 commits · Jul 19, 2021

Changes from all commits
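This PR adds an AnalysisConfig::Summary() API that renders the current inference options (model paths, CPU/MKLDNN settings, GPU/TensorRT settings, XPU settings, and IR options) as a two-column text table, plus C and Go bindings (PD_ConfigSummary and Config.Summary). A minimal usage sketch for the C++ API, assuming a normal Paddle inference build; the model path is a hypothetical placeholder:

#include <iostream>
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("./mobilenet_v1");  // hypothetical model directory
  config.SwitchIrOptim(true);
  // Summary() returns the option/value table as a printable string.
  std::cout << config.Summary() << std::endl;
  return 0;
}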
2 changes: 1 addition & 1 deletion paddle/fluid/inference/api/CMakeLists.txt
@@ -27,7 +27,7 @@ if(WITH_MKLDNN)
  set(mkldnn_quantizer_cfg ${mkldnn_quantizer_cfg} PARENT_SCOPE)
endif()

-cc_library(analysis_config SRCS analysis_config.cc DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder)
+cc_library(analysis_config SRCS analysis_config.cc DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder table_printer)
cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)

if(WITH_CRYPTO)
97 changes: 97 additions & 0 deletions paddle/fluid/inference/api/analysis_config.cc
@@ -12,8 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string>
#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/api/paddle_pass_builder.h"
#include "paddle/fluid/inference/utils/table_printer.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gpu_info.h"
@@ -666,4 +668,99 @@ void AnalysisConfig::PartiallyRelease() {

void AnalysisConfig::EnableGpuMultiStream() { thread_local_stream_ = true; }

std::string AnalysisConfig::Summary() {
  const std::vector<std::string> header{"Option", "Value"};
  paddle::inference::TablePrinter os(header);

  if (!model_dir_.empty()) {
    os.InsertRow({"model_dir", model_dir_});
  }
  if (!(prog_file_.empty() && params_file_.empty())) {
    os.InsertRow({"model_file", prog_file_});
    os.InsertRow({"params_file", params_file_});
  }
  if (model_from_memory_) {
    os.InsertRow({"model_from_memory", params_file_});
  }
  os.InsetDivider();

  // cpu info
  os.InsertRow(
      {"cpu_math_thread", std::to_string(cpu_math_library_num_threads_)});
  os.InsertRow({"enable_mkldnn", use_mkldnn_ ? "true" : "false"});
  os.InsertRow(
      {"mkldnn_cache_capacity", std::to_string(mkldnn_cache_capacity_)});
  os.InsetDivider();

  auto Precision2String =
      [](paddle::AnalysisConfig::Precision prec) -> std::string {
    if (prec == Precision::kFloat32)
      return "fp32";
    else if (prec == Precision::kHalf)
      return "fp16";
    else if (prec == Precision::kInt8)
      return "int8";
    else
      return "None";
  };
  // gpu info
  os.InsertRow({"use_gpu", use_gpu_ ? "true" : "false"});
  if (use_gpu_) {
    os.InsertRow({"gpu_device_id", std::to_string(gpu_device_id_)});
    os.InsertRow({"memory_pool_init_size",
                  std::to_string(memory_pool_init_size_mb_) + "MB"});
    os.InsertRow(
        {"thread_local_stream", thread_local_stream_ ? "true" : "false"});

    os.InsertRow({"use_tensorrt", use_tensorrt_ ? "true" : "false"});
    if (use_tensorrt_) {
      os.InsertRow({"tensorrt_precision_mode",
                    Precision2String(tensorrt_precision_mode_)});
      os.InsertRow({"tensorrt_workspace_size",
                    std::to_string(tensorrt_workspace_size_)});
      os.InsertRow(
          {"tensorrt_max_batch_size", std::to_string(tensorrt_max_batchsize_)});
      os.InsertRow({"tensorrt_min_subgraph_size",
                    std::to_string(tensorrt_min_subgraph_size_)});
      os.InsertRow({"tensorrt_use_static_engine",
                    trt_use_static_engine_ ? "true" : "false"});
      os.InsertRow(
          {"tensorrt_use_calib_mode", trt_use_calib_mode_ ? "true" : "false"});

      // dynamic_shape
      os.InsertRow({"tensorrt_enable_dynamic_shape",
                    min_input_shape_.empty() ? "false" : "true"});

      os.InsertRow({"tensorrt_use_oss", trt_use_oss_ ? "true" : "false"});
      os.InsertRow({"tensorrt_use_dla", trt_use_dla_ ? "true" : "false"});
      if (trt_use_dla_) {
        os.InsertRow({"tensorrt_dla_core", std::to_string(trt_dla_core_)});
      }
    }
  }
  os.InsetDivider();

  // xpu info
  os.InsertRow({"use_xpu", use_xpu_ ? "true" : "false"});
  if (use_xpu_) {
    os.InsertRow({"xpu_device_id", std::to_string(xpu_device_id_)});
    os.InsertRow(
        {"xpu_l3_workspace_size", std::to_string(xpu_l3_workspace_size_)});
  }
  os.InsetDivider();

  if (use_lite_) {
    os.InsertRow({"use_lite", use_lite_ ? "true" : "false"});
  }

  // ir info
  os.InsertRow({"ir_optim", enable_ir_optim_ ? "true" : "false"});
  os.InsertRow({"ir_debug", ir_debug_ ? "true" : "false"});
  os.InsertRow({"memory_optim", enable_memory_optim_ ? "true" : "false"});
  os.InsertRow({"enable_profile", with_profile_ ? "true" : "false"});
  os.InsertRow({"enable_log", with_glog_info_ ? "true" : "false"});

  return os.PrintTable();
}

} // namespace paddle
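The GPU and TensorRT rows above are emitted only when the corresponding features are enabled on the config. A minimal sketch that exercises that branch; the model path and the TensorRT parameter values are arbitrary placeholders, and the EnableTensorRtEngine argument order is assumed to follow the signature in paddle_analysis_config.h:

#include "glog/logging.h"
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

void DumpGpuTrtSummary() {
  paddle::AnalysisConfig config;
  config.SetModel("./resnet50");  // hypothetical model directory
  config.EnableUseGpu(100, 0);    // 100 MB initial memory pool on GPU 0
  config.EnableTensorRtEngine(1 << 20 /* workspace_size */,
                              1 /* max_batch_size */, 3 /* min_subgraph_size */,
                              paddle::AnalysisConfig::Precision::kHalf,
                              false /* use_static */,
                              false /* use_calib_mode */);
  // With use_gpu_ and use_tensorrt_ set, Summary() now includes the
  // gpu_device_id, tensorrt_precision_mode, etc. rows.
  LOG(INFO) << config.Summary();
}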
4 changes: 4 additions & 0 deletions paddle/fluid/inference/api/analysis_predictor_tester.cc
@@ -34,6 +34,7 @@ TEST(AnalysisPredictor, analysis_off) {
  AnalysisConfig config;
  config.SetModel(FLAGS_dirname);
  config.SwitchIrOptim(false);
  LOG(INFO) << config.Summary();

  auto _predictor = CreatePaddlePredictor<AnalysisConfig>(config);
  auto* predictor = static_cast<AnalysisPredictor*>(_predictor.get());
@@ -68,6 +69,7 @@ TEST(AnalysisPredictor, analysis_on) {
#else
  config.DisableGpu();
#endif
  LOG(INFO) << config.Summary();

  auto _predictor = CreatePaddlePredictor<AnalysisConfig>(config);
  auto* predictor = static_cast<AnalysisPredictor*>(_predictor.get());
@@ -104,6 +106,7 @@ TEST(AnalysisPredictor, ZeroCopy) {
  AnalysisConfig config;
  config.SetModel(FLAGS_dirname);
  config.SwitchUseFeedFetchOps(false);
  LOG(INFO) << config.Summary();
  auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);

  auto w0 = predictor->GetInputTensor("firstw");
@@ -144,6 +147,7 @@ TEST(AnalysisPredictor, Clone) {
  config.SetModel(FLAGS_dirname);
  config.SwitchUseFeedFetchOps(true);
  config.SwitchIrOptim(true);
  LOG(INFO) << config.Summary();

  std::vector<std::unique_ptr<PaddlePredictor>> predictors;
  predictors.emplace_back(CreatePaddlePredictor(config));
5 changes: 5 additions & 0 deletions paddle/fluid/inference/api/paddle_analysis_config.h
@@ -604,6 +604,11 @@ struct PD_INFER_DECL AnalysisConfig {
  void EnableGpuMultiStream();
  void PartiallyRelease();

  ///
  /// \brief Return a human-readable summary of the configuration.
  ///
  std::string Summary();

 protected:
  // Update the config.
  void Update();
7 changes: 7 additions & 0 deletions paddle/fluid/inference/capi_exp/pd_config.cc
@@ -403,5 +403,12 @@ __pd_give PD_OneDimArrayCstr* PD_ConfigAllPasses(
  std::vector<std::string> passes = config->pass_builder()->AllPasses();
  return paddle_infer::CvtVecToOneDimArrayCstr(passes);
}

const char* PD_ConfigSummary(__pd_keep PD_Config* pd_config) {
  CHECK_AND_CONVERT_PD_CONFIG;
  auto sum_str = config->Summary();
  char* c = reinterpret_cast<char*>(malloc(sum_str.length() + 1));
  snprintf(c, sum_str.length() + 1, "%s", sum_str.c_str());
  return c;
}

} // extern "C"
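Because the returned buffer is allocated with malloc, callers of the C API must release it themselves, as the header comment below also notes. A minimal sketch, assuming the capi_exp umbrella header pd_inference_api.h and the existing PD_ConfigCreate/PD_ConfigDestroy pair:

#include <cstdio>
#include <cstdlib>
#include "pd_inference_api.h"  // assumed umbrella header for the capi_exp API

int main() {
  PD_Config* config = PD_ConfigCreate();
  const char* summary = PD_ConfigSummary(config);
  std::printf("%s\n", summary);
  // PD_ConfigSummary malloc's the string; the caller must release it.
  std::free(const_cast<char*>(summary));
  PD_ConfigDestroy(config);
  return 0;
}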
8 changes: 8 additions & 0 deletions paddle/fluid/inference/capi_exp/pd_config.h
@@ -612,6 +612,14 @@ PADDLE_CAPI_EXPORT extern void PD_ConfigAppendPass(
///
PADDLE_CAPI_EXPORT extern __pd_give PD_OneDimArrayCstr* PD_ConfigAllPasses(
    __pd_keep PD_Config* pd_config);
///
/// \brief Get a summary of the config.
/// Attention: the returned string is allocated with malloc and must be
/// released by the caller.
///
/// \return Return the config summary string.
///
PADDLE_CAPI_EXPORT extern const char* PD_ConfigSummary(
    __pd_keep PD_Config* pd_config);

#ifdef __cplusplus
} // extern "C"
12 changes: 12 additions & 0 deletions paddle/fluid/inference/goapi/config.go
@@ -733,3 +733,15 @@ func (config *Config) AllPasses() []string {
	C.PD_OneDimArrayCstrDestroy(cPasses)
	return passes
}

///
/// \brief Get a summary of the config.
///
/// \return Return the config summary string.
///
func (config *Config) Summary() string {
	cSummary := C.PD_ConfigSummary(config.c)
	summary := C.GoString(cSummary)
	C.free(unsafe.Pointer(cSummary))
	return summary
}
2 changes: 2 additions & 0 deletions paddle/fluid/inference/goapi/config_test.go
@@ -85,6 +85,8 @@ func TestNewConfig(t *testing.T) {

	config.DeletePass("test_pass")
	t.Logf("After DeletePass, AllPasses:%+v", config.AllPasses())

	t.Log(config.Summary())
}

func TestLite(t *testing.T) {
2 changes: 2 additions & 0 deletions paddle/fluid/inference/utils/CMakeLists.txt
@@ -2,3 +2,5 @@ cc_library(benchmark SRCS benchmark.cc DEPS enforce)
cc_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark)
cc_library(infer_io_utils SRCS io_utils.cc DEPS paddle_inference_api lod_tensor)
cc_test(infer_io_utils_tester SRCS io_utils_tester.cc DEPS infer_io_utils)
cc_library(table_printer SRCS table_printer.cc)
cc_test(test_table_printer SRCS table_printer_tester.cc DEPS table_printer)