@@ -22,13 +22,13 @@ namespace inference {
 
 using paddle::PaddleTensor;
 
-void profile(bool use_mkldnn = false, bool use_bfloat16 = false);
+void profile(bool use_onednn = false, bool use_bfloat16 = false);
 std::vector<std::vector<paddle::PaddleTensor>> LoadInputData();
-void CompareNativeAndAnalysisWrapper(bool use_mkldnn = false);
+void CompareNativeAndAnalysisWrapper(bool use_onednn = false);
 std::vector<paddle::PaddleTensor> ParseInputStreamToVector(
     const std::string &line);
 
-AnalysisConfig SetConfig(bool use_mkldnn = false, bool use_bfloat16 = false);
+AnalysisConfig SetConfig(bool use_onednn = false, bool use_bfloat16 = false);
 
 template <typename T>
 paddle::PaddleTensor ParseTensor(const std::string &field);
@@ -50,15 +50,15 @@ TEST(Analyzer_bert, profile) {
 }
 
 #ifdef PADDLE_WITH_DNNL
-TEST(Analyzer_bert, profile_mkldnn) {
-  auto use_mkldnn = true;
-  profile(use_mkldnn);
+TEST(Analyzer_bert, profile_onednn) {
+  auto use_onednn = true;
+  profile(use_onednn);
 }
 
-TEST(Analyzer_bert, profile_mkldnn_bf16) {
-  auto use_mkldnn = true;
+TEST(Analyzer_bert, profile_onednn_bf16) {
+  auto use_onednn = true;
   auto use_bfloat16 = true;
-  profile(use_mkldnn, use_bfloat16);
+  profile(use_onednn, use_bfloat16);
 }
 #endif
 
@@ -70,8 +70,8 @@ TEST(Analyzer_bert, compare) {
 }
 #ifdef PADDLE_WITH_DNNL
 TEST(Analyzer_bert, compare_mkldnn) {
-  auto use_mkldnn = true;
-  CompareNativeAndAnalysisWrapper(use_mkldnn);
+  auto use_onednn = true;
+  CompareNativeAndAnalysisWrapper(use_onednn);
 }
 #endif
 
@@ -135,8 +135,8 @@ TEST(Analyzer_bert, transfer_scope_cache) {
           "The size of data cache is not equal to thread number."));
 }
 
-void profile(bool use_mkldnn, bool use_bfloat16) {
-  auto config(SetConfig(use_mkldnn, use_bfloat16));
+void profile(bool use_onednn, bool use_bfloat16) {
+  auto config(SetConfig(use_onednn, use_bfloat16));
   std::vector<std::vector<PaddleTensor>> outputs;
   auto inputs = LoadInputData();
   TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&config),
@@ -168,8 +168,8 @@ std::vector<std::vector<paddle::PaddleTensor>> LoadInputData() {
   return inputs;
 }
 
-void CompareNativeAndAnalysisWrapper(bool use_mkldnn) {
-  auto cfg(SetConfig(use_mkldnn));
+void CompareNativeAndAnalysisWrapper(bool use_onednn) {
+  auto cfg(SetConfig(use_onednn));
   auto inputs = LoadInputData();
   CompareNativeAndAnalysis(
       reinterpret_cast<const PaddlePredictor::Config *>(&cfg), inputs);
@@ -201,12 +201,12 @@ std::vector<paddle::PaddleTensor> ParseInputStreamToVector(
   return tensors;
 }
 
-AnalysisConfig SetConfig(bool use_mkldnn, bool use_bfloat16) {
+AnalysisConfig SetConfig(bool use_onednn, bool use_bfloat16) {
   AnalysisConfig config;
   config.SetModel(FLAGS_infer_model);
   config.DisableFCPadding();
 
-  if (use_mkldnn) {
+  if (use_onednn) {
     config.EnableONEDNN();
   }
 