diff --git a/paddle/fluid/framework/naive_executor.cc b/paddle/fluid/framework/naive_executor.cc index 651ac23e52fe1..3a85d30386cd5 100644 --- a/paddle/fluid/framework/naive_executor.cc +++ b/paddle/fluid/framework/naive_executor.cc @@ -183,5 +183,6 @@ void NaiveExecutor::ResetTrtOps(int num) { } #endif } + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/inference/analysis/analyzer.cc b/paddle/fluid/inference/analysis/analyzer.cc index 9615100f32ad3..4aadb34d7b354 100644 --- a/paddle/fluid/inference/analysis/analyzer.cc +++ b/paddle/fluid/inference/analysis/analyzer.cc @@ -38,8 +38,7 @@ void Analyzer::RunAnalysis(Argument *argument) { if (!disable_logs) { string::PrettyLogH1("--- Running analysis [%s]", pass); } - if (!argument->enable_analysis_optim() && pass == "ir_analysis_pass") - continue; + if (!argument->enable_ir_optim() && pass == "ir_analysis_pass") continue; auto *ptr = PassRegistry::Global().Retreive(pass); PADDLE_ENFORCE_NOT_NULL(ptr, diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc index 1df8d06dd89ca..3f5be92f5a3e6 100644 --- a/paddle/fluid/inference/analysis/analyzer_tester.cc +++ b/paddle/fluid/inference/analysis/analyzer_tester.cc @@ -31,7 +31,7 @@ TEST(Analyzer, analysis_without_tensorrt) { Argument argument; argument.SetDisableLogs(false); argument.SetModelDir(FLAGS_inference_model_dir); - argument.SetEnableAnalysisOptim(false); + argument.SetEnableIrOptim(false); argument.SetUseGPU(false); argument.SetAnalysisPasses({"ir_graph_build_pass", "ir_analysis_pass", @@ -44,7 +44,7 @@ TEST(Analyzer, analysis_without_tensorrt) { TEST(Analyzer, analysis_with_tensorrt) { Argument argument; argument.SetDisableLogs(false); - argument.SetEnableAnalysisOptim(false); + argument.SetEnableIrOptim(false); argument.SetTensorRtMaxBatchSize(3); argument.SetTensorRtWorkspaceSize(1 << 20); argument.SetModelDir(FLAGS_inference_model_dir); diff --git 
a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index 7451512b92484..8a66fec338100 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -42,8 +42,6 @@ namespace paddle { namespace inference { namespace analysis { -using framework::ir::Graph; - #ifdef PADDLE_WITH_MKLDNN using VarQuantScale = std::unordered_map>; @@ -148,7 +146,7 @@ struct Argument { DECL_ARGUMENT_FIELD(model_params_path, ModelParamsPath, std::string); DECL_ARGUMENT_FIELD(model_from_memory, ModelFromMemory, bool); DECL_ARGUMENT_FIELD(optim_cache_dir, OptimCacheDir, std::string); - DECL_ARGUMENT_FIELD(enable_analysis_optim, EnableAnalysisOptim, bool); + DECL_ARGUMENT_FIELD(enable_ir_optim, EnableIrOptim, bool); // For JITLayer DECL_ARGUMENT_FIELD(skip_load_params, SkipLoadParams, bool); diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h index e8d719ddb659d..e891da8e6d19f 100644 --- a/paddle/fluid/inference/analysis/helper.h +++ b/paddle/fluid/inference/analysis/helper.h @@ -153,25 +153,6 @@ T &GetFromScope(const framework::Scope &scope, const std::string &name) { return *var->GetMutable(); } -static framework::proto::ProgramDesc LoadProgramDesc( - const std::string &model_path) { - std::ifstream fin(model_path, std::ios::in | std::ios::binary); - PADDLE_ENFORCE_EQ( - fin.is_open(), - true, - platform::errors::NotFound( - "Cannot open file %s, please confirm whether the file exists", - model_path)); - fin.seekg(0, std::ios::end); - std::string buffer(fin.tellg(), ' '); - fin.seekg(0, std::ios::beg); - fin.read(&buffer[0], buffer.size()); - fin.close(); - framework::proto::ProgramDesc program_desc; - program_desc.ParseFromString(buffer); - return program_desc; -} - static bool FileExists(const std::string &filepath) { std::ifstream file(filepath); bool exists = file.is_open(); diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc 
b/paddle/fluid/inference/analysis/ir_pass_manager.cc index c96d04263f967..eb12479e2616d 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -37,15 +37,6 @@ using string::PrettyLogEndl; using string::Style; IRPassManager::IRPassManager(Argument *argument) { - ARGUMENT_CHECK_FIELD(argument, main_program); - graph_ = std::unique_ptr(new Graph(argument->main_program())); - if (argument->Has("scope")) { - auto *scope_ptr = argument->scope_ptr(); - PADDLE_ENFORCE_NOT_NULL(scope_ptr, - platform::errors::PreconditionNotMet( - "The scope ptr should not be nullptr.")); - graph_->SetNotOwned(framework::ir::kParamScopeAttr, scope_ptr); - } disable_logs_ = argument->disable_logs(); ARGUMENT_CHECK_FIELD(argument, ir_analysis_passes); diff --git a/paddle/fluid/inference/analysis/passes/CMakeLists.txt b/paddle/fluid/inference/analysis/passes/CMakeLists.txt index 126e2500c4890..fa074f962eb3d 100644 --- a/paddle/fluid/inference/analysis/passes/CMakeLists.txt +++ b/paddle/fluid/inference/analysis/passes/CMakeLists.txt @@ -30,17 +30,6 @@ cc_library( inference_op_replace_pass SRCS inference_op_replace_pass.cc DEPS analysis_pass graph_to_program_pass) -if(WITH_TESTING) - cc_library( - ir_graph_clean_pass - SRCS ir_graph_clean_pass.cc - DEPS analysis_pass gtest) -else() - cc_library( - ir_graph_clean_pass - SRCS ir_graph_clean_pass.cc - DEPS analysis_pass) -endif() cc_library( analysis_passes @@ -52,8 +41,7 @@ cc_library( memory_optim_pass convert_to_mixed_precision inference_op_replace_pass - ir_graph_to_program_pass - ir_graph_clean_pass) + ir_graph_to_program_pass) set(analysis_deps ${analysis_deps} analysis_passes subgraph_detector diff --git a/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc b/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc index 789865a52882f..f14d8468a2a96 100644 --- a/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc +++ 
b/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc @@ -32,8 +32,6 @@ #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/var_desc.h" -#include "paddle/fluid/inference/analysis/argument.h" -#include "paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h" #include "paddle/fluid/inference/io.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/data_type.h" @@ -406,14 +404,21 @@ void ConvertToMixedPrecisionPass::LoadAndPrepare() { main_graph_ = std::unique_ptr( new framework::ir::Graph(*program_desc_)); - // Remove all control var - IrInferCleanGraphPass pass; - Argument arg; - arg.SetMainGraphNotOwned(main_graph_.get()); - pass.Run(&arg); - vars_appear_multi_in_one_block_.resize(program_desc_->Size()); FindVarsInMultiBlock(); + for (size_t i = 0; i < main_graph_->SubGraphsSize(); ++i) { + auto* graph = main_graph_->GetSubGraph(i); + graphes_.push_back(graph); + + for (auto* node : graph->Nodes()) { + if (!node->IsVar()) continue; + if (!name2node_.count(node->Name())) { + name2node_[node->Name()] = node; + } + } + } + + ProcessCircleCases(); } void ConvertToMixedPrecisionPass::FindVarsInMultiBlock() { diff --git a/paddle/fluid/inference/analysis/passes/inference_op_replace_pass.cc b/paddle/fluid/inference/analysis/passes/inference_op_replace_pass.cc index ed45ec3301d1d..126d16933fd82 100644 --- a/paddle/fluid/inference/analysis/passes/inference_op_replace_pass.cc +++ b/paddle/fluid/inference/analysis/passes/inference_op_replace_pass.cc @@ -40,7 +40,7 @@ void InferenceOpReplacePass::RunImpl(Argument* argument) { } std::string InferenceOpReplacePass::repr() const { - return "inference-op-replace-pass"; + return "inference_op_replace_pass"; } } // namespace analysis diff --git a/paddle/fluid/inference/analysis/passes/ir_analysis_pass.cc b/paddle/fluid/inference/analysis/passes/ir_analysis_pass.cc index 53398a69536b9..12b18ac53e368 100644 --- 
a/paddle/fluid/inference/analysis/passes/ir_analysis_pass.cc +++ b/paddle/fluid/inference/analysis/passes/ir_analysis_pass.cc @@ -105,7 +105,7 @@ void IrAnalysisPass::CollectFusionStatis(Argument* argument) { framework::ir::kFuseStatisAttr)); } -std::string IrAnalysisPass::repr() const { return "ir-analysis-pass"; } +std::string IrAnalysisPass::repr() const { return "ir_analysis_pass"; } } // namespace analysis } // namespace inference diff --git a/paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc b/paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc index e07eaa64615c8..df0ffc534b71c 100644 --- a/paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc +++ b/paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc @@ -64,7 +64,8 @@ void IrGraphBuildPass::RunImpl(Argument *argument) { "set.")); } - auto graph = std::unique_ptr(new Graph(argument->main_program())); + auto graph = std::unique_ptr( + new framework::ir::Graph(argument->main_program())); argument->SetMainGraph(graph.release()); auto *scope_ptr = argument->scope_ptr(); PADDLE_ENFORCE_NOT_NULL(scope_ptr, @@ -125,7 +126,7 @@ std::unique_ptr IrGraphBuildPass::LoadModel( } } -std::string IrGraphBuildPass::repr() const { return "ir-graph-build-pass"; } +std::string IrGraphBuildPass::repr() const { return "ir_graph_build_pass"; } } // namespace analysis } // namespace inference diff --git a/paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.cc b/paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.cc deleted file mode 100644 index 6c18c62563716..0000000000000 --- a/paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.cc +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h" - -#include "paddle/fluid/framework/ir/graph.h" -#include "paddle/fluid/framework/ir/graph_pattern_detector.h" -#include "paddle/fluid/framework/ir/node.h" - -namespace paddle { -namespace inference { -namespace analysis { - -void IrInferCleanGraphPass::RunImpl(Argument* argument) { - auto& graph = argument->main_graph(); - auto is_valid_node = [](framework::ir::Node* x) { - return x && IsControlDepVar(*x) && x->IsVar() && !x->Var(); - }; - - std::unordered_set invalid_nodes; - int valid_op = 0; - for (auto* node : graph.Nodes()) { - PADDLE_ENFORCE_NOT_NULL(node, - platform::errors::PreconditionNotMet( - "The node should not be nullptr.")); - if (is_valid_node(node)) { - invalid_nodes.insert(node); - } else if (node->IsOp()) { - ++valid_op; - } - } - - GraphSafeRemoveNodes(&graph, invalid_nodes); -} - -} // namespace analysis -} // namespace inference -} // namespace paddle diff --git a/paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h b/paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h deleted file mode 100644 index a4d60e91e8455..0000000000000 --- a/paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include - -#include "paddle/fluid/inference/analysis/analysis_pass.h" - -namespace paddle { -namespace inference { -namespace analysis { - -struct Argument; - -class IrInferCleanGraphPass : public AnalysisPass { - public: - void RunImpl(Argument *argument) override; - - std::string repr() const override { return "ir_graph_clean_pass"; } -}; - -} // namespace analysis -} // namespace inference -} // namespace paddle diff --git a/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.cc b/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.cc index 999fb4ad8d764..3d86f7bf399a9 100644 --- a/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.cc +++ b/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.cc @@ -31,7 +31,7 @@ void IrGraphToProgramPass::RunImpl(Argument *argument) { new int(argument->memory_optim_sort_kind())); } - std::unique_ptr graph(argument->main_graph_ptr()); + std::unique_ptr graph(argument->main_graph_ptr()); // Direct using ProgramDesc desc(argument->main_program()) may cause // incomplete copies of information. 
diff --git a/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h b/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h index 5b20667d62ab6..8e90eb0e20d57 100644 --- a/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h +++ b/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h @@ -28,7 +28,7 @@ class IrGraphToProgramPass : public AnalysisPass { public: void RunImpl(Argument *argument) override; - std::string repr() const override { return "ir-graph-to-param-pass"; } + std::string repr() const override { return "ir_graph_to_program_pass"; } }; } // namespace analysis diff --git a/paddle/fluid/inference/analysis/passes/ir_params_sync_among_devices_pass.cc b/paddle/fluid/inference/analysis/passes/ir_params_sync_among_devices_pass.cc index 5ec9ca03fafc3..8961cbb5b6e47 100644 --- a/paddle/fluid/inference/analysis/passes/ir_params_sync_among_devices_pass.cc +++ b/paddle/fluid/inference/analysis/passes/ir_params_sync_among_devices_pass.cc @@ -169,7 +169,7 @@ void IrParamsSyncAmongDevicesPass::RunImpl(Argument *argument) { } std::string IrParamsSyncAmongDevicesPass::repr() const { - return "ir-params-sync-among-devices-pass"; + return "ir_params_sync_among_devices_pass"; } } // namespace analysis diff --git a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc index 775b61e9494ee..63aaa7d97967a 100644 --- a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc +++ b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc @@ -295,7 +295,7 @@ void UpdateOpDescsByReuse( } } -std::string MemoryOptimizePass::repr() const { return "memory optimize pass"; } +std::string MemoryOptimizePass::repr() const { return "memory_optimize_pass"; } void MemoryOptimizePass::RunImpl(Argument* argument) { // Memory optimization.
diff --git a/paddle/fluid/inference/analysis/passes/passes.cc b/paddle/fluid/inference/analysis/passes/passes.cc index 19aab1a948dd2..cd65757d08f3f 100644 --- a/paddle/fluid/inference/analysis/passes/passes.cc +++ b/paddle/fluid/inference/analysis/passes/passes.cc @@ -18,7 +18,6 @@ #include "paddle/fluid/inference/analysis/passes/inference_op_replace_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_analysis_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_graph_build_pass.h" -#include "paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_params_sync_among_devices_pass.h" #include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h" @@ -34,8 +33,6 @@ PassRegistry::PassRegistry() { std::unique_ptr(new IrAnalysisPass)); passes_.emplace("ir_graph_build_pass", std::unique_ptr(new IrGraphBuildPass)); - passes_.emplace("ir_graph_clean_pass", - std::unique_ptr(new IrInferCleanGraphPass)); passes_.emplace("memory_optimize_pass", std::unique_ptr(new MemoryOptimizePass)); passes_.emplace( diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index 4c669fdec3032..be68013abd2dd 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -758,13 +758,7 @@ void AnalysisConfig::Update() { ((use_custom_device() ^ pass_builder_->use_custom_device()))) { if (use_gpu()) { pass_builder_.reset(new GpuPassStrategy); - - if (use_tensorrt_) { - // Append after the Affine_channel_conv_fuse pass. 
- pass_builder()->InsertPass(3, "tensorrt_subgraph_pass"); - } } else if (use_ipu()) { - VLOG(1) << "IpuPassStrategy has been used for new."; pass_builder_.reset(new IpuPassStrategy); } else if (use_xpu()) { PADDLE_ENFORCE_EQ( @@ -964,9 +958,6 @@ void AnalysisConfig::Update() { "but did not have the option -DWITH_CUSTOM_DEVICE compiled.")); #endif } - if (ir_debug_) { - pass_builder()->TurnOnDebug(); - } } std::string AnalysisConfig::SerializeInfoCache() { diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 25848b2f1e6cb..84f4177623590 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -1065,7 +1065,7 @@ void AnalysisPredictor::PrepareArgument() { argument_.SetUseGPU(config_.use_gpu()); argument_.SetUseFcPadding(config_.use_fc_padding()); argument_.SetGPUDeviceId(config_.gpu_device_id()); - argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_); + argument_.SetEnableIrOptim(config_.enable_ir_optim_); argument_.SetEnableMemoryOptim(config_.enable_memory_optim()); argument_.SetModelFromMemory(config_.model_from_memory_); // Analyze inference_program @@ -1210,38 +1210,22 @@ void AnalysisPredictor::PrepareArgument() { } #endif - auto passes = config_.pass_builder()->AllPasses(); + auto *pass_builder = config_.pass_builder(); if (model_precision_ != phi::DataType::FLOAT32) { LOG(INFO) << "Model is mixed precision type with " << model_precision_ << ", we will use a new PassStrategy. 
Note that only the GPU " "backend is supported for now."; - passes.clear(); + pass_builder->ClearPasses(); + const auto &deleted_passes = pass_builder->GetAllDeletedPasses(); if (config_.tensorrt_engine_enabled()) { for (const auto &pass : kTrtLowerPrecisionPasses) { - passes.push_back(pass); + if (deleted_passes.count(pass)) continue; + pass_builder->AppendPass(pass); } } else if (config_.use_gpu()) { for (const auto &pass : kGpuLowerPrecisionPasses) { - passes.push_back(pass); - } - } - - const auto &deleted_passes = config_.pass_builder()->GetAllDeletedPasses(); - for (const auto &it : deleted_passes) { - auto iterator = std::find(passes.begin(), passes.end(), it); - if (iterator != passes.end()) { - passes.erase(iterator); - } - } - - if (config_.ir_debug_) { - auto it = std::begin(passes); - while (it != std::end(passes)) { - if (*it != "graph_viz_pass") { - it = passes.insert(it + 1, "graph_viz_pass"); - } else { - ++it; - } + if (deleted_passes.count(pass)) continue; + pass_builder->AppendPass(pass); } } } @@ -1267,8 +1251,8 @@ void AnalysisPredictor::PrepareArgument() { } } argument_.SetDisableLogs(config_.glog_info_disabled()); - argument_.SetIrAnalysisPasses(passes); - argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses()); + argument_.SetIrAnalysisPasses(pass_builder->AllPasses()); + argument_.SetAnalysisPasses(pass_builder->AnalysisPasses()); argument_.SetScopeNotOwned(scope_.get()); // mixed precison. 
@@ -2127,7 +2111,9 @@ std::unique_ptr AnalysisPredictor::Clone(void *stream) { } x->predictor_stream_ = stream; x->Init(scope_, inference_program_); +#ifdef PADDLE_WITH_TENSORRT x->executor_->ResetTrtOps(++AnalysisPredictor::clone_num_); +#endif return std::unique_ptr(x); } diff --git a/paddle/fluid/inference/api/mkldnn_quantizer.cc b/paddle/fluid/inference/api/mkldnn_quantizer.cc index bca2cde0fc2c6..293236b111630 100755 --- a/paddle/fluid/inference/api/mkldnn_quantizer.cc +++ b/paddle/fluid/inference/api/mkldnn_quantizer.cc @@ -604,10 +604,8 @@ void AnalysisPredictor::MkldnnQuantizer::PrepareArgument() const { if (predictor_.config_.ir_debug_) builder->TurnOnDebug(); auto passes = builder->AllPasses(); predictor_.argument_.SetIrAnalysisPasses(passes); - predictor_.argument_.SetAnalysisPasses({"ir_graph_clean_pass", - "ir_analysis_pass", - "memory_optimize_pass", - "ir_graph_to_program_pass"}); + predictor_.argument_.SetAnalysisPasses( + {"ir_analysis_pass", "memory_optimize_pass", "ir_graph_to_program_pass"}); predictor_.argument_.SetQuantVarScales(scales_); } diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h index cd97382785395..c8083e87dd8f0 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.h +++ b/paddle/fluid/inference/api/paddle_pass_builder.h @@ -115,7 +115,6 @@ class PD_INFER_DECL PaddlePassBuilder { /// \cond Protected std::vector analysis_passes_{ {"ir_graph_build_pass", - "ir_graph_clean_pass", "ir_analysis_pass", "ir_params_sync_among_devices_pass", "adjust_cudnn_workspace_size_pass",