diff --git a/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc b/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc
index 4bb2fca3b6e95..a05a096daf928 100644
--- a/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc
+++ b/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc
@@ -786,6 +786,8 @@ void AutoMixedPrecisionPass::SetVarPrecision() const {
       if (real_in_var_node->Var()->Persistable()) {
         real_in_var_node->Var()->SetDataType(
             framework::TransToProtoVarType(low_precision_));
+        VLOG(4) << real_in_var_node->Var()->Name()
+                << "'s data type was set to low precision";
         vars_convert_to_low_precision_.insert(in_var_name);
       }
     }
@@ -804,6 +806,8 @@ void AutoMixedPrecisionPass::SetVarPrecision() const {
         real_out_var_node->Var()->SetDataType(
             framework::TransToProtoVarType(low_precision_));
+        VLOG(4) << real_out_var_node->Var()->Name()
+                << "'s data type was set to low precision";
         if (real_out_var_node->Var()->Persistable()) {
           vars_convert_to_low_precision_.insert(out_var_name);
         }
@@ -823,6 +827,8 @@ void AutoMixedPrecisionPass::SetVarPrecision() const {
       if (vars_convert_to_low_precision_.count(var_name)) {
         var_node->Var()->SetDataType(
             framework::TransToProtoVarType(low_precision_));
+        VLOG(4) << var_node->Var()->Name()
+                << "'s data type was set to low precision";
       }
     }
   }
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index 295e72c43ce8f..c6415902349e6 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -35,7 +35,7 @@ get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
 get_property(phi_modules GLOBAL PROPERTY PHI_MODULES)
 get_property(ir_targets GLOBAL PROPERTY IR_TARGETS)
 get_property(not_infer_modules GLOBAL PROPERTY NOT_INFER_MODULES)
-set(utils_modules pretty_log string_helper benchmark utf8proc)
+set(utils_modules pretty_log string_helper utf8proc)
 
 if(NOT WITH_GFLAGS)
   set(utils_modules ${utils_modules} paddle_flags)
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 97b5ff15adba2..10b0d5d766e72 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -1830,6 +1830,9 @@ void AnalysisPredictor::PrepareArgument() {
     }
     LOG(INFO) << "This model run in GPU mixed precision mode with no ir "
                  "optimization.";
+    if (config_.ir_debug_) {
+      pass_builder->TurnOnDebug();
+    }
   } else {
     LOG(INFO)
         << "Ir optimization is turned off, no ir pass will be executed.";
diff --git a/paddle/fluid/inference/utils/CMakeLists.txt b/paddle/fluid/inference/utils/CMakeLists.txt
index 8e148beb0c170..0ad2cb0e3f0c8 100644
--- a/paddle/fluid/inference/utils/CMakeLists.txt
+++ b/paddle/fluid/inference/utils/CMakeLists.txt
@@ -1,8 +1,3 @@
-cc_library(
-  benchmark
-  SRCS benchmark.cc
-  DEPS enforce common)
-
 cc_library(
   infer_io_utils
   SRCS io_utils.cc
@@ -15,10 +10,3 @@ cc_library(
 cc_library(table_printer SRCS table_printer.cc)
 
 proto_library(shape_range_info_proto SRCS shape_range_info.proto)
-
-if(WITH_ONNXRUNTIME AND WIN32)
-  # Copy onnxruntime for some c++ test in Windows, since the test will
-  # be build only in CI, so suppose the generator in Windows is Ninja.
-  copy_onnx(test_benchmark)
-  copy_onnx(test_table_printer)
-endif()
diff --git a/paddle/fluid/inference/utils/benchmark.cc b/paddle/fluid/inference/utils/benchmark.cc
deleted file mode 100644
index 24bc99ed183fa..0000000000000
--- a/paddle/fluid/inference/utils/benchmark.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/inference/utils/benchmark.h"
-
-#include <sstream>
-
-#include "paddle/fluid/platform/enforce.h"
-
-namespace paddle {
-namespace inference {
-
-std::string Benchmark::SerializeToString() const {
-  std::stringstream ss;
-  ss << "-----------------------------------------------------\n";
-  ss << "name\t";
-  ss << "batch_size\t";
-  ss << "num_threads\t";
-  ss << "latency\t";
-  ss << "qps";
-  ss << '\n';
-
-  ss << name_ << "\t";
-  ss << batch_size_ << "\t\t";
-  ss << num_threads_ << "\t";
-  ss << latency_ << "\t";
-  ss << 1000.0 / latency_;
-  ss << '\n';
-  return ss.str();
-}
-void Benchmark::PersistToFile(const std::string &path) const {
-  std::ofstream file(path, std::ios::app);
-  PADDLE_ENFORCE_EQ(
-      file.is_open(),
-      true,
-      platform::errors::Unavailable("Can not open %s to add benchmark.", path));
-  file << SerializeToString();
-  file.flush();
-  file.close();
-}
-
-}  // namespace inference
-}  // namespace paddle
diff --git a/paddle/fluid/inference/utils/benchmark.h b/paddle/fluid/inference/utils/benchmark.h
deleted file mode 100644
index 56789843c3728..0000000000000
--- a/paddle/fluid/inference/utils/benchmark.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-#include <fstream>
-#include <iostream>
-#include <string>
-
-#include "paddle/utils/test_macros.h"
-
-namespace paddle {
-namespace inference {
-
-/*
- * Helper class to calculate the performance.
- */
-struct TEST_API Benchmark {
-  int batch_size() const { return batch_size_; }
-  void SetBatchSize(int x) { batch_size_ = x; }
-
-  int num_threads() const { return num_threads_; }
-  void SetNumThreads(int x) { num_threads_ = x; }
-
-  bool use_gpu() const { return use_gpu_; }
-  void SetUseGpu() { use_gpu_ = true; }
-
-  float latency() const { return latency_; }
-  void SetLatency(float x) { latency_ = x; }
-
-  const std::string& name() const { return name_; }
-  void SetName(const std::string& name) { name_ = name; }
-
-  std::string SerializeToString() const;
-  void PersistToFile(const std::string& path) const;
-
- private:
-  bool use_gpu_{false};
-  int batch_size_{0};
-  float latency_;
-  int num_threads_{1};
-  std::string name_;
-};
-
-}  // namespace inference
-}  // namespace paddle
diff --git a/paddle/fluid/pir/drr/include/drr_pattern_base.h b/paddle/fluid/pir/drr/include/drr_pattern_base.h
index e079fed999a13..9e541dc53183f 100644
--- a/paddle/fluid/pir/drr/include/drr_pattern_base.h
+++ b/paddle/fluid/pir/drr/include/drr_pattern_base.h
@@ -28,7 +28,6 @@ class IrContext;
 namespace paddle {
 namespace drr {
 
-class DrrRewritePattern;
 class DrrPatternContext;
 
 class DrrPatternBase {
diff --git a/test/cpp/fluid/CMakeLists.txt b/test/cpp/fluid/CMakeLists.txt
index 65e22439b5cbf..6e006b16ad6ef 100644
--- a/test/cpp/fluid/CMakeLists.txt
+++ b/test/cpp/fluid/CMakeLists.txt
@@ -2,8 +2,6 @@ add_subdirectory(memory)
 add_subdirectory(benchmark)
 add_subdirectory(framework)
-add_subdirectory(inference)
-
 if(WITH_CINN)
   add_subdirectory(cinn)
 endif()
 
diff --git a/test/cpp/fluid/inference/CMakeLists.txt b/test/cpp/fluid/inference/CMakeLists.txt
deleted file mode 100644
index 512d2b1553c8c..0000000000000
--- a/test/cpp/fluid/inference/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_subdirectory(utils)
diff --git a/test/cpp/fluid/inference/utils/CMakeLists.txt b/test/cpp/fluid/inference/utils/CMakeLists.txt
deleted file mode 100644
index 9daceced5766d..0000000000000
--- a/test/cpp/fluid/inference/utils/CMakeLists.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-if(WITH_TESTING)
-  if(NOT APPLE)
-    inference_base_test(infer_io_utils_tester SRCS io_utils_tester.cc DEPS
-                        paddle_inference_shared common)
-    inference_base_test(test_benchmark SRCS benchmark_tester.cc DEPS
-                        paddle_inference_shared benchmark)
-    inference_base_test(test_table_printer SRCS table_printer_tester.cc DEPS
-                        paddle_inference_shared pir)
-  endif()
-endif()
-
-if(WITH_ONNXRUNTIME AND WIN32)
-  # Copy onnxruntime for some c++ test in Windows, since the test will
-  # be build only in CI, so suppose the generator in Windows is Ninja.
-  copy_onnx(infer_io_utils_tester)
-  copy_onnx(test_benchmark)
-  copy_onnx(test_table_printer)
-endif()
diff --git a/test/cpp/fluid/inference/utils/benchmark_tester.cc b/test/cpp/fluid/inference/utils/benchmark_tester.cc
deleted file mode 100644
index 8f7614cb10a44..0000000000000
--- a/test/cpp/fluid/inference/utils/benchmark_tester.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <glog/logging.h>
-#include <gtest/gtest.h>
-
-#include "paddle/fluid/inference/utils/benchmark.h"
-
-using namespace paddle::inference;  // NOLINT
-TEST(Benchmark, basic) {
-  Benchmark benchmark;
-  benchmark.SetName("key0");
-  benchmark.SetBatchSize(10);
-  benchmark.SetUseGpu();
-  benchmark.SetLatency(220);
-  LOG(INFO) << "benchmark:\n" << benchmark.SerializeToString();
-}
-
-TEST(Benchmark, PersistToFile) {
-  Benchmark benchmark;
-  benchmark.SetName("key0");
-  benchmark.SetBatchSize(10);
-  benchmark.SetUseGpu();
-  benchmark.SetLatency(220);
-
-  benchmark.PersistToFile("1.log");
-  benchmark.PersistToFile("2.log");
-  benchmark.PersistToFile("3.log");
-}
diff --git a/test/cpp/fluid/inference/utils/io_utils_tester.cc b/test/cpp/fluid/inference/utils/io_utils_tester.cc
deleted file mode 100644
index 756027fb6cb9b..0000000000000
--- a/test/cpp/fluid/inference/utils/io_utils_tester.cc
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <glog/logging.h>
-#include <gtest/gtest.h>
-
-#include <map>
-
-#include "paddle/fluid/inference/api/helper.h"
-#include "paddle/fluid/inference/utils/io_utils.h"
-
-namespace paddle {
-namespace inference {
-namespace {
-
-bool pd_tensor_equal(const paddle::PaddleTensor& ref,
-                     const paddle::PaddleTensor& t) {
-  bool is_equal = true;
-  VLOG(3) << "ref.name: " << ref.name << ", t.name: " << t.name;
-  VLOG(3) << "ref.dtype: " << ref.dtype << ", t.dtype: " << t.dtype;
-  VLOG(3) << "ref.lod_level: " << ref.lod.size()
-          << ", t.dtype: " << t.lod.size();
-  VLOG(3) << "ref.data_len: " << ref.data.length()
-          << ", t.data_len: " << t.data.length();
-  return is_equal && (ref.name == t.name) && (ref.lod == t.lod) &&
-         (ref.dtype == t.dtype) &&
-         (std::memcmp(ref.data.data(), t.data.data(), ref.data.length()) == 0);
-}
-
-template <typename T>
-void test_io_utils() {
-  std::vector<T> input({6, 8});
-  paddle::PaddleTensor in;
-  in.name = "Hello";
-  in.shape = {1, 2};
-  in.lod = std::vector<std::vector<size_t>>{{0, 1}};
-  in.data = paddle::PaddleBuf(static_cast<void*>(input.data()),
-                              input.size() * sizeof(T));
-  in.dtype = paddle::inference::PaddleTensorGetDType<T>();
-  std::stringstream ss;
-  paddle::inference::SerializePDTensorToStream(&ss, in);
-  paddle::PaddleTensor out;
-  paddle::inference::DeserializePDTensorToStream(ss, &out);
-  ASSERT_TRUE(pd_tensor_equal(in, out));
-}
-}  // namespace
-}  // namespace inference
-}  // namespace paddle
-
-TEST(infer_io_utils, float32) { paddle::inference::test_io_utils<float>(); }
-
-TEST(infer_io_utils, tensors) {
-  // Create a float32 tensor.
-  std::vector<float> input_fp32({1.1f, 3.2f, 5.0f, 8.2f});
-  paddle::PaddleTensor in_fp32;
-  in_fp32.name = "Tensor.fp32_0";
-  in_fp32.shape = {2, 2};
-  in_fp32.data = paddle::PaddleBuf(static_cast<void*>(input_fp32.data()),
-                                   input_fp32.size() * sizeof(float));
-  in_fp32.dtype = paddle::inference::PaddleTensorGetDType<float>();
-
-  // Create a int64 tensor.
-  std::vector<int64_t> input_int64({5, 8});
-  paddle::PaddleTensor in_int64;
-  in_int64.name = "Tensor.int64_0";
-  in_int64.shape = {1, 2};
-  in_int64.lod = std::vector<std::vector<size_t>>{{0, 1}};
-  in_int64.data = paddle::PaddleBuf(static_cast<void*>(input_int64.data()),
-                                    input_int64.size() * sizeof(int64_t));
-  in_int64.dtype = paddle::inference::PaddleTensorGetDType<int64_t>();
-
-  // Serialize tensors.
-  std::vector<paddle::PaddleTensor> tensors_in({in_fp32});
-  std::string file_path = "./io_utils_tensors";
-  paddle::inference::SerializePDTensorsToFile(file_path, tensors_in);
-
-  // Deserialize tensors.
-  std::vector<paddle::PaddleTensor> tensors_out;
-  paddle::inference::DeserializePDTensorsToFile(file_path, &tensors_out);
-
-  // Check results.
-  ASSERT_EQ(tensors_in.size(), tensors_out.size());
-  for (size_t i = 0; i < tensors_in.size(); ++i) {
-    ASSERT_TRUE(
-        paddle::inference::pd_tensor_equal(tensors_in[i], tensors_out[i]));
-  }
-}
-
-TEST(shape_info_io, read_and_write) {
-  const std::string path = "test_shape_info_io";
-  std::map<std::string, std::vector<int32_t>> min_shape, max_shape, opt_shape;
-  std::map<std::string, std::vector<int32_t>> min_value, max_value, opt_value;
-  min_shape.insert(
-      std::make_pair("test1", std::vector<int32_t>{1, 3, 112, 112}));
-  max_shape.insert(
-      std::make_pair("test1", std::vector<int32_t>{1, 3, 224, 224}));
-  opt_shape.insert(
-      std::make_pair("test1", std::vector<int32_t>{1, 3, 224, 224}));
-  min_value.insert(
-      std::make_pair("test1", std::vector<int32_t>{1, 3, 112, 112}));
-  max_value.insert(
-      std::make_pair("test1", std::vector<int32_t>{1, 3, 224, 224}));
-  opt_value.insert(
-      std::make_pair("test1", std::vector<int32_t>{1, 3, 224, 224}));
-  paddle::inference::SerializeShapeRangeInfo(
-      path, min_shape, max_shape, opt_shape, min_value, max_value, opt_value);
-  min_shape.clear();
-  max_shape.clear();
-  opt_shape.clear();
-  min_value.clear();
-  max_value.clear();
-  opt_value.clear();
-  opt_shape.insert(
-      std::make_pair("test2", std::vector<int32_t>{1, 3, 224, 224}));
-  paddle::inference::DeserializeShapeRangeInfo(path,
-                                               &min_shape,
-                                               &max_shape,
-                                               &opt_shape,
-                                               &min_value,
-                                               &max_value,
-                                               &opt_value);
-
-  min_shape.insert(std::make_pair("test1", std::vector<int32_t>{1, 3, 56, 56}));
-  std::vector<std::string> names{"test1"};
-  paddle::inference::UpdateShapeRangeInfo(path,
-                                          min_shape,
-                                          max_shape,
-                                          opt_shape,
-                                          min_value,
-                                          max_value,
-                                          opt_value,
-                                          names,
-                                          names);
-
-  ASSERT_THROW(paddle::inference::DeserializeShapeRangeInfo("no_exists_file",
-                                                            &min_shape,
-                                                            &max_shape,
-                                                            &opt_shape,
-                                                            &min_value,
-                                                            &max_value,
-                                                            &opt_value);
-               , paddle::platform::EnforceNotMet);
-}
diff --git a/test/cpp/fluid/inference/utils/table_printer_tester.cc b/test/cpp/fluid/inference/utils/table_printer_tester.cc
deleted file mode 100644
index fc482807b2854..0000000000000
--- a/test/cpp/fluid/inference/utils/table_printer_tester.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <glog/logging.h>
-#include <gtest/gtest.h>
-
-#include "paddle/fluid/inference/utils/table_printer.h"
-
-namespace paddle {
-namespace inference {}  // namespace inference
-}  // namespace paddle
-
-TEST(table_printer, output) {
-  std::vector<std::string> header{"config", "value"};
-  paddle::inference::TablePrinter table(header);
-
-  // model_dir
-  table.InsertRow({"model_dir", "./model_dir"});
-  // model
-  table.InsertRow({"model_file", "./model.pdmodel"});
-  table.InsertRow({"params_file", "./model.pdiparams"});
-
-  table.InsetDivider();
-  // gpu
-  table.InsertRow({"use_gpu", "true"});
-  table.InsertRow({"gpu_device_id", "0"});
-  table.InsertRow({"memory_pool_init_size", "100MB"});
-  table.InsertRow({"thread_local_stream", "false"});
-  table.InsetDivider();
-
-  // trt precision
-  table.InsertRow({"use_trt", "true"});
-  table.InsertRow({"trt_precision", "fp32"});
-  table.InsertRow({"enable_dynamic_shape", "true"});
-  table.InsertRow({"DisableTensorRtOPs", "{}"});
-  table.InsertRow({"EnableVarseqlen", "ON"});
-  table.InsertRow({"tensorrt_dla_enabled", "ON"});
-  table.InsetDivider();
-
-  // lite
-  table.InsertRow({"use_lite", "ON"});
-  table.InsetDivider();
-
-  // xpu
-  table.InsertRow({"use_xpu", "true"});
-  table.InsertRow({"xpu_device_id", "0"});
-  table.InsetDivider();
-
-  // ir
-  table.InsertRow({"ir_optim", "true"});
-  table.InsertRow({"ir_debug", "false"});
-  table.InsertRow({"enable_memory_optim", "false"});
-  table.InsertRow({"EnableProfile", "false"});
-  table.InsertRow({"glog_info_disabled", "false"});
-  table.InsetDivider();
-
-  // cpu
-  table.InsertRow({"CpuMathLibrary", "4"});
-  // mkldnn
-  table.InsertRow({"enable_mkldnn", "false"});
-  table.InsertRow({"mkldnn_cache_capacity", "10"});
-
-  // a long string
-  table.InsertRow(
-      {"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ a long string "
-       "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~",
-       "------------------------------------------ a long value "
-       "-----------------------------------------------------"});
-
-  LOG(INFO) << table.PrintTable();
-}
diff --git a/test/cpp/inference/api/tester_helper.h b/test/cpp/inference/api/tester_helper.h
index a410df859fe45..a5d60ca6eec97 100644
--- a/test/cpp/inference/api/tester_helper.h
+++ b/test/cpp/inference/api/tester_helper.h
@@ -34,7 +34,6 @@
 #include "paddle/fluid/inference/api/analysis_predictor.h"
 #include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
-#include "paddle/fluid/inference/utils/benchmark.h"
 #include "paddle/fluid/platform/profiler/event_tracing.h"
 #include "test/cpp/inference/api/config_printer.h"
 #include "test/cpp/inference/test_helper.h"
@@ -69,9 +68,6 @@ PD_DEFINE_int32(num_threads,
 PD_DEFINE_bool(use_analysis,
               true,
               "Running the inference program in analysis mode.");
-PD_DEFINE_bool(record_benchmark,
-               false,
-               "Record benchmark after profiling the model");
 PD_DEFINE_double(accuracy, 1e-3, "Result Accuracy.");
 PD_DEFINE_double(quantized_accuracy, 2e-2, "Result Quantized Accuracy.");
 PD_DEFINE_bool(zero_copy, false, "Use ZeroCopy to speedup Feed/Fetch.");
@@ -594,14 +590,6 @@ void PredictionRun(PaddlePredictor *predictor,
 
   if (sample_latency != nullptr)
     *sample_latency = batch_latency / FLAGS_batch_size;
-
-  if (FLAGS_record_benchmark) {
-    Benchmark benchmark;
-    benchmark.SetName(FLAGS_model_name);
-    benchmark.SetBatchSize(FLAGS_batch_size);
-    benchmark.SetLatency(batch_latency);
-    benchmark.PersistToFile("benchmark_record.txt");
-  }
 }
 
 void TestOneThreadPrediction(
diff --git a/test/cpp/inference/test.cmake b/test/cpp/inference/test.cmake
index 0c8fcf0b3a66c..3d489220f2747 100644
--- a/test/cpp/inference/test.cmake
+++ b/test/cpp/inference/test.cmake
@@ -114,10 +114,9 @@ function(inference_base_test_build TARGET)
     endif()
     if("${base_test_DEPS};" MATCHES "paddle_inference_shared;")
       list(REMOVE_ITEM base_test_DEPS paddle_inference_shared)
-      target_link_libraries(
-        ${TARGET} $<TARGET_LINKER_FILE:paddle_inference_shared>
-        $<TARGET_LINKER_FILE:benchmark>)
-      add_dependencies(${TARGET} paddle_inference_shared benchmark)
+      target_link_libraries(${TARGET}
+                            $<TARGET_LINKER_FILE:paddle_inference_shared>)
+      add_dependencies(${TARGET} paddle_inference_shared)
     elseif("${base_test_DEPS};" MATCHES "paddle_inference_c_shared;")
       list(REMOVE_ITEM base_test_DEPS paddle_inference_c_shared)
       target_link_libraries(${TARGET}
diff --git a/tools/parallel_UT_rule.py b/tools/parallel_UT_rule.py
index 5b315049c5025..ef2eb620eddda 100755
--- a/tools/parallel_UT_rule.py
+++ b/tools/parallel_UT_rule.py
@@ -28,7 +28,6 @@
     'test_fc_gru_fuse_pass_cc',
     'device_worker_test',
     'test_custom_conj',
-    'infer_io_utils_tester',
     'test_transpose_bf16_mkldnn_op',
     'test_container',
     'cpu_helper_test',
@@ -73,7 +72,6 @@
     'test_pybind_interface',
     'test_io_save_load',
     'test_fusion_lstm_int8_mkldnn_op',
-    'test_benchmark',
     'test_protobuf',
     'test_tdm_sampler_op',
     'test_teacher_student_sigmoid_loss_op',
@@ -482,7 +480,6 @@
     'test_communicator_half_async',
     'test_dynrnn_gradient_check',
     'test_pool2d_bf16_mkldnn_op',
-    'test_table_printer',
    'test_framework_debug_str',
     'test_dist_fleet_ps2',
     'test_collective_scatter_api',
@@ -1924,7 +1921,6 @@
     'test_bpr_loss_op',
     'test_boxps',
     'test_bipartite_match_op',
-    'test_benchmark',
     'test_beam_search_op',
     'test_batch_sampler',
     'test_batch_norm_act_fuse_pass',
@@ -1968,7 +1964,6 @@
     'lodtensor_printer_test',
     'test_dispatch_jit',
     'inlined_vector_test',
-    'infer_io_utils_tester',
     'graph_to_program_pass_test',
     'graph_test',
     'graph_helper_test',
@@ -2174,7 +2169,6 @@
     'test_auto_parallel_api',
     'test_tensor_copy_from',
     'test_analyzer_capi_exp_xpu',
-    'test_table_printer',
     'test_egr_task_autocodegen',
     'test_static_save_load_bf16',
     'test_parallel_executor_run_cinn',