diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index c7d947c58039efa80d5b8336bc5db99cd89cee82..9e49dea9e674f135cd31a07a113532012769286f 100755
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -27,7 +27,7 @@ if(WITH_MKLDNN)
   set(mkldnn_quantizer_cfg ${mkldnn_quantizer_cfg} PARENT_SCOPE)
 endif()
 
-cc_library(analysis_config SRCS analysis_config.cc DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder)
+cc_library(analysis_config SRCS analysis_config.cc DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder table_printer)
 cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
 
 if(WITH_CRYPTO)
diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc
index 58b0a3536a4d98fec8b38d40c3f4431e80c27894..b515f7050e510bcdc0e8dec21c8ea10a956a2bf1 100644
--- a/paddle/fluid/inference/api/analysis_config.cc
+++ b/paddle/fluid/inference/api/analysis_config.cc
@@ -12,8 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <string>
 #include "paddle/fluid/inference/api/paddle_analysis_config.h"
 #include "paddle/fluid/inference/api/paddle_pass_builder.h"
+#include "paddle/fluid/inference/utils/table_printer.h"
 #include "paddle/fluid/platform/cpu_info.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/gpu_info.h"
@@ -719,4 +721,99 @@ void AnalysisConfig::PartiallyRelease() {
 
 void AnalysisConfig::EnableGpuMultiStream() { thread_local_stream_ = true; }
 
+std::string AnalysisConfig::Summary() {
+  const std::vector<std::string> header{"Option", "Value"};
+  paddle::inference::TablePrinter os(header);
+
+  if (!model_dir_.empty()) {
+    os.InsertRow({"model_dir", model_dir_});
+  }
+  if (!(prog_file_.empty() && params_file_.empty())) {
+    os.InsertRow({"model_file", prog_file_});
+    os.InsertRow({"params_file", params_file_});
+  }
+  if (model_from_memory_) {
+    os.InsertRow({"model_from_memory", model_from_memory_ ? "true" : "false"});
+  }
+  os.InsertDivider();
+
+  // cpu info
+  os.InsertRow(
+      {"cpu_math_thread", std::to_string(cpu_math_library_num_threads_)});
+  os.InsertRow({"enable_mkldnn", use_mkldnn_ ? "true" : "false"});
+  os.InsertRow(
+      {"mkldnn_cache_capacity", std::to_string(mkldnn_cache_capacity_)});
+  os.InsertDivider();
+
+  auto Precision2String =
+      [](paddle::AnalysisConfig::Precision prec) -> std::string {
+    if (prec == Precision::kFloat32)
+      return "fp32";
+    else if (prec == Precision::kHalf)
+      return "fp16";
+    else if (prec == Precision::kInt8)
+      return "int8";
+    else
+      return "None";
+  };
+  // gpu info
+  os.InsertRow({"use_gpu", use_gpu_ ? "true" : "false"});
+  if (use_gpu_) {
+    os.InsertRow({"gpu_device_id", std::to_string(gpu_device_id_)});
+    os.InsertRow({"memory_pool_init_size",
+                  std::to_string(memory_pool_init_size_mb_) + "MB"});
+    os.InsertRow(
+        {"thread_local_stream", thread_local_stream_ ? "true" : "false"});
+
+    os.InsertRow({"use_tensorrt", use_tensorrt_ ? "true" : "false"});
+    if (use_tensorrt_) {
+      os.InsertRow({"tensorrt_precision_mode",
+                    Precision2String(tensorrt_precision_mode_)});
+      os.InsertRow({"tensorrt_workspace_size",
+                    std::to_string(tensorrt_workspace_size_)});
+      os.InsertRow(
+          {"tensorrt_max_batch_size", std::to_string(tensorrt_max_batchsize_)});
+      os.InsertRow({"tensorrt_min_subgraph_size",
+                    std::to_string(tensorrt_min_subgraph_size_)});
+      os.InsertRow({"tensorrt_use_static_engine",
+                    trt_use_static_engine_ ? "true" : "false"});
+      os.InsertRow(
+          {"tensorrt_use_calib_mode", trt_use_calib_mode_ ? "true" : "false"});
+
+      // dynamic_shape
+      os.InsertRow({"tensorrt_enable_dynamic_shape",
+                    min_input_shape_.empty() ? "false" : "true"});
+
+      os.InsertRow({"tensorrt_use_oss", trt_use_oss_ ? "true" : "false"});
+      os.InsertRow({"tensorrt_use_dla", trt_use_dla_ ? "true" : "false"});
+      if (trt_use_dla_) {
+        os.InsertRow({"tensorrt_dla_core", std::to_string(trt_dla_core_)});
+      }
+    }
+  }
+  os.InsertDivider();
+
+  // xpu info
+  os.InsertRow({"use_xpu", use_xpu_ ? "true" : "false"});
+  if (use_xpu_) {
+    os.InsertRow({"xpu_device_id", std::to_string(xpu_device_id_)});
+    os.InsertRow(
+        {"xpu_l3_workspace_size", std::to_string(xpu_l3_workspace_size_)});
+  }
+  os.InsertDivider();
+
+  if (use_lite_) {
+    os.InsertRow({"use_lite", use_lite_ ? "true" : "false"});
+  }
+
+  // ir info
+  os.InsertRow({"ir_optim", enable_ir_optim_ ? "true" : "false"});
+  os.InsertRow({"ir_debug", ir_debug_ ? "true" : "false"});
+  os.InsertRow({"memory_optim", enable_memory_optim_ ? "true" : "false"});
+  os.InsertRow({"enable_profile", with_profile_ ? "true" : "false"});
+  os.InsertRow({"enable_log", with_glog_info_ ? "true" : "false"});
+
+  return os.PrintTable();
+}
+
 }  // namespace paddle
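For orientation, a minimal sketch of how the new `AnalysisConfig::Summary()` API is meant to be called from application code (the model path here is hypothetical, and glog initialization is assumed to happen elsewhere):

// Minimal usage sketch: dump the option/value table built by Summary().
#include <glog/logging.h>
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

void DumpConfigSummary() {
  paddle::AnalysisConfig config;
  config.SetModel("./mobilenet_v1");  // hypothetical model directory
  config.EnableUseGpu(100 /*memory_pool_init_size_mb*/, 0 /*device_id*/);
  // Summary() only builds and returns the table as a string;
  // printing it is up to the caller.
  LOG(INFO) << config.Summary();
}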
"true" : "false"}); + os.InsertRow( + {"tensorrt_use_calib_mode", trt_use_calib_mode_ ? "true" : "false"}); + + // dynamic_shape + os.InsertRow({"tensorrt_enable_dynamic_shape", + min_input_shape_.empty() ? "false" : "true"}); + + os.InsertRow({"tensorrt_use_oss", trt_use_oss_ ? "true" : "false"}); + os.InsertRow({"tensorrt_use_dla", trt_use_dla_ ? "true" : "false"}); + if (trt_use_dla_) { + os.InsertRow({"tensorrt_dla_core", std::to_string(trt_dla_core_)}); + } + } + } + os.InsetDivider(); + + // xpu info + os.InsertRow({"use_xpu", use_xpu_ ? "true" : "false"}); + if (use_xpu_) { + os.InsertRow({"xpu_device_id", std::to_string(xpu_device_id_)}); + os.InsertRow( + {"xpu_l3_workspace_size", std::to_string(xpu_l3_workspace_size_)}); + } + os.InsetDivider(); + + if (use_lite_) { + os.InsertRow({"use_lite", use_lite_ ? "true" : "false"}); + } + + // ir info + os.InsertRow({"ir_optim", enable_ir_optim_ ? "true" : "false"}); + os.InsertRow({"ir_debug", ir_debug_ ? "true" : "false"}); + os.InsertRow({"memory_optim", enable_memory_optim_ ? "true" : "false"}); + os.InsertRow({"enable_profile", with_profile_ ? "true" : "false"}); + os.InsertRow({"enable_log", with_glog_info_ ? "true" : "false"}); + + return os.PrintTable(); +} + } // namespace paddle diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc index 464db9d4d3ea27207d69d6e73c42b2e9a52828b2..703d65a6fc688cb06ecd7fc16c228518d2fe1261 100644 --- a/paddle/fluid/inference/api/analysis_predictor_tester.cc +++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc @@ -34,6 +34,7 @@ TEST(AnalysisPredictor, analysis_off) { AnalysisConfig config; config.SetModel(FLAGS_dirname); config.SwitchIrOptim(false); + LOG(INFO) << config.Summary(); auto _predictor = CreatePaddlePredictor(config); auto* predictor = static_cast(_predictor.get()); @@ -68,6 +69,7 @@ TEST(AnalysisPredictor, analysis_on) { #else config.DisableGpu(); #endif + LOG(INFO) << config.Summary(); auto _predictor = CreatePaddlePredictor(config); auto* predictor = static_cast(_predictor.get()); @@ -104,6 +106,7 @@ TEST(AnalysisPredictor, ZeroCopy) { AnalysisConfig config; config.SetModel(FLAGS_dirname); config.SwitchUseFeedFetchOps(false); + LOG(INFO) << config.Summary(); auto predictor = CreatePaddlePredictor(config); auto w0 = predictor->GetInputTensor("firstw"); @@ -144,6 +147,7 @@ TEST(AnalysisPredictor, Clone) { config.SetModel(FLAGS_dirname); config.SwitchUseFeedFetchOps(true); config.SwitchIrOptim(true); + LOG(INFO) << config.Summary(); std::vector> predictors; predictors.emplace_back(CreatePaddlePredictor(config)); diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index 58d02d8d1e74f284a98be076afa99fe1d44f04b6..04ebe0efaed2cf330f557214daee35ddcc49dac7 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -622,6 +622,11 @@ struct PD_INFER_DECL AnalysisConfig { void EnableGpuMultiStream(); void PartiallyRelease(); + /// + /// \brief Print the summary of config. + /// + std::string Summary(); + protected: // Update the config. 
   void Update();
diff --git a/paddle/fluid/inference/capi_exp/pd_config.cc b/paddle/fluid/inference/capi_exp/pd_config.cc
index bd96f401233e9165806f5fe64e08c8ef3b6cadbc..78b7ad67f531052dda7dfc98031f1c854a4bf1d2 100644
--- a/paddle/fluid/inference/capi_exp/pd_config.cc
+++ b/paddle/fluid/inference/capi_exp/pd_config.cc
@@ -417,5 +417,12 @@ __pd_give PD_OneDimArrayCstr* PD_ConfigAllPasses(
   std::vector<std::string> passes = config->pass_builder()->AllPasses();
   return paddle_infer::CvtVecToOneDimArrayCstr(passes);
 }
+const char* PD_ConfigSummary(__pd_keep PD_Config* pd_config) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  auto sum_str = config->Summary();
+  char* c = reinterpret_cast<char*>(malloc(sum_str.length() + 1));
+  snprintf(c, sum_str.length() + 1, "%s", sum_str.c_str());
+  return c;
+}
 
 }  // extern "C"
diff --git a/paddle/fluid/inference/capi_exp/pd_config.h b/paddle/fluid/inference/capi_exp/pd_config.h
index ac0ed8c86895a5223324b9777369bea30c0b9e2f..b59b407c39eb48c564a52b807a1b0c1cdce175cb 100644
--- a/paddle/fluid/inference/capi_exp/pd_config.h
+++ b/paddle/fluid/inference/capi_exp/pd_config.h
@@ -636,6 +636,14 @@ PADDLE_CAPI_EXPORT extern void PD_ConfigAppendPass(
 ///
 PADDLE_CAPI_EXPORT extern __pd_give PD_OneDimArrayCstr* PD_ConfigAllPasses(
     __pd_keep PD_Config* pd_config);
+///
+/// \brief Get a summary of the config.
+/// Attention: the returned string must be released manually by the caller.
+///
+/// \return Return config info.
+///
+PADDLE_CAPI_EXPORT extern const char* PD_ConfigSummary(
+    __pd_keep PD_Config* pd_config);
 
 #ifdef __cplusplus
 }  // extern "C"
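A short sketch of the call-site contract for the C API above: the summary buffer is malloc'd inside PD_ConfigSummary, so the caller owns it and must free it. PD_ConfigCreate/PD_ConfigDestroy are the existing config lifecycle functions; the umbrella header name is an assumption here.

// Hedged usage sketch for PD_ConfigSummary; compiles as C or C++.
#include <stdio.h>
#include <stdlib.h>
#include "pd_inference_api.h"  // assumed umbrella header for the capi_exp API

void print_config_summary(void) {
  PD_Config* config = PD_ConfigCreate();
  const char* summary = PD_ConfigSummary(config);
  printf("%s\n", summary);
  free((void*)summary);  // caller-owned, per the header comment above
  PD_ConfigDestroy(config);
}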
diff --git a/paddle/fluid/inference/goapi/config.go b/paddle/fluid/inference/goapi/config.go
index 866ae0e38b79e8f8e6f2342cdceed4b7a9216b53..7c24731df7be1c2ba49db4f64f0289071f97d82c 100644
--- a/paddle/fluid/inference/goapi/config.go
+++ b/paddle/fluid/inference/goapi/config.go
@@ -760,3 +760,15 @@ func (config *Config) AllPasses() []string {
 	C.PD_OneDimArrayCstrDestroy(cPasses)
 	return passes
 }
+
+///
+/// \brief Get a summary of the config.
+///
+/// \return Return config info.
+///
+func (config *Config) Summary() string {
+	cSummary := C.PD_ConfigSummary(config.c)
+	summary := C.GoString(cSummary)
+	C.free(unsafe.Pointer(cSummary))
+	return summary
+}
diff --git a/paddle/fluid/inference/goapi/config_test.go b/paddle/fluid/inference/goapi/config_test.go
index e7b2c956a924ae201be3cbc9a8a299ab053d8142..c3b270adb8f99b8d78c67e4f32344cf5a84ff8a8 100644
--- a/paddle/fluid/inference/goapi/config_test.go
+++ b/paddle/fluid/inference/goapi/config_test.go
@@ -85,6 +85,8 @@ func TestNewConfig(t *testing.T) {
 	config.DeletePass("test_pass")
 	t.Logf("After DeletePass, AllPasses:%+v", config.AllPasses())
+
+	t.Log(config.Summary())
 }
 
 func TestLite(t *testing.T) {
diff --git a/paddle/fluid/inference/utils/CMakeLists.txt b/paddle/fluid/inference/utils/CMakeLists.txt
index 956cd739371ce70873c69c37cc9f80bdb42fa6af..0a034c0de4732ba3721270c56a326ac24299a276 100644
--- a/paddle/fluid/inference/utils/CMakeLists.txt
+++ b/paddle/fluid/inference/utils/CMakeLists.txt
@@ -2,3 +2,5 @@ cc_library(benchmark SRCS benchmark.cc DEPS enforce)
 cc_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark)
 cc_library(infer_io_utils SRCS io_utils.cc DEPS paddle_inference_api lod_tensor)
 cc_test(infer_io_utils_tester SRCS io_utils_tester.cc DEPS infer_io_utils)
+cc_library(table_printer SRCS table_printer.cc)
+cc_test(test_table_printer SRCS table_printer_tester.cc DEPS table_printer)
diff --git a/paddle/fluid/inference/utils/table_printer.cc b/paddle/fluid/inference/utils/table_printer.cc
new file mode 100644
index 0000000000000000000000000000000000000000..bd19320cbe64768863df3f7fb126f3d81492e496
--- /dev/null
+++ b/paddle/fluid/inference/utils/table_printer.cc
@@ -0,0 +1,210 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/utils/table_printer.h"
+
+#ifdef _WIN32
+// Suppress the min and max definitions in Windef.h.
+#define NOMINMAX
+#include <windows.h>
+#else
+#include <sys/ioctl.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <iomanip>
+#include <numeric>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace paddle {
+namespace inference {
+
+std::string TablePrinter::PrintTable() {
+  std::stringstream ss;
+  ss << "\n";
+
+  CalcLayout();
+
+  AddRowDivider(ss);
+  AddRow(ss, 0);
+  AddRowDivider(ss);
+
+  for (size_t i = 1; i < data_.size(); ++i) {
+    if (data_[i].empty()) {
+      AddRowDivider(ss);
+    } else {
+      AddRow(ss, i);
+    }
+  }
+
+  AddRowDivider(ss);
+
+  return ss.str();
+}
+
+TablePrinter::TablePrinter(const std::vector<std::string>& header) {
+  size_t terminal_width = 500;
+#ifdef _WIN32
+  CONSOLE_SCREEN_BUFFER_INFO csbi;
+  int ret = GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
+  if (ret && (csbi.dwSize.X != 0)) {
+    terminal_width = csbi.dwSize.X;
+  }
+#else
+  struct winsize terminal_size;
+  int status = ioctl(STDOUT_FILENO, TIOCGWINSZ, &terminal_size);
+  if (status == 0 && terminal_size.ws_col != 0) {
+    terminal_width = terminal_size.ws_col;
+  }
+#endif
+
+  size_t num_cols = header.size();
+  for (size_t i = 0; i < num_cols; ++i) {
+    widths_.emplace_back(0);
+  }
+
+  // Reserve two padding spaces per column and one border per divider.
+  terminal_width = terminal_width - (2 * num_cols) - (num_cols + 1);
+  int avg_width = terminal_width / num_cols;
+
+  for (size_t i = 0; i < num_cols; ++i) {
+    shares_.emplace_back(avg_width);
+  }
+
+  InsertRow(header);
+}
+
+void TablePrinter::InsertRow(const std::vector<std::string>& row) {
+  std::vector<std::vector<std::string>> table_row;
+  size_t max_height = 0;
+
+  for (size_t i = 0; i < row.size(); ++i) {
+    table_row.emplace_back(std::vector<std::string>());
+    std::stringstream ss(row[i]);
+    std::string line;
+    size_t max_width = 0;
+    while (std::getline(ss, line, '\n')) {
+      table_row[i].emplace_back(line);
+      if (line.length() > max_width) max_width = line.length();
+    }
+
+    if (max_width > widths_[i]) widths_[i] = max_width;
+
+    size_t num_lines = table_row[i].size();
+    if (num_lines > max_height) max_height = num_lines;
+  }
+
+  heights_.emplace_back(max_height);
+  data_.emplace_back(table_row);
+}
+
+void TablePrinter::InsertDivider() {
+  heights_.emplace_back(1);
+  data_.emplace_back(std::vector<std::vector<std::string>>());
+}
+
+void TablePrinter::CalcLayout() {
+  size_t field_num = widths_.size();
+  std::vector<size_t> idx(field_num);
+  std::iota(idx.begin(), idx.end(), 0);
+
+  // Visit columns from narrowest to widest.
+  std::stable_sort(idx.begin(), idx.end(), [this](size_t i1, size_t i2) {
+    return this->widths_[i1] < this->widths_[i2];
+  });
+
+  for (auto it = idx.begin(); it != idx.end(); ++it) {
+    // If a column did not use all the space allocated to it,
+    // hand the excess evenly to the remaining (wider) columns.
+    if (widths_[*it] < shares_[*it]) {
+      float remain = shares_[*it] - widths_[*it];
+      shares_[*it] -= remain;
+
+      if (it == idx.end() - 1) break;
+
+      auto next_it = it + 1;
+      float remain_per_column = remain / (idx.end() - next_it);
+      for (; next_it != idx.end(); ++next_it) {
+        shares_[*next_it] += remain_per_column;
+      }
+    }
+  }
+
+  for (auto it = idx.begin(); it != idx.end(); ++it) {
+    shares_[*it] = static_cast<size_t>(shares_[*it]);
+  }
+
+  // For each record.
+  for (size_t i = 0; i < data_.size(); ++i) {
+    // For each field in the record.
+    for (size_t j = 0; j < data_[i].size(); ++j) {
+      // For each line in the field.
+      for (size_t line_index = 0; line_index < data_[i][j].size();
+           ++line_index) {
+        std::string line = data_[i][j][line_index];
+        size_t num_rows = (line.length() + shares_[j] - 1) / shares_[j];
+
+        // If the line is wider than the column's share, break it up
+        // and spread it over multiple lines.
+        if (num_rows > 1) {
+          data_[i][j].erase(data_[i][j].begin() + line_index);
+          for (size_t k = 0; k < num_rows; ++k) {
+            size_t start =
+                std::min(static_cast<size_t>(k * shares_[j]), line.length());
+            size_t end = std::min(static_cast<size_t>((k + 1) * shares_[j]),
+                                  line.length());
+            data_[i][j].insert(data_[i][j].begin() + line_index + k,
+                               line.substr(start, end - start));
+          }
+
+          // Skip the lines we just inserted.
+          line_index += num_rows - 1;
+        }
+
+        if (heights_[i] < (num_rows - 1 + data_[i][j].size()))
+          heights_[i] += num_rows - 1;
+      }
+    }
+  }
+}
+
+void TablePrinter::AddRowDivider(std::stringstream& ss) {
+  ss << "+";
+  for (auto share : shares_) {
+    for (size_t j = 0; j < share + 2; ++j) ss << "-";
+    ss << "+";
+  }
+  ss << "\n";
+}
+
+void TablePrinter::AddRow(std::stringstream& ss, size_t row_idx) {
+  auto row = data_[row_idx];
+  size_t max_height = heights_[row_idx];
+
+  for (size_t h = 0; h < max_height; ++h) {
+    ss << "|" << std::left;
+    for (size_t i = 0; i < row.size(); ++i) {
+      if (h < row[i].size()) {
+        ss << " " << std::setw(shares_[i]) << row[i][h] << " |";
+      } else {
+        ss << " " << std::setw(shares_[i]) << " "
+           << " |";
+      }
+    }
+    ss << "\n";
+  }
+}
+
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/utils/table_printer.h b/paddle/fluid/inference/utils/table_printer.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0a01c8c1f8297f98513909c83ac794d0b7a3685
--- /dev/null
+++ b/paddle/fluid/inference/utils/table_printer.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace paddle {
+namespace inference {
+
+//
+// A simple table printer.
+//
+class TablePrinter {
+ public:
+  explicit TablePrinter(const std::vector<std::string>& header);
+
+  // Insert a row at the end of the table.
+  void InsertRow(const std::vector<std::string>& row);
+
+  // Insert a horizontal divider.
+  void InsertDivider();
+
+  std::string PrintTable();
+
+ private:
+  // Update `shares_` so that any excess space not used by a column is
+  // fairly allocated to the other columns. For example, if two columns
+  // are each allotted 40 characters and one needs only 10, its spare 30
+  // characters are handed to the other column.
+  void CalcLayout();
+
+  // Add a row divider.
+  void AddRowDivider(std::stringstream& ss);
+
+  // Append a row to the table. This function handles the cases where
+  // line wrapping occurs.
+  void AddRow(std::stringstream& ss, size_t row_idx);
+
+ private:
+  // Max width of each column.
+  std::vector<size_t> widths_;
+
+  // Max height of each row.
+  std::vector<size_t> heights_;
+
+  // Fair share of every column.
+  std::vector<float> shares_;
+
+  // Data items for every cell, stored per column as a vector of lines.
+  // For example, the row ["Item 1", "Item 2", "Item 3 line 1\nItem 3 line 2"]
+  // is stored as [["Item 1"], ["Item 2"], ["Item 3 line 1", "Item 3 line 2"]].
+  std::vector<std::vector<std::vector<std::string>>> data_;
+};
+
+}  // namespace inference
+}  // namespace paddle
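To make the intended behavior concrete, a minimal standalone sketch of the printer follows; the rendered column widths are illustrative only, since the real layout depends on the detected terminal width.

// Hedged usage sketch for TablePrinter; output shape is approximate.
#include <iostream>
#include "paddle/fluid/inference/utils/table_printer.h"

int main() {
  paddle::inference::TablePrinter table({"Option", "Value"});
  table.InsertRow({"use_gpu", "true"});
  table.InsertDivider();
  table.InsertRow({"ir_optim", "false"});
  std::cout << table.PrintTable();
  // Roughly:
  // +----------+-------+
  // | Option   | Value |
  // +----------+-------+
  // | use_gpu  | true  |
  // +----------+-------+
  // | ir_optim | false |
  // +----------+-------+
  return 0;
}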
diff --git a/paddle/fluid/inference/utils/table_printer_tester.cc b/paddle/fluid/inference/utils/table_printer_tester.cc
new file mode 100644
index 0000000000000000000000000000000000000000..f56d2527d730c2f2e34dcd6a2a19ef149a98d7db
--- /dev/null
+++ b/paddle/fluid/inference/utils/table_printer_tester.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/utils/table_printer.h"
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
+namespace paddle {
+namespace inference {}  // namespace inference
+}  // namespace paddle
+
+TEST(table_printer, output) {
+  std::vector<std::string> header{"config", "value"};
+  paddle::inference::TablePrinter table(header);
+
+  // model_dir
+  table.InsertRow({"model_dir", "./model_dir"});
+  // model
+  table.InsertRow({"model_file", "./model.pdmodel"});
+  table.InsertRow({"params_file", "./model.pdiparams"});
+
+  table.InsertDivider();
+  // gpu
+  table.InsertRow({"use_gpu", "true"});
+  table.InsertRow({"gpu_device_id", "0"});
+  table.InsertRow({"memory_pool_init_size", "100MB"});
+  table.InsertRow({"thread_local_stream", "false"});
+  table.InsertDivider();
+
+  // trt precision
+  table.InsertRow({"use_trt", "true"});
+  table.InsertRow({"trt_precision", "fp32"});
+  table.InsertRow({"enable_dynamic_shape", "true"});
+  table.InsertRow({"DisableTensorRtOPs", "{}"});
+  table.InsertRow({"EnableTensorRtOSS", "ON"});
+  table.InsertRow({"tensorrt_dla_enabled", "ON"});
+  table.InsertDivider();
+
+  // lite
+  table.InsertRow({"use_lite", "ON"});
+  table.InsertDivider();
+
+  // xpu
+  table.InsertRow({"use_xpu", "true"});
+  table.InsertRow({"xpu_device_id", "0"});
+  table.InsertDivider();
+
+  // ir
+  table.InsertRow({"ir_optim", "true"});
+  table.InsertRow({"ir_debug", "false"});
+  table.InsertRow({"enable_memory_optim", "false"});
+  table.InsertRow({"EnableProfile", "false"});
+  table.InsertRow({"glog_info_disabled", "false"});
+  table.InsertDivider();
+
+  // cpu
+  table.InsertRow({"CpuMathLibrary", "4"});
+  // mkldnn
+  table.InsertRow({"enable_mkldnn", "false"});
+  table.InsertRow({"mkldnn_cache_capacity", "10"});
+
+  // a long string
+  table.InsertRow(
+      {"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ a long string "
+       "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~",
+       "------------------------------------------ a long value "
+       "-----------------------------------------------------"});
+
+  LOG(INFO) << table.PrintTable();
+}
diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc
index 6a949ba2a60103cb824de08df58c807dd6c70197..ecef0c350b67850f8f0c7591d542059f94a08771 100644
--- a/paddle/fluid/pybind/inference_api.cc
+++ b/paddle/fluid/pybind/inference_api.cc
@@ -456,6 +456,7 @@ void BindAnalysisConfig(py::module *m) {
       .def(py::init<const AnalysisConfig &>())
       .def(py::init<const std::string &>())
       .def(py::init<const std::string &, const std::string &>())
+      .def("summary", &AnalysisConfig::Summary)
       .def("set_model", (void (AnalysisConfig::*)(const std::string &)) &
                             AnalysisConfig::SetModel)
       .def("set_model", (void (AnalysisConfig::*)(const std::string &,
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
index fab287b5eeba440dcb4c4750eb69af640208fac2..1d9f989782962f40f7fc7978a1e0484be137ebc1 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
@@ -138,7 +138,7 @@ class InferencePassTest(unittest.TestCase):
             config.enable_mkldnn()
         if self.enable_mkldnn_bfloat16:
             config.enable_mkldnn_bfloat16()
-
+        print('config summary:', config.summary())
         return config
 
     def check_output(self, atol=1e-5):