Unverified commit 831c1c6c authored by Wilber, committed by GitHub

[Inference] Add config.Summary api (#34122)

Parent 2891ce64
@@ -27,7 +27,7 @@ if(WITH_MKLDNN)
set(mkldnn_quantizer_cfg ${mkldnn_quantizer_cfg} PARENT_SCOPE)
endif()
cc_library(analysis_config SRCS analysis_config.cc DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder table_printer)
cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
if(WITH_CRYPTO)
...
@@ -12,8 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include "paddle/fluid/inference/api/paddle_analysis_config.h" #include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/api/paddle_pass_builder.h" #include "paddle/fluid/inference/api/paddle_pass_builder.h"
#include "paddle/fluid/inference/utils/table_printer.h"
#include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/gpu_info.h"
...@@ -719,4 +721,99 @@ void AnalysisConfig::PartiallyRelease() { ...@@ -719,4 +721,99 @@ void AnalysisConfig::PartiallyRelease() {
void AnalysisConfig::EnableGpuMultiStream() { thread_local_stream_ = true; } void AnalysisConfig::EnableGpuMultiStream() { thread_local_stream_ = true; }
std::string AnalysisConfig::Summary() {
const std::vector<std::string> header{"Option", "Value"};
paddle::inference::TablePrinter os(header);
if (!model_dir_.empty()) {
os.InsertRow({"model_dir", model_dir_});
}
if (!(prog_file_.empty() && params_file_.empty())) {
os.InsertRow({"model_file", prog_file_});
os.InsertRow({"params_file", params_file_});
}
if (model_from_memory_) {
os.InsertRow({"model_from_memory", params_file_});
}
os.InsetDivider();
// cpu info
os.InsertRow(
{"cpu_math_thread", std::to_string(cpu_math_library_num_threads_)});
os.InsertRow({"enable_mkdlnn", use_mkldnn_ ? "true" : "false"});
os.InsertRow(
{"mkldnn_cache_capacity", std::to_string(mkldnn_cache_capacity_)});
os.InsetDivider();
auto Precision2String =
[](paddle::AnalysisConfig::Precision prec) -> std::string {
if (prec == Precision::kFloat32)
return "fp32";
else if (prec == Precision::kHalf)
return "fp16";
else if (prec == Precision::kInt8)
return "int8";
else
return "None";
};
// gpu info
os.InsertRow({"use_gpu", use_gpu_ ? "true" : "false"});
if (use_gpu_) {
os.InsertRow({"gpu_device_id", std::to_string(gpu_device_id_)});
os.InsertRow({"memory_pool_init_size",
std::to_string(memory_pool_init_size_mb_) + "MB"});
os.InsertRow(
{"thread_local_stream", thread_local_stream_ ? "true" : "false"});
os.InsertRow({"use_tensorrt", use_tensorrt_ ? "true" : "false"});
if (use_tensorrt_) {
os.InsertRow({"tensorrt_precision_mode",
Precision2String(tensorrt_precision_mode_)});
os.InsertRow({"tensorrt_workspace_size",
std::to_string(tensorrt_workspace_size_)});
os.InsertRow(
{"tensorrt_max_batch_size", std::to_string(tensorrt_max_batchsize_)});
os.InsertRow({"tensorrt_min_subgraph_size",
std::to_string(tensorrt_min_subgraph_size_)});
os.InsertRow({"tensorrt_use_static_engine",
trt_use_static_engine_ ? "true" : "false"});
os.InsertRow(
{"tensorrt_use_calib_mode", trt_use_calib_mode_ ? "true" : "false"});
// dynamic_shape
os.InsertRow({"tensorrt_enable_dynamic_shape",
min_input_shape_.empty() ? "false" : "true"});
os.InsertRow({"tensorrt_use_oss", trt_use_oss_ ? "true" : "false"});
os.InsertRow({"tensorrt_use_dla", trt_use_dla_ ? "true" : "false"});
if (trt_use_dla_) {
os.InsertRow({"tensorrt_dla_core", std::to_string(trt_dla_core_)});
}
}
}
os.InsetDivider();
// xpu info
os.InsertRow({"use_xpu", use_xpu_ ? "true" : "false"});
if (use_xpu_) {
os.InsertRow({"xpu_device_id", std::to_string(xpu_device_id_)});
os.InsertRow(
{"xpu_l3_workspace_size", std::to_string(xpu_l3_workspace_size_)});
}
os.InsetDivider();
if (use_lite_) {
os.InsertRow({"use_lite", use_lite_ ? "true" : "false"});
}
// ir info
os.InsertRow({"ir_optim", enable_ir_optim_ ? "true" : "false"});
os.InsertRow({"ir_debug", ir_debug_ ? "true" : "false"});
os.InsertRow({"memory_optim", enable_memory_optim_ ? "true" : "false"});
os.InsertRow({"enable_profile", with_profile_ ? "true" : "false"});
os.InsertRow({"enable_log", with_glog_info_ ? "true" : "false"});
return os.PrintTable();
}
} // namespace paddle
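For context, a minimal C++ usage sketch of the new API; the model paths and GPU settings below are illustrative placeholders, not part of this commit:

#include <iostream>
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  paddle::AnalysisConfig config;
  // Placeholder model files; any valid inference model works here.
  config.SetModel("./model.pdmodel", "./model.pdiparams");
  config.EnableUseGpu(100 /*memory_pool_init_size_mb*/, 0 /*device_id*/);
  // Summary() returns the option/value table as a string.
  std::cout << config.Summary() << std::endl;
  return 0;
}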
@@ -34,6 +34,7 @@ TEST(AnalysisPredictor, analysis_off) {
AnalysisConfig config;
config.SetModel(FLAGS_dirname);
config.SwitchIrOptim(false);
LOG(INFO) << config.Summary();
auto _predictor = CreatePaddlePredictor<AnalysisConfig>(config);
auto* predictor = static_cast<AnalysisPredictor*>(_predictor.get());
@@ -68,6 +69,7 @@ TEST(AnalysisPredictor, analysis_on) {
#else
config.DisableGpu();
#endif
LOG(INFO) << config.Summary();
auto _predictor = CreatePaddlePredictor<AnalysisConfig>(config);
auto* predictor = static_cast<AnalysisPredictor*>(_predictor.get());
@@ -104,6 +106,7 @@ TEST(AnalysisPredictor, ZeroCopy) {
AnalysisConfig config;
config.SetModel(FLAGS_dirname);
config.SwitchUseFeedFetchOps(false);
LOG(INFO) << config.Summary();
auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
auto w0 = predictor->GetInputTensor("firstw");
@@ -144,6 +147,7 @@ TEST(AnalysisPredictor, Clone) {
config.SetModel(FLAGS_dirname);
config.SwitchUseFeedFetchOps(true);
config.SwitchIrOptim(true);
LOG(INFO) << config.Summary();
std::vector<std::unique_ptr<PaddlePredictor>> predictors;
predictors.emplace_back(CreatePaddlePredictor(config));
...
@@ -622,6 +622,11 @@ struct PD_INFER_DECL AnalysisConfig {
void EnableGpuMultiStream();
void PartiallyRelease();
///
/// \brief Get a summary of the config, formatted as a table.
///
std::string Summary();
protected:
// Update the config.
void Update();
...
@@ -417,5 +417,12 @@ __pd_give PD_OneDimArrayCstr* PD_ConfigAllPasses(
std::vector<std::string> passes = config->pass_builder()->AllPasses();
return paddle_infer::CvtVecToOneDimArrayCstr(passes);
}
const char* PD_ConfigSummary(__pd_keep PD_Config* pd_config) {
CHECK_AND_CONVERT_PD_CONFIG;
auto sum_str = config->Summary();
char* c = reinterpret_cast<char*>(malloc(sum_str.length() + 1));
snprintf(c, sum_str.length() + 1, "%s", sum_str.c_str());
return c;
}
} // extern "C" } // extern "C"
@@ -636,6 +636,14 @@ PADDLE_CAPI_EXPORT extern void PD_ConfigAppendPass(
///
PADDLE_CAPI_EXPORT extern __pd_give PD_OneDimArrayCstr* PD_ConfigAllPasses(
__pd_keep PD_Config* pd_config);
///
/// \brief Get a summary of the config.
/// Note: the caller must free the returned string.
///
/// \return Return the config summary string.
///
PADDLE_CAPI_EXPORT extern const char* PD_ConfigSummary(
__pd_keep PD_Config* pd_config);
#ifdef __cplusplus
} // extern "C"
...
@@ -760,3 +760,15 @@ func (config *Config) AllPasses() []string {
C.PD_OneDimArrayCstrDestroy(cPasses)
return passes
}
///
/// \brief Get a summary of the config.
///
/// \return Return the config summary string.
///
func (config *Config) Summary() string {
cSummary := C.PD_ConfigSummary(config.c)
summary := C.GoString(cSummary)
C.free(unsafe.Pointer(cSummary))
return summary
}
@@ -85,6 +85,8 @@ func TestNewConfig(t *testing.T) {
config.DeletePass("test_pass")
t.Logf("After DeletePass, AllPasses:%+v", config.AllPasses())
t.Log(config.Summary())
}
func TestLite(t *testing.T) {
...
@@ -2,3 +2,5 @@ cc_library(benchmark SRCS benchmark.cc DEPS enforce)
cc_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark)
cc_library(infer_io_utils SRCS io_utils.cc DEPS paddle_inference_api lod_tensor)
cc_test(infer_io_utils_tester SRCS io_utils_tester.cc DEPS infer_io_utils)
cc_library(table_printer SRCS table_printer.cc)
cc_test(test_table_printer SRCS table_printer_tester.cc DEPS table_printer)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/utils/table_printer.h"
#ifdef WIN32
// suppress the min and max definitions in Windef.h.
#define NOMINMAX
#include <Windows.h>
#else
#include <sys/ioctl.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <iomanip>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>
namespace paddle {
namespace inference {
std::string TablePrinter::PrintTable() {
std::stringstream ss;
ss << "\n";
CalcLayout();
AddRowDivider(ss);
AddRow(ss, 0);
AddRowDivider(ss);
for (size_t i = 1; i < data_.size(); ++i) {
if (data_[i].empty()) {
AddRowDivider(ss);
} else {
AddRow(ss, i);
}
}
AddRowDivider(ss);
return ss.str();
}
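For orientation, the dividers and rows emitted by PrintTable render a table of roughly this shape (column widths and contents are illustrative, not taken from the commit):

+-----------------+---------------+
| Option          | Value         |
+-----------------+---------------+
| model_dir       | ./model_dir   |
| use_gpu         | true          |
+-----------------+---------------+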
TablePrinter::TablePrinter(const std::vector<std::string>& header) {
size_t terminal_width = 500;
#ifdef WIN32
CONSOLE_SCREEN_BUFFER_INFO csbi;
int ret = GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
if (ret && (csbi.dwSize.X != 0)) {
terminal_width = csbi.dwSize.X;
}
#else
struct winsize terminal_size;
int status = ioctl(STDOUT_FILENO, TIOCGWINSZ, &terminal_size);
if (status == 0 && terminal_size.ws_col != 0) {
terminal_width = terminal_size.ws_col;
}
#endif
size_t num_cols = header.size();
for (size_t i = 0; i < num_cols; ++i) {
widths_.emplace_back(0);
}
// Reserve space for the cell padding (2 per column) and the border
// characters (num_cols + 1), then split the remaining width evenly.
terminal_width = terminal_width - (2 * num_cols) - (num_cols + 1);
int avg_width = terminal_width / num_cols;
for (size_t i = 0; i < num_cols; ++i) {
shares_.emplace_back(avg_width);
}
InsertRow(header);
}
void TablePrinter::InsertRow(const std::vector<std::string>& row) {
std::vector<std::vector<std::string>> table_row;
size_t max_height = 0;
for (size_t i = 0; i < row.size(); ++i) {
table_row.emplace_back(std::vector<std::string>());
std::stringstream ss(row[i]);
std::string line;
size_t max_width = 0;
while (std::getline(ss, line, '\n')) {
table_row[i].emplace_back(line);
if (line.length() > max_width) max_width = line.length();
}
if (max_width > widths_[i]) widths_[i] = max_width;
size_t num_lines = table_row[i].size();
if (num_lines > max_height) max_height = num_lines;
}
heights_.emplace_back(max_height);
data_.emplace_back(table_row);
}
void TablePrinter::InsetDivider() {
heights_.emplace_back(1);
data_.emplace_back(std::vector<std::vector<std::string>>());
}
void TablePrinter::CalcLayout() {
size_t field_num = widths_.size();
std::vector<size_t> idx(field_num);
std::iota(idx.begin(), idx.end(), 0);
std::stable_sort(idx.begin(), idx.end(), [this](size_t i1, size_t i2) {
return this->widths_[i1] < this->widths_[i2];
});
for (auto it = idx.begin(); it != idx.end(); ++it) {
// If a column did not use all the space allocated to it,
// redistribute the excess to the remaining columns.
if (widths_[*it] < shares_[*it]) {
float remain = shares_[*it] - widths_[*it];
shares_[*it] -= remain;
if (it == idx.end() - 1) break;
auto next_it = it + 1;
float remain_per_column = remain / (idx.end() - next_it);
for (; next_it != idx.end(); ++next_it) {
shares_[*next_it] += remain_per_column;
}
}
}
for (auto it = idx.begin(); it != idx.end(); ++it) {
shares_[*it] = static_cast<size_t>(shares_[*it]);
}
// For each record.
for (size_t i = 0; i < data_.size(); ++i) {
// For each field in the record.
for (size_t j = 0; j < data_[i].size(); ++j) {
// For each line in the field.
for (size_t line_index = 0; line_index < data_[i][j].size();
++line_index) {
std::string line = data_[i][j][line_index];
size_t num_rows = (line.length() + shares_[j] - 1) / shares_[j];
// If the number of rows required for this record is larger than 1, we
// will break that line and put it in multiple lines
if (num_rows > 1) {
data_[i][j].erase(data_[i][j].begin() + line_index);
for (size_t k = 0; k < num_rows; ++k) {
size_t start =
std::min(static_cast<size_t>(k * shares_[j]), line.length());
size_t end = std::min(static_cast<size_t>((k + 1) * shares_[j]),
line.length());
data_[i][j].insert(data_[i][j].begin() + line_index + k,
line.substr(start, end - start));
}
// update line_index
line_index += num_rows - 1;
}
if (heights_[i] < (num_rows - 1 + data_[i][j].size()))
heights_[i] += num_rows - 1;
}
}
}
}
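To make CalcLayout's redistribution concrete, a hypothetical walkthrough (all numbers invented for illustration):

// Assume 2 columns on an 80-character terminal. The constructor reserves
// 2*2 characters of cell padding plus 3 border characters, leaving 73, so
// shares_ starts as {36, 36}. Suppose widths_ ends up as {12, 60}: columns
// are visited narrowest first, so column 0 keeps only the 12 it needs and
// its unused 24 characters are handed to the remaining column, giving
// shares_ = {12, 60}. Column 1 then fits without wrapping.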
void TablePrinter::AddRowDivider(std::stringstream& ss) {
ss << "+";
for (auto share : shares_) {
for (size_t j = 0; j < share + 2; ++j) ss << "-";
ss << "+";
}
ss << "\n";
}
void TablePrinter::AddRow(std::stringstream& ss, size_t row_idx) {
auto row = data_[row_idx];
size_t max_height = heights_[row_idx];
for (size_t h = 0; h < max_height; ++h) {
ss << "|" << std::left;
for (size_t i = 0; i < row.size(); ++i) {
if (h < row[i].size()) {
ss << " " << std::setw(shares_[i]) << row[i][h] << " |";
} else {
ss << " " << std::setw(shares_[i]) << " "
<< " |";
}
}
ss << "\n";
}
}
} // namespace inference
} // namespace paddle
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
namespace paddle {
namespace inference {
//
// A simple table printer.
//
class TablePrinter {
public:
explicit TablePrinter(const std::vector<std::string>& header);
// Insert a row at the end of the table
void InsertRow(const std::vector<std::string>& row);
// Insert a divider.
void InsetDivider();
std::string PrintTable();
private:
// Update `shares_` so that any excess space a column
// does not use is fairly allocated to the other
// columns.
void CalcLayout();
// Add a row divider
void AddRowDivider(std::stringstream& ss);
// Append a row to the stream. This function handles the case where a
// cell wraps across multiple lines.
void AddRow(std::stringstream& ss, size_t row_idx);
private:
// Maximum content width of each column.
std::vector<float> widths_;
// Height (line count) of each row.
std::vector<float> heights_;
// Fair share of the terminal width for every column.
std::vector<float> shares_;
// A vector of vectors of vectors containing the data items for every
// column. Each record is stored as a vector of strings, one entry per
// line of the cell. For example, ["Item 1", "Item 2",
// "Item 3 line 1\n Item 3 line 2"] is stored as
// [["Item 1"], ["Item 2"], ["Item 3 line 1", "Item 3 line 2"]].
std::vector<std::vector<std::vector<std::string>>> data_;
};
} // namespace inference
} // namespace paddle
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/utils/table_printer.h"
#include <glog/logging.h>
#include <gtest/gtest.h>
namespace paddle {
namespace inference {} // namespace inference
} // namespace paddle
TEST(table_printer, output) {
std::vector<std::string> header{"config", "value"};
paddle::inference::TablePrinter table(header);
// model_dir
table.InsertRow({"model_dir", "./model_dir"});
// model
table.InsertRow({"model_file", "./model.pdmodel"});
table.InsertRow({"params_file", "./model.pdiparams"});
table.InsetDivider();
// gpu
table.InsertRow({"use_gpu", "true"});
table.InsertRow({"gpu_device_id", "0"});
table.InsertRow({"memory_pool_init_size", "100MB"});
table.InsertRow({"thread_local_stream", "false"});
table.InsetDivider();
// trt precision
table.InsertRow({"use_trt", "true"});
table.InsertRow({"trt_precision", "fp32"});
table.InsertRow({"enable_dynamic_shape", "true"});
table.InsertRow({"DisableTensorRtOPs", "{}"});
table.InsertRow({"EnableTensorRtOSS", "ON"});
table.InsertRow({"tensorrt_dla_enabled", "ON"});
table.InsetDivider();
// lite
table.InsertRow({"use_lite", "ON"});
table.InsetDivider();
// xpu
table.InsertRow({"use_xpu", "true"});
table.InsertRow({"xpu_device_id", "0"});
table.InsetDivider();
// ir
table.InsertRow({"ir_optim", "true"});
table.InsertRow({"ir_debug", "false"});
table.InsertRow({"enable_memory_optim", "false"});
table.InsertRow({"EnableProfile", "false"});
table.InsertRow({"glog_info_disabled", "false"});
table.InsetDivider();
// cpu
table.InsertRow({"CpuMathLibrary", "4"});
// mkldnn
table.InsertRow({"enable_mkldnn", "false"});
table.InsertRow({"mkldnn_cache_capacity", "10"});
// a long string
table.InsertRow(
{"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ a long string "
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~",
"------------------------------------------ a long value "
"-----------------------------------------------------"});
LOG(INFO) << table.PrintTable();
}
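Because the final row exceeds the per-column share, PrintTable wraps it; on a narrow terminal the long cells are broken across physical lines, roughly like this (alignment approximate, for illustration only):

| ~~~~~~~~~~~~~~~~~~~~~~~~ a long   | ------------------------- a long  |
| string ~~~~~~~~~~~~~~~~~~~~~~~~   | value ---------------------------  |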
@@ -456,6 +456,7 @@ void BindAnalysisConfig(py::module *m) {
.def(py::init<const AnalysisConfig &>())
.def(py::init<const std::string &>())
.def(py::init<const std::string &, const std::string &>())
.def("summary", &AnalysisConfig::Summary)
.def("set_model", (void (AnalysisConfig::*)(const std::string &)) & .def("set_model", (void (AnalysisConfig::*)(const std::string &)) &
AnalysisConfig::SetModel) AnalysisConfig::SetModel)
.def("set_model", (void (AnalysisConfig::*)(const std::string &, .def("set_model", (void (AnalysisConfig::*)(const std::string &,
......
@@ -138,7 +138,7 @@ class InferencePassTest(unittest.TestCase):
config.enable_mkldnn()
if self.enable_mkldnn_bfloat16:
config.enable_mkldnn_bfloat16()
print('config summary:', config.summary())
return config
def check_output(self, atol=1e-5):
...