提交 71d9097a 编写于 作者: T Tao Luo

fix analyzer_test runtime error when running with native_config

test=develop
上级 d09d6ead
...@@ -62,7 +62,7 @@ std::ostream &operator<<(std::ostream &os, ...@@ -62,7 +62,7 @@ std::ostream &operator<<(std::ostream &os,
const contrib::AnalysisConfig &config) { const contrib::AnalysisConfig &config) {
os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n"; os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n";
num_spaces++; num_spaces++;
os << *reinterpret_cast<const NativeConfig *>(&config); os << config.ToNativeConfig();
if (!config.model_from_memory()) { if (!config.model_from_memory()) {
os << GenSpaces(num_spaces) << "prog_file: " << config.prog_file() << "\n"; os << GenSpaces(num_spaces) << "prog_file: " << config.prog_file() << "\n";
os << GenSpaces(num_spaces) << "param_file: " << config.params_file() os << GenSpaces(num_spaces) << "param_file: " << config.params_file()
......
...@@ -54,11 +54,13 @@ namespace paddle { ...@@ -54,11 +54,13 @@ namespace paddle {
namespace inference { namespace inference {
// Logs the predictor configuration that a test is about to run with.
// The incoming pointer is type-erased; the test harness always passes an
// AnalysisConfig here, so we pun it back before printing (NOTE(review):
// relies on the caller's cast convention — confirm against call sites).
void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) {
  const auto *cfg = reinterpret_cast<const contrib::AnalysisConfig *>(config);
  if (use_analysis) {
    // Analysis path: print the full AnalysisConfig.
    LOG(INFO) << *cfg;
  } else {
    // Native path: print only the NativeConfig projection of the settings.
    LOG(INFO) << cfg->ToNativeConfig();
  }
}
void CompareResult(const std::vector<PaddleTensor> &outputs, void CompareResult(const std::vector<PaddleTensor> &outputs,
...@@ -96,12 +98,13 @@ void CompareResult(const std::vector<PaddleTensor> &outputs, ...@@ -96,12 +98,13 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
std::unique_ptr<PaddlePredictor> CreateTestPredictor( std::unique_ptr<PaddlePredictor> CreateTestPredictor(
const PaddlePredictor::Config *config, bool use_analysis = true) { const PaddlePredictor::Config *config, bool use_analysis = true) {
const auto *analysis_config =
reinterpret_cast<const contrib::AnalysisConfig *>(config);
if (use_analysis) { if (use_analysis) {
return CreatePaddlePredictor<contrib::AnalysisConfig>( return CreatePaddlePredictor<contrib::AnalysisConfig>(*analysis_config);
*(reinterpret_cast<const contrib::AnalysisConfig *>(config)));
} }
return CreatePaddlePredictor<NativeConfig>( auto native_config = analysis_config->ToNativeConfig();
*(reinterpret_cast<const NativeConfig *>(config))); return CreatePaddlePredictor<NativeConfig>(native_config);
} }
// Number of elements in `out` — VecReduceToInt presumably folds the shape
// vector into a single element count (product of dims) — TODO confirm.
size_t GetSize(const PaddleTensor &out) { return VecReduceToInt(out.shape); }
...@@ -328,10 +331,7 @@ void CompareNativeAndAnalysis( ...@@ -328,10 +331,7 @@ void CompareNativeAndAnalysis(
const std::vector<std::vector<PaddleTensor>> &inputs) { const std::vector<std::vector<PaddleTensor>> &inputs) {
PrintConfig(config, true); PrintConfig(config, true);
std::vector<PaddleTensor> native_outputs, analysis_outputs; std::vector<PaddleTensor> native_outputs, analysis_outputs;
const auto *analysis_config = TestOneThreadPrediction(config, inputs, &native_outputs, false);
reinterpret_cast<const contrib::AnalysisConfig *>(config);
auto native_config = analysis_config->ToNativeConfig();
TestOneThreadPrediction(&native_config, inputs, &native_outputs, false);
TestOneThreadPrediction(config, inputs, &analysis_outputs, true); TestOneThreadPrediction(config, inputs, &analysis_outputs, true);
CompareResult(analysis_outputs, native_outputs); CompareResult(analysis_outputs, native_outputs);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册