Unverified commit 79ed1c76, authored by tensor-tang, committed by GitHub

fix bn fuse vardesc and add model saver (#17143)

* fix bn fuse vardesc and add model saver

test=develop

* unify save model in test helper

test=develop

* fix mkdir on windows

test=develop

* remove magic number, use bn bias var desc

test=develop
Parent 4e1bc6e8
paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -136,18 +136,21 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
       return;
     }
+    // Get batch norm bias
+    auto* bn_bias_tensor =
+        scope->FindVar(bn_bias->Name())->GetMutable<LoDTensor>();
     // Create eltwise_y (conv bias) variable
     VarDesc eltwise_y_in_desc(
         patterns::PDNodeName(name_scope_, "eltwise_y_in"));
+    eltwise_y_in_desc.SetShape(framework::vectorize(bn_bias_tensor->dims()));
+    eltwise_y_in_desc.SetDataType(bn_bias_tensor->type());
+    eltwise_y_in_desc.SetLoDLevel(bn_bias->Var()->GetLoDLevel());
     eltwise_y_in_desc.SetPersistable(true);
     auto* eltwise_y_in_node = g->CreateVarNode(&eltwise_y_in_desc);
     auto* eltwise_y_in_tensor =
         scope->Var(eltwise_y_in_node->Name())->GetMutable<LoDTensor>();
-    // Get batch norm bias
-    auto* bn_bias_tensor =
-        scope->FindVar(bn_bias->Name())->GetMutable<LoDTensor>();
     // Initialize eltwise_y
     eltwise_y_in_tensor->Resize(bn_bias_tensor->dims());
     std::fill_n(eltwise_y_in_tensor->mutable_data<float>(platform::CPUPlace()),
......
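Annotation: the added SetShape/SetDataType/SetLoDLevel calls matter because eltwise_y_in becomes the fused conv bias, which is per-channel like bn_bias; without those fields the VarDesc is incomplete, which surfaces once the optimized program is saved by the model saver added in this commit. For reference, a minimal per-channel sketch of the standard conv+BN folding this pass performs (illustrative names, not the pass's actual implementation):

#include <cmath>
#include <vector>

// BN(z) = gamma * (z - mean) / sqrt(var + eps) + beta collapses into
// z * scale + bias, so the conv weights absorb `scale` and the elementwise
// add absorbs `bias` -- which is why the bias mirrors bn_bias's shape.
// Output vectors must be pre-sized to gamma.size().
void FoldBnIntoConv(const std::vector<float>& gamma,
                    const std::vector<float>& beta,
                    const std::vector<float>& mean,
                    const std::vector<float>& var, float eps,
                    std::vector<float>* weight_scale,
                    std::vector<float>* eltwise_bias) {
  for (size_t c = 0; c < gamma.size(); ++c) {
    const float scale = gamma[c] / std::sqrt(var[c] + eps);
    (*weight_scale)[c] = scale;                      // multiplies channel c's weights
    (*eltwise_bias)[c] = beta[c] - mean[c] * scale;  // the new conv bias
  }
}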
paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -86,6 +86,9 @@ inference_analysis_test(test_analyzer_small_dam SRCS analyzer_dam_tester.cc
         EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
         ARGS --infer_model=${DAM_SMALL_INSTALL_DIR}/model --infer_data=${DAM_SMALL_INSTALL_DIR}/data.txt --max_turn_num=1 SERIAL)
 
+# save model
+inference_analysis_api_test(test_analyzer_save_model ${DAM_SMALL_INSTALL_DIR} analyzer_save_model_tester.cc SERIAL)
+
 # chinese_ner
 set(CHINESE_NER_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/chinese_ner")
 download_model_and_data(${CHINESE_NER_INSTALL_DIR} "chinese_ner_model.tar.gz" "chinese_ner-data.txt.tar.gz")
......
paddle/fluid/inference/tests/api/analyzer_dam_tester.cc
@@ -171,9 +171,7 @@ void SetConfig(AnalysisConfig *cfg) {
 }
 
 void SetOptimConfig(AnalysisConfig *cfg) {
-  std::string optimModelPath =
-      FLAGS_infer_model.substr(0, FLAGS_infer_model.find_last_of("/")) +
-      "/saved_optim_model";
+  std::string optimModelPath = FLAGS_infer_model + "/saved_optim_model";
   cfg->SetModel(optimModelPath + "/model", optimModelPath + "/params");
   cfg->SwitchIrOptim(true);
   cfg->SwitchSpecifyInputNames();
@@ -327,16 +325,10 @@ TEST(Analyzer_dam, compare_determine) {
 // Save optim model
 TEST(Analyzer_dam, save_optim_model) {
   AnalysisConfig cfg;
-  SetConfig(&cfg);
-  std::string optimModelPath =
-      FLAGS_infer_model.substr(0, FLAGS_infer_model.find_last_of("/")) +
-      "/saved_optim_model";
+  std::string optimModelPath = FLAGS_infer_model + "/saved_optim_model";
   mkdir(optimModelPath.c_str(), 0777);
-  auto predictor = CreateTestPredictor(
-      reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-      FLAGS_use_analysis);
-  (static_cast<AnalysisPredictor *>(predictor.get()))
-      ->SaveOptimModel(optimModelPath);
+  SetConfig(&cfg);
+  SaveOptimModel(&cfg, optimModelPath);
 }
 
 void CompareOptimAndOrig(const PaddlePredictor::Config *orig_config,
......
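Annotation: the tests above call POSIX mkdir() with a mode argument, which does not exist on MSVC; the "fix mkdir on windows" item in the commit message refers to making this call portable. A minimal sketch of the usual guard (helper name is illustrative, not from this patch):

#include <string>

#ifdef _WIN32
#include <direct.h>    // _mkdir
#else
#include <sys/stat.h>  // mkdir
#include <sys/types.h>
#endif

// Create a directory; MSVC has no two-argument mkdir, and _mkdir takes
// only the path (POSIX mode bits do not apply on Windows).
static int MakeDirectory(const std::string& path) {
#ifdef _WIN32
  return _mkdir(path.c_str());
#else
  return mkdir(path.c_str(), 0777);
#endif
}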
paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc
@@ -33,9 +33,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 }
 
 void SetOptimConfig(AnalysisConfig *cfg) {
-  std::string optimModelPath =
-      FLAGS_infer_model.substr(0, FLAGS_infer_model.find_last_of("/")) +
-      "/saved_optim_model";
+  std::string optimModelPath = FLAGS_infer_model + "/saved_optim_model";
   cfg->SetModel(optimModelPath + "/model", optimModelPath + "/params");
   cfg->DisableGpu();
   cfg->SwitchIrOptim();
@@ -107,16 +105,10 @@ TEST(Analyzer_resnet50, compare_determine) {
 // Save optim model
 TEST(Analyzer_resnet50, save_optim_model) {
   AnalysisConfig cfg;
-  SetConfig(&cfg);
-  std::string optimModelPath =
-      FLAGS_infer_model.substr(0, FLAGS_infer_model.find_last_of("/")) +
-      "/saved_optim_model";
+  std::string optimModelPath = FLAGS_infer_model + "/saved_optim_model";
   mkdir(optimModelPath.c_str(), 0777);
-  auto predictor = CreateTestPredictor(
-      reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-      FLAGS_use_analysis);
-  (static_cast<AnalysisPredictor *>(predictor.get()))
-      ->SaveOptimModel(optimModelPath);
+  SetConfig(&cfg);
+  SaveOptimModel(&cfg, optimModelPath);
 }
 
 void CompareOptimAndOrig(const PaddlePredictor::Config *orig_config,
......
paddle/fluid/inference/tests/api/analyzer_save_model_tester.cc (new file)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {

void SetConfig(AnalysisConfig *cfg) {
cfg->SwitchSpecifyInputNames();
cfg->SwitchIrOptim(true);
cfg->SwitchIrDebug();
}
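
// Build a predictor from `cfg` and return the number of operators in the
// program it ends up running (reported through the fuse-statistics helper).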
int GetNumOps(const AnalysisConfig &cfg) {
int num_ops;
auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
GetFuseStatis(static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
return num_ops;
}

TEST(Analyzer, save_model) {
AnalysisConfig cfg;
SetConfig(&cfg);
cfg.SetModel(FLAGS_infer_model + "/__model__", FLAGS_infer_model + "/param");
std::string optimModelPath = FLAGS_infer_model + "/saved_optim_model";
mkdir(optimModelPath.c_str(), 0777);
SaveOptimModel(&cfg, optimModelPath);
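
  // Clearing all IR passes makes the next predictor load the original
  // program as-is, so origin_num_ops counts the ops before any fusion.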
cfg.pass_builder()->ClearPasses();
int origin_num_ops = GetNumOps(cfg);
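
  // Reload the optimized model saved above and count again; the fused
  // program should never contain more ops than the original.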
cfg.SetModel(optimModelPath + "/model", optimModelPath + "/params");
int fused_num_ops = GetNumOps(cfg);
CHECK_LE(fused_num_ops, origin_num_ops);
}

}  // namespace inference
}  // namespace paddle
paddle/fluid/inference/tests/api/tester_helper.h
@@ -552,6 +552,13 @@ void CompareAnalysisAndZeroCopy(
   CompareResult(analysis_outputs, zerocopy_outputs);
 }
 
+void SaveOptimModel(AnalysisConfig *cfg, const std::string &dstPath) {
+  auto predictor = CreateTestPredictor(
+      reinterpret_cast<const PaddlePredictor::Config *>(cfg),
+      FLAGS_use_analysis);
+  (static_cast<AnalysisPredictor *>(predictor.get()))->SaveOptimModel(dstPath);
+}
+
 template <typename T>
 std::string LoDTensorSummary(const framework::LoDTensor &tensor) {
   std::stringstream ss;
......
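Annotation: taken together with the tests above, the save-then-reload flow this helper unifies looks roughly like the following sketch (not code from the patch; SetConfig and SetOptimConfig are the per-tester functions shown earlier):

AnalysisConfig cfg;
SetConfig(&cfg);  // original model location plus IR settings

std::string optimModelPath = FLAGS_infer_model + "/saved_optim_model";
mkdir(optimModelPath.c_str(), 0777);   // POSIX; see the portability note above
SaveOptimModel(&cfg, optimModelPath);  // writes <path>/model and <path>/params

AnalysisConfig optim_cfg;
SetOptimConfig(&optim_cfg);  // reads back <path>/model and <path>/params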