Unverified commit 5de01e8a, authored by Yuanle Liu, committed by GitHub

[Paddle Inference] clean unused code (#48392)

Parent ca552933
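
Summary of the change, as read from the hunks below: the Argument field enable_analysis_optim is renamed to enable_ir_optim (renaming its generated accessors everywhere), the unused ir_graph_clean_pass (source, header, CMake target, registry entry, and default pipeline entry) and the unused LoadProgramDesc helper are deleted, every analysis pass repr() string is normalized to snake_case, framework::ir::Graph is spelled out where a removed using-declaration had been relied on, AnalysisPredictor::PrepareArgument reworks how the IR pass list is assembled, and the TensorRT op reset in Clone() is guarded by PADDLE_WITH_TENSORRT.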
@@ -195,5 +195,6 @@ void NaiveExecutor::ResetTrtOps(int num) {
   }
 #endif
 }
 }  // namespace framework
 }  // namespace paddle
@@ -38,8 +38,7 @@ void Analyzer::RunAnalysis(Argument *argument) {
     if (!disable_logs) {
       string::PrettyLogH1("--- Running analysis [%s]", pass);
     }
-    if (!argument->enable_analysis_optim() && pass == "ir_analysis_pass")
-      continue;
+    if (!argument->enable_ir_optim() && pass == "ir_analysis_pass") continue;
     auto *ptr = PassRegistry::Global().Retreive(pass);
     PADDLE_ENFORCE_NOT_NULL(ptr,
......
@@ -31,7 +31,7 @@ TEST(Analyzer, analysis_without_tensorrt) {
   Argument argument;
   argument.SetDisableLogs(false);
   argument.SetModelDir(FLAGS_inference_model_dir);
-  argument.SetEnableAnalysisOptim(false);
+  argument.SetEnableIrOptim(false);
   argument.SetUseGPU(false);
   argument.SetAnalysisPasses({"ir_graph_build_pass",
                               "ir_analysis_pass",
@@ -44,7 +44,7 @@ TEST(Analyzer, analysis_without_tensorrt) {
 TEST(Analyzer, analysis_with_tensorrt) {
   Argument argument;
   argument.SetDisableLogs(false);
-  argument.SetEnableAnalysisOptim(false);
+  argument.SetEnableIrOptim(false);
   argument.SetTensorRtMaxBatchSize(3);
   argument.SetTensorRtWorkspaceSize(1 << 20);
   argument.SetModelDir(FLAGS_inference_model_dir);
......
@@ -42,8 +42,6 @@ namespace paddle {
 namespace inference {
 namespace analysis {
-using framework::ir::Graph;
 #ifdef PADDLE_WITH_MKLDNN
 using VarQuantScale =
     std::unordered_map<std::string, std::pair<bool, phi::DenseTensor>>;
@@ -148,7 +146,7 @@ struct Argument {
   DECL_ARGUMENT_FIELD(model_params_path, ModelParamsPath, std::string);
   DECL_ARGUMENT_FIELD(model_from_memory, ModelFromMemory, bool);
   DECL_ARGUMENT_FIELD(optim_cache_dir, OptimCacheDir, std::string);
-  DECL_ARGUMENT_FIELD(enable_analysis_optim, EnableAnalysisOptim, bool);
+  DECL_ARGUMENT_FIELD(enable_ir_optim, EnableIrOptim, bool);
   // For JITLayer
   DECL_ARGUMENT_FIELD(skip_load_params, SkipLoadParams, bool);
......
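
For readers unfamiliar with argument.h: DECL_ARGUMENT_FIELD generates the accessor pair visible at the call sites above (argument->enable_ir_optim() in the analyzer, argument.SetEnableIrOptim(false) in the tests), so renaming the field renames its whole API surface in one place. A minimal sketch of the pattern, under the assumption of a simplified field layout (the real macro also tracks whether a field has been set):

```cpp
// Hedged sketch of the DECL_ARGUMENT_FIELD pattern; not the actual macro
// body from argument.h.
#define DECL_ARGUMENT_FIELD(field__, Field__, type__)       \
 public:                                                    \
  type__ &field__() { return field__##_; }                  \
  void Set##Field__(const type__ &v) { field__##_ = v; }    \
                                                            \
 private:                                                   \
  type__ field__##_;

struct ArgumentSketch {
  DECL_ARGUMENT_FIELD(enable_ir_optim, EnableIrOptim, bool);
};

// Usage: ArgumentSketch a; a.SetEnableIrOptim(false);
// bool on = a.enable_ir_optim();
```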
@@ -153,25 +153,6 @@ T &GetFromScope(const framework::Scope &scope, const std::string &name) {
   return *var->GetMutable<T>();
 }
-static framework::proto::ProgramDesc LoadProgramDesc(
-    const std::string &model_path) {
-  std::ifstream fin(model_path, std::ios::in | std::ios::binary);
-  PADDLE_ENFORCE_EQ(
-      fin.is_open(),
-      true,
-      platform::errors::NotFound(
-          "Cannot open file %s, please confirm whether the file exists",
-          model_path));
-  fin.seekg(0, std::ios::end);
-  std::string buffer(fin.tellg(), ' ');
-  fin.seekg(0, std::ios::beg);
-  fin.read(&buffer[0], buffer.size());
-  fin.close();
-  framework::proto::ProgramDesc program_desc;
-  program_desc.ParseFromString(buffer);
-  return program_desc;
-}
 static bool FileExists(const std::string &filepath) {
   std::ifstream file(filepath);
   bool exists = file.is_open();
......
@@ -36,15 +36,6 @@ using string::PrettyLogEndl;
 using string::Style;
 IRPassManager::IRPassManager(Argument *argument) {
-  ARGUMENT_CHECK_FIELD(argument, main_program);
-  graph_ = std::unique_ptr<Graph>(new Graph(argument->main_program()));
-  if (argument->Has("scope")) {
-    auto *scope_ptr = argument->scope_ptr();
-    PADDLE_ENFORCE_NOT_NULL(scope_ptr,
-                            platform::errors::PreconditionNotMet(
-                                "The scope ptr should not be nullptr."));
-    graph_->SetNotOwned(framework::ir::kParamScopeAttr, scope_ptr);
-  }
   disable_logs_ = argument->disable_logs();
   ARGUMENT_CHECK_FIELD(argument, ir_analysis_passes);
......
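
The constructor block removed here duplicated work that ir_graph_build_pass already performs: as the ir_graph_build_pass hunk further below shows, that pass constructs the main graph from argument->main_program() and attaches the parameter scope, so IRPassManager no longer needs to build its own Graph or set kParamScopeAttr itself.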
@@ -30,17 +30,6 @@ cc_library(
   inference_op_replace_pass
   SRCS inference_op_replace_pass.cc
   DEPS analysis_pass graph_to_program_pass)
-if(WITH_TESTING)
-  cc_library(
-    ir_graph_clean_pass
-    SRCS ir_graph_clean_pass.cc
-    DEPS analysis_pass gtest)
-else()
-  cc_library(
-    ir_graph_clean_pass
-    SRCS ir_graph_clean_pass.cc
-    DEPS analysis_pass)
-endif()
 cc_library(
   analysis_passes
@@ -52,8 +41,7 @@ cc_library(
   memory_optim_pass
   convert_to_mixed_precision
   inference_op_replace_pass
-  ir_graph_to_program_pass
-  ir_graph_clean_pass)
+  ir_graph_to_program_pass)
 set(analysis_deps
     ${analysis_deps} analysis_passes subgraph_detector
......
@@ -32,8 +32,6 @@
 #include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/var_desc.h"
-#include "paddle/fluid/inference/analysis/argument.h"
-#include "paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h"
 #include "paddle/fluid/inference/io.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/common/data_type.h"
@@ -358,12 +356,6 @@ void ConvertToMixedPrecisionPass::LoadAndPrepare() {
     }
   }
-  // Remove all control var
-  IrInferCleanGraphPass pass;
-  Argument arg;
-  arg.SetMainGraphNotOwned(main_graph_.get());
-  pass.Run(&arg);
   ProcessCircleCases();
 }
......
@@ -40,7 +40,7 @@ void InferenceOpReplacePass::RunImpl(Argument* argument) {
 }
 std::string InferenceOpReplacePass::repr() const {
-  return "inference-op-replace-pass";
+  return "inference_op_replace_pass";
 }
 }  // namespace analysis
......
@@ -58,7 +58,7 @@ void IrAnalysisPass::CollectFusionStatis(Argument* argument) {
                               framework::ir::kFuseStatisAttr));
 }
-std::string IrAnalysisPass::repr() const { return "ir-analysis-pass"; }
+std::string IrAnalysisPass::repr() const { return "ir_analysis_pass"; }
 }  // namespace analysis
 }  // namespace inference
......
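
The dash-to-underscore repr() renames in this and the following hunks make each pass report the same snake_case name it is registered under in PassRegistry (for example "ir_analysis_pass", the key tested in the analyzer loop above). A hypothetical consistency check under that assumption, not present in the codebase:

```cpp
#include <cassert>

// Hypothetical: after the renames, a pass's repr() matches the snake_case
// registry key it was retrieved by.
void CheckReprMatchesRegistryKey() {
  auto *pass = PassRegistry::Global().Retreive("ir_analysis_pass");
  assert(pass && pass->repr() == "ir_analysis_pass");
}
```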
@@ -64,7 +64,8 @@ void IrGraphBuildPass::RunImpl(Argument *argument) {
                           "set."));
   }
-  auto graph = std::unique_ptr<Graph>(new Graph(argument->main_program()));
+  auto graph = std::unique_ptr<framework::ir::Graph>(
+      new framework::ir::Graph(argument->main_program()));
   argument->SetMainGraph(graph.release());
   auto *scope_ptr = argument->scope_ptr();
   PADDLE_ENFORCE_NOT_NULL(scope_ptr,
@@ -128,7 +129,7 @@ std::unique_ptr<framework::ProgramDesc> IrGraphBuildPass::LoadModel(
   }
 }
-std::string IrGraphBuildPass::repr() const { return "ir-graph-build-pass"; }
+std::string IrGraphBuildPass::repr() const { return "ir_graph_build_pass"; }
 }  // namespace analysis
 }  // namespace inference
......
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h"
-#include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
-#include "paddle/fluid/framework/ir/node.h"
-
-namespace paddle {
-namespace inference {
-namespace analysis {
-
-void IrInferCleanGraphPass::RunImpl(Argument* argument) {
-  auto& graph = argument->main_graph();
-  auto is_valid_node = [](framework::ir::Node* x) {
-    return x && IsControlDepVar(*x) && x->IsVar() && !x->Var();
-  };
-  std::unordered_set<const framework::ir::Node*> invalid_nodes;
-  int valid_op = 0;
-  for (auto* node : graph.Nodes()) {
-    PADDLE_ENFORCE_NOT_NULL(node,
-                            platform::errors::PreconditionNotMet(
-                                "The node should not be nullptr."));
-    if (is_valid_node(node)) {
-      invalid_nodes.insert(node);
-    } else if (node->IsOp()) {
-      ++valid_op;
-    }
-  }
-  GraphSafeRemoveNodes(&graph, invalid_nodes);
-}
-
-}  // namespace analysis
-}  // namespace inference
-}  // namespace paddle
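
A note on the deleted pass body: despite its name, is_valid_node actually selected the nodes to remove, namely control-dependency var nodes carrying no VarDesc, and the valid_op counter was computed but never consumed, consistent with the pass being dead code. The same predicate restated with a truthful name and comments:

```cpp
// Equivalent to the deleted is_valid_node lambda; behavior unchanged.
auto should_remove = [](framework::ir::Node *x) {
  return x != nullptr &&         // node pointer is non-null
         IsControlDepVar(*x) &&  // node is a control-dependency variable
         x->IsVar() &&           // node is a Var node, not an Op node
         x->Var() == nullptr;    // node has no VarDesc attached
};
```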
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include <string>
-#include <unordered_set>
-
-#include "paddle/fluid/inference/analysis/analysis_pass.h"
-
-namespace paddle {
-namespace inference {
-namespace analysis {
-
-struct Argument;
-
-class IrInferCleanGraphPass : public AnalysisPass {
- public:
-  void RunImpl(Argument *argument) override;
-  std::string repr() const override { return "ir_graph_clean_pass"; }
-};
-
-}  // namespace analysis
-}  // namespace inference
-}  // namespace paddle
@@ -31,7 +31,7 @@ void IrGraphToProgramPass::RunImpl(Argument *argument) {
         new int(argument->memory_optim_sort_kind()));
   }
-  std::unique_ptr<Graph> graph(argument->main_graph_ptr());
+  std::unique_ptr<framework::ir::Graph> graph(argument->main_graph_ptr());
   // Direct using ProgramDesc desc(argument->main_program()) may cause
   // incomplete copies of information.
......
@@ -28,7 +28,7 @@ class IrGraphToProgramPass : public AnalysisPass {
  public:
   void RunImpl(Argument *argument) override;
-  std::string repr() const override { return "ir-graph-to-param-pass"; }
+  std::string repr() const override { return "ir_graph_to_param_pass"; }
 };
 }  // namespace analysis
......
@@ -167,7 +167,7 @@ void IrParamsSyncAmongDevicesPass::RunImpl(Argument *argument) {
 }
 std::string IrParamsSyncAmongDevicesPass::repr() const {
-  return "ir-params-sync-among-devices-pass";
+  return "ir_params_sync_among_devices_pass";
 }
 }  // namespace analysis
......
@@ -295,7 +295,7 @@ void UpdateOpDescsByReuse(
   }
 }
-std::string MemoryOptimizePass::repr() const { return "memory optimize pass"; }
+std::string MemoryOptimizePass::repr() const { return "memory_optimize_pass"; }
 void MemoryOptimizePass::RunImpl(Argument* argument) {
   // Memory optimization.
......
@@ -18,7 +18,6 @@
 #include "paddle/fluid/inference/analysis/passes/inference_op_replace_pass.h"
 #include "paddle/fluid/inference/analysis/passes/ir_analysis_pass.h"
 #include "paddle/fluid/inference/analysis/passes/ir_graph_build_pass.h"
-#include "paddle/fluid/inference/analysis/passes/ir_graph_clean_pass.h"
 #include "paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h"
 #include "paddle/fluid/inference/analysis/passes/ir_params_sync_among_devices_pass.h"
 #include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
@@ -34,8 +33,6 @@ PassRegistry::PassRegistry() {
                   std::unique_ptr<AnalysisPass>(new IrAnalysisPass));
   passes_.emplace("ir_graph_build_pass",
                   std::unique_ptr<AnalysisPass>(new IrGraphBuildPass));
-  passes_.emplace("ir_graph_clean_pass",
-                  std::unique_ptr<AnalysisPass>(new IrInferCleanGraphPass));
   passes_.emplace("memory_optimize_pass",
                   std::unique_ptr<AnalysisPass>(new MemoryOptimizePass));
   passes_.emplace(
......
@@ -770,13 +770,7 @@ void AnalysisConfig::Update() {
       ((use_custom_device() ^ pass_builder_->use_custom_device()))) {
     if (use_gpu()) {
       pass_builder_.reset(new GpuPassStrategy);
-      if (use_tensorrt_) {
-        // Append after the Affine_channel_conv_fuse pass.
-        pass_builder()->InsertPass(3, "tensorrt_subgraph_pass");
-      }
     } else if (use_ipu()) {
-      VLOG(1) << "IpuPassStrategy has been used for new.";
       pass_builder_.reset(new IpuPassStrategy);
     } else if (use_xpu()) {
       PADDLE_ENFORCE_EQ(
@@ -982,9 +976,6 @@ void AnalysisConfig::Update() {
                 "but did not have the option -DWITH_CUSTOM_DEVICE compiled."));
 #endif
   }
-  if (ir_debug_) {
-    pass_builder()->TurnOnDebug();
-  }
 }
 std::string AnalysisConfig::SerializeInfoCache() {
......
@@ -1074,7 +1074,7 @@ void AnalysisPredictor::PrepareArgument() {
   argument_.SetUseGPU(config_.use_gpu());
   argument_.SetUseFcPadding(config_.use_fc_padding());
   argument_.SetGPUDeviceId(config_.gpu_device_id());
-  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
+  argument_.SetEnableIrOptim(config_.enable_ir_optim_);
   argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
   argument_.SetModelFromMemory(config_.model_from_memory_);
   // Analyze inference_program
@@ -1223,48 +1223,35 @@ void AnalysisPredictor::PrepareArgument() {
   }
 #endif
-  auto passes = config_.pass_builder()->AllPasses();
+  auto *pass_builder = config_.pass_builder();
   if (model_precision_ != phi::DataType::FLOAT32) {
     LOG(INFO) << "Model is mixed precision type with " << model_precision_
               << ", we will use a new PassStrategy. Note that only the GPU "
                  "backend is supported for now.";
-    passes.clear();
+    pass_builder->ClearPasses();
+    const auto &deleted_passes = pass_builder->GetAllDeletedPasses();
     if (config_.tensorrt_engine_enabled()) {
       for (const auto &pass : kTrtLowerPrecisionPasses) {
-        passes.push_back(pass);
+        if (deleted_passes.count(pass)) continue;
+        pass_builder->AppendPass(pass);
       }
     } else if (config_.use_gpu()) {
       for (const auto &pass : kGpuLowerPrecisionPasses) {
-        passes.push_back(pass);
+        if (deleted_passes.count(pass)) continue;
+        pass_builder->AppendPass(pass);
       }
     }
-    const auto &deleted_passes = config_.pass_builder()->GetAllDeletedPasses();
-    for (const auto &it : deleted_passes) {
-      auto iterator = std::find(passes.begin(), passes.end(), it);
-      if (iterator != passes.end()) {
-        passes.erase(iterator);
-      }
-    }
-    if (config_.ir_debug_) {
-      auto it = std::begin(passes);
-      while (it != std::end(passes)) {
-        if (*it != "graph_viz_pass") {
-          it = passes.insert(it + 1, "graph_viz_pass");
-        } else {
-          ++it;
-        }
-      }
-    }
   }
+  if (config_.ir_debug_) {
+    pass_builder->TurnOnDebug();
+  }
   if (!config_.ir_optim()) {
-    passes.clear();
+    argument_.SetEnableIrOptim(false);
     LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
   }
   argument_.SetDisableLogs(config_.glog_info_disabled());
-  argument_.SetIrAnalysisPasses(passes);
-  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
+  argument_.SetIrAnalysisPasses(pass_builder->AllPasses());
+  argument_.SetAnalysisPasses(pass_builder->AnalysisPasses());
   argument_.SetScopeNotOwned(scope_.get());
   // mixed precison.
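
The net effect of this hunk: instead of copying pass names into a local vector and pruning it afterwards, PrepareArgument now mutates the pass builder directly and filters user-deleted passes up front; the ir_debug_ handling that was deleted from AnalysisConfig::Update() moves here as a plain TurnOnDebug() call, and turning ir_optim off flows through SetEnableIrOptim(false) rather than clearing the pass list. A condensed sketch of the new selection loop, with names taken from the hunk and the PaddlePassBuilder parameter type assumed from paddle_pass_builder.h:

```cpp
// Condensed restatement of the new lower-precision pass selection;
// `candidates` stands in for kTrtLowerPrecisionPasses or
// kGpuLowerPrecisionPasses.
void AppendLowerPrecisionPasses(PaddlePassBuilder *pass_builder,
                                const std::vector<std::string> &candidates) {
  pass_builder->ClearPasses();
  const auto &deleted = pass_builder->GetAllDeletedPasses();
  for (const auto &pass : candidates) {
    if (deleted.count(pass)) continue;  // honor user-deleted passes
    pass_builder->AppendPass(pass);     // keep the rest, in order
  }
}
```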
@@ -2138,7 +2125,9 @@ std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone(void *stream) {
   }
   x->predictor_stream_ = stream;
   x->Init(scope_, inference_program_);
+#ifdef PADDLE_WITH_TENSORRT
   x->executor_->ResetTrtOps(++AnalysisPredictor::clone_num_);
+#endif
   return std::unique_ptr<PaddlePredictor>(x);
 }
......
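
Guarding the ResetTrtOps call with PADDLE_WITH_TENSORRT appears to match the conditional compilation visible around NaiveExecutor::ResetTrtOps in the first hunk, so builds without TensorRT no longer reference a TensorRT-specific code path from Clone().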
@@ -606,10 +606,8 @@ void AnalysisPredictor::MkldnnQuantizer::PrepareArgument() const {
   if (predictor_.config_.ir_debug_) builder->TurnOnDebug();
   auto passes = builder->AllPasses();
   predictor_.argument_.SetIrAnalysisPasses(passes);
-  predictor_.argument_.SetAnalysisPasses({"ir_graph_clean_pass",
-                                          "ir_analysis_pass",
-                                          "memory_optimize_pass",
-                                          "ir_graph_to_program_pass"});
+  predictor_.argument_.SetAnalysisPasses(
+      {"ir_analysis_pass", "memory_optimize_pass", "ir_graph_to_program_pass"});
   predictor_.argument_.SetQuantVarScales(scales_);
 }
......
@@ -115,7 +115,6 @@ class PD_INFER_DECL PaddlePassBuilder {
   /// \cond Protected
   std::vector<std::string> analysis_passes_{
       {"ir_graph_build_pass",
-       "ir_graph_clean_pass",
        "ir_analysis_pass",
        "ir_params_sync_among_devices_pass",
        "adjust_cudnn_workspace_size_pass",
......