Unverified commit 7f0c1f0d authored by WangZhen, committed by GitHub

Remove redundant code in pe engine (#46110) (#46145)

Parent 1c7e95cc
@@ -85,7 +85,6 @@ void PEEngine::CreateGraphAndPE() {
   graph_ = std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
   inner_pe_ = std::make_shared<ParallelExecutor>(
       place_, &scope_, execution_strategy, build_strategy, graph_.get());
-  inner_pe_->PrepareVariables(&scope_);
   inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
 }
@@ -97,14 +96,8 @@ std::vector<Tensor> PEEngine::operator()(const std::vector<Tensor> &inputs) {
 std::vector<DenseTensor> PEEngine::operator()(
     const std::vector<DenseTensor> &inputs) {
   utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
-  // update op_handle scope_map in pe->executor_->Graph
-  std::unordered_map<framework::Scope *, framework::Scope *> scope_map = {
-      {inner_pe_->GetLocalScopes().front(), &scope_}};
-  inner_pe_->ResetOpHandleScopeMapOfGraphs(scope_map);
-  // need to recreate tmp variables in new scope
-  inner_pe_->PrepareVariables(&scope_);
   inner_pe_->RunWithoutFetch(info_->OutputArgNames());
   std::vector<DenseTensor> outputs;
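For context, a minimal sketch of what PEEngine::operator() looks like after this removal, reconstructed only from the context lines visible in the diff above; the output-gathering tail after std::vector<DenseTensor> outputs; falls outside the visible hunk and is an assumption left as a placeholder comment.

// Sketch of the simplified PEEngine::operator() after this commit, based on
// the context lines visible above; not a verbatim copy of the file.
std::vector<DenseTensor> PEEngine::operator()(
    const std::vector<DenseTensor> &inputs) {
  // Share the input tensors into the engine's private scope.
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
  // The per-call scope-map reset and PrepareVariables() calls were removed
  // as redundant, so the run is issued directly.
  inner_pe_->RunWithoutFetch(info_->OutputArgNames());
  std::vector<DenseTensor> outputs;
  // ... gather the variables named by info_->OutputArgNames() from scope_
  // into `outputs` (this part is not shown in the diff).
  return outputs;
}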