Unverified commit 67c700b4 authored by Aurelius84, committed by GitHub

[Dy2Stat] Add cache for Executor and Context in run_program_op (#28421)

Parent d6753e1e
...@@ -268,6 +268,7 @@ cc_library(parallel_executor SRCS parallel_executor.cc DEPS
graph build_strategy collective_helper
fast_threaded_ssa_graph_executor variable_helper)
+cc_library(executor_cache SRCS executor_cache.cc DEPS executor)
cc_test(dist_multi_trainer_test SRCS dist_multi_trainer_test.cc DEPS
conditional_block_op executor)
cc_library(prune SRCS prune.cc DEPS framework_proto boost)
......
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/executor_cache.h"
#include <string>
#include <unordered_set>
#include <vector>
namespace paddle {
namespace framework {
namespace details {
static void AppendSkipDeletionVars(const std::vector<std::string> &append_vars,
                                   std::vector<std::string> *all_vars) {
  for (auto &var : append_vars) {
    all_vars->emplace_back(var);
  }
}

static void AppendSafeEagerDeletionSkipVars(
    const framework::ProgramDesc &program,
    std::vector<std::string> *skip_vars) {
  const framework::BlockDesc &block = program.Block(0);
  const std::vector<framework::OpDesc *> &all_ops = block.AllOps();

  std::unordered_set<std::string> grad_op_output;
  std::unordered_set<std::string> grad_op_input;
  for (const framework::OpDesc *op : all_ops) {
    int op_role = BOOST_GET_CONST(
        int, op->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
    if ((op_role & static_cast<int>(framework::OpRole::kBackward)) == 0) {
      continue;
    }

    for (const std::string &in_arg_name : op->InputArgumentNames()) {
      grad_op_input.emplace(in_arg_name);
    }
    for (const std::string &out_arg_name : op->OutputArgumentNames()) {
      grad_op_output.emplace(out_arg_name);
    }
  }
  // For each grad op input variable: if it is not an output of any grad op, it
  // may be an output of a forward op, so mark it as a skip var to prevent it
  // from being deleted when the grad op is called multiple times.
  for (const std::string &var_name : grad_op_input) {
    if (grad_op_output.find(var_name) == grad_op_output.end()) {
      skip_vars->emplace_back(var_name);
    }
  }
}
} // namespace details
// C++11 removes the need for manual locking. Concurrent execution shall wait if
// a static local variable is already being initialized.
// https://stackoverflow.com/questions/11711920/how-to-implement-multithread-safe-singleton-in-c11-without-using-mutex
ExecutorInfoCache &ExecutorInfoCache::Instance() {
  static ExecutorInfoCache g_exe_cache_info_map;
  return g_exe_cache_info_map;
}
std::shared_ptr<framework::ExecutorPrepareContext> GetExecutorInfoFromCache(
    const framework::Executor &exe, const framework::ExecutionContext &ctx,
    const std::vector<std::vector<std::string>> &ctx_output_names,
    bool is_grad) {
  auto *program = ctx.Attr<BlockDesc *>("global_block")->Program();

  auto &cached_exe_info = framework::ExecutorInfoCache::Instance();
  auto cache_key = framework::ExecutorInfoCache::KeyType(program, is_grad);

  if (!cached_exe_info.Has(cache_key)) {
    VLOG(1) << "create exe_info for program: " << program
            << " is_grad: " << is_grad;

    // skip delete vars
    std::vector<std::string> skip_vars;
    for (auto &output_names : ctx_output_names) {
      details::AppendSkipDeletionVars(output_names, &skip_vars);
    }
    if (is_grad) {
      details::AppendSafeEagerDeletionSkipVars(*program, &skip_vars);
    }
    VLOG(2) << "Prepare to skip " << skip_vars.size()
            << " var(s): " << string::join_strings(skip_vars, ' ');

    std::shared_ptr<framework::ExecutorPrepareContext> exe_ctx =
        std::move(exe.Prepare(*program, /*block_id=*/0, skip_vars));

    cached_exe_info.Insert(cache_key, exe_ctx);
    return exe_ctx;
  } else {
    VLOG(1) << "get exe_info from cache by program: " << program
            << " is_grad: " << is_grad;
    return cached_exe_info.Get(cache_key);
  }
}
} // namespace framework
} // namespace paddle
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/platform/macros.h"
namespace paddle {
namespace framework {
class ExecutorInfoCache {
 public:
  /*
   * The ExecutorPrepareContext differs between running the forward program and
   * the backward program, so a bool is added to the cache key to distinguish
   * the two.
   */
  using KeyType = std::pair<const framework::ProgramDesc*, /*is_grad*/ bool>;

  struct HashPair {
    template <class T1, class T2>
    size_t operator()(const std::pair<T1, T2>& p) const noexcept {
      size_t seed = 10;
      hash_combine(&seed, p.first);
      hash_combine(&seed, p.second);
      return seed;
    }

    template <typename T>
    void hash_combine(size_t* seed, const T& val) const {
      std::hash<T> hasher;
      (*seed) ^= hasher(val) + 0x9e3779b9 + ((*seed) << 6) + ((*seed >> 2));
    }
  };

  static ExecutorInfoCache& Instance();

  std::shared_ptr<framework::ExecutorPrepareContext> Get(
      const KeyType& key) const {
    PADDLE_ENFORCE_EQ(
        Has(key), true,
        platform::errors::NotFound(
            "(programDesc: %s, is_grad: %s) doesn't exist in ExecutorInfoCache",
            key.first, key.second));
    return info_map_.at(key);
  }

  bool Has(const KeyType& key) const {
    return info_map_.find(key) != info_map_.end();
  }

  void Insert(const KeyType& key,
              std::shared_ptr<framework::ExecutorPrepareContext> exe_ctx) {
    PADDLE_ENFORCE_NE(
        Has(key), true,
        platform::errors::NotFound(
            "(programDesc: %s, is_grad: %s) has existed in ExecutorInfoCache",
            key.first, key.second));
    info_map_.insert(std::make_pair(key, exe_ctx));
  }

 private:
  ExecutorInfoCache() = default;

  std::unordered_map<
      KeyType, std::shared_ptr<framework::ExecutorPrepareContext>, HashPair>
      info_map_;

  DISABLE_COPY_AND_ASSIGN(ExecutorInfoCache);
};

std::shared_ptr<framework::ExecutorPrepareContext> GetExecutorInfoFromCache(
    const framework::Executor& exe, const framework::ExecutionContext& ctx,
    const std::vector<std::vector<std::string>>& ctx_output_names,
    bool is_grad);
} // namespace framework
} // namespace paddle
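For illustration only (this block is not part of the commit): a self-contained sketch of the caching pattern that ExecutorInfoCache implements above — a Meyers-singleton map keyed by (program pointer, is_grad) with a combined pair hash, so preparation happens once per key. All names below (PreparedContext, ContextCache, GetOrCreate) are hypothetical stand-ins, not Paddle APIs.

#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>
#include <unordered_map>
#include <utility>

// Hypothetical stand-in for the cached object (ExecutorPrepareContext above).
struct PreparedContext {
  bool is_grad = false;
};

// Same hash-combine idea as ExecutorInfoCache::HashPair above.
struct PairHash {
  template <class T1, class T2>
  std::size_t operator()(const std::pair<T1, T2>& p) const {
    std::size_t seed = 10;
    Combine(&seed, p.first);
    Combine(&seed, p.second);
    return seed;
  }
  template <typename T>
  static void Combine(std::size_t* seed, const T& val) {
    std::hash<T> hasher;
    (*seed) ^= hasher(val) + 0x9e3779b9 + ((*seed) << 6) + ((*seed) >> 2);
  }
};

class ContextCache {
 public:
  // (program pointer, is_grad) key, mirroring ExecutorInfoCache::KeyType.
  using KeyType = std::pair<const void*, bool>;

  // Meyers singleton: C++11 guarantees the static local is initialized exactly
  // once even under concurrent calls, so no explicit locking is needed.
  static ContextCache& Instance() {
    static ContextCache cache;
    return cache;
  }

  // Return the cached context for (program, is_grad); "prepare" it on a miss.
  std::shared_ptr<PreparedContext> GetOrCreate(const void* program,
                                               bool is_grad) {
    KeyType key(program, is_grad);
    auto it = map_.find(key);
    if (it != map_.end()) {
      return it->second;  // cache hit: reuse the previously prepared context
    }
    auto ctx = std::make_shared<PreparedContext>();  // expensive step, done once
    ctx->is_grad = is_grad;
    map_.emplace(key, ctx);
    return ctx;
  }

 private:
  ContextCache() = default;
  std::unordered_map<KeyType, std::shared_ptr<PreparedContext>, PairHash> map_;
};

int main() {
  int program = 0;  // stands in for a ProgramDesc
  auto fwd1 = ContextCache::Instance().GetOrCreate(&program, /*is_grad=*/false);
  auto fwd2 = ContextCache::Instance().GetOrCreate(&program, /*is_grad=*/false);
  auto bwd = ContextCache::Instance().GetOrCreate(&program, /*is_grad=*/true);
  // Same program and same is_grad hit the cache; the grad pass gets its own entry.
  std::cout << (fwd1 == fwd2) << " " << (fwd1 == bwd) << std::endl;  // prints: 1 0
  return 0;
}

The run_program_op kernels in the hunks below use the real GetExecutorInfoFromCache helper instead of this sketch.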
...@@ -64,9 +64,11 @@ if(WITH_COVERAGE OR WIN32 OR WITH_NV_JETSON)
SET(OP_MKL_DEPS ${OP_MKL_DEPS} pyramid_hash_op)
endif()
-register_operators(EXCLUDES py_func_op warpctc_op dgc_op lstm_op
+register_operators(EXCLUDES py_func_op warpctc_op dgc_op lstm_op run_program_op
sync_batch_norm_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS} ${OP_PREFETCH_DEPS})
+op_library(run_program_op SRCS run_program_op.cc run_program_op.cu.cc DEPS executor_cache ${OP_HEADER_DEPS} ${OP_PREFETCH_DEPS})
if (WITH_GPU)
# warpctc_op needs cudnn 7 above
if (${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
......
...@@ -16,12 +16,15 @@ limitations under the License. */
#include <algorithm>
#include <iterator>
+#include <memory>
#include <string>
+#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/executor.h"
+#include "paddle/fluid/framework/executor_cache.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
...@@ -156,46 +159,6 @@ static void ShareVarsFromScope(const std::vector<Variable *> &vars,
}
}
-static void AppendSkipDeletionVars(const std::vector<std::string> &append_vars,
-std::vector<std::string> *all_vars) {
-for (auto &var : append_vars) {
-all_vars->emplace_back(var);
-}
-}
-static void AppendSafeEagerDeletionSkipVars(
-const framework::ProgramDesc &program,
-std::vector<std::string> *skip_vars) {
-const framework::BlockDesc &block = program.Block(0);
-const std::vector<framework::OpDesc *> &all_ops = block.AllOps();
-std::unordered_set<std::string> grad_op_output;
-std::unordered_set<std::string> grad_op_input;
-for (const framework::OpDesc *op : all_ops) {
-int op_role = BOOST_GET_CONST(
-int, op->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
-if ((op_role & static_cast<int>(framework::OpRole::kBackward)) == 0) {
-continue;
-}
-for (const std::string &in_arg_name : op->InputArgumentNames()) {
-grad_op_input.emplace(in_arg_name);
-}
-for (const std::string &out_arg_name : op->OutputArgumentNames()) {
-grad_op_output.emplace(out_arg_name);
-}
-}
-// For the grad op input variables, if it is not output of grad_op, it may
-// be output of forward op and we should set the variables as skip_var to
-// prevent it being deleted when grad op is called multiple times.
-for (const std::string &var_name : grad_op_input) {
-if (grad_op_output.find(var_name) == grad_op_output.end()) {
-skip_vars->emplace_back(var_name);
-}
-}
-}
} // namespace details

template <typename DeviceContext, typename T>
...@@ -217,8 +180,6 @@ class RunProgramOpKernel : public framework::OpKernel<T> {
param_names = ctx.InputNames("Params");
}
-auto *block = ctx.Attr<BlockDesc *>("global_block");
-auto *program = block->Program();
auto start_op_index = ctx.Attr<int64_t>("start_op_index");
auto end_op_index = ctx.Attr<int64_t>("end_op_index");
auto is_test = ctx.Attr<bool>("is_test");
...@@ -233,14 +194,8 @@ class RunProgramOpKernel : public framework::OpKernel<T> {
// Step 2. prepare executor and init persistable variables
framework::Executor exe(ctx.GetPlace());
-// skip delete vars
-std::vector<std::string> skip_vars;
-details::AppendSkipDeletionVars(output_var_names, &skip_vars);
-VLOG(2) << "Prepare to skip " << skip_vars.size()
-<< " var(s): " << string::join_strings(skip_vars, ' ');
-auto exe_ctx = exe.Prepare(*program, 0, skip_vars);
+auto exe_ctx = framework::GetExecutorInfoFromCache(
+    exe, ctx, {output_var_names}, /*is_grad=*/false);
// NOTE(Aurelius84): While training some models, forward can be called many
// times and then apply backpropagation all at once, such as Reinforcement
...@@ -259,7 +214,8 @@ class RunProgramOpKernel : public framework::OpKernel<T> {
// Step 3. run ops
exe.RunPartialPreparedContext(exe_ctx.get(), &scope, start_op_index,
end_op_index, /*create_local_scope=*/false,
-/*create_vars=*/true, /*keep_kids=*/!is_test);
+/*create_vars=*/true,
+/*keep_kids=*/!is_test);
// Step 4. Get Output
details::ShareVarsFromScope(output_vars, output_var_names, &scope);
...@@ -305,8 +261,6 @@ class RunProgramGradOpKernel : public framework::OpKernel<T> {
}
auto *block = ctx.Attr<BlockDesc *>("global_block");
-auto *program = block->Program();
auto orig_end_op_index = ctx.Attr<int64_t>("end_op_index");
// NOTE: skip `shape` and `fill_constant` op created by
// fluid.backward.gradients, one forward output will generate one `shape`
...@@ -332,20 +286,12 @@ class RunProgramGradOpKernel : public framework::OpKernel<T> {
// Step 2. prepare executor and scope
framework::Executor exe(ctx.GetPlace());
-// skip delete vars
-std::vector<std::string> skip_vars;
-details::AppendSkipDeletionVars(input_grad_var_names, &skip_vars);
-details::AppendSkipDeletionVars(param_grad_names, &skip_vars);
-details::AppendSafeEagerDeletionSkipVars(*program, &skip_vars);
-VLOG(2) << "Prepare to skip " << skip_vars.size()
-<< " var(s): " << string::join_strings(skip_vars, ' ');
-auto exe_ctx = exe.Prepare(*program, 0, skip_vars);
+auto exe_ctx = framework::GetExecutorInfoFromCache(
+    exe, ctx, {input_grad_var_names, param_grad_names},
+    /*is_grad=*/true);
details::ShareVarsIntoScope(output_grad_vars, output_grad_var_names,
&scope);
// Debug info: scope info when run end
VLOG(3) << framework::GenScopeTreeDebugInfo(out_scope_vec->front());
......
...@@ -167,6 +167,9 @@ class RunProgramOpTest(unittest.TestCase):
return outputs
def calc_dygraph_output(self, place):
+self.program_desc, self.fwd_op_num = self.get_program_desc()
+self.attrs = self.prepare_attrs()
with fluid.dygraph.guard(place):
inputs = self.prepare_dygraph_input(place)
outputs = self.prepare_dygraph_output()
...@@ -179,6 +182,9 @@ class RunProgramOpTest(unittest.TestCase):
return outputs['Out']
def calc_dygraph_grad(self, place):
+self.program_desc, self.fwd_op_num = self.get_program_desc()
+self.attrs = self.prepare_attrs()
with fluid.dygraph.guard(place):
# Step 1. run forward
inputs, input_param_list = self.prepare_dygraph_input(place, True)
...@@ -241,10 +247,6 @@ class TestRunProgramOpWithFC(RunProgramOpTest):
}
}
-self.program_desc, self.fwd_op_num = self.get_program_desc()
-self.attrs = self.prepare_attrs()
def test_check_output(self):
self.check_output()
...@@ -298,10 +300,6 @@ class TestRunProgramOpWithEmbedding(RunProgramOpTest):
}
}
-self.program_desc, self.fwd_op_num = self.get_program_desc()
-self.attrs = self.prepare_attrs()
def test_check_output(self):
self.check_output()
......