Unverified commit 0e3a1d35, authored by Chen Weihang, committed by GitHub

[Cherry-pick] [Complex] Simplify prepared op impl to improve performance (#30153) (#30215)

* simplify prepared op impl to improve performance

* fix kunlun compile error

* continue fixing kunlun compile error

* only transform diff place when dtype diff

* fix failed unittests

* remove useless file

* polish impl per review comments
Parent 3ce4d34d
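The heart of the change runs through all of the hunks below: PrepareData now returns a std::shared_ptr<NameVarMap<VarType>> that stays nullptr as long as every input can be transformed in place, so the common case no longer pays for copying the whole NameVarMap on every op. A minimal standalone sketch of that lazy copy-on-write pattern (the Inputs/PrepareInputs names and the negative-value trigger are illustrative stand-ins, not the Paddle API):

#include <map>
#include <memory>
#include <string>
#include <vector>

using Inputs = std::map<std::string, std::vector<int>>;

// Returns nullptr when nothing had to be replaced, so the caller keeps using
// the original map; the map is copied lazily, once, on the first replacement.
std::shared_ptr<Inputs> PrepareInputs(const Inputs& ins) {
  std::shared_ptr<Inputs> tmp = nullptr;
  for (const auto& kv : ins) {
    for (size_t i = 0; i < kv.second.size(); ++i) {
      if (kv.second[i] < 0) {  // stand-in for "dtype differs, need a new var"
        if (tmp == nullptr) {
          tmp = std::make_shared<Inputs>(ins);  // first replacement: copy once
        }
        (*tmp)[kv.first][i] = -kv.second[i];  // stand-in for the converted var
      }
      // else: transformed in place; the original map stays the one to run with
    }
  }
  return tmp;
}

int main() {
  Inputs ins{{"X", {1, -2, 3}}};
  auto tmp = PrepareInputs(ins);
  const Inputs& run_with = (tmp == nullptr) ? ins : *tmp;  // mirrors OpBaseRunImpl below
  return run_with.at("X")[1] == 2 ? 0 : 1;
}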
@@ -101,6 +101,11 @@ inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) {
   return ret;
 }
 
+inline bool NeedTransformDataType(const OpKernelType& l,
+                                  const OpKernelType& r) {
+  return (l.data_type_ != r.data_type_);
+}
+
 inline bool NeedTransform(const OpKernelType& l, const OpKernelType& r) {
   return (!platform::places_are_same_class(l.place_, r.place_)) ||
          (l.data_type_ != r.data_type_) ||
...
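For orientation: NeedTransform above already fires on any mismatch of place class, dtype, or layout; the new NeedTransformDataType isolates just the dtype check, which PrepareData (in a later hunk) uses to decide between transforming in place and copying into a fresh variable. A tiny self-contained illustration of the predicate (a simplified OpKernelType carrying only the fields used here, not the real struct):

#include <cassert>

enum class DataType { kFloat32, kComplex64 };
enum class PlaceClass { kCPU, kGPU };

// Simplified stand-in: the real framework::OpKernelType also carries
// layout and library type, omitted here.
struct OpKernelType {
  DataType data_type_;
  PlaceClass place_;
};

inline bool NeedTransformDataType(const OpKernelType& l, const OpKernelType& r) {
  return l.data_type_ != r.data_type_;
}

int main() {
  OpKernelType cpu_f32{DataType::kFloat32, PlaceClass::kCPU};
  OpKernelType gpu_f32{DataType::kFloat32, PlaceClass::kGPU};
  OpKernelType cpu_c64{DataType::kComplex64, PlaceClass::kCPU};

  // Same dtype, different place: transformable in place, no fresh variable.
  assert(!NeedTransformDataType(cpu_f32, gpu_f32));
  // Different dtype: the original variable must be preserved, so copy out.
  assert(NeedTransformDataType(cpu_f32, cpu_c64));
  return 0;
}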
@@ -376,12 +376,14 @@ static void OpBaseRunImpl(const framework::OperatorBase& op,
    * after the execution of op, but the original input is directly
    * overwritten in the previous dynamic graph implemention.
    */
-  auto expected_kernel_key =
-      GetExpectedKernelKey<VarType>(ins, outs, *op_kernel, place, attrs);
-  auto prepared_op = PreparedOp::Prepare(*op_kernel, expected_kernel_key);
-  auto tmp_ins = PrepareData<VarType>(*op_kernel, ins, expected_kernel_key);
-
-  prepared_op.Run(tmp_ins, outs, attrs);
+  auto prepared_op = PreparedOp::Prepare(ins, outs, *op_kernel, place, attrs);
+  auto tmp_ins_ptr =
+      PrepareData<VarType>(*op_kernel, ins, prepared_op.kernel_type());
+  if (tmp_ins_ptr == nullptr) {
+    prepared_op.Run(ins, outs, attrs);
+  } else {
+    prepared_op.Run(*tmp_ins_ptr, outs, attrs);
+  }
 
   VLOG(4) << LayerDebugString(op.Type(), ins, outs);
 }
...
@@ -76,16 +76,35 @@ PreparedOp::PreparedOp(const framework::OperatorBase& op,
       func_(func),
       dev_ctx_(dev_ctx) {}
 
-PreparedOp PreparedOp::Prepare(
-    const framework::OperatorWithKernel& op,
-    const framework::OpKernelType& expected_kernel_key) {
+template <typename VarType>
+PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
+                       const NameVarMap<VarType>& outs,
+                       const framework::OperatorWithKernel& op,
+                       const platform::Place& place,
+                       const framework::AttributeMap& attrs) {
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-  auto* dev_ctx = pool.Get(expected_kernel_key.place_);
+  auto* dev_ctx = pool.Get(place);
+  framework::RuntimeContext ctx({}, {});
+
+#ifdef PADDLE_WITH_MKLDNN
+  // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and
+  // GetKernelType functions, so we need to copy the attributes there.
+  // Const qualifier of Attrs had to be discarded to overwrite it.
+  if (FLAGS_use_mkldnn) {
+    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
+    mutable_op_attrs = attrs;
+  }
+#endif
 
-  // check if op[type] has kernel registered.
+  // 1. get expected kernel key
+  auto expected_kernel_key =
+      op.GetExpectedKernelType(DygraphExecutionContext<VarType>(
+          op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs));
+  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
+
+  // 2. check if op[type] has kernel registered.
   auto& all_op_kernels = op.AllOpKernels();
   auto kernels_iter = all_op_kernels.find(op.Type());
   PADDLE_ENFORCE_NE(
       kernels_iter, all_op_kernels.end(),
       platform::errors::NotFound(
@@ -93,18 +112,43 @@ PreparedOp PreparedOp::Prepare(
           op.Type()));
 
   auto& kernels = kernels_iter->second;
-  framework::RuntimeContext ctx({}, {});
   auto kernel_iter = kernels.find(expected_kernel_key);
+#ifdef PADDLE_WITH_XPU
+  if (kernel_iter == kernels.end() &&
+      is_xpu_place(expected_kernel_key.place_)) {
+    expected_kernel_key.place_ = platform::CPUPlace();
+    kernel_iter = kernels.find(expected_kernel_key);
+  }
+#endif
   // TODO(jiabin): Add operator.cc's line 1000 part back when we need that case
   PADDLE_ENFORCE_NE(kernel_iter, kernels.end(),
                     platform::errors::NotFound(
                         "Operator %s does not have kernel for %s.", op.Type(),
                         KernelTypeToString(expected_kernel_key)));
 
+  if (!(expected_kernel_key.place_ == place)) {
+    dev_ctx = pool.Get(expected_kernel_key.place_);
+  }
+
   return PreparedOp(op, ctx, expected_kernel_key, kernel_iter->second, dev_ctx);
 }
 
+PreparedOp PreparedOp::Prepare(const NameVarMap<VarBase>& ins,
+                               const NameVarMap<VarBase>& outs,
+                               const framework::OperatorWithKernel& op,
+                               const platform::Place& place,
+                               const framework::AttributeMap& attrs) {
+  return PrepareImpl<VarBase>(ins, outs, op, place, attrs);
+}
+
+PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
+                               const NameVarMap<VariableWrapper>& outs,
+                               const framework::OperatorWithKernel& op,
+                               const platform::Place& place,
+                               const framework::AttributeMap& attrs) {
+  return PrepareImpl<VariableWrapper>(ins, outs, op, place, attrs);
+}
+
 template <typename VarType>
 static void PreparedOpRunImpl(
     const framework::OperatorBase& op, const framework::RuntimeContext& ctx,
...
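Two details of PrepareImpl above are easy to miss: on XPU builds, when no XPU kernel is registered for the op, the expected key's place is rewritten to CPU and the kernel lookup is retried; and when the chosen kernel's place ends up differing from the requested place, the device context is re-fetched so the op runs against the kernel's actual device. A reduced sketch of the fallback lookup, with a plain std::map standing in for the kernel registry (illustrative only, not the Paddle API):

#include <cassert>
#include <map>
#include <string>

// Reduced model of the registry: kernel key -> kernel name. The real code
// keys on framework::OpKernelType and stores kernel functors.
using Registry = std::map<std::string, std::string>;

// Try the preferred key first; if an "xpu" key has no kernel, rewrite the
// key to "cpu" and look again, mirroring the #ifdef PADDLE_WITH_XPU branch.
std::string FindKernel(const Registry& kernels, std::string key) {
  auto it = kernels.find(key);
  if (it == kernels.end() && key == "xpu") {
    key = "cpu";             // fall back to the CPU place
    it = kernels.find(key);  // and retry the lookup with the rewritten key
  }
  assert(it != kernels.end() && "no kernel registered for this op");
  return it->second;
}

int main() {
  Registry kernels{{"cpu", "op_cpu_kernel"}};
  assert(FindKernel(kernels, "xpu") == "op_cpu_kernel");
  return 0;
}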
@@ -64,66 +64,16 @@ void SetForwardDataTypeOfGradVar<VarBase>(const std::shared_ptr<VarBase>& var) {
   }
 }
 
-#ifdef PADDLE_WITH_XPU
-static void ReplaceXPUKernelIfNotExists(
-    const framework::OperatorWithKernel& op,
-    framework::OpKernelType* expected_kernel_key) {
-  auto& all_op_kernels = op.AllOpKernels();
-  auto kernels_iter = all_op_kernels.find(op.Type());
-  PADDLE_ENFORCE_NE(
-      kernels_iter, all_op_kernels.end(),
-      platform::errors::NotFound(
-          "There are no kernels which are registered in the %s operator.",
-          op.Type()));
-
-  auto& kernels = kernels_iter->second;
-  auto kernel_iter = kernels.find(*expected_kernel_key);
-  if (kernel_iter == kernels.end() &&
-      is_xpu_place(expected_kernel_key->place_)) {
-    expected_kernel_key->place_ = platform::CPUPlace();
-  }
-}
-#endif
-
 template <typename VarType>
-framework::OpKernelType GetExpectedKernelKey(
-    const NameVarMap<VarType>& ins, const NameVarMap<VarType>& outs,
-    const framework::OperatorWithKernel& op, const platform::Place& place,
-    const framework::AttributeMap& attrs) {
-  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-  auto* dev_ctx = pool.Get(place);
-  framework::RuntimeContext ctx({}, {});
-
-#ifdef PADDLE_WITH_MKLDNN
-  // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and
-  // GetKernelType functions, so we need to copy the attributes there.
-  // Const qualifier of Attrs had to be discarded to overwrite it.
-  if (FLAGS_use_mkldnn) {
-    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
-    mutable_op_attrs = attrs;
-  }
-#endif
-
-  auto expected_kernel_key =
-      op.GetExpectedKernelType(DygraphExecutionContext<VarType>(
-          op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs));
-
-#ifdef PADDLE_WITH_XPU
-  ReplaceXPUKernelIfNotExists(op, &expected_kernel_key);
-#endif
-
-  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
-
-  return expected_kernel_key;
-}
-
-template <typename VarType>
-NameVarMap<VarType> PrepareData(
+std::shared_ptr<NameVarMap<VarType>> PrepareData(
     const framework::OperatorWithKernel& op, const NameVarMap<VarType>& ins,
     const framework::OpKernelType& expected_kernel_key) {
-  NameVarMap<VarType> tmp_ins(ins);
-  for (auto& name_pair : tmp_ins) {
-    for (auto& var_base : name_pair.second) {
-      const auto* tensor = GetTensorFromVar(var_base->Var());
+  std::shared_ptr<NameVarMap<VarType>> tmp_ins_ptr = nullptr;
+  for (const auto& name_pair : ins) {
+    for (size_t i = 0; i < name_pair.second.size(); ++i) {
+      auto& var_base = name_pair.second[i];
       SetForwardDataTypeOfGradVar(var_base);
+      const auto* tensor = GetTensorFromVar(var_base->Var());
       if (tensor && tensor->IsInitialized()) {
         auto kernel_type_for_var = op.GetKernelTypeForVar(
             name_pair.first, *tensor, expected_kernel_key);
@@ -133,17 +83,28 @@ NameVarMap<VarType> PrepareData(
           VLOG(3) << "Transform Variable " << var_base->Name() << " from "
                   << kernel_type_for_var << " to " << expected_kernel_key;
           framework::Tensor out;
-          auto tmp_var = std::make_shared<VarType>(var_base->Name());
-          tmp_var->SetType(var_base->Type());
           TransformData(expected_kernel_key, kernel_type_for_var, *tensor,
                         &out);
-          SetTensorToVariable(var_base->Var(), out, tmp_var->MutableVar());
-          var_base = tmp_var;
+          if (NeedTransformDataType(kernel_type_for_var, expected_kernel_key)) {
+            // To avoid NameVarMap copy construction overhead in general
+            // scenarios, if inplace transformed, return original input directly
+            if (tmp_ins_ptr == nullptr) {
+              tmp_ins_ptr = std::make_shared<NameVarMap<VarType>>(ins);
+            }
+            auto tmp_var = std::make_shared<VarType>(var_base->Name());
+            tmp_var->SetType(var_base->Type());
+            SetTensorToVariable(var_base->Var(), out, tmp_var->MutableVar());
+            (*tmp_ins_ptr)[name_pair.first][i] = tmp_var;
+          } else {
+            // if dtype is same, transform inplace will not change the original
+            // value, transform inplace to avoid multiple copy
+            SetTensorToVariable(var_base->Var(), out, var_base->MutableVar());
+          }
         }
       }
     }
  }
-  return tmp_ins;
+  return tmp_ins_ptr;
 }
 
 class PreparedOp {
@@ -154,8 +115,17 @@ class PreparedOp {
              const framework::OperatorWithKernel::OpKernelFunc& func,
              platform::DeviceContext* dev_ctx);
 
-  static PreparedOp Prepare(const framework::OperatorWithKernel& op,
-                            const framework::OpKernelType& expected_kernel_key);
+  static PreparedOp Prepare(const NameVarMap<VarBase>& ins,
+                            const NameVarMap<VarBase>& outs,
+                            const framework::OperatorWithKernel& op,
+                            const platform::Place& place,
+                            const framework::AttributeMap& attrs);
+
+  static PreparedOp Prepare(const NameVarMap<VariableWrapper>& ins,
+                            const NameVarMap<VariableWrapper>& outs,
+                            const framework::OperatorWithKernel& op,
+                            const platform::Place& place,
+                            const framework::AttributeMap& attrs);
 
   void Run(const NameVarMap<VarBase>& in, const NameVarMap<VarBase>& out,
            const framework::AttributeMap& attrs);
@@ -164,6 +134,8 @@ class PreparedOp {
            const NameVarMap<VariableWrapper>& outs,
            const framework::AttributeMap& attrs);
 
+  const framework::OpKernelType& kernel_type() const { return kernel_type_; }
+
  private:
   const framework::OperatorBase& op_;
   const framework::RuntimeContext& ctx_;
...
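The comments in the PrepareData hunk explain the rule: variables in the imperative graph are shared by reference (e.g. the autograd graph holds the same VarBase), so a same-dtype transform such as a device copy can safely overwrite the variable in place, while a dtype-changing transform must write into a fresh variable so the original value stays intact for its other holders. A toy demonstration of why (plain std::vector standing in for a variable; not Paddle types):

#include <cassert>
#include <memory>
#include <vector>

using Var = std::vector<float>;

int main() {
  auto user_var = std::make_shared<Var>(Var{1.f, 2.f});
  auto alias = user_var;  // e.g. the autograd graph holds the same variable

  // Same-dtype transform (e.g. a device copy): done in place; every alias
  // observes the write, but the logical value is unchanged.
  *user_var = Var{1.f, 2.f};

  // Dtype-changing transform (e.g. float -> complex): written into a fresh
  // variable so the original value survives for the other holders.
  auto tmp_var = std::make_shared<Var>(Var{1.f, 0.f, 2.f, 0.f});

  assert(alias->size() == 2);    // original variable untouched
  assert(tmp_var->size() == 4);  // the kernel consumes the converted copy
  return 0;
}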
@@ -90,12 +90,10 @@ TEST(test_prepare_op, test_prepare_op) {
   CreateVarNameMap(info, "split", outs, false);
   auto op = framework::OpRegistry::CreateOp("split", var_in_map, var_out_map,
                                             split_attr_map);
-  auto expected_kernel_key = GetExpectedKernelKey<imperative::VarBase>(
-      ins, outs, dynamic_cast<framework::OperatorWithKernel&>(*op), place,
-      split_attr_map);
   ASSERT_NO_FATAL_FAILURE(PreparedOp preparedOp = PreparedOp::Prepare(
+                              ins, outs,
                               dynamic_cast<framework::OperatorWithKernel&>(*op),
-                              expected_kernel_key));
+                              place, split_attr_map));
 }
 
 const framework::Tensor* GetTensorFromVar(const framework::Variable& var);
@@ -107,6 +105,7 @@ TEST(test_prepare_op, test_get_tensor_from_var) {
   auto* ts = GetTensorFromVar(*vout_error->MutableVar());
   ASSERT_TRUE(ts != nullptr);
 }
+
 #if defined(PADDLE_WITH_CUDA)
 TEST(test_prepare_op, test_prepare_data) {
   std::shared_ptr<imperative::VarBase> vin(
@@ -143,13 +142,13 @@ TEST(test_prepare_op, test_prepare_data) {
       attr_map);
 
   // test if it can be transformed to GPU place
-  auto expected_kernel_key = GetExpectedKernelKey<imperative::VarBase>(
-      ins, outs, dynamic_cast<framework::OperatorWithKernel&>(*op), gpu_place,
-      attr_map);
-  imperative::NameVarBaseMap tmp_ins = PrepareData<imperative::VarBase>(
-      dynamic_cast<framework::OperatorWithKernel&>(*op), ins,
-      expected_kernel_key);
-  for (const auto& name_pair : tmp_ins) {
+  auto prepared_op = PreparedOp::Prepare(
+      ins, outs, dynamic_cast<framework::OperatorWithKernel&>(*op), gpu_place,
+      attr_map);
+  PrepareData<imperative::VarBase>(
+      dynamic_cast<framework::OperatorWithKernel&>(*op), ins,
+      prepared_op.kernel_type());
+  for (const auto& name_pair : ins) {
     for (const auto& vb : name_pair.second) {
       ASSERT_TRUE(platform::is_same_place(
           vb->Var().Get<framework::LoDTensor>().place(), gpu_place));
@@ -192,13 +191,13 @@ void TestPrepareDataSamePlace(framework::AttributeMap attr_map) {
       attr_map);
 
   // test if it never transferred on GPU place
-  auto expected_kernel_key = GetExpectedKernelKey<imperative::VarBase>(
-      ins, outs, dynamic_cast<framework::OperatorWithKernel&>(*op), cpu_place,
-      attr_map);
-  imperative::NameVarBaseMap tmp_ins = PrepareData<imperative::VarBase>(
-      dynamic_cast<framework::OperatorWithKernel&>(*op), ins,
-      expected_kernel_key);
-  for (const auto& name_pair : tmp_ins) {
+  auto prepared_op = PreparedOp::Prepare(
+      ins, outs, dynamic_cast<framework::OperatorWithKernel&>(*op), cpu_place,
+      attr_map);
+  PrepareData<imperative::VarBase>(
+      dynamic_cast<framework::OperatorWithKernel&>(*op), ins,
+      prepared_op.kernel_type());
+  for (const auto& name_pair : ins) {
     for (const auto& vb : name_pair.second) {
       ASSERT_TRUE(platform::is_same_place(
           vb->Var().Get<framework::LoDTensor>().place(), cpu_place));
...
@@ -519,7 +519,7 @@ class TestStridedSliceAPI(unittest.TestCase):
                 np.random.randn(2, 10), place=paddle.CUDAPinnedPlace())
             self.assertTrue(x.place.is_cuda_pinned_place())
             y = x[:, ::2]
-            self.assertTrue(x.place.is_cuda_pinned_place())
+            self.assertFalse(x.place.is_cuda_pinned_place())
             self.assertFalse(y.place.is_cuda_pinned_place())
...