From 0e3a1d35b10e6cc48512a4848fe5f6da6fc7dfec Mon Sep 17 00:00:00 2001 From: Chen Weihang Date: Fri, 8 Jan 2021 00:32:57 -0600 Subject: [PATCH] [Cherry-pick] [Complex] Simplify prepared op impl to improve performance (#30153) (#30215) * simplify prepared op impl to improve performance * fix kunlun compile error * continue fix kunlun compile error * only transform diff place when dtype diff * fix failed unittests * remove useless file * polish impl by review comment --- paddle/fluid/framework/op_kernel_type.h | 5 + paddle/fluid/imperative/layer.cc | 14 +-- paddle/fluid/imperative/prepared_operator.cc | 60 ++++++++++-- paddle/fluid/imperative/prepared_operator.h | 98 +++++++------------ .../fluid/imperative/tests/test_prepare_op.cc | 23 +++-- .../tests/unittests/test_strided_slice_op.py | 2 +- 6 files changed, 112 insertions(+), 90 deletions(-) diff --git a/paddle/fluid/framework/op_kernel_type.h b/paddle/fluid/framework/op_kernel_type.h index f4e60bb9b7..e903b079c2 100644 --- a/paddle/fluid/framework/op_kernel_type.h +++ b/paddle/fluid/framework/op_kernel_type.h @@ -101,6 +101,11 @@ inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) { return ret; } +inline bool NeedTransformDataType(const OpKernelType& l, + const OpKernelType& r) { + return (l.data_type_ != r.data_type_); +} + inline bool NeedTransform(const OpKernelType& l, const OpKernelType& r) { return (!platform::places_are_same_class(l.place_, r.place_)) || (l.data_type_ != r.data_type_) || diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc index e7c5726dac..57cde16a88 100644 --- a/paddle/fluid/imperative/layer.cc +++ b/paddle/fluid/imperative/layer.cc @@ -376,12 +376,14 @@ static void OpBaseRunImpl(const framework::OperatorBase& op, * after the execution of op, but the original input is directly * overwritten in the previous dynamic graph implemention. 
*/ - auto expected_kernel_key = - GetExpectedKernelKey(ins, outs, *op_kernel, place, attrs); - auto prepared_op = PreparedOp::Prepare(*op_kernel, expected_kernel_key); - auto tmp_ins = PrepareData(*op_kernel, ins, expected_kernel_key); - - prepared_op.Run(tmp_ins, outs, attrs); + auto prepared_op = PreparedOp::Prepare(ins, outs, *op_kernel, place, attrs); + auto tmp_ins_ptr = + PrepareData(*op_kernel, ins, prepared_op.kernel_type()); + if (tmp_ins_ptr == nullptr) { + prepared_op.Run(ins, outs, attrs); + } else { + prepared_op.Run(*tmp_ins_ptr, outs, attrs); + } VLOG(4) << LayerDebugString(op.Type(), ins, outs); } diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc index ba4b1d4c98..30ad06d9bc 100644 --- a/paddle/fluid/imperative/prepared_operator.cc +++ b/paddle/fluid/imperative/prepared_operator.cc @@ -76,16 +76,35 @@ PreparedOp::PreparedOp(const framework::OperatorBase& op, func_(func), dev_ctx_(dev_ctx) {} -PreparedOp PreparedOp::Prepare( - const framework::OperatorWithKernel& op, - const framework::OpKernelType& expected_kernel_key) { +template +PreparedOp PrepareImpl(const NameVarMap& ins, + const NameVarMap& outs, + const framework::OperatorWithKernel& op, + const platform::Place& place, + const framework::AttributeMap& attrs) { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto* dev_ctx = pool.Get(expected_kernel_key.place_); + auto* dev_ctx = pool.Get(place); + framework::RuntimeContext ctx({}, {}); + +#ifdef PADDLE_WITH_MKLDNN + // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and + // GetKernelType functions, so we need to copy the attributes there. + // Const qualifier of Attrs had to be discarded to overwrite it. + if (FLAGS_use_mkldnn) { + auto& mutable_op_attrs = const_cast(op.Attrs()); + mutable_op_attrs = attrs; + } +#endif - // check if op[type] has kernel registered. + // 1. get expected kernel key + auto expected_kernel_key = + op.GetExpectedKernelType(DygraphExecutionContext( + op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs)); + VLOG(3) << "expected_kernel_key:" << expected_kernel_key; + + // 2. check if op[type] has kernel registered. 
auto& all_op_kernels = op.AllOpKernels(); auto kernels_iter = all_op_kernels.find(op.Type()); - PADDLE_ENFORCE_NE( kernels_iter, all_op_kernels.end(), platform::errors::NotFound( @@ -93,18 +112,43 @@ PreparedOp PreparedOp::Prepare( op.Type())); auto& kernels = kernels_iter->second; - - framework::RuntimeContext ctx({}, {}); auto kernel_iter = kernels.find(expected_kernel_key); +#ifdef PADDLE_WITH_XPU + if (kernel_iter == kernels.end() && + is_xpu_place(expected_kernel_key.place_)) { + expected_kernel_key.place_ = platform::CPUPlace(); + kernel_iter = kernels.find(expected_kernel_key); + } +#endif // TODO(jiabin): Add operator.cc's line 1000 part back when we need that case PADDLE_ENFORCE_NE(kernel_iter, kernels.end(), platform::errors::NotFound( "Operator %s does not have kernel for %s.", op.Type(), KernelTypeToString(expected_kernel_key))); + if (!(expected_kernel_key.place_ == place)) { + dev_ctx = pool.Get(expected_kernel_key.place_); + } + return PreparedOp(op, ctx, expected_kernel_key, kernel_iter->second, dev_ctx); } +PreparedOp PreparedOp::Prepare(const NameVarMap& ins, + const NameVarMap& outs, + const framework::OperatorWithKernel& op, + const platform::Place& place, + const framework::AttributeMap& attrs) { + return PrepareImpl(ins, outs, op, place, attrs); +} + +PreparedOp PreparedOp::Prepare(const NameVarMap& ins, + const NameVarMap& outs, + const framework::OperatorWithKernel& op, + const platform::Place& place, + const framework::AttributeMap& attrs) { + return PrepareImpl(ins, outs, op, place, attrs); +} + template static void PreparedOpRunImpl( const framework::OperatorBase& op, const framework::RuntimeContext& ctx, diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h index 7952c453ee..95186efc58 100644 --- a/paddle/fluid/imperative/prepared_operator.h +++ b/paddle/fluid/imperative/prepared_operator.h @@ -64,66 +64,16 @@ void SetForwardDataTypeOfGradVar(const std::shared_ptr& var) { } } -#ifdef PADDLE_WITH_XPU -static void ReplaceXPUKernelIfNotExists( - const framework::OperatorWithKernel& op, - framework::OpKernelType* expected_kernel_key) { - auto& all_op_kernels = op.AllOpKernels(); - auto kernels_iter = all_op_kernels.find(op.Type()); - PADDLE_ENFORCE_NE( - kernels_iter, all_op_kernels.end(), - platform::errors::NotFound( - "There are no kernels which are registered in the %s operator.", - op.Type())); - - auto& kernels = kernels_iter->second; - auto kernel_iter = kernels.find(*expected_kernel_key); - if (kernel_iter == kernels.end() && - is_xpu_place(expected_kernel_key->place_)) { - expected_kernel_key->place_ = platform::CPUPlace(); - } -} -#endif - template -framework::OpKernelType GetExpectedKernelKey( - const NameVarMap& ins, const NameVarMap& outs, - const framework::OperatorWithKernel& op, const platform::Place& place, - const framework::AttributeMap& attrs) { - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto* dev_ctx = pool.Get(place); - framework::RuntimeContext ctx({}, {}); - -#ifdef PADDLE_WITH_MKLDNN - // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and - // GetKernelType functions, so we need to copy the attributes there. - // Const qualifier of Attrs had to be discarded to overwrite it. 
- if (FLAGS_use_mkldnn) { - auto& mutable_op_attrs = const_cast(op.Attrs()); - mutable_op_attrs = attrs; - } -#endif - - auto expected_kernel_key = - op.GetExpectedKernelType(DygraphExecutionContext( - op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs)); -#ifdef PADDLE_WITH_XPU - ReplaceXPUKernelIfNotExists(op, &expected_kernel_key); -#endif - VLOG(3) << "expected_kernel_key:" << expected_kernel_key; - - return expected_kernel_key; -} - -template -NameVarMap PrepareData( +std::shared_ptr> PrepareData( const framework::OperatorWithKernel& op, const NameVarMap& ins, const framework::OpKernelType& expected_kernel_key) { - NameVarMap tmp_ins(ins); - for (auto& name_pair : tmp_ins) { - for (auto& var_base : name_pair.second) { - const auto* tensor = GetTensorFromVar(var_base->Var()); + std::shared_ptr> tmp_ins_ptr = nullptr; + for (const auto& name_pair : ins) { + for (size_t i = 0; i < name_pair.second.size(); ++i) { + auto& var_base = name_pair.second[i]; SetForwardDataTypeOfGradVar(var_base); + const auto* tensor = GetTensorFromVar(var_base->Var()); if (tensor && tensor->IsInitialized()) { auto kernel_type_for_var = op.GetKernelTypeForVar( name_pair.first, *tensor, expected_kernel_key); @@ -133,17 +83,28 @@ NameVarMap PrepareData( VLOG(3) << "Transform Variable " << var_base->Name() << " from " << kernel_type_for_var << " to " << expected_kernel_key; framework::Tensor out; - auto tmp_var = std::make_shared(var_base->Name()); - tmp_var->SetType(var_base->Type()); TransformData(expected_kernel_key, kernel_type_for_var, *tensor, &out); - SetTensorToVariable(var_base->Var(), out, tmp_var->MutableVar()); - var_base = tmp_var; + if (NeedTransformDataType(kernel_type_for_var, expected_kernel_key)) { + // To avoid NameVarMap copy construction overhead in general + // scenarios, if inplace transformed, return original input directly + if (tmp_ins_ptr == nullptr) { + tmp_ins_ptr = std::make_shared>(ins); + } + auto tmp_var = std::make_shared(var_base->Name()); + tmp_var->SetType(var_base->Type()); + SetTensorToVariable(var_base->Var(), out, tmp_var->MutableVar()); + (*tmp_ins_ptr)[name_pair.first][i] = tmp_var; + } else { + // if dtype is same, transform inplace will not change the original + // value, transform inplace to avoid multiple copy + SetTensorToVariable(var_base->Var(), out, var_base->MutableVar()); + } } } } } - return tmp_ins; + return tmp_ins_ptr; } class PreparedOp { @@ -154,8 +115,17 @@ class PreparedOp { const framework::OperatorWithKernel::OpKernelFunc& func, platform::DeviceContext* dev_ctx); - static PreparedOp Prepare(const framework::OperatorWithKernel& op, - const framework::OpKernelType& expected_kernel_key); + static PreparedOp Prepare(const NameVarMap& ins, + const NameVarMap& outs, + const framework::OperatorWithKernel& op, + const platform::Place& place, + const framework::AttributeMap& attrs); + + static PreparedOp Prepare(const NameVarMap& ins, + const NameVarMap& outs, + const framework::OperatorWithKernel& op, + const platform::Place& place, + const framework::AttributeMap& attrs); void Run(const NameVarMap& in, const NameVarMap& out, const framework::AttributeMap& attrs); @@ -164,6 +134,8 @@ class PreparedOp { const NameVarMap& outs, const framework::AttributeMap& attrs); + const framework::OpKernelType& kernel_type() const { return kernel_type_; } + private: const framework::OperatorBase& op_; const framework::RuntimeContext& ctx_; diff --git a/paddle/fluid/imperative/tests/test_prepare_op.cc b/paddle/fluid/imperative/tests/test_prepare_op.cc index 
b9ad5306f0..ea009a4f5a 100644 --- a/paddle/fluid/imperative/tests/test_prepare_op.cc +++ b/paddle/fluid/imperative/tests/test_prepare_op.cc @@ -90,12 +90,10 @@ TEST(test_prepare_op, test_prepare_op) { CreateVarNameMap(info, "split", outs, false); auto op = framework::OpRegistry::CreateOp("split", var_in_map, var_out_map, split_attr_map); - auto expected_kernel_key = GetExpectedKernelKey( - ins, outs, dynamic_cast(*op), place, - split_attr_map); ASSERT_NO_FATAL_FAILURE(PreparedOp preparedOp = PreparedOp::Prepare( + ins, outs, dynamic_cast(*op), - expected_kernel_key)); + place, split_attr_map)); } const framework::Tensor* GetTensorFromVar(const framework::Variable& var); @@ -107,6 +105,7 @@ TEST(test_prepare_op, test_get_tensor_from_var) { auto* ts = GetTensorFromVar(*vout_error->MutableVar()); ASSERT_TRUE(ts != nullptr); } + #if defined(PADDLE_WITH_CUDA) TEST(test_prepare_op, test_prepare_data) { std::shared_ptr vin( @@ -143,13 +142,13 @@ TEST(test_prepare_op, test_prepare_data) { attr_map); // test if it can be transformed to GPU place - auto expected_kernel_key = GetExpectedKernelKey( + auto prepared_op = PreparedOp::Prepare( ins, outs, dynamic_cast(*op), gpu_place, attr_map); - imperative::NameVarBaseMap tmp_ins = PrepareData( + PrepareData( dynamic_cast(*op), ins, - expected_kernel_key); - for (const auto& name_pair : tmp_ins) { + prepared_op.kernel_type()); + for (const auto& name_pair : ins) { for (const auto& vb : name_pair.second) { ASSERT_TRUE(platform::is_same_place( vb->Var().Get().place(), gpu_place)); @@ -192,13 +191,13 @@ void TestPrepareDataSamePlace(framework::AttributeMap attr_map) { attr_map); // test if it never transferred on GPU place - auto expected_kernel_key = GetExpectedKernelKey( + auto prepared_op = PreparedOp::Prepare( ins, outs, dynamic_cast(*op), cpu_place, attr_map); - imperative::NameVarBaseMap tmp_ins = PrepareData( + PrepareData( dynamic_cast(*op), ins, - expected_kernel_key); - for (const auto& name_pair : tmp_ins) { + prepared_op.kernel_type()); + for (const auto& name_pair : ins) { for (const auto& vb : name_pair.second) { ASSERT_TRUE(platform::is_same_place( vb->Var().Get().place(), cpu_place)); diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py index 8b2cf56c88..71550c8f24 100644 --- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py @@ -519,7 +519,7 @@ class TestStridedSliceAPI(unittest.TestCase): np.random.randn(2, 10), place=paddle.CUDAPinnedPlace()) self.assertTrue(x.place.is_cuda_pinned_place()) y = x[:, ::2] - self.assertTrue(x.place.is_cuda_pinned_place()) + self.assertFalse(x.place.is_cuda_pinned_place()) self.assertFalse(y.place.is_cuda_pinned_place()) -- GitLab
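
For orientation, below is a minimal sketch (not part of the patch itself) of the caller-side contract this change establishes in OpBaseRunImpl (paddle/fluid/imperative/layer.cc). It only condenses the hunk shown above; the explicit VarBase template argument and the surrounding tracer names (ins, outs, op_kernel, place, attrs) are assumed from the Paddle imperative module rather than reproduced in full here.

    // The expected kernel key is now resolved inside Prepare itself, so the
    // separate GetExpectedKernelKey call is gone.
    auto prepared_op =
        PreparedOp::Prepare(ins, outs, *op_kernel, place, attrs);

    // PrepareData returns a shared_ptr that is nullptr when no input-map copy
    // was needed: if only the place/layout differs (same dtype), the tensor is
    // transformed in place and the original inputs can be reused. A fresh
    // NameVarMap with replacement VarBases is built only when a dtype
    // transform is required for some input.
    auto tmp_ins_ptr =
        PrepareData<VarBase>(*op_kernel, ins, prepared_op.kernel_type());

    if (tmp_ins_ptr == nullptr) {
      prepared_op.Run(ins, outs, attrs);           // fast path: no map copy
    } else {
      prepared_op.Run(*tmp_ins_ptr, outs, attrs);  // dtype-transformed copy
    }

Per the comments added in PrepareData, skipping the NameVarMap copy construction whenever the dtype already matches is the source of the performance improvement referenced in the patch title.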