diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
index 3b92d2e2d889137c005ae6de9be6942b5af49bd3..02715cfd3f2f824010fac9e3e08a00b80f116ca2 100644
--- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
@@ -79,15 +79,14 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
                     paddle::platform::errors::PreconditionNotMet(
                         "Operator DNNL eletwise_forward must use CPUPlace"));
   auto &dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
+  const auto &mkldnn_engine = dev_ctx.GetEngine();
 
   const auto *x = ctx.Input<Tensor>("X");
   auto *y = ctx.Output<Tensor>("Out");
 
   bool is_inplaced = x->IsSharedBufferWith(*y);
 
-  platform::ActivationMKLDNNHandler<T> handler(algorithm, ctx, dev_ctx,
-                                               ctx.GetPlace(), x,
-                                               ctx.InputName("X"), is_inplaced);
+  platform::ActivationMKLDNNHandler<T> handler(algorithm, ctx, mkldnn_engine,
+                                               ctx.GetPlace(), x);
 
   auto src_memory_p = handler.AcquireSrcMemory(x);
   auto dst_memory_p = is_inplaced ? src_memory_p : handler.AcquireDstMemory(y);
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index f846f7010e095a654c9b78cc2ad8785dacbfbcc6..3668105ccfd544fde3ca9ffca26c4e9ae18c205c 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -857,7 +857,7 @@ class BinaryMKLDNNHandler : public platform::MKLDNNHandlerNoCachingT<T, dnnl::binary> {
     const T* input_data = input->data<T>();
     return this->AcquireMemoryFromPrimitive(
-        this->fwd_pd_->src1_desc(), to_void_cast<T>(input_data), "@src1_mem_p");
+        this->fwd_pd_->src1_desc(), to_void_cast<T>(input_data));
   }
 
  private:
@@ -980,24 +980,15 @@ class ReductionMKLDNNHandler
 
 template <typename T>
 class ActivationMKLDNNHandler
-    : public MKLDNNHandlerT<T, mkldnn::eltwise_forward,
-                            mkldnn::eltwise_backward> {
+    : public MKLDNNHandlerNoCachingT<T, mkldnn::eltwise_forward,
+                                     mkldnn::eltwise_backward> {
  public:
   ActivationMKLDNNHandler(mkldnn::algorithm algorithm,
                           const framework::ExecutionContext& ctx,
-                          const MKLDNNDeviceContext& dev_ctx, Place cpu_place,
-                          const framework::Tensor* in_x,
-                          const std::string& unique_name, bool is_inplaced)
-      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
-                                 mkldnn::eltwise_backward>(
-            dev_ctx, dev_ctx.GetEngine(), cpu_place,
-            is_inplaced ? platform::CreateKey(
-                              dev_ctx, framework::vectorize(in_x->dims()), "a",
-                              algorithm, unique_name)
-                        : platform::CreateKey(
-                              dev_ctx, framework::vectorize(in_x->dims()), "a",
-                              unique_name)) {
-    if (!this->isCached()) {
+                          const mkldnn::engine engine, Place cpu_place,
+                          const framework::Tensor* in_x)
+      : platform::MKLDNNHandlerNoCachingT<T, mkldnn::eltwise_forward,
+                                          mkldnn::eltwise_backward>(engine,
+                                                                    cpu_place) {
     float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 0;
     float beta = ctx.HasAttr("beta") ? ctx.Attr<float>("beta") : 0;
 
     // eltwise_linear means we are in scale op
@@ -1035,20 +1026,14 @@ class ActivationMKLDNNHandler
     this->AcquireForwardPrimitiveDescriptor(
         mkldnn::prop_kind::forward_training, algorithm, md, alpha, beta);
-    }
   }
 
   ActivationMKLDNNHandler(mkldnn::algorithm algorithm,
                           const framework::ExecutionContext& ctx,
-                          const MKLDNNDeviceContext& dev_ctx, Place cpu_place,
-                          const framework::Tensor* in_x, const Tensor* out_grad,
-                          const std::string& unique_name)
-      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
-                                 mkldnn::eltwise_backward>(
-            dev_ctx, dev_ctx.GetEngine(), cpu_place,
-            platform::CreateKey(dev_ctx, framework::vectorize(in_x->dims()),
-                                "a", unique_name)) {
-    if (!this->isBwdCached()) {
+                          const mkldnn::engine engine, Place cpu_place,
+                          const framework::Tensor* in_x, const Tensor* out_grad)
+      : platform::MKLDNNHandlerNoCachingT<T, mkldnn::eltwise_forward,
+                                          mkldnn::eltwise_backward>(engine,
+                                                                    cpu_place) {
     float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 0;
     float beta = ctx.HasAttr("beta") ? ctx.Attr<float>("beta") : 0;
 
@@ -1076,15 +1061,12 @@ class ActivationMKLDNNHandler
         mkldnn::prop_kind::forward_training, algorithm, src_md, alpha, beta);
     this->AcquireBackwardPrimitiveDescriptor(algorithm, diff_dst_md, src_md,
                                              alpha, beta);
-    }
   }
 
   std::shared_ptr<mkldnn::memory> AcquireBackwardSrcMemory(
       const framework::Tensor* input) {
     const T* input_data = input->data<T>();
-    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
-                                            to_void_cast<T>(input_data),
-                                            "@bwd-src_mem_p");
+    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
+                                            to_void_cast<T>(input_data));
   }
 };
 
@@ -1635,11 +1617,6 @@ using ConvMKLDNNHandler =
                               mkldnn::convolution_backward_data,
                               mkldnn::convolution_backward_weights>;
 
-using ConvTransposeMKLDNNHandler =
-    ConvMKLDNNTemplateHandler<mkldnn::deconvolution_forward,
-                              mkldnn::deconvolution_backward_data,
-                              mkldnn::deconvolution_backward_weights>;
-
 template <typename T>
 static std::shared_ptr<mkldnn::memory> SetDstMemory(
     const framework::ExecutionContext& ctx, framework::Tensor* output,