diff --git a/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc
index 0638e42873376bcec6e4de61494da46d1f0073d1..04cd60be964a3967a45e73122324c4b3fdf0b3d0 100644
--- a/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc
@@ -30,6 +30,18 @@ using framework::DataLayout;
 using mkldnn::stream;
 using platform::GetMKLDNNFormat;
 
+std::string CreateKey(const paddle::framework::ExecutionContext& ctx,
+                      const std::vector<int>& src_tz, const float scale_data,
+                      const bool is_negative) {
+  std::string key;
+  key.reserve(platform::MKLDNNHandler::MaxKeyLength);
+  platform::MKLDNNHandler::AppendKeyDims(&key, src_tz);
+  platform::MKLDNNHandler::AppendKey(&key, std::to_string(scale_data));
+  platform::MKLDNNHandler::AppendKey(&key, std::to_string(is_negative));
+  platform::MKLDNNHandler::AppendKey(&key, ctx.op().Output("Output"));
+  return key;
+}
+
 template <typename T>
 class QuantOpKernel : public framework::OpKernel<T> {
  public:
@@ -47,32 +59,61 @@ class QuantOpKernel : public framework::OpKernel<T> {
 
     const T* input_data = input->data<T>();
 
-    mkldnn::primitive_attr attri;
-    int mask = 0;
-    attri.set_output_scales(mask, {scale_data});
-
-    auto src_md = platform::MKLDNNMemDesc({src_tz}, memory::data_type::f32,
-                                          input->format());
-    auto src_pd = mkldnn::memory::primitive_desc(src_md, engine);
-    auto src_memory =
-        std::make_shared<mkldnn::memory>(src_pd, to_void_cast<T>(input_data));
-    std::shared_ptr<primitive::at> src_memory_p =
-        std::shared_ptr<primitive::at>(new primitive::at(*src_memory));
-
     bool is_negative = ctx.Attr<bool>("is_negative_input");
-    std::shared_ptr<mkldnn::memory::primitive_desc> dst_pd;
+    std::string key = CreateKey(ctx, src_tz, scale_data, is_negative);
+    const std::string key_prim = key + "@reorder_p";
+    const std::string key_src_mem = key + "@src_mem";
+    const std::string key_dst_mem = key + "@dst_mem";
+
+    std::shared_ptr<mkldnn::memory> src_memory;
     std::shared_ptr<mkldnn::memory> dst_memory;
-    if (is_negative) {
-      platform::ConvMKLDNNHandler::SetDstMemory<int8_t>(
-          ctx, output, dst_tz, engine, dst_pd, dst_memory);
+    std::shared_ptr<reorder> reorder_p;
+    reorder_p = std::static_pointer_cast<reorder>(dev_ctx.GetBlob(key_prim));
+
+    if (reorder_p == nullptr) {
+      mkldnn::primitive_attr attri;
+      int mask = 0;
+      attri.set_output_scales(mask, {scale_data});
+
+      auto src_md = platform::MKLDNNMemDesc({src_tz}, memory::data_type::f32,
+                                            input->format());
+      auto src_pd = mkldnn::memory::primitive_desc(src_md, engine);
+      src_memory =
+          std::make_shared<mkldnn::memory>(src_pd, to_void_cast<T>(input_data));
+      std::shared_ptr<primitive::at> src_memory_p =
+          std::shared_ptr<primitive::at>(new primitive::at(*src_memory));
+
+      std::shared_ptr<mkldnn::memory::primitive_desc> dst_pd;
+      if (is_negative) {
+        platform::ConvMKLDNNHandler::SetDstMemory<int8_t>(
+            ctx, output, dst_tz, engine, dst_pd, dst_memory);
+      } else {
+        platform::ConvMKLDNNHandler::SetDstMemory<uint8_t>(
+            ctx, output, dst_tz, engine, dst_pd, dst_memory);
+      }
+      auto reorder_pd = std::shared_ptr<reorder::primitive_desc>(
+          new reorder::primitive_desc(src_pd, *dst_pd, attri));
+      reorder_p = std::shared_ptr<reorder>(
+          new reorder(*reorder_pd, *src_memory_p, *dst_memory));
+
+      dev_ctx.SetBlob(key_prim, reorder_p);
+      dev_ctx.SetBlob(key_src_mem, src_memory);
+      dev_ctx.SetBlob(key_dst_mem, dst_memory);
     } else {
-      platform::ConvMKLDNNHandler::SetDstMemory<uint8_t>(
-          ctx, output, dst_tz, engine, dst_pd, dst_memory);
+      src_memory = std::static_pointer_cast<mkldnn::memory>(
+          dev_ctx.GetBlob(key_src_mem));
+      src_memory->set_data_handle(to_void_cast<T>(input_data));
+
+      dst_memory = std::static_pointer_cast<mkldnn::memory>(
+          dev_ctx.GetBlob(key_dst_mem));
+      auto place = ctx.GetPlace();
+      if (is_negative) {
+        dst_memory->set_data_handle(output->mutable_data<int8_t>(place));
+      } else {
+        dst_memory->set_data_handle(output->mutable_data<uint8_t>(place));
+      }
     }
-    auto reorder_pd = std::shared_ptr<reorder::primitive_desc>(
-        new reorder::primitive_desc(src_pd, *dst_pd, attri));
-    auto reorder_p = std::shared_ptr<reorder>(
-        new reorder(*reorder_pd, *src_memory_p, *dst_memory));
+
     pipeline.push_back(*reorder_p);
     stream(stream::kind::eager).submit(pipeline).wait();
 
     output->set_layout(DataLayout::kMKLDNN);
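
For context: the patch caches the quantize reorder primitive in the MKLDNNDeviceContext blob map under a key derived from the source dims, scale, sign flag and output name, builds it only on the first call, and on later calls merely refreshes the src/dst data handles before resubmitting it. The standalone sketch below illustrates just that lookup-or-create pattern in isolation; BlobMap, FakeReorder, GetOrCreateReorder and the example key string are hypothetical stand-ins, not Paddle or MKL-DNN APIs (dev_ctx.GetBlob/SetBlob play the role of the map in the real kernel).

// Minimal sketch of keyed primitive caching, assuming a plain map in place
// of the device context's blob storage.
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Stand-in for the device context blob map (type-erased cached objects).
using BlobMap = std::unordered_map<std::string, std::shared_ptr<void>>;

// Hypothetical stand-in for a cached quantizing reorder primitive.
struct FakeReorder {
  float scale;
  void Execute(const float* src, int n, int8_t* dst) const {
    for (int i = 0; i < n; ++i) dst[i] = static_cast<int8_t>(src[i] * scale);
  }
};

std::shared_ptr<FakeReorder> GetOrCreateReorder(BlobMap* blobs,
                                                const std::string& key,
                                                float scale) {
  const std::string key_prim = key + "@reorder_p";
  auto it = blobs->find(key_prim);
  if (it != blobs->end()) {
    // Cache hit: reuse the primitive created on an earlier iteration.
    return std::static_pointer_cast<FakeReorder>(it->second);
  }
  // Cache miss: build the primitive once and store it under the key.
  auto reorder_p = std::make_shared<FakeReorder>(FakeReorder{scale});
  (*blobs)[key_prim] = reorder_p;
  return reorder_p;
}

int main() {
  BlobMap blobs;
  float src[4] = {0.5f, 1.0f, 1.5f, 2.0f};
  int8_t dst[4];
  // Same key on every call, so the primitive is constructed only once;
  // repeated calls only re-run Execute on fresh data.
  for (int iter = 0; iter < 3; ++iter) {
    auto reorder_p = GetOrCreateReorder(&blobs, "2x2_4.0_1_out", 4.0f);
    reorder_p->Execute(src, 4, dst);
  }
  std::cout << static_cast<int>(dst[3]) << "\n";  // prints 8
  return 0;
}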