diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc
index 2058b868e29d36006ceb497eda89504a96a7bf9b..3770551796625e6c6864429f460bc7a41566c035 100644
--- a/paddle/fluid/operators/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/conv_mkldnn_op.cc
@@ -560,18 +560,18 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     }
 
-    static std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> md_map;
-    bool md_reuse = true;
-    auto user_src_md_key = key + "@user_src_md";
-    if (GetMdMap(md_map, user_src_md_key) == nullptr){
-      md_reuse = false;  //we suppose all mds are reused if the first md is in the map.
-    }
-    auto user_weights_md_key = key + "@user_weights_md";
+    //static std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> md_map;
+    //bool md_reuse = true;
+    //auto user_src_md_key = key + "@user_src_md";
+    //if (GetMdMap(md_map, user_src_md_key) == nullptr){
+    //  md_reuse = false;  //we suppose all mds are reused if the first md is in the map.
+    //}
+    //auto user_weights_md_key = key + "@user_weights_md";
     std::shared_ptr<mkldnn::memory::desc> user_src_md;
     std::shared_ptr<mkldnn::memory::desc> user_weights_md;
     std::vector<primitive> pipeline;
     //std::cout<<"md_reuse = "<<md_reuse<<std::endl;
-    if(!md_reuse){
+    //if(!md_reuse){
       user_src_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
           src_tz, paddle::framework::ToMKLDNNDataType(input->type()), input->format())));
@@ -579,12 +579,12 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
           {weights_tz}, platform::MKLDNNGetDataType<T>(),
           (g == 1) ? mkldnn::memory::format::oihw : mkldnn::memory::format::goihw)));
-      SetMdMap(md_map, user_src_md_key, user_src_md);
-      SetMdMap(md_map, user_weights_md_key, user_weights_md);
-    } else{
-      user_src_md = GetMdMap(md_map, user_src_md_key);
-      user_weights_md = GetMdMap(md_map, user_weights_md_key);
-    }
+//      SetMdMap(md_map, user_src_md_key, user_src_md);
+//      SetMdMap(md_map, user_weights_md_key, user_weights_md);
+//    } else{
+//      user_src_md = GetMdMap(md_map, user_src_md_key);
+//      user_weights_md = GetMdMap(md_map, user_weights_md_key);
+//    }
 
     /* create memory descriptor for convolution without specified format
      * ('any') which lets a primitive (convolution in this case) choose
@@ -597,16 +597,16 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::shared_ptr<mkldnn::convolution_forward::primitive_desc> conv_pd;
     auto bias_tz = paddle::framework::vectorize2int(bias->dims());
-    auto src_md_key = key + "@src_md";
-    auto weights_md_key = key + "@weights_md_key";
-    auto dst_md_key = key + "@dst_md_key";
-    auto bias_md_key = key + "@bias_md_key";
+    //auto src_md_key = key + "@src_md";
+    //auto weights_md_key = key + "@weights_md_key";
+    //auto dst_md_key = key + "@dst_md_key";
+    //auto bias_md_key = key + "@bias_md_key";
     std::shared_ptr<mkldnn::memory::desc> src_md;
     std::shared_ptr<mkldnn::memory::desc> weights_md;
     std::shared_ptr<mkldnn::memory::desc> dst_md;
     if(is_INT8){
-      if(!md_reuse){
+      //if(!md_reuse){
         src_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
             src_tz, memory::data_type::u8, chosen_memory_format)));
         weights_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
@@ -621,25 +621,25 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
        if(force_fp32_output)
          dst_dt = paddle::framework::ToMKLDNNDataType(std::type_index(typeid(float)));
        dst_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(dst_tz, dst_dt, chosen_memory_format)));
-        SetMdMap(md_map, src_md_key, src_md);
-        SetMdMap(md_map, weights_md_key, weights_md);
-        SetMdMap(md_map, dst_md_key, dst_md);
-      } else{
-        src_md = GetMdMap(md_map, src_md_key);
-        weights_md = GetMdMap(md_map, weights_md_key);
-        dst_md = GetMdMap(md_map, dst_md_key);
-      }
+        //SetMdMap(md_map, src_md_key, src_md);
+        //SetMdMap(md_map, weights_md_key, weights_md);
+        //SetMdMap(md_map, dst_md_key, dst_md);
+      //} else{
+      //  src_md = GetMdMap(md_map, src_md_key);
+      //  weights_md = GetMdMap(md_map, weights_md_key);
+      //  dst_md = GetMdMap(md_map, dst_md_key);
+      //}
 
       // create a conv primitive descriptor and save it for usage in backward
       if (bias) {
         std::shared_ptr<mkldnn::memory::desc> bias_md;
-        if(!md_reuse){
+        //if(!md_reuse){
           bias_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
               bias_tz, memory::data_type::s32, memory::format::x)));
-          SetMdMap(md_map, bias_md_key, bias_md);
-        } else{
-          bias_md = GetMdMap(md_map, bias_md_key);
-        }
+        //  SetMdMap(md_map, bias_md_key, bias_md);
+        //} else{
+        //  bias_md = GetMdMap(md_map, bias_md_key);
+        //}
         conv_pd = ConvFwdPrimitiveDesc(*src_md, *weights_md, *bias_md, *dst_md,
                                        strides, paddings, mkldnn_engine,
@@ -652,31 +652,31 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
                                        output_shift_scale, sum_scale[0], is_test);
       }
     } else{
-      if(!md_reuse){
+      //if(!md_reuse){
        src_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
            src_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format)));
        weights_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
            weights_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format)));
        dst_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
            dst_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format)));
-      SetMdMap(md_map, src_md_key, src_md);
-      SetMdMap(md_map, weights_md_key, weights_md);
-      SetMdMap(md_map, dst_md_key, dst_md);
-      } else{
-        src_md = GetMdMap(md_map, src_md_key);
-        weights_md = GetMdMap(md_map, weights_md_key);
-        dst_md = GetMdMap(md_map, dst_md_key);
-      }
+      //  SetMdMap(md_map, src_md_key, src_md);
+      //  SetMdMap(md_map, weights_md_key, weights_md);
+      //  SetMdMap(md_map, dst_md_key, dst_md);
+      //} else{
+      //  src_md = GetMdMap(md_map, src_md_key);
+      //  weights_md = GetMdMap(md_map, weights_md_key);
+      //  dst_md = GetMdMap(md_map, dst_md_key);
+      //}
 
       // create a conv primitive descriptor and save it for usage in backward
       if (bias) {
         std::shared_ptr<mkldnn::memory::desc> bias_md;
-        if(!md_reuse){
+        //if(!md_reuse){
           bias_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
               bias_tz, platform::MKLDNNGetDataType<T>(), memory::format::x)));
-          SetMdMap(md_map, bias_md_key, bias_md);
-        } else{
-          bias_md = GetMdMap(md_map, bias_md_key);
-        }
+        //  SetMdMap(md_map, bias_md_key, bias_md);
+        //} else{
+        //  bias_md = GetMdMap(md_map, bias_md_key);
+        //}
         conv_pd = ConvFwdPrimitiveDesc(*src_md, *weights_md, *bias_md, *dst_md,
                                        strides, paddings, mkldnn_engine,
                                        fuse_relu, fuse_residual_conn, is_test);
@@ -714,7 +714,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 
     std::shared_ptr<mkldnn::memory> dst_memory_p;
     bool need_s8_to_u8 = false;
-    auto user_residual_md_key = key + "@user_residual_md";
+    //auto user_residual_md_key = key + "@user_residual_md";
     if(fuse_residual_conn) {
       auto residual_param = ctx.Input<Tensor>("ResidualData");
       PADDLE_ENFORCE_EQ(output->dims(), residual_param->dims(),
@@ -723,17 +723,17 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       auto residual_dt = paddle::framework::ToMKLDNNDataType(residual_param->type());
       if(residual_param->format() != handler.GetDstFormat()) {
         std::shared_ptr<mkldnn::memory::desc> user_residual_md;
-        if(!md_reuse){
+        //if(!md_reuse){
           auto residual_data_tz = paddle::framework::vectorize2int(residual_param->dims());
           auto residual_data_type = paddle::framework::ToMKLDNNDataType(residual_param->type());
           user_residual_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
               residual_data_tz, residual_data_type, residual_param->format())));
-          SetMdMap(md_map, user_residual_md_key, user_residual_md);
-        } else{
-          user_residual_md = GetMdMap(md_map, user_residual_md_key);
-        }
+          //SetMdMap(md_map, user_residual_md_key, user_residual_md);
+        //} else{
+        //  user_residual_md = GetMdMap(md_map, user_residual_md_key);
+        //}
         if(is_INT8){
           PADDLE_ENFORCE(
               force_fp32_output == false,
@@ -818,17 +818,17 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     // create convolution op primitive
     std::shared_ptr<mkldnn::convolution_forward> conv_p;
     //auto scale_bias_key = key + "@scale_bias";
-    auto user_bias_md_key = key + "@user_bias_md";
+    //auto user_bias_md_key = key + "@user_bias_md";
     if (bias) {
       const float* bias_data = bias->data<float>();
       std::shared_ptr<mkldnn::memory::desc> user_bias_md;
-      if(!md_reuse){
+      //if(!md_reuse){
        user_bias_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
            {bias_tz}, platform::MKLDNNGetDataType<float>(), memory::format::x)));
-       SetMdMap(md_map, user_bias_md_key, user_bias_md);
-      } else{
-        user_bias_md = GetMdMap(md_map, user_bias_md_key);
-      }
+      // SetMdMap(md_map, user_bias_md_key, user_bias_md);
+      //} else{
+      //  user_bias_md = GetMdMap(md_map, user_bias_md_key);
+      //}
       auto user_bias_memory_p =
           handler.AcquireBiasMemory(*user_bias_md, to_void_cast<float>(bias_data));
       std::shared_ptr<mkldnn::memory> bias_memory_p;
@@ -898,25 +898,25 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     return {{0.0f}};
   }
 
-  void SetMdMap(std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> &md_map,
-       const std::string& name, std::shared_ptr<mkldnn::memory::desc> mds) const {
-    auto it = md_map.find(name);
-    if (it == md_map.end()) {
-      md_map[name] = mds;  // create new blob
-    } else {
-      (*it).second = mds;  // set data to existing blob
-    }
-    return;
-  }
-
-  std::shared_ptr<mkldnn::memory::desc> GetMdMap(std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> md_map,
-       const std::string& name) const {
-    auto it = md_map.find(name);
-    if (it != md_map.end()) {
-      return (*it).second;
-    }
-    return nullptr;
-  }
+  //void SetMdMap(std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> &md_map,
+  //     const std::string& name, std::shared_ptr<mkldnn::memory::desc> mds) const {
+  //  auto it = md_map.find(name);
+  //  if (it == md_map.end()) {
+  //    md_map[name] = mds;  // create new blob
+  //  } else {
+  //    (*it).second = mds;  // set data to existing blob
+  //  }
+  //  return;
+  //}
+
+  //std::shared_ptr<mkldnn::memory::desc> GetMdMap(std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> md_map,
+  //     const std::string& name) const {
+  //  auto it = md_map.find(name);
+  //  if (it != md_map.end()) {
+  //    return (*it).second;
+  //  }
+  //  return nullptr;
+  //}
 
   mkldnn::primitive_attr CreatePostOps(bool fuse_relu, bool fuse_residual_conn,
                          const std::vector<float> output_shift_scale, float sum_scale) const {
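
Note: the SetMdMap/GetMdMap helpers that this patch comments out implement a plain string-keyed cache of std::shared_ptr<mkldnn::memory::desc>, with a "first key missing" probe (GetMdMap(...) == nullptr) deciding whether the descriptors are rebuilt or reused. The standalone sketch below shows that lookup-or-insert pattern in isolation; it is only an illustration, not code from this patch: MemoryDesc, MdCache, SetMd, and GetMd are placeholder names, and MemoryDesc stands in for mkldnn::memory::desc so the snippet compiles without MKL-DNN.

// Sketch of a string-keyed descriptor cache, analogous to SetMdMap/GetMdMap above.
// MemoryDesc is a stand-in for mkldnn::memory::desc so this builds without MKL-DNN.
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

struct MemoryDesc {   // placeholder for mkldnn::memory::desc
  int format = 0;
};

using MdCache = std::unordered_map<std::string, std::shared_ptr<MemoryDesc>>;

// Insert or overwrite the descriptor stored under `name`.
void SetMd(MdCache& cache, const std::string& name,
           std::shared_ptr<MemoryDesc> md) {
  cache[name] = md;  // operator[] creates the entry or updates an existing one
}

// Return the cached descriptor, or nullptr when the key is absent
// (the patch uses this null check to decide whether descriptors can be reused).
std::shared_ptr<MemoryDesc> GetMd(const MdCache& cache, const std::string& name) {
  auto it = cache.find(name);
  return it != cache.end() ? it->second : nullptr;
}

int main() {
  MdCache cache;
  const std::string key = "conv1@user_src_md";  // hypothetical per-op key

  bool reuse = GetMd(cache, key) != nullptr;    // first call: cache miss
  if (!reuse) {
    SetMd(cache, key, std::make_shared<MemoryDesc>());
  }
  std::cout << "cached: " << (GetMd(cache, key) != nullptr) << "\n";  // prints 1
  return 0;
}

In the patch itself the map is a function-local static, so it would be shared by every invocation of the kernel; the per-operator key prefix (e.g. key + "@user_src_md") is what keeps entries from colliding across convolution instances.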