diff --git a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
index 18680fe678b5d4d14ccf19fc46bc6ce77d1e3e92..a7f6bc512ffcedf135db6bf285986fb6192be083 100644
--- a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
@@ -284,7 +284,13 @@ class FCMKLDNNHandler
 
   std::shared_ptr<dnnl::memory> AcquireWeightsMemoryWithReorder(
       const phi::DenseTensor* weights, const std::vector<float>& scale_data) {
-    const std::string weights_key = this->memory_key_ + "@weights";
+    const std::string weights_base_key = this->memory_key_ + "@weights";
+    std::string weights_key;
+    weights_key.reserve(128);
+    weights_key = phi::funcs::ExtendKeyWithThreadInfoIfNeeded(
+        dev_ctx_,
+        phi::funcs::CreateKey(
+            dev_ctx_, weights_base_key, this->fwd_pd_->weights_desc()));
     auto memory_p = std::static_pointer_cast<dnnl::memory>(
         this->dev_ctx_.GetBlob(weights_key));
 
@@ -410,7 +416,8 @@ class FCMKLDNNKernel : public framework::OpKernel<T_in> {
         phi::funcs::CreateKey(dev_ctx,
                               ctx.InputName("Input"),
                               ctx.InputName("W"),
-                              phi::vectorize(x->dims())));
+                              phi::vectorize(x->dims()),
+                              phi::vectorize(weights->dims())));
 
     auto inner_product_cache =
         std::static_pointer_cast<InnerProductCache>(dev_ctx.GetBlob(cache_key));
diff --git a/paddle/phi/backends/onednn/onednn_helper.h b/paddle/phi/backends/onednn/onednn_helper.h
index 84e36a26ca48714adf8262322bfefee9eb867f22..1d61004b36161ffdc3f16a134a4c2d7ff52dde9c 100644
--- a/paddle/phi/backends/onednn/onednn_helper.h
+++ b/paddle/phi/backends/onednn/onednn_helper.h
@@ -154,6 +154,12 @@ inline void AppendKey(std::string* key, const T& num) {
   key->append(std::to_string(num));
 }
 
+template <>
+inline void AppendKey(std::string* key,
+                      const dnnl::memory::format_kind& format) {
+  key->append(std::to_string(static_cast<int>(format)));
+}
+
 template <>
 inline void AppendKey(std::string* key,
                       const dnnl::memory::format_tag& format) {
@@ -171,6 +177,25 @@ inline void AppendKey(std::string* key, const dnnl::algorithm& algorithm) {
   key->append(std::to_string(static_cast<size_t>(algorithm)));
 }
 
+template <>
+inline void AppendKey(std::string* key, const dnnl::memory::dims& dims) {
+  for (size_t i = 0; i < dims.size(); i++) {
+    AppendKey(key, static_cast<int64_t>(dims[i]));
+  }
+}
+
+template <>
+inline void AppendKey(std::string* key, const dnnl::memory::desc& md) {
+  AppendKey(key, md.get_dims());
+  AppendKey(key, md.get_data_type());
+  AppendKey(key, md.get_format_kind());
+  AppendKey(key, md.get_inner_blks());
+  AppendKey(key, md.get_inner_idxs());
+  AppendKey(key, md.get_inner_nblks());
+  AppendKey(key, md.get_padded_dims());
+  AppendKey(key, md.get_strides());
+}
+
 template <>
 inline void AppendKey(std::string* key,
                       const dnnl::normalization_flags& flags) {