diff --git a/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
index 114daaecb59369658191b382a0471d30448a7462..78862a5559dab78dbce2af48c7b10febed39287a 100644
--- a/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
@@ -144,6 +144,7 @@ class ConcatMKLDNNOpKernel : public paddle::framework::OpKernel {
         platform::errors::InvalidArgument(
             "The axis is expected to be in range of [%d, %d), but got %d",
             -rank, rank, concat_axis));
+    platform::MKLDNNDeviceContext::tls().log_lib_version();
     if (concat_axis < 0) {
       concat_axis = concat_axis + rank;
     }
diff --git a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
index 6f0987deeabf50de1cd91a5f7fc0a461b35fa1f6..9e0bdeee6b38b127e96fdbe0015a228ac720750b 100644
--- a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
@@ -561,6 +561,7 @@ class FCMKLDNNOpKernel : public framework::OpKernel {
     PADDLE_ENFORCE_EQ(
         platform::is_cpu_place(ctx.GetPlace()), true,
         platform::errors::PreconditionNotMet("FC MKL-DNN must use CPUPlace."));
+    platform::MKLDNNDeviceContext::tls().log_lib_version();
     auto input = ctx.Input("Input");
     auto w = ctx.Input("W");
     auto bias = ctx.Input("Bias");
diff --git a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
index 1f2216cbed2b256b15df21956da6741affd8b296..9f1fcf5bd0fbeb49a114c37c6c39bd1612a3359e 100644
--- a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
@@ -379,6 +379,7 @@ class DNNLMatMulKernel : public framework::OpKernel {
           platform::errors::Unimplemented(
               "DNNL matmul doesn't support multiple heads."));
     }
+    platform::MKLDNNDeviceContext::tls().log_lib_version();
     ExecuteMatMul(ctx);
   }
 };
diff --git a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
index 4f0b7cab47efe33fbb297542975590b44f62c75a..5abb7bf406a979bd9aedf4bf7e7d713b82dbba69 100644
--- a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
@@ -342,6 +342,7 @@ class MulMKLDNNKernel : public framework::OpKernel {
     PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
                       paddle::platform::errors::PreconditionNotMet(
                           "Operator DNNL Mul must use CPUPlace"));
+    platform::MKLDNNDeviceContext::tls().log_lib_version();
     auto &dev_ctx = ctx.template device_context();
     const auto &mkldnn_engine = dev_ctx.GetEngine();
diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc
index 4922fbeacc619eaa6b3e189ff81b292a9e035ea0..297466e8e5a624359406c5551941ceaa73e5c5c5 100644
--- a/paddle/fluid/platform/device_context.cc
+++ b/paddle/fluid/platform/device_context.cc
@@ -466,6 +466,15 @@ MKLDNNDeviceContextThreadLocals::Body::get_cur_paddle_data_layout(void) {
   return cur_paddle_data_layout;
 }
 
+void MKLDNNDeviceContextThreadLocals::Body::log_lib_version(void) {
+  if (!said_once) {
+    said_once = true;
+    auto dv = dnnl::version();
+    LOG(INFO) << "oneDNN v" << dv->major << "." << dv->minor << "."
+              << dv->patch;
+  }
+}
+
 void MKLDNNDeviceContext::ResetBlobMap() {
   std::lock_guard lock(*p_mutex_);
   if (!block_next_cache_clearing_) {
diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h
index 620e2d41c13aff8c70b0b96487cf8d541699d64b..de4c4a8363552d4ddc61de31052c997fc76a39c8 100644
--- a/paddle/fluid/platform/device_context.h
+++ b/paddle/fluid/platform/device_context.h
@@ -466,6 +466,7 @@ class MKLDNNDeviceContextThreadLocals {
   typedef MKLDNNDeviceContextThreadLocals self;
   struct Body {
+    bool said_once = false;
     size_t cur_mkldnn_session_id;
     // Current data input shape string.
     // - For fixed-shape, it's a null string in default.
@@ -485,6 +486,7 @@ class MKLDNNDeviceContextThreadLocals {
     void set_cur_input_shape_cache_capacity(int input_shape_cache_capacity);
     void set_cur_paddle_data_layout(framework::DataLayout dl);
     framework::DataLayout get_cur_paddle_data_layout(void);
+    void log_lib_version(void);
   };
   MKLDNNDeviceContextThreadLocals() = default;
   MKLDNNDeviceContextThreadLocals(const MKLDNNDeviceContextThreadLocals& c) =
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index 90266f6c2099b9667a78f9ca6d29c7ceec2a74bb..96f8fd29c7964c0a21156413f27dcafea9a1eaea 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -45,6 +45,7 @@ class MKLDNNHandlerT {
         key_common_(base_key),
         fwd_pd_(nullptr),
         bwd_pd_(nullptr) {
+    platform::MKLDNNDeviceContext::tls().log_lib_version();
     if (platform::MKLDNNDeviceContext::tls().get_cur_mkldnn_session_id() !=
         platform::MKLDNNDeviceContextThreadLocals::kMKLDNNSessionID_Default) {
       key_ = key_common_;
@@ -311,6 +312,7 @@ class MKLDNNHandler {
   MKLDNNHandler(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                 const std::string& base_key)
       : dev_ctx_(dev_ctx), engine_(engine), key_common_(base_key) {
+    platform::MKLDNNDeviceContext::tls().log_lib_version();
     if (platform::MKLDNNDeviceContext::tls().get_cur_mkldnn_session_id() !=
         platform::MKLDNNDeviceContextThreadLocals::kMKLDNNSessionID_Default) {
       key_ = key_common_;
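
All of the operator and handler hunks above funnel into the same new helper: a per-thread `said_once` flag in `MKLDNNDeviceContextThreadLocals::Body` guards a single `LOG(INFO)` line reporting the oneDNN version obtained from `dnnl::version()`, so the library version appears once per thread in the log no matter how many oneDNN kernels run. The self-contained sketch below illustrates that once-per-thread pattern only; `VersionInfo`, `get_lib_version`, and `log_lib_version_once` are illustrative stand-ins rather than Paddle or oneDNN APIs, and a function-local `thread_local` flag takes the place of Paddle's thread-local `Body` member.

```cpp
#include <iostream>
#include <thread>

// Stand-in for dnnl_version_t; the patch reads major/minor/patch from
// dnnl::version(). The values below are placeholders.
struct VersionInfo {
  int major;
  int minor;
  int patch;
};

VersionInfo get_lib_version() { return {1, 6, 0}; }

// Mirrors the said_once guard added in the patch: each thread emits the
// version line at most once, however many kernels call the helper.
void log_lib_version_once() {
  thread_local bool said_once = false;
  if (!said_once) {
    said_once = true;
    const VersionInfo v = get_lib_version();
    std::cout << "oneDNN v" << v.major << "." << v.minor << "." << v.patch
              << "\n";
  }
}

int main() {
  log_lib_version_once();               // prints once on the main thread
  log_lib_version_once();               // suppressed: already logged here
  std::thread t(log_lib_version_once);  // prints once more on a new thread
  t.join();
  return 0;
}
```

Run standalone, this prints the version line exactly twice: once for the main thread and once for the spawned thread, which is the behaviour the patch gives every MKL-DNN kernel launch site without flooding the log.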