From 676995c86cb4b49f9a41c7a32c5e054b16201753 Mon Sep 17 00:00:00 2001
From: Yihua Xu
Date: Fri, 22 Feb 2019 11:36:19 +0800
Subject: [PATCH] Optimize Gelu with MKL Erf function (#15770)

* Optimize the gelu operator

* Set up the low-accuracy mode of the MKL Erf function.
test=develop

* Only enable the MKLML Erf function when the OS is Linux

* Use the special mklml version that includes the vmsErf function to verify
the gelu MKL kernel.
test=develop

* Add the CUDA macro to avoid NVCC's compile issue.
test=develop

* Add the TODO comments for the mklml library modification.
test=develop

* Clean code
test=develop

* Add the comment about the macro for the NVCC compiler.
test=develop
---
 cmake/external/mklml.cmake              |  6 ++++--
 paddle/fluid/operators/activation_op.h  | 22 ++++++++++++++++++++++
 paddle/fluid/operators/math/blas.h      |  8 ++++++++
 paddle/fluid/operators/math/blas_impl.h | 23 +++++++++++++++++++++++
 paddle/fluid/platform/dynload/mklml.h   |  2 ++
 5 files changed, 59 insertions(+), 2 deletions(-)

diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake
index 54826cedb8..32a9368a9f 100644
--- a/cmake/external/mklml.cmake
+++ b/cmake/external/mklml.cmake
@@ -39,8 +39,10 @@ IF(WIN32)
   SET(MKLML_IOMP_LIB    ${MKLML_LIB_DIR}/libiomp5md.lib)
   SET(MKLML_SHARED_LIB  ${MKLML_LIB_DIR}/mklml.dll)
   SET(MKLML_SHARED_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.dll)
-ELSE()
-  SET(MKLML_VER "mklml_lnx_${TIME_VERSION}" CACHE STRING "" FORCE)
+ELSE()
+  #TODO(intel-huying):
+  #  Enable the Erf function in the mklml library temporarily; it will be replaced by the official version later.
+  SET(MKLML_VER "VsErf_mklml_lnx_${TIME_VERSION}" CACHE STRING "" FORCE)
   SET(MKLML_URL "http://paddlepaddledeps.cdn.bcebos.com/${MKLML_VER}.tgz" CACHE STRING "" FORCE)
   SET(MKLML_LIB ${MKLML_LIB_DIR}/libmklml_intel.so)
   SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5.so)
diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index c7df3ea58a..e8f5530b78 100644
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -11,6 +11,7 @@ limitations under the License. */

 #pragma once
 #include <glog/logging.h>
+#include <algorithm>
 #include <string>
 #include <unordered_set>
 #include <utility>
@@ -24,6 +25,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/detail/safe_ref.h"
+#include "paddle/fluid/operators/math/blas.h"
 #include "paddle/fluid/platform/float16.h"

 #ifdef PADDLE_WITH_MKLDNN
@@ -301,8 +303,28 @@ template <typename T>
 struct GeluFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
+// Because the execution device or device context cannot be delivered here,
+// the macro for NVCC is kept.
+#if defined(PADDLE_WITH_MKLML) && !defined(_WIN32) && !defined(__APPLE__) && \
+    !defined(__OSX__) && !defined(PADDLE_WITH_CUDA)
+    auto x_data = x.data();
+    auto out_data = out.data();
+    int n = std::min(x.size(), out.size());
+
+    std::memset(out_data, 0, n * sizeof(T));
+    math::CBlas<T>::AXPY(n, static_cast<T>(M_SQRT1_2), x_data, 1, out_data, 1);
+    math::CBlas<T>::VMERF(n, out_data, out_data, VML_LA);
+    for (int i = 0; i < n; i++) {
+      out_data[i] += static_cast<T>(1);
+    }
+    math::CBlas<T>::VMUL(n, x_data, out_data, out_data);
+    for (int i = 0; i < n; i++) {
+      out_data[i] *= static_cast<T>(0.5);
+    }
+#else
     auto temp = (x * static_cast<T>(M_SQRT1_2)).erf();
     out.device(d) = x * static_cast<T>(0.5) * (static_cast<T>(1) + temp);
+#endif
   }
 };
diff --git a/paddle/fluid/operators/math/blas.h b/paddle/fluid/operators/math/blas.h
index f67f57827b..ce8109f64d 100644
--- a/paddle/fluid/operators/math/blas.h
+++ b/paddle/fluid/operators/math/blas.h
@@ -184,6 +184,9 @@ class Blas {
   template <typename T>
   void VINV(int n, const T* a, T* y) const;

+  template <typename T>
+  void VMERF(int n, const T* a, T* y, int64_t mode) const;
+
 private:
  const DeviceContext& context_;
 };
@@ -290,6 +293,11 @@ class BlasT : private Blas<DeviceContext> {
     Base()->template VINV<T>(args...);
   }

+  template <typename... ARGS>
+  void VMERF(ARGS... args) const {
+    Base()->template VMERF<T>(args...);
+  }
+
 private:
  const Blas<DeviceContext>* Base() const {
    return static_cast<const Blas<DeviceContext>*>(this);
diff --git a/paddle/fluid/operators/math/blas_impl.h b/paddle/fluid/operators/math/blas_impl.h
index 972366bc09..ba995dabec 100644
--- a/paddle/fluid/operators/math/blas_impl.h
+++ b/paddle/fluid/operators/math/blas_impl.h
@@ -123,6 +123,11 @@ struct CBlas<float> {
   static void VINV(ARGS... args) {
     platform::dynload::vsInv(args...);
   }
+
+  template <typename... ARGS>
+  static void VMERF(ARGS... args) {
+    platform::dynload::vmsErf(args...);
+  }
 };

 template <>
@@ -223,6 +228,11 @@ struct CBlas<double> {
   static void VINV(ARGS... args) {
     platform::dynload::vdInv(args...);
   }
+
+  template <typename... ARGS>
+  static void VMERF(ARGS... args) {
+    platform::dynload::vmdErf(args...);
+  }
 };

 #else
@@ -625,6 +635,19 @@ void Blas<platform::CPUDeviceContext>::VINV(int n, const T *a, T *y) const {
 #endif
 }

+template <>
+template <typename T>
+void Blas<platform::CPUDeviceContext>::VMERF(int n, const T *a, T *y,
+                                             int64_t mode) const {
+#ifdef PADDLE_WITH_MKLML
+  CBlas<T>::VMERF(n, a, y, mode);
+#else
+  for (int i = 0; i < n; ++i) {
+    y[i] = std::erf(a[i]);
+  }
+#endif
+}
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/platform/dynload/mklml.h b/paddle/fluid/platform/dynload/mklml.h
index a260cda491..a5b846f500 100644
--- a/paddle/fluid/platform/dynload/mklml.h
+++ b/paddle/fluid/platform/dynload/mklml.h
@@ -86,6 +86,8 @@ extern void* mklml_dso_handle;
   __macro(vdPowx);                \
   __macro(vsInv);                 \
   __macro(vdInv);                 \
+  __macro(vmsErf);                \
+  __macro(vmdErf);                \
   __macro(MKL_Set_Num_Threads)

 MKLML_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_MKLML_WRAP);
--
GitLab
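Note on the computation: the MKLML branch in GeluFunctor evaluates
gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))) as a sequence of vectorized calls:
zero the output buffer, AXPY to accumulate x scaled by 1/sqrt(2), VMERF for
the elementwise erf (VML_LA selects MKL's low-accuracy VML mode, the
"low-accuracy mode" mentioned in the commit log), an add of 1, VMUL against
x, and a final scale by 0.5. The standalone sketch below (plain C++11, no
MKL; the helper names gelu_staged and gelu_reference are illustrative and
not part of the patch) mirrors that staging against the closed form so the
two paths can be compared:

// A minimal standalone sketch (plain C++11, no MKL) of the staged Gelu
// computation used in the MKLML branch above. Helper names are illustrative.
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

namespace {

// 1 / sqrt(2); the patch uses the POSIX constant M_SQRT1_2 for this value.
const double kSqrt1_2 = 0.70710678118654752440;

// Closed form, matching the Eigen fallback branch:
// gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).
double gelu_reference(double x) {
  return 0.5 * x * (1.0 + std::erf(x * kSqrt1_2));
}

// Staged form: one scalar loop per vectorized call in the MKLML branch.
std::vector<double> gelu_staged(const std::vector<double>& x) {
  const std::size_t n = x.size();
  std::vector<double> out(n, 0.0);                                // memset to 0
  for (std::size_t i = 0; i < n; ++i) out[i] += kSqrt1_2 * x[i];  // AXPY
  for (std::size_t i = 0; i < n; ++i) out[i] = std::erf(out[i]);  // VMERF
  for (std::size_t i = 0; i < n; ++i) out[i] += 1.0;              // add 1
  for (std::size_t i = 0; i < n; ++i) out[i] *= x[i];             // VMUL
  for (std::size_t i = 0; i < n; ++i) out[i] *= 0.5;              // scale by 0.5
  return out;
}

}  // namespace

int main() {
  const std::vector<double> x = {-2.0, -0.5, 0.0, 0.5, 2.0};
  const std::vector<double> staged = gelu_staged(x);
  for (std::size_t i = 0; i < x.size(); ++i) {
    std::printf("x=% .2f  staged=% .6f  reference=% .6f\n", x[i], staged[i],
                gelu_reference(x[i]));
  }
  return 0;
}

Since this sketch uses std::erf in both paths, the staged and reference
columns agree to rounding error; in the patch itself, VML_LA trades a little
erf accuracy for speed on large tensors.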