PaddlePaddle / Paddle

Commit
2d0ff6a3
Authored on Sep 28, 2018 by tensor-tang
add vexp and unit test
Parent: b3c63f40
Showing 6 changed files with 318 additions and 121 deletions (+318, -121).
paddle/fluid/operators/math/CMakeLists.txt       +2   -1
paddle/fluid/operators/math/jit_kernel.h         +6   -0
paddle/fluid/operators/math/jit_kernel_blas.cc   +42  -116
paddle/fluid/operators/math/jit_kernel_exp.cc    +115 -0
paddle/fluid/operators/math/jit_kernel_macro.h   +94  -0
paddle/fluid/operators/math/jit_kernel_test.cc   +59  -4
paddle/fluid/operators/math/CMakeLists.txt

@@ -76,5 +76,6 @@ if(WITH_GPU)
 endif()
 cc_test(concat_test SRCS concat_test.cc DEPS concat)
 cc_test(cpu_vec_test SRCS cpu_vec_test.cc DEPS blas cpu_info)
-cc_library(jit_kernel SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_lstm.cc DEPS cpu_info cblas)
+cc_library(jit_kernel_exp SRCS jit_kernel_exp.cc DEPS cpu_info cblas activation_functions)
+cc_library(jit_kernel SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_lstm.cc DEPS cpu_info cblas jit_kernel_exp)
 cc_test(jit_kernel_test SRCS jit_kernel_test.cc DEPS jit_kernel)
paddle/fluid/operators/math/jit_kernel.h

@@ -82,6 +82,12 @@ class VScalKernel : public Kernel {
   virtual void Compute(const int n, const T a, T* x) = 0;
 };
 
+template <typename T>
+class VExpKernel : public Kernel {
+ public:
+  virtual void Compute(const int n, const T* x, T* y) = 0;
+};
+
 template <typename T>
 class LSTMKernel : public Kernel {
  public:
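For orientation, the new VExpKernel interface is consumed through KernelPool::Get, exactly as the updated unit test at the bottom of this commit does. A minimal usage sketch, assuming the headers and the jit_kernel build target from this commit:

#include <vector>
#include "paddle/fluid/operators/math/jit_kernel.h"

namespace jit = paddle::operators::math::jitkernel;

// Minimal sketch: fetch a float VExp kernel specialized for length d,
// then compute y[i] = exp(x[i]) for i in [0, d).
void RunVExp(const std::vector<float>& x, std::vector<float>* y) {
  const int d = static_cast<int>(x.size());
  // Get() caches one kernel instance per (name, dtype, length) key.
  const auto& ker =
      jit::KernelPool::Instance().template Get<jit::VExpKernel<float>>(d);
  ker->Compute(d, x.data(), y->data());
}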
paddle/fluid/operators/math/jit_kernel_blas.cc

@@ -14,6 +14,7 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/math/jit_kernel.h"
 #include <string>
+#include "paddle/fluid/operators/math/jit_kernel_macro.h"
 #ifdef PADDLE_WITH_MKLML
 #include "paddle/fluid/platform/dynload/mklml.h"
 #endif
@@ -29,71 +30,6 @@ namespace jitkernel {
 
 namespace jit = platform::jit;
 
-#define NEW_IMPL(src, t, isa, k)         \
-  p = std::dynamic_pointer_cast<src<t>>( \
-      std::make_shared<src##Impl<t, isa, k>>())
-
-#define SEARCH_BLOCK(src, t, isa)                             \
-  if (d < AVX_FLOAT_BLOCK) {                                  \
-    NEW_IMPL(src, t, isa, kLT8);                              \
-  } else if (d == AVX_FLOAT_BLOCK) {                          \
-    NEW_IMPL(src, t, isa, kEQ8);                              \
-  } else if (d > AVX_FLOAT_BLOCK && d < AVX512_FLOAT_BLOCK) { \
-    NEW_IMPL(src, t, isa, kGT8LT16);                          \
-  } else if (d == AVX512_FLOAT_BLOCK) {                       \
-    NEW_IMPL(src, t, isa, kEQ16);                             \
-  } else {                                                    \
-    NEW_IMPL(src, t, isa, kGT16);                             \
-  }
-
-#define SEARCH_ISA_BLOCK(src, t)        \
-  if (jit::MayIUse(jit::avx512f)) {     \
-    SEARCH_BLOCK(src, t, jit::avx512f); \
-  } else if (jit::MayIUse(jit::avx2)) { \
-    SEARCH_BLOCK(src, t, jit::avx2);    \
-  } else if (jit::MayIUse(jit::avx)) {  \
-    SEARCH_BLOCK(src, t, jit::avx);     \
-  } else {                              \
-    SEARCH_BLOCK(src, t, jit::isa_any); \
-  }
-
-#define DEFINE_WITH_DTYPE(ker_key, ker_class, ker_dtype, dtype_key)        \
-  template <>                                                              \
-  const std::shared_ptr<ker_class<ker_dtype>>                              \
-  KernelPool::Get<ker_class<ker_dtype>>(int d) {                           \
-    std::string key = #ker_key #dtype_key + std::to_string(d);             \
-    if (kers_.find(key) == kers_.end()) {                                  \
-      std::shared_ptr<ker_class<ker_dtype>> p;                             \
-      SEARCH_ISA_BLOCK(ker_class, ker_dtype);                              \
-      kers_.insert({key, std::dynamic_pointer_cast<Kernel>(p)});           \
-      return p;                                                            \
-    }                                                                      \
-    return std::dynamic_pointer_cast<ker_class<ker_dtype>>(kers_.at(key)); \
-  }
-
-#define REGISTER_BLAS_JITKERNEL(ker_key, ker_class) \
-  DEFINE_WITH_DTYPE(ker_key, ker_class, float, f);  \
-  DEFINE_WITH_DTYPE(ker_key, ker_class, double, d)
-
-#define FOR_EACH_ISA(macro_, block) \
-  macro_(jit::avx512f, block);      \
-  macro_(jit::avx2, block);         \
-  macro_(jit::avx, block);          \
-  macro_(jit::isa_any, block)
-
-#define FOR_EACH_BLOCK(macro_, isa) \
-  macro_(isa, kLT8);                \
-  macro_(isa, kEQ8);                \
-  macro_(isa, kGT8LT16);            \
-  macro_(isa, kEQ16);               \
-  macro_(isa, kGT16)
-
-#define FOR_EACH_ISA_BLOCK(macro_)      \
-  FOR_EACH_BLOCK(macro_, jit::avx512f); \
-  FOR_EACH_BLOCK(macro_, jit::avx2);    \
-  FOR_EACH_BLOCK(macro_, jit::avx);     \
-  FOR_EACH_BLOCK(macro_, jit::isa_any)
-
 /* VMUL JitKernel */
 template <typename T, platform::jit::cpu_isa_t isa, jit_block>
 class VMulKernelImpl : public VMulKernel<T> {
@@ -106,25 +42,25 @@ class VMulKernelImpl : public VMulKernel<T> {
 };
 
 #ifdef PADDLE_WITH_MKLML
-#define VMUL_MKL_FLOAT(isa, block)                                              \
+#define MKL_FLOAT(isa, block)                                                   \
   template <>                                                                   \
   void VMulKernelImpl<float, isa, block>::Compute(const int n, const float* x,  \
                                                   const float* y, float* z) {   \
     platform::dynload::vsMul(n, x, y, z);                                       \
   }
 
-#define VMUL_MKL_DOUBLE(isa, block)                              \
+#define MKL_DOUBLE(isa, block)                                   \
   template <>                                                    \
   void VMulKernelImpl<double, isa, block>::Compute(              \
       const int n, const double* x, const double* y, double* z) { \
     platform::dynload::vdMul(n, x, y, z);                        \
   }
 
-FOR_EACH_ISA(VMUL_MKL_FLOAT, kGT16);
-FOR_EACH_ISA_BLOCK(VMUL_MKL_DOUBLE);
+FOR_EACH_ISA(MKL_FLOAT, kGT16);
+FOR_EACH_ISA_BLOCK(MKL_DOUBLE);
 #endif
 
-#define VMUL_INTRI8_FLOAT(isa)                                                  \
+#define INTRI8_FLOAT(isa)                                                       \
   template <>                                                                   \
   void VMulKernelImpl<float, isa, kEQ8>::Compute(const int n, const float* x,   \
                                                  const float* y, float* z) {    \
@@ -137,19 +73,18 @@ FOR_EACH_ISA_BLOCK(VMUL_MKL_DOUBLE);
 
 // avx > for > mkl
 #ifdef __AVX__
-VMUL_INTRI8_FLOAT(jit::avx);
+INTRI8_FLOAT(jit::avx);
 #endif
 #ifdef __AVX2__
-VMUL_INTRI8_FLOAT(jit::avx2);
+INTRI8_FLOAT(jit::avx2);
 #endif
 #ifdef __AVX512F__
-VMUL_INTRI8_FLOAT(jit::avx512f);
+INTRI8_FLOAT(jit::avx512f);
 #endif
 // TODO(TJ): eq16 test and complete avx512
 
-#undef VMUL_INTRI8_FLOAT
-#undef VMUL_MKL_FLOAT
-#undef VMUL_MKL_DOUBLE
+#undef INTRI8_FLOAT
+#undef MKL_FLOAT
+#undef MKL_DOUBLE
 
 /* VADD JitKernel */
 template <typename T, platform::jit::cpu_isa_t isa, jit_block>
@@ -163,25 +98,25 @@ class VAddKernelImpl : public VAddKernel<T> {
 };
 
 #ifdef PADDLE_WITH_MKLML
-#define VADD_MKL_FLOAT(isa, block)                                              \
+#define MKL_FLOAT(isa, block)                                                   \
   template <>                                                                   \
   void VAddKernelImpl<float, isa, block>::Compute(const int n, const float* x,  \
                                                   const float* y, float* z) {   \
     platform::dynload::vsAdd(n, x, y, z);                                       \
   }
 
-#define VADD_MKL_DOUBLE(isa, block)                              \
+#define MKL_DOUBLE(isa, block)                                   \
   template <>                                                    \
   void VAddKernelImpl<double, isa, block>::Compute(              \
       const int n, const double* x, const double* y, double* z) { \
     platform::dynload::vdAdd(n, x, y, z);                        \
   }
 
-FOR_EACH_ISA(VADD_MKL_FLOAT, kGT16);
-FOR_EACH_ISA_BLOCK(VADD_MKL_DOUBLE);
+FOR_EACH_ISA(MKL_FLOAT, kGT16);
+FOR_EACH_ISA_BLOCK(MKL_DOUBLE);
 #endif
 
-#define VADD_INTRI8_FLOAT(isa)                                                  \
+#define INTRI8_FLOAT(isa)                                                       \
   template <>                                                                   \
   void VAddKernelImpl<float, isa, kEQ8>::Compute(const int n, const float* x,   \
                                                  const float* y, float* z) {    \
@@ -192,19 +127,19 @@ FOR_EACH_ISA_BLOCK(VADD_MKL_DOUBLE);
     _mm256_storeu_ps(z, tmpx);                                                  \
   }
 
 #ifdef __AVX__
-VADD_INTRI8_FLOAT(jit::avx);
+INTRI8_FLOAT(jit::avx);
 #endif
 #ifdef __AVX2__
-VADD_INTRI8_FLOAT(jit::avx2);
+INTRI8_FLOAT(jit::avx2);
 #endif
 #ifdef __AVX512F__
-VADD_INTRI8_FLOAT(jit::avx512f);
+INTRI8_FLOAT(jit::avx512f);
 #endif
 // TODO(TJ): eq16 test and complete avx512
 
-#undef VADD_INTRI8_FLOAT
-#undef VADD_MKL_FLOAT
-#undef VADD_MKL_DOUBLE
+#undef INTRI8_FLOAT
+#undef MKL_FLOAT
+#undef MKL_DOUBLE
 
 /* VSCAL JitKernel */
 template <typename T, platform::jit::cpu_isa_t isa, jit_block>
@@ -223,25 +158,25 @@ class VScalKernelImpl : public VScalKernel<T> {
 };
 
 #ifdef PADDLE_WITH_MKLML
-#define VSCAL_MKL_FLOAT(isa, block)                                             \
+#define MKL_FLOAT(isa, block)                                                   \
   template <>                                                                   \
   void VScalKernelImpl<float, isa, block>::Compute(const int n, const float a,  \
                                                    float* x) {                  \
     platform::dynload::cblas_sscal(n, a, x, 1);                                 \
   }
 
-#define VSCAL_MKL_DOUBLE(isa, block)                 \
+#define MKL_DOUBLE(isa, block)                       \
   template <>                                        \
   void VScalKernelImpl<double, isa, block>::Compute( \
       const int n, const double a, double* x) {      \
     platform::dynload::cblas_dscal(n, a, x, 1);      \
   }
 
-FOR_EACH_ISA(VSCAL_MKL_FLOAT, kGT16);
-FOR_EACH_ISA_BLOCK(VSCAL_MKL_DOUBLE);
+FOR_EACH_ISA(MKL_FLOAT, kGT16);
+FOR_EACH_ISA_BLOCK(MKL_DOUBLE);
 #endif
 
-#define VSCAL_INTRI8(isa)                                                       \
+#define INTRI8_FLOAT(isa)                                                       \
   template <>                                                                   \
   void VScalKernelImpl<float, isa, kEQ8>::Compute(const int n, const float a,   \
                                                   const float* x, float* y) {   \
@@ -251,7 +186,7 @@ FOR_EACH_ISA_BLOCK(VSCAL_MKL_DOUBLE);
     tmp = _mm256_mul_ps(tmp, scalar);                                           \
     _mm256_storeu_ps(y, tmp);                                                   \
   }
 
-#define VSCAL_INTRI8_INPLACE(isa)                                               \
+#define INTRI8_INPLACE_FLOAT(isa)                                               \
   template <>                                                                   \
   void VScalKernelImpl<float, isa, kEQ8>::Compute(const int n, const float a,   \
                                                   float* x) {                   \
@@ -263,36 +198,27 @@ FOR_EACH_ISA_BLOCK(VSCAL_MKL_DOUBLE);
   }
 
 #ifdef __AVX__
-VSCAL_INTRI8(jit::avx);
-VSCAL_INTRI8_INPLACE(jit::avx);
+INTRI8_FLOAT(jit::avx);
+INTRI8_INPLACE_FLOAT(jit::avx);
 #endif
 #ifdef __AVX2__
-VSCAL_INTRI8(jit::avx2);
-VSCAL_INTRI8_INPLACE(jit::avx2);
+INTRI8_FLOAT(jit::avx2);
+INTRI8_INPLACE_FLOAT(jit::avx2);
 #endif
 #ifdef __AVX512F__
-VSCAL_INTRI8(jit::avx512f);
-VSCAL_INTRI8_INPLACE(jit::avx512f);
+INTRI8_FLOAT(jit::avx512f);
+INTRI8_INPLACE_FLOAT(jit::avx512f);
 #endif
 // TODO(TJ): eq16 test and complete avx512
 
-#undef VSCAL_INTRI8
-#undef VSCAL_INTRI8_INPLACE
-#undef VSCAL_MKL_FLOAT
-#undef VSCAL_MKL_DOUBLE
+#undef INTRI8_FLOAT
+#undef INTRI8_INPLACE_FLOAT
+#undef MKL_FLOAT
+#undef MKL_DOUBLE
 
-REGISTER_BLAS_JITKERNEL(vmul, VMulKernel);
-REGISTER_BLAS_JITKERNEL(vadd, VAddKernel);
-REGISTER_BLAS_JITKERNEL(vscal, VScalKernel);
-
-#undef FOR_EACH_ISA
-#undef FOR_EACH_BLOCK
-#undef FOR_EACH_ISA_BLOCK
-#undef REGISTER_BLAS_JITKERNEL
-#undef DEFINE_WITH_DTYPE
-#undef SEARCH_ISA_BLOCK
-#undef SEARCH_BLOCK
-#undef NEW_IMPL
+REGISTER_JITKERNEL(vmul, VMulKernel);
+REGISTER_JITKERNEL(vadd, VAddKernel);
+REGISTER_JITKERNEL(vscal, VScalKernel);
 
 }  // namespace jitkernel
 }  // namespace math
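To make the macro plumbing concrete, the sketch below shows roughly what one stamped-out specialization looks like after preprocessing; FOR_EACH_ISA(MKL_FLOAT, kGT16) emits one such definition per ISA tag, so vectors longer than 16 floats are routed to MKL's vsMul on any CPU. This is an approximate expansion, not code from the commit:

// Approximate expansion of MKL_FLOAT(jit::avx, kGT16) for the VMul kernel
// (inside namespace paddle::operators::math::jitkernel):
template <>
void VMulKernelImpl<float, jit::avx, kGT16>::Compute(const int n,
                                                     const float* x,
                                                     const float* y,
                                                     float* z) {
  platform::dynload::vsMul(n, x, y, z);  // MKL elementwise multiply
}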
paddle/fluid/operators/math/jit_kernel_exp.cc (new file, mode 100644)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/jit_kernel.h"
#include <string>
#include "paddle/fluid/operators/math/jit_kernel_macro.h"
#ifdef PADDLE_WITH_MKLML
#include "paddle/fluid/platform/dynload/mklml.h"
#endif

#ifdef __AVX__
#include <immintrin.h>
#endif

namespace paddle {
namespace operators {
namespace math {

#ifdef __AVX__
namespace detail {
__m256 Exp(__m256 a);
}  // namespace detail
#endif

namespace jitkernel {
namespace jit = platform::jit;

/* VExp JitKernel */
template <typename T, jit::cpu_isa_t isa, jit_block>
class VExpKernelImpl : public VExpKernel<T> {
 public:
  void Compute(const int n, const T* x, T* y) override {
    for (int i = 0; i < n; ++i) {
      y[i] = std::exp(x[i]);
    }
  }
};

#ifdef PADDLE_WITH_MKLML
#define MKL_FLOAT(isa, block)                                                   \
  template <>                                                                   \
  void VExpKernelImpl<float, isa, block>::Compute(const int n, const float* x,  \
                                                  float* y) {                   \
    platform::dynload::vsExp(n, x, y);                                          \
  }

#define MKL_DOUBLE(isa, block)                      \
  template <>                                       \
  void VExpKernelImpl<double, isa, block>::Compute( \
      const int n, const double* x, double* y) {    \
    platform::dynload::vdExp(n, x, y);              \
  }
FOR_EACH_ISA(MKL_FLOAT, kLT8);
FOR_EACH_ISA(MKL_FLOAT, kGT8LT16);
FOR_EACH_ISA(MKL_FLOAT, kGT16);
FOR_EACH_ISA_BLOCK(MKL_DOUBLE);
#endif

#define INTRI8_FLOAT(isa)                                                       \
  template <>                                                                   \
  void VExpKernelImpl<float, isa, kEQ8>::Compute(const int n, const float* x,   \
                                                 float* y) {                    \
    __m256 tmp = _mm256_loadu_ps(x);                                            \
    _mm256_storeu_ps(y, detail::Exp(tmp));                                      \
  }

#define INTRI16_FLOAT(isa)                                                      \
  template <>                                                                   \
  void VExpKernelImpl<float, isa, kEQ16>::Compute(const int n, const float* x,  \
                                                  float* y) {                   \
    __m256 tmp0 = _mm256_loadu_ps(x);                                           \
    __m256 tmp1 = _mm256_loadu_ps(x + 8);                                       \
    tmp0 = detail::Exp(tmp0);                                                   \
    tmp1 = detail::Exp(tmp1);                                                   \
    _mm256_storeu_ps(y, tmp0);                                                  \
    _mm256_storeu_ps(y + 8, tmp1);                                              \
  }

#ifdef __AVX__
INTRI8_FLOAT(jit::avx);
INTRI16_FLOAT(jit::avx);
#endif
#ifdef __AVX2__
INTRI8_FLOAT(jit::avx2);
INTRI16_FLOAT(jit::avx2);
#endif
#ifdef __AVX512F__
INTRI8_FLOAT(jit::avx512f);
INTRI16_FLOAT(jit::avx512f);
#endif
// TODO(TJ): eq16 test and complete avx512

#undef INTRI8_FLOAT
#undef INTRI16_FLOAT
#undef MKL_FLOAT
#undef MKL_DOUBLE

REGISTER_JITKERNEL(vexp, VExpKernel);

}  // namespace jitkernel
}  // namespace math
}  // namespace operators
}  // namespace paddle
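Coverage note: the scalar std::exp loop is the generic fallback, the MKL specializations cover the kLT8, kGT8LT16, and kGT16 float blocks plus every double block, and the AVX paths handle exactly 8 or 16 floats. De-macroed, the kEQ8 path reduces to the sketch below; the definition of detail::Exp is not in this commit, and the assumption here is that it is supplied by the activation_functions dependency added in the CMake change:

#include <immintrin.h>

namespace paddle { namespace operators { namespace math { namespace detail {
__m256 Exp(__m256 a);  // assumed to be defined in activation_functions
}}}}  // namespace paddle::operators::math::detail

// Sketch of the kEQ8 specialization: a __m256 register holds exactly
// 8 packed floats, so one unaligned load/store pair covers the vector.
void VExpEq8(const float* x, float* y) {
  __m256 tmp = _mm256_loadu_ps(x);                  // load x[0..7]
  tmp = paddle::operators::math::detail::Exp(tmp);  // elementwise exp
  _mm256_storeu_ps(y, tmp);                         // store y[0..7]
}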
paddle/fluid/operators/math/jit_kernel_macro.h (new file, mode 100644)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>
#include "paddle/fluid/platform/cpu_info.h"

namespace paddle {
namespace operators {
namespace math {
namespace jitkernel {

namespace jit = platform::jit;

#define NEW_JITKERNEL_IMPL(src, t, isa, k) \
  p = std::dynamic_pointer_cast<src<t>>(   \
      std::make_shared<src##Impl<t, isa, k>>())

#define SEARCH_BLOCK(src, t, isa)                             \
  if (d < AVX_FLOAT_BLOCK) {                                  \
    NEW_JITKERNEL_IMPL(src, t, isa, kLT8);                    \
  } else if (d == AVX_FLOAT_BLOCK) {                          \
    NEW_JITKERNEL_IMPL(src, t, isa, kEQ8);                    \
  } else if (d > AVX_FLOAT_BLOCK && d < AVX512_FLOAT_BLOCK) { \
    NEW_JITKERNEL_IMPL(src, t, isa, kGT8LT16);                \
  } else if (d == AVX512_FLOAT_BLOCK) {                       \
    NEW_JITKERNEL_IMPL(src, t, isa, kEQ16);                   \
  } else {                                                    \
    NEW_JITKERNEL_IMPL(src, t, isa, kGT16);                   \
  }

#define SEARCH_ISA_BLOCK(src, t)        \
  if (jit::MayIUse(jit::avx512f)) {     \
    SEARCH_BLOCK(src, t, jit::avx512f); \
  } else if (jit::MayIUse(jit::avx2)) { \
    SEARCH_BLOCK(src, t, jit::avx2);    \
  } else if (jit::MayIUse(jit::avx)) {  \
    SEARCH_BLOCK(src, t, jit::avx);     \
  } else {                              \
    SEARCH_BLOCK(src, t, jit::isa_any); \
  }

#define JITKERNEL_WITH_DTYPE(ker_key, ker_class, ker_dtype, dtype_key)     \
  template <>                                                              \
  const std::shared_ptr<ker_class<ker_dtype>>                              \
  KernelPool::Get<ker_class<ker_dtype>>(int d) {                           \
    std::string key = #ker_key #dtype_key + std::to_string(d);             \
    if (kers_.find(key) == kers_.end()) {                                  \
      std::shared_ptr<ker_class<ker_dtype>> p;                             \
      SEARCH_ISA_BLOCK(ker_class, ker_dtype);                              \
      kers_.insert({key, std::dynamic_pointer_cast<Kernel>(p)});           \
      return p;                                                            \
    }                                                                      \
    return std::dynamic_pointer_cast<ker_class<ker_dtype>>(kers_.at(key)); \
  }

#define REGISTER_JITKERNEL(ker_key, ker_class)        \
  JITKERNEL_WITH_DTYPE(ker_key, ker_class, float, f); \
  JITKERNEL_WITH_DTYPE(ker_key, ker_class, double, d)

#define FOR_EACH_ISA(macro_, block) \
  macro_(jit::avx512f, block);      \
  macro_(jit::avx2, block);         \
  macro_(jit::avx, block);          \
  macro_(jit::isa_any, block)

#define FOR_EACH_BLOCK(macro_, isa) \
  macro_(isa, kLT8);                \
  macro_(isa, kEQ8);                \
  macro_(isa, kGT8LT16);            \
  macro_(isa, kEQ16);               \
  macro_(isa, kGT16)

#define FOR_EACH_ISA_BLOCK(macro_)      \
  FOR_EACH_BLOCK(macro_, jit::avx512f); \
  FOR_EACH_BLOCK(macro_, jit::avx2);    \
  FOR_EACH_BLOCK(macro_, jit::avx);     \
  FOR_EACH_BLOCK(macro_, jit::isa_any)

}  // namespace jitkernel
}  // namespace math
}  // namespace operators
}  // namespace paddle
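SEARCH_ISA_BLOCK and SEARCH_BLOCK together implement a two-level dispatch: first pick the best instruction set the CPU reports via jit::MayIUse, then bucket the vector length d into one of five block tags. Written as a plain function, the length bucketing is equivalent to this self-contained sketch; the values 8 and 16 for AVX_FLOAT_BLOCK and AVX512_FLOAT_BLOCK are an assumption read off the kLT8/kEQ16 tag names (floats per 256-bit and 512-bit register):

enum Block { kLT8, kEQ8, kGT8LT16, kEQ16, kGT16 };

// Plain-function equivalent of SEARCH_BLOCK's if/else ladder.
Block SelectBlock(int d) {
  constexpr int kAvxFloatBlock = 8;      // assumed AVX_FLOAT_BLOCK
  constexpr int kAvx512FloatBlock = 16;  // assumed AVX512_FLOAT_BLOCK
  if (d < kAvxFloatBlock) return kLT8;
  if (d == kAvxFloatBlock) return kEQ8;
  if (d < kAvx512FloatBlock) return kGT8LT16;
  if (d == kAvx512FloatBlock) return kEQ16;
  return kGT16;
}

The unit test below exercises every bucket: d in {7, 8, 15, 16, 30, 128} maps to kLT8, kEQ8, kGT8LT16, kEQ16, kGT16, and kGT16 respectively.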
paddle/fluid/operators/math/jit_kernel_test.cc

@@ -14,7 +14,7 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/math/jit_kernel.h"
 #include <sys/time.h>
-#include <cstring>
+#include <cstring>  // for memcpy
 #include <string>
 #include <vector>
 #include "gflags/gflags.h"
@@ -38,17 +38,72 @@ inline double GetCurrentUS() {
 }
 
 template <typename T>
-void RandomVec(const int n, T* a) {
+void RandomVec(const int n, T* a, const T lower = static_cast<T>(-20.f),
+               const T upper = static_cast<T>(20.f)) {
   static unsigned int seed = 100;
   std::mt19937 rng(seed++);
   std::uniform_real_distribution<double> uniform_dist(0, 1);
-  const T lower = static_cast<T>(-20.f);
-  const T upper = static_cast<T>(20.f);
   for (int i = 0; i < n; ++i) {
     a[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
   }
 }
 
+void vexp_ref(const int n, const float* x, float* y) {
+  for (int i = 0; i < n; ++i) {
+    y[i] = std::exp(x[i]);
+  }
+}
+
+#ifdef PADDLE_WITH_MKLML
+void vexp_mkl(const int n, const float* x, float* y) {
+  paddle::platform::dynload::vsExp(n, x, y);
+}
+#endif
+
+TEST(JitKernel, vexp) {
+  namespace jit = paddle::operators::math::jitkernel;
+  for (int d : {7, 8, 15, 16, 30, 128}) {
+    std::vector<float> x(d);
+    std::vector<float> zref(d), ztgt(d);
+    RandomVec<float>(d, x.data(), -2.f, 2.f);
+    const auto& ker =
+        jit::KernelPool::Instance().template Get<jit::VExpKernel<float>>(d);
+    const float* x_data = x.data();
+    float* ztgt_data = ztgt.data();
+    float* zref_data = zref.data();
+    auto trefs = GetCurrentUS();
+    for (int i = 0; i < repeat; ++i) {
+      vexp_ref(d, x_data, zref_data);
+    }
+    auto trefe = GetCurrentUS();
+#ifdef PADDLE_WITH_MKLML
+    auto tmkls = GetCurrentUS();
+    for (int i = 0; i < repeat; ++i) {
+      vexp_mkl(d, x_data, zref_data);
+    }
+    auto tmkle = GetCurrentUS();
+#endif
+    auto ttgts = GetCurrentUS();
+    for (int i = 0; i < repeat; ++i) {
+      ker->Compute(d, x_data, ztgt_data);
+    }
+    auto ttgte = GetCurrentUS();
+    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
+#ifdef PADDLE_WITH_MKLML
+            << " us, mkl takes: " << (tmkle - tmkls) / repeat << " us, "
+#else
+            << " us, "
+#endif
+            << "tgt takes: " << (ttgte - ttgts) / repeat;
+    for (int i = 0; i < d; ++i) {
+      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
+    }
+  }
+}
+
 void vscal_ref(const int n, const float a, const float* x, float* y) {
   for (int i = 0; i < n; ++i) {
     y[i] = a * x[i];
...