Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle, in sync with the upstream project)
Commit b4751a34
Authored Oct 18, 2018 by tensor-tang

fix illegal instruction of rnn2

Parent: 36588b33
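Why this fixes an illegal instruction: before this change, the AVX activation helpers in jit_kernel_lstm.cc all funneled through a single detail::Exp, so one compiled exp path served every ISA; the likely failure mode behind the crash is an AVX2-compiled exp being reached on a CPU that only supports AVX. The diff below instead forward-declares detail::ExpAVX and detail::ExpAVX2, adds a jit::cpu_isa_t template parameter to AVXActImpl, and binds each instantiation to the exp kernel that matches its ISA. A minimal sketch of that compile-time binding pattern, with simplified stand-in names rather than Paddle's actual classes:

    // Each specialization is tied at compile time to an exp routine that is
    // safe for its ISA; selecting the instantiation at runtime (after a CPU
    // feature check) can then never route into wider instructions.
    #include <cmath>
    #include <cstdio>

    enum cpu_isa_t { avx, avx2 };

    // Stand-ins for detail::ExpAVX / detail::ExpAVX2.
    float ExpForAVX(float x) { return std::exp(x); }
    float ExpForAVX2(float x) { return std::exp(x); }

    template <cpu_isa_t isa>
    struct Sigmoid;  // primary template intentionally left undefined

    template <>
    struct Sigmoid<avx> {
      static float Compute(float x) { return 1.f / (1.f + ExpForAVX(-x)); }
    };

    template <>
    struct Sigmoid<avx2> {
      static float Compute(float x) { return 1.f / (1.f + ExpForAVX2(-x)); }
    };

    int main() {
      std::printf("%f\n", Sigmoid<avx>::Compute(0.f));  // prints 0.500000
    }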
Showing 2 changed files with 125 additions and 79 deletions:

  paddle/fluid/operators/math/jit_kernel_exp.cc    +3    -9
  paddle/fluid/operators/math/jit_kernel_lstm.cc   +122  -70
paddle/fluid/operators/math/jit_kernel_exp.cc
@@ -27,13 +27,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 namespace math {
-#ifdef __AVX__
-namespace detail {
-__m256 Exp(__m256 a);
-}  // namespace detail
-#endif
 namespace jitkernel {
 namespace jit = platform::jit;
@@ -205,7 +198,7 @@ __m256 ExpAVX(__m256 x) {
 #ifdef __AVX2__
 __m256 ExpAVX2(__m256 x) {
   __m256 tmp = _mm256_setzero_ps(), fx;
-  __m256 one = *reinterpret_cast<const __m256*> _ps256_one;
+  __m256 one = *reinterpret_cast<const __m256*>(_ps256_one);
   __m256i imm0;
   x = _mm256_min_ps(x, *reinterpret_cast<const __m256*>(_ps256_exp_hi));
@@ -335,7 +328,8 @@ class VSigmoidKernelImpl : public VSigmoidKernel<T> {
   template <>                                                                  \
   void VSigmoidKernelImpl<float, isa, kEQ8>::Compute(const float* x, float* y) \
       const {                                                                  \
-    /*use static const??*/ __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX); \
+    /* TODO(TJ): try to use static const*/                                     \
+    __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX);                        \
     __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN);                        \
     __m256 tmp = _mm256_loadu_ps(x);                                           \
     INTRI_SIGMOID(tmp, min, max, expisa);                                      \
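Aside from the comment cleanup, the functional change in this file is the parenthesized reinterpret_cast in ExpAVX2. For readers unfamiliar with the idiom, here is a self-contained sketch of loading a broadcast constant that way; the table definition below is only an assumption for illustration (the real _ps256_one is presumably declared through a constant-table macro):

    #include <immintrin.h>

    // A 32-byte aligned table of eight 1.0f values, reinterpreted as a
    // single __m256 vector on load.
    alignas(32) static const float _ps256_one[8] = {1.f, 1.f, 1.f, 1.f,
                                                    1.f, 1.f, 1.f, 1.f};

    static inline __m256 LoadOnes() {
      return *reinterpret_cast<const __m256*>(_ps256_one);
    }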
paddle/fluid/operators/math/jit_kernel_lstm.cc
@@ -25,13 +25,18 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 namespace math {
-#ifdef __AVX__
-namespace detail {
-__m256 Exp(__m256 a);
-}  // namespace detail
-#endif
 namespace jitkernel {
+namespace detail {
+#ifdef __AVX__
+__m256 ExpAVX(__m256 x);
+#endif
+#ifdef __AVX2__
+__m256 ExpAVX2(__m256 x);
+#endif
+}  // namespace detail
 namespace jit = platform::jit;
 
 #ifdef __AVX__
@@ -43,43 +48,72 @@ class AVXAct {
   virtual __m256 Compute(__m256 x) const = 0;
 };
 
-template <act_type type>
+template <act_type type, jit::cpu_isa_t isa>
 class AVXActImpl : public AVXAct {
  public:
   __m256 Compute(__m256 x) const override { PADDLE_THROW("Unkown type!"); }
 };
 
-template <>
-__m256 AVXActImpl<kSigmoid>::Compute(__m256 x) const {
-  __m256 ones = _mm256_set1_ps(1.0f);
-  x = _mm256_max_ps(x, _mm256_set1_ps(SIGMOID_THRESHOLD_MIN));
-  x = _mm256_min_ps(x, _mm256_set1_ps(SIGMOID_THRESHOLD_MAX));
-  x = _mm256_sub_ps(_mm256_set1_ps(0.0f), x);
-  x = detail::Exp(x);
-  x = _mm256_add_ps(ones, x);
-  return _mm256_div_ps(ones, x);
-}
-
-template <>
-__m256 AVXActImpl<kTanh>::Compute(__m256 x) const {
-  __m256 ones = _mm256_set1_ps(1.0f);
-  x = _mm256_mul_ps(_mm256_set1_ps(-2.0f), x);
-  x = _mm256_min_ps(x, _mm256_set1_ps(EXP_MAX_INPUT));
-  x = detail::Exp(x);
-  x = _mm256_add_ps(ones, x);
-  x = _mm256_div_ps(_mm256_set1_ps(2.0f), x);
-  return _mm256_sub_ps(x, ones);
-}
-
-template <>
-__m256 AVXActImpl<kRelu>::Compute(__m256 x) const {
-  return _mm256_max_ps(x, _mm256_setzero_ps());
-}
-
-template <>
-__m256 AVXActImpl<kIdentity>::Compute(__m256 x) const {
-  return x;
-}
+#define AVX_SIGMOID(isa, expisa)                                   \
+  template <>                                                      \
+  __m256 AVXActImpl<kSigmoid, isa>::Compute(__m256 x) const {      \
+    __m256 ones = _mm256_set1_ps(1.0f);                            \
+    x = _mm256_max_ps(x, _mm256_set1_ps(SIGMOID_THRESHOLD_MIN));   \
+    x = _mm256_min_ps(x, _mm256_set1_ps(SIGMOID_THRESHOLD_MAX));   \
+    x = _mm256_sub_ps(_mm256_set1_ps(0.0f), x);                    \
+    x = expisa(x);                                                 \
+    x = _mm256_add_ps(ones, x);                                    \
+    return _mm256_div_ps(ones, x);                                 \
+  }
+
+#define AVX_TANH(isa, expisa)                                      \
+  template <>                                                      \
+  __m256 AVXActImpl<kTanh, isa>::Compute(__m256 x) const {         \
+    __m256 ones = _mm256_set1_ps(1.0f);                            \
+    x = _mm256_mul_ps(_mm256_set1_ps(-2.0f), x);                   \
+    x = _mm256_min_ps(x, _mm256_set1_ps(EXP_MAX_INPUT));           \
+    x = expisa(x);                                                 \
+    x = _mm256_add_ps(ones, x);                                    \
+    x = _mm256_div_ps(_mm256_set1_ps(2.0f), x);                    \
+    return _mm256_sub_ps(x, ones);                                 \
+  }
+
+#define AVX_RELU(isa)                                              \
+  template <>                                                      \
+  __m256 AVXActImpl<kRelu, isa>::Compute(__m256 x) const {         \
+    return _mm256_max_ps(x, _mm256_setzero_ps());                  \
+  }
+
+#define AVX_IDENTITY(isa)                                          \
+  template <>                                                      \
+  __m256 AVXActImpl<kIdentity, isa>::Compute(__m256 x) const {     \
+    return x;                                                      \
+  }
+
+#define FOR_EACH_AVX_ISA(macro_) \
+  macro_(jit::avx);              \
+  macro_(jit::avx2);             \
+  macro_(jit::avx512f)
+
+FOR_EACH_AVX_ISA(AVX_RELU);
+FOR_EACH_AVX_ISA(AVX_IDENTITY);
+AVX_SIGMOID(jit::avx, detail::ExpAVX);
+AVX_TANH(jit::avx, detail::ExpAVX);
+#ifdef __AVX2__
+AVX_SIGMOID(jit::avx2, detail::ExpAVX2);
+AVX_SIGMOID(jit::avx512f, detail::ExpAVX2);
+AVX_TANH(jit::avx2, detail::ExpAVX2);
+AVX_TANH(jit::avx512f, detail::ExpAVX2);
+#endif
+#undef FOR_EACH_AVX_ISA
+#undef AVX_IDENTITY
+#undef AVX_RELU
+#undef AVX_TANH
+#undef AVX_SIGMOID
 #endif
 
 template <typename T>
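For reference, the AVX_SIGMOID and AVX_TANH bodies above evaluate the standard identities, clipping their inputs so the exp kernel stays in a safe range:

    \mathrm{sigmoid}(x) = \frac{1}{1 + e^{-\hat{x}}}, \qquad
    \hat{x} = \mathrm{clip}\!\left(x,\ \mathrm{SIGMOID\_THRESHOLD\_MIN},\ \mathrm{SIGMOID\_THRESHOLD\_MAX}\right)

    \tanh(x) = \frac{2}{1 + e^{\min(-2x,\ \mathrm{EXP\_MAX\_INPUT})}} - 1

The tanh form follows from \tanh(x) = 2\,\mathrm{sigmoid}(2x) - 1.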
@@ -119,23 +153,6 @@ class LSTMKernelImpl : public LSTMKernel<T> {
     act_cell_d_ = GetActKernel<T>(act_cell, d);
     vmul_d_ = KernelPool::Instance().template Get<VMulKernel<T>>(d);
     vadd_d_ = KernelPool::Instance().template Get<VAddKernel<T>>(d);
-#ifdef __AVX__
-    auto GetAVXAct = [&](const std::string& type) -> std::unique_ptr<AVXAct> {
-      if (type == "sigmoid") {
-        return std::unique_ptr<AVXAct>(new AVXActImpl<kSigmoid>());
-      } else if (type == "relu") {
-        return std::unique_ptr<AVXAct>(new AVXActImpl<kRelu>());
-      } else if (type == "tanh") {
-        return std::unique_ptr<AVXAct>(new AVXActImpl<kTanh>());
-      } else if (type == "identity" || type == "") {
-        return std::unique_ptr<AVXAct>(new AVXActImpl<kIdentity>());
-      }
-      PADDLE_THROW("Not support type: %s", type);
-    };
-    avx_act_gate_ = GetAVXAct(act_gate);
-    avx_act_cand_ = GetAVXAct(act_cand);
-    avx_act_cell_ = GetAVXAct(act_cell);
-#endif
   }
 
   void ComputeCtHt(T* gates, const T* ct_1, T* ct, T* ht, const T* wp_data,
@@ -176,6 +193,27 @@ class LSTMKernelImpl : public LSTMKernel<T> {
 };
 
 #define INTRI8_FLOAT(isa)                                                      \
+  template <>                                                                  \
+  LSTMKernelImpl<float, isa, kEQ8>::LSTMKernelImpl(                            \
+      const std::string& act_gate, const std::string& act_cand,                \
+      const std::string& act_cell, int d)                                      \
+      : LSTMKernel<float>() {                                                  \
+    auto GetAVXAct = [&](const std::string& type) -> std::unique_ptr<AVXAct> { \
+      if (type == "sigmoid") {                                                 \
+        return std::unique_ptr<AVXAct>(new AVXActImpl<kSigmoid, isa>());       \
+      } else if (type == "relu") {                                             \
+        return std::unique_ptr<AVXAct>(new AVXActImpl<kRelu, isa>());          \
+      } else if (type == "tanh") {                                             \
+        return std::unique_ptr<AVXAct>(new AVXActImpl<kTanh, isa>());          \
+      } else if (type == "identity" || type == "") {                           \
+        return std::unique_ptr<AVXAct>(new AVXActImpl<kIdentity, isa>());      \
+      }                                                                        \
+      PADDLE_THROW("Not support type: %s", type);                              \
+    };                                                                         \
+    avx_act_gate_ = GetAVXAct(act_gate);                                       \
+    avx_act_cand_ = GetAVXAct(act_cand);                                       \
+    avx_act_cell_ = GetAVXAct(act_cell);                                       \
+  }                                                                            \
   template <>                                                                  \
   void LSTMKernelImpl<float, isa, kEQ8>::ComputeCtHt(                          \
       float* gates, const float* ct_1, float* ct, float* ht,                   \
@@ -195,6 +233,20 @@ class LSTMKernelImpl : public LSTMKernel<T> {
...
@@ -195,6 +233,20 @@ class LSTMKernelImpl : public LSTMKernel<T> {
/* H_t = act_cell(C_t) * ogated */
\
/* H_t = act_cell(C_t) * ogated */
\
o = _mm256_mul_ps(avx_act_cell_->Compute(f), avx_act_gate_->Compute(o)); \
o = _mm256_mul_ps(avx_act_cell_->Compute(f), avx_act_gate_->Compute(o)); \
_mm256_storeu_ps(ht, o); \
_mm256_storeu_ps(ht, o); \
} \
template <> \
void LSTMKernelImpl<float, isa, kEQ8>::ComputeC1H1( \
float* gates, float* ct, float* ht, const float* wp_data) const { \
__m256 c, i, o; \
c = _mm256_loadu_ps(gates); \
i = _mm256_loadu_ps(gates + 8); \
o = _mm256_loadu_ps(gates + 24); \
/* C_t = igated * cgated*/
\
c = _mm256_mul_ps(avx_act_gate_->Compute(i), avx_act_cand_->Compute(c)); \
_mm256_storeu_ps(ct, c); \
/* H_t = act_cell(C_t) * ogated */
\
o = _mm256_mul_ps(avx_act_cell_->Compute(c), avx_act_gate_->Compute(o)); \
_mm256_storeu_ps(ht, o); \
}
}
 
 // TODO(TJ): optimize keq16
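For reference, the two specialized methods compute the usual LSTM cell recurrences. The load offsets in ComputeC1H1 (gates, gates + 8, gates + 24, with 8 floats per AVX register) suggest the gates buffer holds four 8-float blocks in the order [c, i, f, o]; wp_data (presumably peephole weights) goes unused in this eight-wide path:

    C_t = \mathrm{act\_cand}(c) \odot \mathrm{act\_gate}(i) + C_{t-1} \odot \mathrm{act\_gate}(f)
    H_t = \mathrm{act\_cell}(C_t) \odot \mathrm{act\_gate}(o)

ComputeC1H1 handles the first time step, where there is no incoming cell state, so the forget-gate term drops out: C_1 = act_cand(c) \odot act_gate(i) and H_1 = act_cell(C_1) \odot act_gate(o).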