BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle, in sync with the upstream project)
Commit 3462c299, authored on Aug 24, 2018 by tensor-tang
refine add bias with avx
Parent: bb9f98e1
Showing 2 changed files with 69 additions and 27 deletions (+69, -27)
paddle/fluid/operators/attention_lstm_op.cc   +9  -21
paddle/fluid/operators/math/cpu_vec.h         +60  -6
paddle/fluid/operators/attention_lstm_op.cc @ 3462c299

@@ -232,40 +232,28 @@ use lstm_x_t as input and compute as standard LSTM.
 template <typename T>
 inline void bias_relu(const int n, const T* x, const T* bias, T* y) {
   if (bias) {
-    for (int i = 0; i < n; ++i) {
-      y[i] = x[i] + bias[0];
-    }
-    math::vec_relu<T>(n, y, y);
+    math::vec_add_bias<T, platform::jit::avx>(n, *bias, x, y);
+    math::vec_relu<T, platform::jit::avx>(n, y, y);
   } else {
-    math::vec_relu<T>(n, x, y);
+    math::vec_relu<T, platform::jit::avx>(n, x, y);
   }
 }
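
Note: the rewrite replaces the hand-written bias loop with the vectorized math::vec_add_bias / math::vec_relu primitives. As a reference, the fused pass computes y[i] = max(x[i] + bias, 0); the following minimal scalar sketch (illustrative names only, not part of the Paddle sources) shows that contract end to end:

#include <algorithm>
#include <cstdio>
#include <vector>

// Scalar reference for the fused pass: add the broadcast bias, then ReLU.
// "bias_relu_ref" is an illustrative name, not a Paddle function.
void bias_relu_ref(int n, const float* x, float bias, float* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = std::max(x[i] + bias, 0.0f);
  }
}

int main() {
  std::vector<float> x = {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f};
  std::vector<float> y(x.size());
  bias_relu_ref(static_cast<int>(x.size()), x.data(), /*bias=*/0.25f, y.data());
  for (float v : y) std::printf("%g ", v);  // -> 0 0 0.25 0.75 2.25
  std::printf("\n");
  return 0;
}
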
-template <typename DeviceContext, typename T>
-inline void vec_softmax(const math::BlasT<DeviceContext, T>& blas, const int n,
-                        const T* x, T* y) {
+template <typename T>
+inline void vec_softmax(const int n, const T* x, T* y) {
   T scalar = x[0];
   // max
   for (int i = 1; i < n; ++i) {
     scalar = scalar < x[i] ? x[i] : scalar;
   }
-  // sub
-  for (int i = 0; i < n; ++i) {
-    y[i] = x[i] - scalar;
-  }
-  // exp
-  blas.VEXP(n, y, y);
+  math::vec_add_bias<T, platform::jit::avx>(n, -scalar, x, y);  // sub
+  math::vec_exp<T>(n, y, y);                                    // exp
   // sum
   scalar = T(0);
   for (int i = 0; i < n; ++i) {
     scalar += y[i];
   }
-  // scale
-  blas.SCAL(n, static_cast<T>(1) / scalar, y);
+  math::vec_scal<T>(n, static_cast<T>(1) / scalar, y);  // scale
 }

 template <typename T>
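
The reworked vec_softmax keeps the standard numerically stable recipe: shift by the running maximum, exponentiate, then normalize by the sum. A self-contained scalar sketch of that recipe (double precision, illustrative names only, not the Paddle code):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Scalar reference of the stable softmax: shift by max, exp, sum, scale.
void softmax_ref(int n, const double* x, double* y) {
  double mx = x[0];
  for (int i = 1; i < n; ++i) mx = std::max(mx, x[i]);  // max
  double sum = 0.0;
  for (int i = 0; i < n; ++i) {
    y[i] = std::exp(x[i] - mx);  // sub + exp
    sum += y[i];                 // sum
  }
  for (int i = 0; i < n; ++i) y[i] /= sum;  // scale
}

int main() {
  std::vector<double> x = {1.0, 2.0, 3.0};
  std::vector<double> y(x.size());
  softmax_ref(3, x.data(), y.data());
  std::printf("%.4f %.4f %.4f\n", y[0], y[1], y[2]);  // ~0.0900 0.2447 0.6652
  return 0;
}
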
@@ -363,7 +351,7 @@ class AttentionLSTMKernel : public framework::OpKernel<T> {
                                           fc_out_data);
       }
       // 1d. softmax
-      vec_softmax<DeviceContext, T>(blas, seq_len, fc_out_data, fc_out_data);
+      vec_softmax<T>(seq_len, fc_out_data, fc_out_data);
       // mul x(seq_len*M) and sum pool
       math::FCCompute<DeviceContext, T>(blas, 1, M, seq_len, fc_out_data,
                                         cur_x_data, lstm_x_data);
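
The call site now passes raw pointers only and runs softmax in place on fc_out_data; the following FCCompute then turns those seq_len weights into an attention-weighted sum over the (seq_len x M) input, i.e. a (1 x seq_len) by (seq_len x M) product. A scalar sketch of that pooling step (made-up names, not the Paddle kernel):

#include <cstdio>
#include <vector>

// Illustrative scalar version of "mul x(seq_len*M) and sum pool":
// out[m] = sum over t of weight[t] * x[t * M + m].
void weighted_sum_pool(int seq_len, int M, const float* weight, const float* x,
                       float* out) {
  for (int m = 0; m < M; ++m) {
    float acc = 0.0f;
    for (int t = 0; t < seq_len; ++t) {
      acc += weight[t] * x[t * M + m];
    }
    out[m] = acc;
  }
}

int main() {
  const int seq_len = 2, M = 3;
  std::vector<float> w = {0.25f, 0.75f};  // softmax weights
  std::vector<float> x = {1, 2, 3,        // t = 0
                          5, 6, 7};       // t = 1
  std::vector<float> out(M);
  weighted_sum_pool(seq_len, M, w.data(), x.data(), out.data());
  std::printf("%g %g %g\n", out[0], out[1], out[2]);  // 4 5 6
  return 0;
}
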
paddle/fluid/operators/math/cpu_vec.h @ 3462c299

@@ -87,7 +87,7 @@ inline void vec_scal<float, platform::jit::avx>(const int n, const float a,
                                                 const float* x, float* y) {
 #ifdef __AVX__
   constexpr int block = AVX_FLOAT_BLOCK;
-  if (n < block * 4) {  // use larger threshold, since small ones has no boost
+  if (n < block) {
     vec_scal<float, platform::jit::isa_any>(n, a, x, y);
     return;
   }
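
AVX_FLOAT_BLOCK is 8 floats (one 256-bit register), so vec_scal now drops to the generic loop only for inputs shorter than a single block, while vec_relu further down keeps a larger block * 4 cutoff. A minimal sketch of this size-threshold dispatch (illustrative code, not the Paddle template machinery):

#include <cstdio>
#if defined(__AVX__)
#include <immintrin.h>
#endif

// Sketch of the dispatch: short inputs are not worth the vector setup cost
// and take the plain loop; longer ones use 8-wide AVX plus a scalar tail.
void scale(int n, float a, const float* x, float* y) {
  int i = 0;
#if defined(__AVX__)
  constexpr int block = 8;  // AVX_FLOAT_BLOCK
  if (n >= block) {
    const __m256 va = _mm256_set1_ps(a);
    for (; i + block <= n; i += block) {
      _mm256_storeu_ps(y + i, _mm256_mul_ps(va, _mm256_loadu_ps(x + i)));
    }
  }
#endif
  for (; i < n; ++i) y[i] = a * x[i];  // short input, remaining tail, or no AVX
}

int main() {
  float x[10], y[10];
  for (int i = 0; i < 10; ++i) x[i] = static_cast<float>(i);
  scale(10, 0.5f, x, y);
  std::printf("%g %g %g\n", y[0], y[5], y[9]);  // 0 2.5 4.5
  return 0;
}
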
@@ -131,6 +131,62 @@ inline void vec_scal<float, platform::jit::avx512_common>(const int n,
   vec_scal<float, platform::jit::avx2>(n, a, x, y);
 }
+
+template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any>
+inline void vec_add_bias(const int n, const T a, const T* x, T* y) {
+  for (int i = 0; i < n; ++i) {
+    y[i] = x[i] + a;
+  }
+}
+
+template <>
+inline void vec_add_bias<float, platform::jit::avx>(const int n, const float a,
+                                                    const float* x, float* y) {
+#ifdef __AVX__
+  constexpr int block = AVX_FLOAT_BLOCK;
+  if (n < block) {
+    vec_add_bias<float, platform::jit::isa_any>(n, a, x, y);
+    return;
+  }
+  const int rest = n % block;
+  const int end = n - rest;
+  int i = 0;
+  __m256 bias = _mm256_set1_ps(a);
+  __m256 tmp;
+#define MOVE_ONE_STEP              \
+  tmp = _mm256_loadu_ps(x + i);    \
+  tmp = _mm256_add_ps(tmp, bias);  \
+  _mm256_storeu_ps(y + i, tmp)
+  for (i = 0; i < end; i += block) {
+    MOVE_ONE_STEP;
+  }
+#undef MOVE_ONE_STEP
+  if (rest == 0) {
+    return;
+  }
+  // can not continue move step if src and dst are inplace
+  for (i = n - rest; i < n; ++i) {
+    y[i] = x[i] + a;
+  }
+#else
+  vec_add_bias<float, platform::jit::isa_any>(n, a, x, y);
+#endif
+}
+
+template <>
+inline void vec_add_bias<float, platform::jit::avx2>(const int n, const float a,
+                                                     const float* x, float* y) {
+  vec_add_bias<float, platform::jit::avx>(n, a, x, y);
+}
+
+template <>
+inline void vec_add_bias<float, platform::jit::avx512_common>(
+    const int n, const float a, const float* x, float* y) {
+  // TODO(TJ): enable me
+  vec_add_bias<float, platform::jit::avx2>(n, a, x, y);
+}
+
 template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any>
 inline void vec_identity(const int n, const T* x, T* y) {
   // do nothing
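
The new AVX specialization processes 8 floats per iteration and finishes the last n % 8 elements with a scalar loop, rather than re-running an overlapping full block at the end, since x and y may alias when the routine is called in place. A small self-contained check of that main-loop-plus-tail structure (hypothetical stand-in names, not tied to the Paddle headers):

#include <cstdio>
#include <vector>
#if defined(__AVX__)
#include <immintrin.h>
#endif

// Hypothetical stand-in for vec_add_bias<float, avx>: full 8-wide blocks via
// AVX, then a scalar tail for the remaining n % 8 elements. Safe when x == y.
void add_bias(int n, float a, const float* x, float* y) {
  int i = 0;
#if defined(__AVX__)
  const __m256 bias = _mm256_set1_ps(a);
  for (; i + 8 <= n; i += 8) {
    _mm256_storeu_ps(y + i, _mm256_add_ps(_mm256_loadu_ps(x + i), bias));
  }
#endif
  for (; i < n; ++i) y[i] = x[i] + a;  // tail, or the whole input without AVX
}

int main() {
  const int n = 13;  // deliberately not a multiple of 8 to exercise the tail
  std::vector<float> x(n), y(n);
  for (int i = 0; i < n; ++i) x[i] = static_cast<float>(i);
  add_bias(n, 1.5f, x.data(), y.data());
  bool ok = true;
  for (int i = 0; i < n; ++i) ok = ok && (y[i] == x[i] + 1.5f);
  std::printf("tail handled correctly: %s\n", ok ? "yes" : "no");
  return 0;
}
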
@@ -229,11 +285,10 @@ inline void vec_tanh(const int n, const T* x, T* y) {
   vec_scal<T, isa>(n, static_cast<T>(2), x, y);
   vec_sigmoid<T, isa>(n, y, y);
   vec_scal<T>(n, static_cast<T>(2), y);
-  for (int i = 0; i < n; ++i) {
-    y[i] = y[i] - static_cast<T>(1);
-  }
+  vec_add_bias<T, isa>(n, static_cast<T>(-1), y, y);
 }

+// TODO(TJ): make relu clip
 template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any>
 inline void vec_relu(const int n, const T* x, T* y) {
   for (int i = 0; i < n; ++i) {
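
vec_tanh composes the primitives around the identity tanh(x) = 2 * sigmoid(2x) - 1; the change only swaps the final "- 1" loop for a vec_add_bias call with a bias of -1. A quick standalone check of the identity itself:

#include <cmath>
#include <cstdio>

// tanh via the sigmoid identity used by vec_tanh: scale by 2, sigmoid,
// scale by 2, then add a bias of -1.
double tanh_via_sigmoid(double x) {
  double s = 1.0 / (1.0 + std::exp(-2.0 * x));  // sigmoid(2x)
  return 2.0 * s - 1.0;
}

int main() {
  for (double x : {-2.0, -0.5, 0.0, 0.5, 2.0}) {
    std::printf("x=%5.2f  tanh=%.6f  via sigmoid=%.6f\n", x, std::tanh(x),
                tanh_via_sigmoid(x));
  }
  return 0;
}
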
@@ -246,7 +301,7 @@ inline void vec_relu<float, platform::jit::avx>(const int n, const float* x,
                                                 float* y) {
 #ifdef __AVX__
   constexpr int block = AVX_FLOAT_BLOCK;
-  if (n < block) {
+  if (n < block * 4) {
     vec_relu<float, platform::jit::isa_any>(n, x, y);
     return;
   }
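
With the cutoff raised to block * 4, short inputs skip the vector path entirely; for longer ones, ReLU vectorizes naturally as a max against zero. A hedged sketch of that main loop (one common formulation, not necessarily identical to the specialization in this file):

#include <cstdio>
#if defined(__AVX__)
#include <immintrin.h>
#endif

// ReLU as an 8-wide max against zero, with a scalar tail. Illustrative only.
void relu(int n, const float* x, float* y) {
  int i = 0;
#if defined(__AVX__)
  const __m256 zero = _mm256_setzero_ps();
  for (; i + 8 <= n; i += 8) {
    _mm256_storeu_ps(y + i, _mm256_max_ps(_mm256_loadu_ps(x + i), zero));
  }
#endif
  for (; i < n; ++i) y[i] = x[i] > 0.0f ? x[i] : 0.0f;
}

int main() {
  float x[9] = {-3, -2, -1, 0, 1, 2, 3, -4, 5}, y[9];
  relu(9, x, y);
  for (float v : y) std::printf("%g ", v);  // 0 0 0 0 1 2 3 0 5
  std::printf("\n");
  return 0;
}
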
@@ -288,7 +343,6 @@ inline void vec_relu<float, platform::jit::avx512_common>(const int n,
   // TODO(TJ): enable me
   vec_relu<float, platform::jit::avx2>(n, x, y);
 }
-// TODO(TJ): add vec add bias, make relu clip
 // TODO(TJ): optimize double of sigmoid, tanh and relu if necessary