Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
51536f7f
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
51536f7f
编写于
3月 21, 2019
作者:
D
dengkaipeng
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
StrideASum. test=develop
上级
93701dba
变更
9
隐藏空白更改
内联
并排
Showing
9 changed files
with
16 additions
and
15 deletions
+16
-15
paddle/fluid/operators/jit/helper.cc
paddle/fluid/operators/jit/helper.cc
+1
-1
paddle/fluid/operators/jit/kernel_base.h
paddle/fluid/operators/jit/kernel_base.h
+2
-2
paddle/fluid/operators/jit/more/mix/mix.cc
paddle/fluid/operators/jit/more/mix/mix.cc
+1
-1
paddle/fluid/operators/jit/more/mkl/mkl.cc
paddle/fluid/operators/jit/more/mkl/mkl.cc
+3
-3
paddle/fluid/operators/jit/more/mkl/mkl.h
paddle/fluid/operators/jit/more/mkl/mkl.h
+2
-2
paddle/fluid/operators/jit/refer/CMakeLists.txt
paddle/fluid/operators/jit/refer/CMakeLists.txt
+1
-1
paddle/fluid/operators/jit/refer/refer.cc
paddle/fluid/operators/jit/refer/refer.cc
+1
-1
paddle/fluid/operators/jit/refer/refer.h
paddle/fluid/operators/jit/refer/refer.h
+4
-4
paddle/fluid/operators/jit/test.cc
paddle/fluid/operators/jit/test.cc
+1
-0
未找到文件。
paddle/fluid/operators/jit/helper.cc
浏览文件 @
51536f7f
...
...
@@ -56,7 +56,7 @@ const char* to_string(KernelType kt) {
ONE_CASE
(
kMatMul
);
ONE_CASE
(
kHMax
);
ONE_CASE
(
kHSum
);
ONE_CASE
(
kStrideSum
);
ONE_CASE
(
kStride
A
Sum
);
ONE_CASE
(
kSoftmax
);
ONE_CASE
(
kEmbSeqPool
);
ONE_CASE
(
kSgd
);
...
...
paddle/fluid/operators/jit/kernel_base.h
浏览文件 @
51536f7f
...
...
@@ -53,7 +53,7 @@ typedef enum {
kVSquare
,
kVSub
,
kVTanh
,
kStrideSum
,
kStride
A
Sum
,
kStrideScal
,
}
KernelType
;
...
...
@@ -132,7 +132,7 @@ DECLARE_KERNELTUPLE(XYNTuple, VCopy);
DECLARE_KERNELTUPLE
(
XRNTuple
,
HMax
);
DECLARE_KERNELTUPLE
(
XRNTuple
,
HSum
);
DECLARE_KERNELTUPLE
(
XRNSTuple
,
StrideSum
);
DECLARE_KERNELTUPLE
(
XRNSTuple
,
Stride
A
Sum
);
typedef
struct
{
void
*
gates
;
// gates: x_ch, x_ih, x_fh, x_oh
...
...
paddle/fluid/operators/jit/more/mix/mix.cc
浏览文件 @
51536f7f
...
...
@@ -54,7 +54,7 @@ void Softmax(const T* x, T* y, int n, int bs, int m) {
auto
compute_hmax
=
KernelFuncs
<
HMaxTuple
<
T
>
,
CPUPlace
>::
Cache
().
At
(
n
);
auto
compute_hsum
=
KernelFuncs
<
HSumTuple
<
T
>
,
CPUPlace
>::
Cache
().
At
(
n
);
auto
compute_vscal
=
KernelFuncs
<
VScalTuple
<
T
>
,
CPUPlace
>::
Cache
().
At
(
n
);
auto
compute_stridesum
=
KernelFuncs
<
StrideSumTuple
<
T
>
,
CPUPlace
>::
Cache
().
At
(
n
);
auto
compute_stridesum
=
KernelFuncs
<
Stride
A
SumTuple
<
T
>
,
CPUPlace
>::
Cache
().
At
(
n
);
auto
compute_stridescal
=
KernelFuncs
<
StrideScalTuple
<
T
>
,
CPUPlace
>::
Cache
().
At
(
n
);
auto
compute_vaddbias
=
KernelFuncs
<
VAddBiasTuple
<
T
>
,
CPUPlace
>::
Cache
().
At
(
n
);
...
...
paddle/fluid/operators/jit/more/mkl/mkl.cc
浏览文件 @
51536f7f
...
...
@@ -147,12 +147,12 @@ void ASum<double>(const double* x, double* res, int n) {
}
template
<
>
void
StrideSum
<
float
>
(
const
float
*
x
,
float
*
res
,
int
n
,
int
stride
)
{
void
Stride
A
Sum
<
float
>
(
const
float
*
x
,
float
*
res
,
int
n
,
int
stride
)
{
res
[
0
]
=
platform
::
dynload
::
cblas_sasum
(
n
,
x
,
stride
);
}
template
<
>
void
StrideSum
<
double
>
(
const
double
*
x
,
double
*
res
,
int
n
,
int
stride
)
{
void
Stride
A
Sum
<
double
>
(
const
double
*
x
,
double
*
res
,
int
n
,
int
stride
)
{
res
[
0
]
=
platform
::
dynload
::
cblas_dasum
(
n
,
x
,
stride
);
}
...
...
@@ -174,7 +174,7 @@ bool VScalKernel<float>::CanBeUsed(const int& d) const {
template
<
>
bool
StrideScalKernel
<
float
>::
CanBeUsed
(
const
int
&
d
)
const
{
return
platform
::
MayIUse
(
platform
::
avx512f
)
&&
d
>
512
;
return
true
;
}
template
<
>
...
...
paddle/fluid/operators/jit/more/mkl/mkl.h
浏览文件 @
51536f7f
...
...
@@ -129,7 +129,7 @@ template <typename T>
void
ASum
(
const
T
*
x
,
T
*
res
,
int
n
);
template
<
typename
T
>
void
StrideSum
(
const
T
*
x
,
T
*
res
,
int
n
,
int
stride
);
void
Stride
A
Sum
(
const
T
*
x
,
T
*
res
,
int
n
,
int
stride
);
template
<
typename
T
>
void
StrideScal
(
const
T
*
a
,
const
T
*
x
,
T
*
y
,
int
n
,
int
stride
);
...
...
@@ -155,7 +155,7 @@ void Softmax(const T* x, T* y, int n, int bs, int m=1) {
VScal
(
&
sum
,
&
y
[
i
*
n
],
&
y
[
i
*
n
],
n
);
}
else
{
for
(
int
j
=
0
;
j
<
m
;
++
j
)
{
StrideSum
(
&
y
[
i
*
n
+
j
],
&
sum
,
n
/
m
,
m
);
Stride
A
Sum
(
&
y
[
i
*
n
+
j
],
&
sum
,
n
/
m
,
m
);
sum
=
static_cast
<
T
>
(
1
)
/
sum
;
StrideScal
(
&
sum
,
&
y
[
i
*
n
+
j
],
&
y
[
i
*
n
+
j
],
n
/
m
,
m
);
}
...
...
paddle/fluid/operators/jit/refer/CMakeLists.txt
浏览文件 @
51536f7f
...
...
@@ -33,7 +33,7 @@ USE_JITKERNEL_REFER(kMatMul)
USE_JITKERNEL_REFER
(
kVSquare
)
USE_JITKERNEL_REFER
(
kHSum
)
USE_JITKERNEL_REFER
(
kHMax
)
USE_JITKERNEL_REFER
(
kStrideSum
)
USE_JITKERNEL_REFER
(
kStride
A
Sum
)
USE_JITKERNEL_REFER
(
kSoftmax
)
USE_JITKERNEL_REFER
(
kEmbSeqPool
)
USE_JITKERNEL_REFER
(
kSgd
)
...
...
paddle/fluid/operators/jit/refer/refer.cc
浏览文件 @
51536f7f
...
...
@@ -52,7 +52,7 @@ REGISTER_REFER_KERNEL(SeqPool);
REGISTER_REFER_KERNEL
(
MatMul
);
REGISTER_REFER_KERNEL
(
HMax
);
REGISTER_REFER_KERNEL
(
HSum
);
REGISTER_REFER_KERNEL
(
StrideSum
);
REGISTER_REFER_KERNEL
(
Stride
A
Sum
);
REGISTER_REFER_KERNEL
(
Softmax
);
REGISTER_REFER_KERNEL
(
EmbSeqPool
);
REGISTER_REFER_KERNEL
(
Sgd
);
...
...
paddle/fluid/operators/jit/refer/refer.h
浏览文件 @
51536f7f
...
...
@@ -412,10 +412,10 @@ void HSum(const T* x, T* res, int n) {
}
template
<
typename
T
>
void
StrideSum
(
const
T
*
x
,
T
*
res
,
int
n
,
int
stride
)
{
void
Stride
A
Sum
(
const
T
*
x
,
T
*
res
,
int
n
,
int
stride
)
{
res
[
0
]
=
x
[
0
];
for
(
int
i
=
stride
;
i
<
n
;
i
+=
stride
)
{
res
[
0
]
+=
x
[
i
]
;
res
[
0
]
+=
std
::
abs
(
x
[
i
])
;
}
}
...
...
@@ -442,7 +442,7 @@ void Softmax(const T* x, T* y, int n, int bs = 1, int m = 1) {
VScal
(
&
scalar
,
y
,
y
,
n
);
}
else
{
for
(
int
j
=
0
;
j
<
m
;
j
++
)
{
StrideSum
(
&
y
[
j
],
&
scalar
,
n
,
m
);
Stride
A
Sum
(
&
y
[
j
],
&
scalar
,
n
,
m
);
scalar
=
static_cast
<
T
>
(
1
)
/
scalar
;
StrideScal
(
&
scalar
,
&
y
[
j
],
&
y
[
j
],
n
,
m
);
}
...
...
@@ -554,7 +554,7 @@ DECLARE_REFER_KERNEL(GRUHtPart2);
DECLARE_REFER_KERNEL
(
HMax
);
DECLARE_REFER_KERNEL
(
HSum
);
DECLARE_REFER_KERNEL
(
StrideSum
);
DECLARE_REFER_KERNEL
(
Stride
A
Sum
);
// others
DECLARE_REFER_KERNEL
(
CRFDecoding
);
...
...
paddle/fluid/operators/jit/test.cc
浏览文件 @
51536f7f
...
...
@@ -727,6 +727,7 @@ void TestKernelSoftmax() {
if
(
m
>
n
||
n
%
m
!=
0
)
{
continue
;
}
VLOG
(
10
)
<<
"Softmax: "
<<
bs
<<
", "
<<
n
<<
", "
<<
m
;
auto
ref
=
jit
::
GetReferFunc
<
KernelTuple
>
();
EXPECT_TRUE
(
ref
!=
nullptr
);
std
::
vector
<
T
>
x
(
bs
*
n
),
y
(
bs
*
n
);
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录