Commit b6c07552
Authored July 18, 2017 by qijun

implement some basic OpKernel

Parent: 3208914b

Showing 14 changed files with 85 additions and 25 deletions (+85 / -25)
paddle/operators/add_op.cc          +2  -3
paddle/operators/add_op.cu          +1  -2
paddle/operators/mul_op.cc          +1  -1
paddle/operators/mul_op.cu          +1  -1
paddle/operators/mul_op.h           +14 -3
paddle/operators/rowwise_add_op.cc  +1  -1
paddle/operators/rowwise_add_op.cu  +1  -1
paddle/operators/rowwise_add_op.h   +16 -3
paddle/operators/sigmoid_op.cc      +2  -1
paddle/operators/sigmoid_op.cu      +1  -1
paddle/operators/sigmoid_op.h       +9  -3
paddle/operators/softmax_op.cc      +4  -1
paddle/operators/softmax_op.cu      +1  -1
paddle/operators/softmax_op.h       +31 -3
paddle/operators/add_op.cc

@@ -53,6 +53,5 @@ The equation is: Out = X + Y
 }  // namespace paddle
 REGISTER_OP(add_two, paddle::operators::AddOp, paddle::operators::AddOpMaker);
-typedef paddle::operators::AddKernel<::paddle::platform::CPUPlace, float>
-    AddKernel_CPU_float;
-REGISTER_OP_CPU_KERNEL(add_two, AddKernel_CPU_float);
+REGISTER_OP_CPU_KERNEL(
+    add_two, paddle::operators::AddKernel<paddle::platform::CPUPlace, float>);
paddle/operators/add_op.cu

 #include "paddle/operators/add_op.h"
 #include "paddle/framework/op_registry.h"
-typedef paddle::operators::AddKernel<::paddle::platform::GPUPlace, float> AddKernel_GPU_float;
-REGISTER_OP_GPU_KERNEL(add_two, AddKernel_GPU_float);
\ No newline at end of file
+REGISTER_OP_GPU_KERNEL(add_two, paddle::operators::AddKernel<paddle::platform::GPUPlace, float>);
\ No newline at end of file
paddle/operators/mul_op.cc

@@ -57,4 +57,4 @@ The equation is: Out = X * Y
 REGISTER_OP(mul, paddle::operators::MulOp, paddle::operators::MulOpMaker);
-REGISTER_OP_CPU_KERNEL(mul, paddle::operators::MulKernel<paddle::platform::CPUPlace>);
+REGISTER_OP_CPU_KERNEL(mul, paddle::operators::MulKernel<paddle::platform::CPUPlace, float>);
paddle/operators/mul_op.cu

@@ -17,4 +17,4 @@
-REGISTER_OP_GPU_KERNEL(mul, paddle::operators::MulKernel<paddle::platform::GPUPlace>);
\ No newline at end of file
+REGISTER_OP_GPU_KERNEL(mul, paddle::operators::MulKernel<paddle::platform::GPUPlace, float>);
\ No newline at end of file
paddle/operators/mul_op.h

@@ -20,11 +20,22 @@
 namespace paddle {
 namespace operators {

-template <typename Place>
+template <typename Place, typename T>
 class MulKernel : public framework::OpKernel {
 public:
-  void Compute(const framework::KernelContext &context) const override {
-    LOG(INFO) << "Mul kernel in " << typeid(Place).name();
+  void Compute(const framework::KernelContext& context) const override {
+    Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
+    dim_pair[0].first = 1;
+    dim_pair[0].second = 0;
+
+    auto input0 = context.Input(0)->Get<framework::Tensor>();
+    auto input1 = context.Input(1)->Get<framework::Tensor>();
+    auto* output = context.Output(0)->GetMutable<framework::Tensor>();
+
+    output->mutable_data<T>(context.GetPlace());
+
+    output->matrix<T>().device(*(context.GetEigenDevice<Place>())) =
+        input0.matrix<T>().contract(input1.matrix<T>(), dim_pair);
   }
 };
 }  // namespace operators
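The new MulKernel body is an Eigen tensor contraction: dim_pair = {(1, 0)} contracts dimension 1 of the first operand with dimension 0 of the second, which is an ordinary matrix product. Below is a minimal standalone sketch of the same contraction, assuming only Eigen's unsupported Tensor module (no Paddle framework types); the tensor names are illustrative.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> a(2, 3), b(3, 4);
  a.setRandom();
  b.setRandom();

  // Contract dim 1 of `a` with dim 0 of `b`: a (2 x 3) * (3 x 4) matrix
  // product, the same pairing MulKernel builds in dim_pair.
  Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
  dim_pair[0].first = 1;
  dim_pair[0].second = 0;

  Eigen::Tensor<float, 2> c = a.contract(b, dim_pair);
  std::cout << c.dimension(0) << " x " << c.dimension(1) << "\n";  // 2 x 4
}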
paddle/operators/rowwise_add_op.cc

@@ -58,4 +58,4 @@ REGISTER_OP(rowwise_add,
             paddle::operators::RowWiseAddOpMaker);
 REGISTER_OP_CPU_KERNEL(rowwise_add,
-                       paddle::operators::RowWiseAddKernel<paddle::platform::CPUPlace>);
+                       paddle::operators::RowWiseAddKernel<paddle::platform::CPUPlace, float>);
paddle/operators/rowwise_add_op.cu

@@ -3,4 +3,4 @@
 REGISTER_OP_GPU_KERNEL(rowwise_add,
-                       paddle::operators::RowWiseAddKernel<paddle::platform::GPUPlace>);
+                       paddle::operators::RowWiseAddKernel<paddle::platform::GPUPlace, float>);
paddle/operators/rowwise_add_op.h

@@ -19,11 +19,24 @@
 namespace paddle {
 namespace operators {

-template <typename Place>
+template <typename Place, typename T>
 class RowWiseAddKernel : public framework::OpKernel {
 public:
-  void Compute(const framework::KernelContext &context) const override {
-    LOG(INFO) << "RowWiseAdd kernel in " << typeid(Place).name();
+  void Compute(const framework::KernelContext& context) const override {
+    auto in0 = context.Input(0)->Get<framework::Tensor>();
+    auto in1 = context.Input(1)->Get<framework::Tensor>();
+    auto* out = context.Output(0)->GetMutable<framework::Tensor>();
+
+    auto input = in0.matrix<T>();
+    auto bias = in1.vec<T>();
+    auto output = out->matrix<T>();
+
+    const int bias_size = bias.dimension(0);
+    const int rest_size = input.size() / bias_size;
+    Eigen::DSizes<int, 1> one_d(input.size());
+    Eigen::DSizes<int, 1> bcast(rest_size);
+    output.reshape(one_d).device(*(context.GetEigenDevice<Place>())) =
+        input.reshape(one_d) + bias.broadcast(bcast).reshape(one_d);
   }
 };
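RowWiseAddKernel adds a bias vector of length num_columns to every row by flattening the matrix to 1-D and tiling the bias across it, rather than broadcasting along an explicit axis. Here is a standalone sketch of the same reshape/broadcast trick, assuming Eigen's Tensor module and a row-major tensor so the 1-D view walks the matrix row by row; the values are illustrative.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // 2 x 3 input and a bias of length 3 to be added to each row.
  Eigen::Tensor<float, 2, Eigen::RowMajor> input(2, 3);
  input.setValues({{0.f, 1.f, 2.f}, {3.f, 4.f, 5.f}});
  Eigen::Tensor<float, 1, Eigen::RowMajor> bias(3);
  bias.setValues({10.f, 20.f, 30.f});

  const int bias_size = bias.dimension(0);         // 3 columns
  const int rest_size = input.size() / bias_size;  // 2 rows
  Eigen::DSizes<int, 1> one_d(static_cast<int>(input.size()));
  Eigen::DSizes<int, 1> bcast(rest_size);

  // Flatten, tile the bias once per row, add, and view the result as 2 x 3.
  Eigen::Tensor<float, 2, Eigen::RowMajor> out(2, 3);
  out.reshape(one_d) = input.reshape(one_d) + bias.broadcast(bcast).reshape(one_d);
  std::cout << out << "\n";  // {{10, 21, 32}, {13, 24, 35}}
}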
paddle/operators/sigmoid_op.cc

@@ -46,4 +46,5 @@ REGISTER_OP(sigmoid,
             paddle::operators::SigmoidOp,
             paddle::operators::SigmoidOpMaker);
 REGISTER_OP_CPU_KERNEL(
-    sigmoid, paddle::operators::SigmoidKernel<paddle::platform::CPUPlace>);
+    sigmoid,
+    paddle::operators::SigmoidKernel<paddle::platform::CPUPlace, float>);
paddle/operators/sigmoid_op.cu

@@ -2,4 +2,4 @@
 #include <paddle/framework/op_registry.h>
 REGISTER_OP_GPU_KERNEL(
-    sigmoid, paddle::operators::SigmoidKernel<paddle::platform::GPUPlace>);
+    sigmoid, paddle::operators::SigmoidKernel<paddle::platform::GPUPlace, float>);
paddle/operators/sigmoid_op.h

@@ -20,11 +20,17 @@
 namespace paddle {
 namespace operators {

-template <typename Place>
+template <typename Place, typename T>
 class SigmoidKernel : public framework::OpKernel {
 public:
-  void Compute(const framework::KernelContext &context) const override {
-    LOG(INFO) << "Sigmoid kernel in " << typeid(Place).name();
+  void Compute(const framework::KernelContext& context) const override {
+    auto input = context.Input(0)->Get<framework::Tensor>();
+    auto* output = context.Output(0)->GetMutable<framework::Tensor>();
+
+    output->mutable_data<T>(context.GetPlace());
+
+    output->flat<T>().device(*(context.GetEigenDevice<Place>())) =
+        1.0 / (1.0 + (-1.0 * input.flat<T>()).exp());
   }
 };
 }  // namespace operators
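The SigmoidKernel body is a single elementwise expression over the flattened input, sigmoid(x) = 1 / (1 + exp(-x)). A minimal standalone sketch of the same expression with a plain Eigen tensor (illustrative values, no Paddle types):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 1> x(5);
  x.setValues({-2.f, -1.f, 0.f, 1.f, 2.f});

  // Same elementwise formula the kernel assigns to output->flat<T>():
  // sigmoid(x) = 1 / (1 + exp(-x)).
  Eigen::Tensor<float, 1> y = ((x * -1.0f).exp() + 1.0f).inverse();
  std::cout << y << "\n";  // ~0.119  0.269  0.5  0.731  0.881
}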
paddle/operators/softmax_op.cc

@@ -23,6 +23,8 @@ protected:
       const std::vector<const framework::Tensor *> &inputs,
       const std::vector<framework::Tensor *> &outputs) const override {
     PADDLE_ENFORCE(inputs.size() == 1, "Only one input is need for softmax");
+    PADDLE_ENFORCE(inputs[0]->dims().size() == 2,
+                   "The input of softmax op must be matrix");
     PADDLE_ENFORCE(outputs.size() == 1, "Only one output is need for softmax");
     outputs[0]->set_dims(inputs[0]->dims());

@@ -46,4 +48,5 @@ public:
 namespace ops = paddle::operators;
 REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker);
-REGISTER_OP_CPU_KERNEL(softmax, ops::SoftmaxKernel<paddle::platform::CPUPlace>);
+REGISTER_OP_CPU_KERNEL(softmax,
+                       ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
paddle/operators/softmax_op.cu

@@ -2,4 +2,4 @@
 #include <paddle/operators/softmax_op.h>
-REGISTER_OP_GPU_KERNEL(softmax, paddle::operators::SoftmaxKernel<paddle::platform::GPUPlace>);
+REGISTER_OP_GPU_KERNEL(softmax, paddle::operators::SoftmaxKernel<paddle::platform::GPUPlace, float>);
paddle/operators/softmax_op.h

@@ -20,11 +20,39 @@
 namespace paddle {
 namespace operators {

-template <typename Place>
+template <typename Place, typename T>
 class SoftmaxKernel : public framework::OpKernel {
 public:
-  void Compute(const framework::KernelContext &context) const override {
-    LOG(INFO) << "Softmax kernel in " << typeid(Place).name();
+  void Compute(const framework::KernelContext& context) const override {
+    auto input = context.Input(0)->Get<framework::Tensor>();
+    auto* output = context.Output(0)->GetMutable<framework::Tensor>();
+
+    auto logits = input.matrix<T>();
+    auto softmax = output->matrix<T>();
+
+    const int kBatchDim = 0;
+    const int kClassDim = 1;
+
+    const int batch_size = logits.dimension(kBatchDim);
+    const int num_classes = logits.dimension(kClassDim);
+
+    Eigen::DSizes<int, 1> along_class(kClassDim);
+    Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
+    Eigen::DSizes<int, 2> one_by_class(1, num_classes);
+
+    auto shifted_logits = (logits - logits.maximum(along_class)
+                                        .eval()
+                                        .reshape(batch_by_one)
+                                        .broadcast(one_by_class));
+
+    softmax.device(*(context.GetEigenDevice<Place>())) = shifted_logits.exp();
+
+    softmax.device(*(context.GetEigenDevice<Place>())) =
+        (softmax * softmax.sum(along_class)
+                       .inverse()
+                       .eval()
+                       .reshape(batch_by_one)
+                       .broadcast(one_by_class));
   }
 };
 }  // namespace operators
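SoftmaxKernel computes a numerically stable softmax: the per-row maximum is subtracted before exponentiating, so exp() cannot overflow for large logits, and each row is then scaled by the inverse of its sum. Below is a standalone sketch of the same sequence of reductions and broadcasts, assuming only Eigen's Tensor module; the 1000-valued row illustrates that the max-shift keeps the result finite.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  const int batch_size = 2, num_classes = 3;
  Eigen::Tensor<float, 2> logits(batch_size, num_classes);
  logits.setValues({{1.f, 2.f, 3.f}, {1000.f, 1000.f, 1000.f}});

  Eigen::DSizes<int, 1> along_class(1);                // reduce over classes
  Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);   // column of row stats
  Eigen::DSizes<int, 2> one_by_class(1, num_classes);  // broadcast back

  // Shift by the per-row max, exponentiate, then normalize each row.
  auto shifted = logits - logits.maximum(along_class)
                              .eval()
                              .reshape(batch_by_one)
                              .broadcast(one_by_class);
  Eigen::Tensor<float, 2> softmax = shifted.exp();
  softmax = softmax * softmax.sum(along_class)
                          .inverse()
                          .eval()
                          .reshape(batch_by_one)
                          .broadcast(one_by_class);
  std::cout << softmax << "\n";  // row 0: ~0.09 0.24 0.67; row 1: 1/3 each
}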