Commit 2ea2fbea
Authored on Aug 11, 2017 by fengjiayi

Merge REGISTER_OP and REGISTER_GRADIENT_OP
Parent: 6768b310

Showing 10 changed files with 37 additions and 45 deletions (+37, -45)
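In short, this commit folds gradient-operator registration into REGISTER_OP itself: the macro gains a fifth argument naming the gradient operator class, and the separate REGISTER_GRADIENT_OP macro is removed. A minimal before/after sketch of a call site, using the operator and class names from the add_op.cc diff below:

    // Before: the forward op and its gradient op were registered separately.
    REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad);
    REGISTER_GRADIENT_OP(add_two_grad, ops::AddOpGrad);

    // After: one call registers both; the last argument is the gradient
    // operator class.
    REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad, ops::AddOpGrad);

Operators with no gradient keep using REGISTER_OP_WITHOUT_GRADIENT, which now forwards an empty grad_op_type and ::paddle::framework::NOP as the gradient class.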
paddle/framework/backward_test.cc          +6   -10
paddle/framework/grad_op_builder_test.cc   +2   -11
paddle/framework/op_registry.h             +13  -9
paddle/framework/operator.h                +7   -0
paddle/operators/add_op.cc                 +1   -2
paddle/operators/cross_entropy_op.cc       +2   -3
paddle/operators/mean_op.cc                +1   -2
paddle/operators/mul_op.cc                 +1   -3
paddle/operators/sigmoid_op.cc             +2   -3
paddle/operators/softmax_op.cc             +2   -2
paddle/framework/backward_test.cc (view file @ 2ea2fbea)

@@ -150,20 +150,16 @@ class AddOpMaker : public OpProtoAndCheckerMaker {
 namespace f = paddle::framework;
 namespace ops = paddle::operators;
 using EnforceNotMet = paddle::platform::EnforceNotMet;
-REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker, rowwise_add_grad);
-REGISTER_GRADIENT_OP(rowwise_add_grad, f::EmptyOp);
-REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker, mul_grad);
-REGISTER_GRADIENT_OP(mul_grad, f::EmptyOp);
-REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker, sigmoid_grad);
-REGISTER_GRADIENT_OP(sigmoid_grad, f::EmptyOp);
+REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker, rowwise_add_grad,
+            f::EmptyOp);
+REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker, mul_grad, f::EmptyOp);
+REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker, sigmoid_grad, f::EmptyOp);
 REGISTER_OP_WITHOUT_GRADIENT(nograd, f::EmptyOp, f::NoGradOpMaker);
 REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker);
-REGISTER_OP(add, f::EmptyOp, f::AddOpMaker, add_grad);
-REGISTER_GRADIENT_OP(add_grad, f::EmptyOp);
+REGISTER_OP(add, f::EmptyOp, f::AddOpMaker, add_grad, f::EmptyOp);
 REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker);
 REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker,
-            many_output_op_grad);
-REGISTER_GRADIENT_OP(many_output_op_grad, f::EmptyOp);
+            many_output_op_grad, f::EmptyOp);

 TEST(Backward, simple_op_grad) {
   auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
 ...
paddle/framework/grad_op_builder_test.cc (view file @ 2ea2fbea)

@@ -8,13 +8,6 @@ USE_OP(add_two);
 namespace paddle {
 namespace framework {

-class NOP : public OperatorBase {
- public:
-  void InferShape(const Scope &scope) const override {}
-  void Run(const Scope &scope,
-           const platform::DeviceContext &dev_ctx) const override {}
-};
-
 class MutiInOutOpMaker : public OpProtoAndCheckerMaker {
  public:
   MutiInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker)
 ...

@@ -61,10 +54,8 @@ TEST(GradOpBuilder, AddTwo) {
   EXPECT_EQ(grad_add_op->Output("Y@GRAD"), "y@GRAD");
 }

-REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad);
-REGISTER_GRADIENT_OP(mult_io_grad, f::NOP);
-REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad);
-REGISTER_GRADIENT_OP(io_ignored_grad, f::NOP);
+REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP);
+REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP);

 TEST(GradOpBuilder, MutiInOut) {
   f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 4, 5}},
 ...
paddle/framework/op_registry.h (view file @ 2ea2fbea)

@@ -193,7 +193,7 @@ class OpRegistry {
   using VarNameList = std::vector<std::string>;

  public:
-  template <typename OpType, typename ProtoMakerType>
+  template <typename OpType, typename ProtoMakerType, typename GradOpType>
   static void RegisterOp(const std::string& op_type,
                          const std::string& grad_op_type) {
     PADDLE_ENFORCE(op_info_map().count(op_type) == 0,
 ...

@@ -226,6 +226,10 @@ class OpRegistry {
       // ================================================ //
     }
     op_info_map().insert(std::make_pair(op_type, op_info));
+    // register gradient op
+    if (!grad_op_type.empty()) {
+      RegisterOp<GradOpType, NOPMaker, NOP>(grad_op_type, "");
+    }
   }

   static std::shared_ptr<OperatorBase> CreateOp(const std::string& type,
 ...
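The mechanism behind the merge is the branch added at the end of RegisterOp above: when a non-empty grad_op_type is supplied, RegisterOp calls itself once more to register the gradient operator under that name, substituting NOPMaker and NOP for the maker and gradient slots so the recursion stops after one level. A self-contained sketch of that control flow; the map, the stand-in structs, and main below are illustrative only, not Paddle's actual OpRegistry:

    #include <iostream>
    #include <map>
    #include <string>

    // Illustrative stand-ins for paddle::framework::NOP / NOPMaker.
    struct NOP {};
    struct NOPMaker {};

    // Toy registry: op name -> name of its gradient op ("" if none).
    std::map<std::string, std::string> op_info_map;

    template <typename OpType, typename ProtoMakerType, typename GradOpType>
    void RegisterOp(const std::string& op_type, const std::string& grad_op_type) {
      op_info_map[op_type] = grad_op_type;
      // Same idea as the hunk above: also register the gradient op, passing an
      // empty grad_op_type so the recursion terminates after one step.
      if (!grad_op_type.empty()) {
        RegisterOp<GradOpType, NOPMaker, NOP>(grad_op_type, "");
      }
    }

    struct AddOp {};
    struct AddOpMaker {};
    struct AddOpGrad {};

    int main() {
      RegisterOp<AddOp, AddOpMaker, AddOpGrad>("add_two", "add_two_grad");
      for (const auto& kv : op_info_map) {
        std::cout << kv.first << " -> "
                  << (kv.second.empty() ? "(no grad)" : kv.second) << "\n";
      }
      // Prints:
      //   add_two -> add_two_grad
      //   add_two_grad -> (no grad)
    }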
@@ -321,12 +325,13 @@ class Registrar {
   void Touch() {}
 };

-template <typename OpType, typename ProtoMakerType>
+template <typename OpType, typename ProtoMakerType, typename GradOpType>
 class OpRegistrar : public Registrar {
  public:
   explicit OpRegistrar(const char* op_type) { OpRegistrar(op_type, ""); }
   OpRegistrar(const char* op_type, const char* grad_op_type) {
-    OpRegistry::RegisterOp<OpType, ProtoMakerType>(op_type, grad_op_type);
+    OpRegistry::RegisterOp<OpType, ProtoMakerType, GradOpType>(op_type,
+                                                               grad_op_type);
   }
 };
 ...

@@ -352,10 +357,12 @@ class OpKernelRegistrar : public Registrar {
 /**
  * Macro to register Operator.
  */
-#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type) \
+#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \
+                    grad_op_class) \
   STATIC_ASSERT_GLOBAL_NAMESPACE( \
       __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \
-  static ::paddle::framework::OpRegistrar<op_class, op_maker_class> \
+  static ::paddle::framework::OpRegistrar<op_class, op_maker_class, \
+                                          grad_op_class> \
       __op_registrar_##op_type##__(#op_type, #grad_op_type); \
   int TouchOpRegistrar_##op_type() { \
     __op_registrar_##op_type##__.Touch(); \
 ...

@@ -363,10 +370,7 @@ class OpKernelRegistrar : public Registrar {
   }

 #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \
-  REGISTER_OP(op_type, op_class, op_maker_class, )
-
-#define REGISTER_GRADIENT_OP(op_type, op_class) \
-  REGISTER_OP(op_type, op_class, ::paddle::framework::NOPMaker, )
+  REGISTER_OP(op_type, op_class, op_maker_class, , ::paddle::framework::NOP)

 /**
  * Macro to register OperatorKernel.
 ...
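For reference, with the definitions above a call like REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad, ops::AddOpGrad) boils down (ignoring the static assert and the TouchOpRegistrar function) to defining one global registrar object whose constructor performs both registrations. The expansion below is an approximation for illustration, not verbatim preprocessor output:

    static ::paddle::framework::OpRegistrar<ops::AddOp, ops::AddOpMaker,
                                            ops::AddOpGrad>
        __op_registrar_add_two__("add_two", "add_two_grad");

REGISTER_OP_WITHOUT_GRADIENT passes an empty grad_op_type together with ::paddle::framework::NOP, so the registrar's gradient branch is simply skipped.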
paddle/framework/operator.h (view file @ 2ea2fbea)

@@ -125,6 +125,13 @@ class OperatorBase {
   std::shared_ptr<std::unordered_map<std::string, int>> in_out_idxs_;
 };

+class NOP : public OperatorBase {
+ public:
+  void InferShape(const Scope& scope) const override {}
+  void Run(const Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override {}
+};
+
 class InferShapeContext {
  public:
   InferShapeContext(const OperatorBase& op, const Scope& scope)
 ...
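Moving NOP out of grad_op_builder_test.cc and into operator.h lets the registration machinery itself (not just the tests) instantiate a do-nothing operator wherever a gradient class is required but none is meaningful. A minimal, self-contained illustration of the pattern; the Scope, DeviceContext, and OperatorBase below are simplified stand-ins, not Paddle's real classes:

    #include <iostream>

    // Simplified stand-ins; Paddle's real interfaces take framework::Scope and
    // platform::DeviceContext.
    struct Scope {};
    struct DeviceContext {};

    class OperatorBase {
     public:
      virtual ~OperatorBase() = default;
      virtual void InferShape(const Scope& scope) const = 0;
      virtual void Run(const Scope& scope, const DeviceContext& ctx) const = 0;
    };

    // Placeholder operator: satisfies the interface, does nothing. This is the
    // role NOP plays when the registry needs a gradient operator class but
    // there is no real one (e.g. REGISTER_OP_WITHOUT_GRADIENT, or the gradient
    // op's own grad slot).
    class NOP : public OperatorBase {
     public:
      void InferShape(const Scope& scope) const override {}
      void Run(const Scope& scope, const DeviceContext& ctx) const override {}
    };

    int main() {
      NOP nop;
      nop.InferShape(Scope{});
      nop.Run(Scope{}, DeviceContext{});
      std::cout << "NOP ran and did nothing, as intended\n";
    }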
paddle/operators/add_op.cc (view file @ 2ea2fbea)

@@ -55,8 +55,7 @@ class AddOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad);
-REGISTER_GRADIENT_OP(add_two_grad, ops::AddOpGrad);
+REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad, ops::AddOpGrad);
 REGISTER_OP_CPU_KERNEL(add_two,
                        ops::AddKernel<paddle::platform::CPUPlace, float>);
paddle/operators/cross_entropy_op.cc (view file @ 2ea2fbea)

@@ -69,12 +69,11 @@ OnehotCrossEntropy Operator.
 namespace ops = paddle::operators;
 REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp,
-            ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad);
+            ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad,
+            ops::OnehotCrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(onehot_cross_entropy,
                        ops::OnehotCrossEntropyOpKernel<paddle::platform::CPUPlace, float>);
-REGISTER_GRADIENT_OP(onehot_cross_entropy_grad,
-                     ops::OnehotCrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(
     onehot_cross_entropy_grad,
     ops::OnehotCrossEntropyGradientOpKernel<paddle::platform::CPUPlace, float>);
paddle/operators/mean_op.cc (view file @ 2ea2fbea)

@@ -50,9 +50,8 @@ class MeanGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad);
+REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad, ops::MeanGradOp);
 REGISTER_OP_CPU_KERNEL(mean,
                        ops::MeanKernel<paddle::platform::CPUPlace, float>);
-REGISTER_GRADIENT_OP(mean_grad, ops::MeanGradOp);
 REGISTER_OP_CPU_KERNEL(mean_grad,
                        ops::MeanGradKernel<paddle::platform::CPUPlace, float>);
paddle/operators/mul_op.cc (view file @ 2ea2fbea)

@@ -65,7 +65,5 @@ class MulOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad);
-REGISTER_GRADIENT_OP(mul_grad, ops::MulOpGrad);
+REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad);
 REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>);
paddle/operators/sigmoid_op.cc (view file @ 2ea2fbea)

@@ -48,9 +48,8 @@ class SigmoidOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad);
-REGISTER_GRADIENT_OP(sigmoid_grad, ops::SigmoidOpGrad);
+REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad,
+            ops::SigmoidOpGrad);
 REGISTER_OP_CPU_KERNEL(sigmoid,
                        ops::SigmoidKernel<paddle::platform::CPUPlace, float>);
 REGISTER_OP_CPU_KERNEL(
 ...
paddle/operators/softmax_op.cc (view file @ 2ea2fbea)

@@ -64,9 +64,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
-REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad);
+REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad,
+            ops::SoftmaxOpGrad);
 REGISTER_OP_CPU_KERNEL(softmax,
                        ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
-REGISTER_GRADIENT_OP(softmax_grad, ops::SoftmaxOpGrad);
 REGISTER_OP_CPU_KERNEL(softmax_grad,
                        ops::SoftmaxGradKernel<paddle::platform::CPUPlace, float>);