Commit 19ab1dcd
Authored August 11, 2017 by Yi Wang; committed via GitHub on August 11, 2017
Merge pull request #3373 from Canpio/refactor_registry_macro
Refactorize registry macro
Parents: 5d4126a8, 580445a4
Showing 3 changed files with 126 additions and 79 deletions (+126, -79)
paddle/framework/op_registry.h        +122 -75
paddle/framework/pybind.cc            +3   -3
paddle/operators/recurrent_op_test.cc +1   -1
paddle/framework/op_registry.h
@@ -307,22 +307,45 @@ class OpRegistry {
   }
 };

+class Registrar {
+ public:
+  // In our design, various kinds of classes, e.g., operators and kernels, have
+  // their corresponding registry and registrar. The action of registration is
+  // in the constructor of a global registrar variable, which, however, are not
+  // used in the code that calls package framework, and would be removed from
+  // the generated binary file by the linker. To avoid such removal, we add
+  // Touch to all registrar classes and make USE_OP macros to call this
+  // method. So, as long as the callee code calls USE_OP, the global
+  // registrar variable won't be removed by the linker.
+  void Touch() {}
+};
 template <typename OpType, typename ProtoMakerType>
-class OpRegisterHelper {
+class OpRegistrar : public Registrar {
  public:
-  explicit OpRegisterHelper(const char* op_type) {
+  explicit OpRegistrar(const char* op_type) {
     OpRegistry::RegisterOp<OpType, ProtoMakerType>(op_type);
   }
 };
 template <typename GradOpType>
-class GradOpRegisterHelper {
+class GradOpRegistrar : public Registrar {
  public:
-  GradOpRegisterHelper(const char* op_type, const char* grad_op_type) {
+  GradOpRegistrar(const char* op_type, const char* grad_op_type) {
     OpRegistry::RegisterGradOp<GradOpType>(op_type, grad_op_type);
   }
 };
+template <typename PlaceType, typename KernelType>
+class OpKernelRegistrar : public Registrar {
+ public:
+  explicit OpKernelRegistrar(const char* op_type) {
+    OperatorWithKernel::OpKernelKey key;
+    key.place_ = PlaceType();
+    OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KernelType);
+  }
+};
 /**
  * check if MACRO is used in GLOBAL NAMESPACE.
  */
@@ -333,97 +356,121 @@ class GradOpRegisterHelper {
msg)
 /**
- * Macro to Register Operator.
+ * Macro to register Operator.
  */
-#define REGISTER_OP(__op_type, __op_class, __op_maker_class)                 \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(__reg_op__##__op_type,                      \
-                                 "REGISTER_OP must be in global namespace"); \
-  static ::paddle::framework::OpRegisterHelper<__op_class, __op_maker_class> \
-      __op_register_##__op_type##__(#__op_type);                             \
-  int __op_register_##__op_type##_handle__() { return 0; }
+#define REGISTER_OP(op_type, op_class, op_maker_class)                        \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                                             \
+      __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \
+  static ::paddle::framework::OpRegistrar<op_class, op_maker_class>           \
+      __op_registrar_##op_type##__(#op_type);                                 \
+  int TouchOpRegistrar_##op_type() {                                          \
+    __op_registrar_##op_type##__.Touch();                                     \
+    return 0;                                                                 \
+  }
 /**
- * Macro to Register Gradient Operator.
+ * Macro to register Gradient Operator.
  */
-#define REGISTER_GRADIENT_OP(__op_type, __grad_op_type, __grad_op_class)       \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                                              \
-      __reg_gradient_op__##__op_type##__grad_op_type,                          \
-      "REGISTER_GRADIENT_OP must be in global namespace");                     \
-  static ::paddle::framework::GradOpRegisterHelper<__grad_op_class>            \
-      __op_gradient_register_##__op_type##__grad_op_type##__(#__op_type,       \
-                                                             #__grad_op_type); \
-  int __op_gradient_register_##__op_type##__grad_op_type##_handle__() {        \
-    return 0;                                                                  \
+#define REGISTER_GRADIENT_OP(op_type, grad_op_type, grad_op_class)     \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                                      \
+      __reg_gradient_op__##op_type##_##grad_op_type,                   \
+      "REGISTER_GRADIENT_OP must be called in global namespace");      \
+  static ::paddle::framework::GradOpRegistrar<grad_op_class>           \
+      __op_gradient_registrar_##op_type##_##grad_op_type##__(#op_type, \
+                                                             #grad_op_type); \
+  int TouchOpGradientRegistrar_##op_type() {                            \
+    __op_gradient_registrar_##op_type##_##grad_op_type##__.Touch();     \
+    return 0;                                                           \
   }
 /**
- * Macro to Forbid user register Gradient Operator.
+ * Macro to register OperatorKernel.
  */
-#define NO_GRADIENT(__op_type)                          \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                       \
-      __reg_gradient_op__##__op_type##__op_type##_grad, \
-      "NO_GRADIENT must be in global namespace")
+#define REGISTER_OP_KERNEL(op_type, DEVICE_TYPE, place_class, ...)        \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                                         \
+      __reg_op_kernel_##op_type##_##DEVICE_TYPE##__,                      \
+      "REGISTER_OP_KERNEL must be called in global namespace");           \
+  static ::paddle::framework::OpKernelRegistrar<place_class, __VA_ARGS__> \
+      __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__(#op_type);      \
+  int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE() {                \
+    __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__.Touch();          \
+    return 0;                                                             \
+  }
 /**
- * Macro to Register OperatorKernel.
+ * Macro to Forbid user register Gradient Operator.
  */
-#define REGISTER_OP_KERNEL(type, DEVICE_TYPE, PlaceType, ...)             \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                                         \
-      __reg_op_kernel_##type##_##DEVICE_TYPE##__,                         \
-      "REGISTER_OP_KERNEL must be in global namespace");                  \
-  struct __op_kernel_register__##type##__##DEVICE_TYPE##__ {              \
-    __op_kernel_register__##type##__##DEVICE_TYPE##__() {                 \
-      ::paddle::framework::OperatorWithKernel::OpKernelKey key;           \
-      key.place_ = PlaceType();                                           \
-      ::paddle::framework::OperatorWithKernel::AllOpKernels()[#type][key] \
-          .reset(new __VA_ARGS__());                                      \
-    }                                                                     \
-  };                                                                      \
-  static __op_kernel_register__##type##__##DEVICE_TYPE##__                \
-      __reg_kernel_##type##__##DEVICE_TYPE##__;                           \
-  int __op_kernel_register_##type##_handle_##DEVICE_TYPE##__() { return 0; }
-
-// (type, KernelType)
-#define REGISTER_OP_GPU_KERNEL(type, ...) \
-  REGISTER_OP_KERNEL(type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__)
-
-// (type, KernelType)
-#define REGISTER_OP_CPU_KERNEL(type, ...) \
-  REGISTER_OP_KERNEL(type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
+#define NO_GRADIENT(op_type)                           \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                      \
+      __reg_gradient_op__##op_type##_##op_type##_grad, \
+      "NO_GRADIENT must be called in global namespace")
+
+#define REGISTER_OP_GPU_KERNEL(op_type, ...) \
+  REGISTER_OP_KERNEL(op_type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__)
+
+#define REGISTER_OP_CPU_KERNEL(op_type, ...) \
+  REGISTER_OP_KERNEL(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
 /**
  * Macro to mark what Operator and Kernel we will use and tell the compiler to
  * link them into target.
  */
-#define USE_OP_WITHOUT_KERNEL(op_type)                      \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                           \
-      __use_op_without_kernel_##op_type,                    \
-      "USE_OP_WITHOUT_KERNEL must be in global namespace"); \
-  extern int __op_register_##op_type##_handle__();          \
-  static int __use_op_ptr_##op_type##_without_kernel__      \
-      __attribute__((unused)) = __op_register_##op_type##_handle__()
-
-#define USE_OP_KERNEL(op_type, DEVICE_TYPE)                               \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                                         \
-      __use_op_kernel_##op_type##_##DEVICE_TYPE##__,                      \
-      "USE_OP_KERNEL must be in global namespace");                       \
-  extern int __op_kernel_register_##op_type##_handle_##DEVICE_TYPE##__(); \
-  static int __use_op_ptr_##op_type##_##DEVICE_TYPE##_kernel__            \
-      __attribute__((unused)) =                                           \
-          __op_kernel_register_##op_type##_handle_##DEVICE_TYPE##__()
-
-// use Operator with only cpu kernel.
-#define USE_OP_CPU(op_type)       \
-  USE_OP_WITHOUT_KERNEL(op_type); \
-  USE_OP_KERNEL(op_type, CPU)
+#define USE_OP_ITSELF(op_type)                                     \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                                  \
+      __use_op_itself_##op_type,                                   \
+      "USE_OP_ITSELF must be called in global namespace");         \
+  extern int TouchOpRegistrar_##op_type();                         \
+  static int use_op_itself_##op_type##_ __attribute__((unused)) =  \
+      TouchOpRegistrar_##op_type()
+
+// TODO(fengjiayi): Most ops' gradient op have not been compeleted. So we use
+// `NO_GRAD` to disable micro USE_OP_GRADIENT(op_type). Otherwise the code can't
+// be compiled. `NO_GRAD` should be removed after all gradient ops are
+// compeleted.
+#define NO_GRAD
+#ifndef NO_GRAD
+#define USE_OP_GRADIENT(op_type)                                     \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                                    \
+      __use_op_gradient_##op_type,                                   \
+      "USE_OP_GRADIENT must be called in global namespace");         \
+  extern int TouchOpGradientRegistrar_##op_type();                   \
+  static int use_op_gradient_##op_type##_ __attribute__((unused)) =  \
+      TouchOpGradientRegistrar_##op_type()
+#else
+#define USE_OP_GRADIENT(op_type)
+#endif
+
+#define USE_OP_DEVICE_KERNEL(op_type, DEVICE_TYPE)               \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                                \
+      __use_op_kernel_##op_type##_##DEVICE_TYPE##__,             \
+      "USE_OP_DEVICE_KERNEL must be in global namespace");       \
+  extern int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE(); \
+  static int use_op_kernel_##op_type##_##DEVICE_TYPE##_          \
+      __attribute__((unused)) =                                  \
+          TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE()
+// TODO(fengjiayi): The following macros seems ugly, do we have better method?
 #ifdef PADDLE_ONLY_CPU
-#define USE_OP(op_type) USE_OP_CPU(op_type)
+#define USE_OP_KERNEL(op_type) USE_OP_DEVICE_KERNEL(op_type, CPU)
 #else
-#define USE_OP(op_type) \
-  USE_OP_CPU(op_type);  \
-  USE_OP_KERNEL(op_type, GPU)
+#define USE_OP_KERNEL(op_type)        \
+  USE_OP_DEVICE_KERNEL(op_type, CPU); \
+  USE_OP_DEVICE_KERNEL(op_type, GPU)
 #endif
+#define USE_NO_GRAD_OP(op_type) \
+  USE_OP_ITSELF(op_type);       \
+  USE_OP_KERNEL(op_type)
+
+#define USE_CPU_OP(op_type)            \
+  USE_OP_ITSELF(op_type);              \
+  USE_OP_DEVICE_KERNEL(op_type, CPU);  \
+  USE_OP_GRADIENT(op_type)
+
+#define USE_OP(op_type)    \
+  USE_NO_GRAD_OP(op_type); \
+  USE_OP_GRADIENT(op_type)
 }  // namespace framework
 }  // namespace paddle
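The comment on the new Registrar base class (first hunk above) explains the problem this refactor addresses: each REGISTER_* macro creates a global registrar object whose constructor performs the registration, but if nothing in the final binary references that object's translation unit, the linker may drop it and the op silently never registers. Touch() plus the generated TouchOpRegistrar_* functions give the USE_* macros a symbol to reference. The following is a minimal, self-contained sketch of that pattern; the names (MY_REGISTER_OP, MY_USE_OP, AddTwoOp, the simplified Registry map) are hypothetical illustrations, not code from this commit.

// registry_sketch.cc -- illustrative only, not part of Paddle.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Op {
  virtual ~Op() = default;
  virtual void Run() const = 0;
};

// Global registry: op name -> factory function.
std::map<std::string, std::function<std::unique_ptr<Op>()>>& Registry() {
  static std::map<std::string, std::function<std::unique_ptr<Op>()>> registry;
  return registry;
}

// Counterpart of the Registrar base class above: Touch() does nothing, but it
// gives the "use" macro a symbol to reference in the registering object file.
class Registrar {
 public:
  void Touch() {}
};

template <typename OpType>
class OpRegistrar : public Registrar {
 public:
  explicit OpRegistrar(const char* op_type) {
    Registry()[op_type] = [] { return std::unique_ptr<Op>(new OpType); };
  }
};

// Simplified stand-ins for REGISTER_OP / USE_OP_ITSELF from the diff above.
#define MY_REGISTER_OP(op_type, op_class)                               \
  static OpRegistrar<op_class> __op_registrar_##op_type##__(#op_type);  \
  int TouchOpRegistrar_##op_type() {                                    \
    __op_registrar_##op_type##__.Touch();                               \
    return 0;                                                           \
  }

#define MY_USE_OP(op_type)                              \
  extern int TouchOpRegistrar_##op_type();              \
  static int use_op_itself_##op_type##_                 \
      __attribute__((unused)) = TouchOpRegistrar_##op_type()

// "Library" side: registration happens during static initialization.
struct AddTwoOp : Op {
  void Run() const override { std::cout << "add_two kernel\n"; }
};
MY_REGISTER_OP(add_two, AddTwoOp)

// "Application" side (normally a different .cc file, e.g. pybind.cc):
// referencing TouchOpRegistrar_add_two forces the linker to keep the object
// file that defines __op_registrar_add_two__, so the registration runs.
MY_USE_OP(add_two);

int main() {
  Registry()["add_two"]()->Run();
  return 0;
}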
paddle/framework/pybind.cc
@@ -30,15 +30,15 @@ limitations under the License. */
 namespace py = pybind11;

 USE_OP(add_two);
-USE_OP_CPU(onehot_cross_entropy);
-USE_OP(sgd);
+USE_CPU_OP(onehot_cross_entropy);
+USE_NO_GRAD_OP(sgd);
 USE_OP(mul);
 USE_OP(mean);
 USE_OP(sigmoid);
 USE_OP(softmax);
 USE_OP(rowwise_add);
 USE_OP(fill_zeros_like);
-USE_OP_WITHOUT_KERNEL(recurrent_op);
+USE_OP_ITSELF(recurrent_op);
 USE_OP(gaussian_random);
 USE_OP(uniform_random);
paddle/operators/recurrent_op_test.cc
@@ -395,4 +395,4 @@ TEST(RecurrentOp, LinkMemories) {
 USE_OP(add_two);
 USE_OP(mul);
-USE_OP_WITHOUT_KERNEL(recurrent_op);
+USE_OP_ITSELF(recurrent_op);
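Taken together with the macros added in op_registry.h, the consumer-side edits in pybind.cc and recurrent_op_test.cc are essentially a renaming from the old "use" macros to the new Touch-based ones. As a rough, non-authoritative trace (assuming a GPU-enabled build, and noting that USE_OP_GRADIENT currently expands to nothing because NO_GRAD is defined), USE_OP(add_two) in pybind.cc now unfolds as:

USE_OP(add_two);
// -> USE_NO_GRAD_OP(add_two); USE_OP_GRADIENT(add_two);
// -> USE_OP_ITSELF(add_two); USE_OP_KERNEL(add_two);   // gradient part is empty
// -> USE_OP_ITSELF(add_two);
//    USE_OP_DEVICE_KERNEL(add_two, CPU);
//    USE_OP_DEVICE_KERNEL(add_two, GPU);
// i.e. it declares and calls TouchOpRegistrar_add_two(),
// TouchOpKernelRegistrar_add_two_CPU() and TouchOpKernelRegistrar_add_two_GPU(),
// which keeps the corresponding registrar globals from being dropped by the linker.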