提交 19ab1dcd 编写于 作者: Y Yi Wang 提交者: GitHub

Merge pull request #3373 from Canpio/refactor_registry_macro

Refactorize registry macro
...@@ -307,22 +307,45 @@ class OpRegistry { ...@@ -307,22 +307,45 @@ class OpRegistry {
} }
}; };
// Common base class of every registrar (operator, gradient-operator, and
// kernel registrars). Registration itself runs in the constructor of a
// global registrar variable; because client code never references those
// globals directly, the linker would otherwise strip them from the final
// binary. To keep them alive, each registrar exposes Touch(), and the
// USE_OP family of macros calls it — so any translation unit that invokes
// USE_OP forces the corresponding global registrar to be linked in.
class Registrar {
 public:
  // Intentionally a no-op: its sole purpose is to create a reference to the
  // global registrar object so the linker cannot discard it.
  void Touch() {}
};
template <typename OpType, typename ProtoMakerType> template <typename OpType, typename ProtoMakerType>
class OpRegisterHelper { class OpRegistrar : public Registrar {
public: public:
explicit OpRegisterHelper(const char* op_type) { explicit OpRegistrar(const char* op_type) {
OpRegistry::RegisterOp<OpType, ProtoMakerType>(op_type); OpRegistry::RegisterOp<OpType, ProtoMakerType>(op_type);
} }
}; };
template <typename GradOpType> template <typename GradOpType>
class GradOpRegisterHelper { class GradOpRegistrar : public Registrar {
public: public:
GradOpRegisterHelper(const char* op_type, const char* grad_op_type) { GradOpRegistrar(const char* op_type, const char* grad_op_type) {
OpRegistry::RegisterGradOp<GradOpType>(op_type, grad_op_type); OpRegistry::RegisterGradOp<GradOpType>(op_type, grad_op_type);
} }
}; };
template <typename PlaceType, typename KernelType>
class OpKernelRegistrar : public Registrar {
public:
explicit OpKernelRegistrar(const char* op_type) {
OperatorWithKernel::OpKernelKey key;
key.place_ = PlaceType();
OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KernelType);
}
};
/** /**
* check if MACRO is used in GLOBAL NAMESPACE. * check if MACRO is used in GLOBAL NAMESPACE.
*/ */
...@@ -333,97 +356,121 @@ class GradOpRegisterHelper { ...@@ -333,97 +356,121 @@ class GradOpRegisterHelper {
msg) msg)
/** /**
* Macro to Register Operator. * Macro to register Operator.
*/ */
#define REGISTER_OP(__op_type, __op_class, __op_maker_class) \ #define REGISTER_OP(op_type, op_class, op_maker_class) \
STATIC_ASSERT_GLOBAL_NAMESPACE(__reg_op__##__op_type, \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
"REGISTER_OP must be in global namespace"); \ __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \
static ::paddle::framework::OpRegisterHelper<__op_class, __op_maker_class> \ static ::paddle::framework::OpRegistrar<op_class, op_maker_class> \
__op_register_##__op_type##__(#__op_type); \ __op_registrar_##op_type##__(#op_type); \
int __op_register_##__op_type##_handle__() { return 0; } int TouchOpRegistrar_##op_type() { \
__op_registrar_##op_type##__.Touch(); \
return 0; \
}
/** /**
* Macro to Register Gradient Operator. * Macro to register Gradient Operator.
*/ */
#define REGISTER_GRADIENT_OP(__op_type, __grad_op_type, __grad_op_class) \ #define REGISTER_GRADIENT_OP(op_type, grad_op_type, grad_op_class) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_gradient_op__##__op_type##__grad_op_type, \ __reg_gradient_op__##op_type##_##grad_op_type, \
"REGISTER_GRADIENT_OP must be in global namespace"); \ "REGISTER_GRADIENT_OP must be called in global namespace"); \
static ::paddle::framework::GradOpRegisterHelper<__grad_op_class> \ static ::paddle::framework::GradOpRegistrar<grad_op_class> \
__op_gradient_register_##__op_type##__grad_op_type##__(#__op_type, \ __op_gradient_registrar_##op_type##_##grad_op_type##__(#op_type, \
#__grad_op_type); \ #grad_op_type); \
int __op_gradient_register_##__op_type##__grad_op_type##_handle__() { \ int TouchOpGradientRegistrar_##op_type() { \
__op_gradient_registrar_##op_type##_##grad_op_type##__.Touch(); \
return 0; \ return 0; \
} }
/** /**
* Macro to Forbid user register Gradient Operator. * Macro to register OperatorKernel.
*/ */
#define NO_GRADIENT(__op_type) \ #define REGISTER_OP_KERNEL(op_type, DEVICE_TYPE, place_class, ...) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_gradient_op__##__op_type##__op_type##_grad, \ __reg_op_kernel_##op_type##_##DEVICE_TYPE##__, \
"NO_GRADIENT must be in global namespace") "REGISTER_OP_KERNEL must be called in global namespace"); \
static ::paddle::framework::OpKernelRegistrar<place_class, __VA_ARGS__> \
__op_kernel_registrar_##op_type##_##DEVICE_TYPE##__(#op_type); \
int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE() { \
__op_kernel_registrar_##op_type##_##DEVICE_TYPE##__.Touch(); \
return 0; \
}
/** /**
* Macro to Register OperatorKernel. * Macro to Forbid user register Gradient Operator.
*/ */
#define REGISTER_OP_KERNEL(type, DEVICE_TYPE, PlaceType, ...) \ #define NO_GRADIENT(op_type) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_op_kernel_##type##_##DEVICE_TYPE##__, \ __reg_gradient_op__##op_type##_##op_type##_grad, \
"REGISTER_OP_KERNEL must be in global namespace"); \ "NO_GRADIENT must be called in global namespace")
struct __op_kernel_register__##type##__##DEVICE_TYPE##__ { \
__op_kernel_register__##type##__##DEVICE_TYPE##__() { \ #define REGISTER_OP_GPU_KERNEL(op_type, ...) \
::paddle::framework::OperatorWithKernel::OpKernelKey key; \ REGISTER_OP_KERNEL(op_type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__)
key.place_ = PlaceType(); \
::paddle::framework::OperatorWithKernel::AllOpKernels()[#type][key] \ #define REGISTER_OP_CPU_KERNEL(op_type, ...) \
.reset(new __VA_ARGS__()); \ REGISTER_OP_KERNEL(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
} \
}; \
static __op_kernel_register__##type##__##DEVICE_TYPE##__ \
__reg_kernel_##type##__##DEVICE_TYPE##__; \
int __op_kernel_register_##type##_handle_##DEVICE_TYPE##__() { return 0; }
// (type, KernelType)
#define REGISTER_OP_GPU_KERNEL(type, ...) \
REGISTER_OP_KERNEL(type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__)
// (type, KernelType)
#define REGISTER_OP_CPU_KERNEL(type, ...) \
REGISTER_OP_KERNEL(type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
/** /**
* Macro to mark what Operator and Kernel we will use and tell the compiler to * Macro to mark what Operator and Kernel we will use and tell the compiler to
* link them into target. * link them into target.
*/ */
#define USE_OP_WITHOUT_KERNEL(op_type) \ #define USE_OP_ITSELF(op_type) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \
__use_op_itself_##op_type, \
"USE_OP_ITSELF must be called in global namespace"); \
extern int TouchOpRegistrar_##op_type(); \
static int use_op_itself_##op_type##_ __attribute__((unused)) = \
TouchOpRegistrar_##op_type()
// TODO(fengjiayi): Most ops' gradient ops have not been completed. So we use
// `NO_GRAD` to disable the macro USE_OP_GRADIENT(op_type). Otherwise the code
// can't be compiled. `NO_GRAD` should be removed after all gradient ops are
// completed.
#define NO_GRAD
#ifndef NO_GRAD
#define USE_OP_GRADIENT(op_type) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__use_op_without_kernel_##op_type, \ __use_op_gradient_##op_type, \
"USE_OP_WITHOUT_KERNEL must be in global namespace"); \ "USE_OP_GRADIENT must be called in global namespace"); \
extern int __op_register_##op_type##_handle__(); \ extern int TouchOpGradientRegistrar_##op_type(); \
static int __use_op_ptr_##op_type##_without_kernel__ \ static int use_op_gradient_##op_type##_ __attribute__((unused)) = \
__attribute__((unused)) = __op_register_##op_type##_handle__() TouchOpGradientRegistrar_##op_type()
#else
#define USE_OP_GRADIENT(op_type)
#endif
#define USE_OP_KERNEL(op_type, DEVICE_TYPE) \ #define USE_OP_DEVICE_KERNEL(op_type, DEVICE_TYPE) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__use_op_kernel_##op_type##_##DEVICE_TYPE##__, \ __use_op_kernel_##op_type##_##DEVICE_TYPE##__, \
"USE_OP_KERNEL must be in global namespace"); \ "USE_OP_DEVICE_KERNEL must be in global namespace"); \
extern int __op_kernel_register_##op_type##_handle_##DEVICE_TYPE##__(); \ extern int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE(); \
static int __use_op_ptr_##op_type##_##DEVICE_TYPE##_kernel__ \ static int use_op_kernel_##op_type##_##DEVICE_TYPE##_ \
__attribute__((unused)) = \ __attribute__((unused)) = \
__op_kernel_register_##op_type##_handle_##DEVICE_TYPE##__() TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE()
// use Operator with only cpu kernel. // TODO(fengjiayi): The following macros seems ugly, do we have better method?
#define USE_OP_CPU(op_type) \
USE_OP_WITHOUT_KERNEL(op_type); \
USE_OP_KERNEL(op_type, CPU)
#ifdef PADDLE_ONLY_CPU #ifdef PADDLE_ONLY_CPU
#define USE_OP(op_type) USE_OP_CPU(op_type) #define USE_OP_KERNEL(op_type) USE_OP_DEVICE_KERNEL(op_type, CPU)
#else #else
#define USE_OP(op_type) \ #define USE_OP_KERNEL(op_type) \
USE_OP_CPU(op_type); \ USE_OP_DEVICE_KERNEL(op_type, CPU); \
USE_OP_KERNEL(op_type, GPU) USE_OP_DEVICE_KERNEL(op_type, GPU)
#endif #endif
#define USE_NO_GRAD_OP(op_type) \
USE_OP_ITSELF(op_type); \
USE_OP_KERNEL(op_type)
#define USE_CPU_OP(op_type) \
USE_OP_ITSELF(op_type); \
USE_OP_DEVICE_KERNEL(op_type, CPU); \
USE_OP_GRADIENT(op_type)
#define USE_OP(op_type) \
USE_NO_GRAD_OP(op_type); \
USE_OP_GRADIENT(op_type)
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -30,15 +30,15 @@ limitations under the License. */ ...@@ -30,15 +30,15 @@ limitations under the License. */
namespace py = pybind11; namespace py = pybind11;
USE_OP(add_two); USE_OP(add_two);
USE_OP_CPU(onehot_cross_entropy); USE_CPU_OP(onehot_cross_entropy);
USE_OP(sgd); USE_NO_GRAD_OP(sgd);
USE_OP(mul); USE_OP(mul);
USE_OP(mean); USE_OP(mean);
USE_OP(sigmoid); USE_OP(sigmoid);
USE_OP(softmax); USE_OP(softmax);
USE_OP(rowwise_add); USE_OP(rowwise_add);
USE_OP(fill_zeros_like); USE_OP(fill_zeros_like);
USE_OP_WITHOUT_KERNEL(recurrent_op); USE_OP_ITSELF(recurrent_op);
USE_OP(gaussian_random); USE_OP(gaussian_random);
USE_OP(uniform_random); USE_OP(uniform_random);
......
...@@ -395,4 +395,4 @@ TEST(RecurrentOp, LinkMemories) { ...@@ -395,4 +395,4 @@ TEST(RecurrentOp, LinkMemories) {
USE_OP(add_two); USE_OP(add_two);
USE_OP(mul); USE_OP(mul);
USE_OP_WITHOUT_KERNEL(recurrent_op); USE_OP_ITSELF(recurrent_op);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册