Commit 50ab1dd6, authored by zhaojiaying01

merge logical op param

Parent 27459ab1
@@ -131,9 +131,12 @@ extern const char *G_OP_TYPE_FUSION_CONV_BN_ADD_RELU;
 extern const char *G_OP_TYPE_FUSION_DWCONV_BN_RELU;
 extern const char *G_OP_TYPE_FUSION_CONV_BN_RELU;
+extern const char *G_OP_TYPE_GRU;
+extern const char *G_OP_TYPE_GRU_UNIT;
 extern const char *G_OP_TYPE_LRN;
 extern const char *G_OP_TYPE_MUL;
 extern const char *G_OP_TYPE_MULTICLASS_NMS;
+extern const char *G_OP_TYPE_NORM;
 extern const char *G_OP_TYPE_POOL2D;
 extern const char *G_OP_TYPE_PRIOR_BOX;
 extern const char *G_OP_TYPE_RELU;
@@ -163,6 +166,10 @@ extern const char *G_OP_TYPE_CAST;
 extern const char *G_OP_TYPE_LOG;
 extern const char *G_OP_TYPE_LOD_RESET;
 extern const char *G_OP_TYPE_LESS_THAN;
+extern const char *G_OP_TYPE_LOGICAL_AND;
+extern const char *G_OP_TYPE_LOGICAL_OR;
+extern const char *G_OP_TYPE_LOGICAL_NOT;
+extern const char *G_OP_TYPE_LOGICAL_XOR;
 extern const char *G_OP_TYPE_QUANTIZE;
 extern const char *G_OP_TYPE_DEQUANTIZE;
...
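The new `G_OP_TYPE_*` externs follow this header's existing pattern of string constants whose definitions live in a companion source file. Those definitions are not part of the hunks shown here; a minimal sketch, assuming the string values match the op names registered in the next file:

```cpp
// Hypothetical companion definitions (not shown in this commit's hunks).
// The string values are assumptions matching the LOAD_OP1 names below.
const char *G_OP_TYPE_LOGICAL_AND = "logical_and";
const char *G_OP_TYPE_LOGICAL_OR = "logical_or";
const char *G_OP_TYPE_LOGICAL_NOT = "logical_not";
const char *G_OP_TYPE_LOGICAL_XOR = "logical_xor";
```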
@@ -168,6 +168,9 @@ LOAD_FUSION_MATCHER(fusion_conv_bn_relu);
 #ifdef GRU_OP
 LOAD_OP1(gru, CPU);
 #endif
+#ifdef GRU_UNIT_OP
+LOAD_OP1(gru_unit, CPU);
+#endif
 #ifdef FUSION_CONVADDBN_OP
 LOAD_OP2(fusion_conv_add_bn, CPU, FPGA);
 LOAD_FUSION_MATCHER(fusion_conv_add_bn);
@@ -189,6 +192,9 @@ LOAD_OP1(crf_decoding, CPU);
 #ifdef MUL_OP
 LOAD_OP2(mul, CPU, MALI_GPU);
 #endif
+#ifdef NORM_OP
+LOAD_OP1(norm, CPU);
+#endif
 #ifdef RELU_OP
 LOAD_OP2(relu, CPU, MALI_GPU);
 LOAD_OP1(relu6, CPU);
@@ -279,3 +285,15 @@ LOAD_OP1(lod_reset, CPU);
 #ifdef LESS_THAN_OP
 LOAD_OP1(less_than, CPU);
 #endif
+#ifdef LOGICAL_AND_OP
+LOAD_OP1(logical_and, CPU);
+#endif
+#ifdef LOGICAL_OR_OP
+LOAD_OP1(logical_or, CPU);
+#endif
+#ifdef LOGICAL_NOT_OP
+LOAD_OP1(logical_not, CPU);
+#endif
+#ifdef LOGICAL_XOR_OP
+LOAD_OP1(logical_xor, CPU);
+#endif
@@ -56,12 +56,13 @@ void BinaryLogicalCompute(const Tensor* inputX, const Tensor* inputY,
 #ifdef LOGICAL_AND_OP
 template <>
-bool LogicalAndKernel<CPU, float>::Init(LogicalAndParam<CPU>* param) {
+bool LogicalAndKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
   return true;
 }
 template <>
-void LogicalAndKernel<CPU, float>::Compute(const LogicalAndParam<CPU>& param) {
+void LogicalAndKernel<CPU, float>::Compute(
+    const LogicalBinaryParam<CPU>& param) {
   auto* inputX = param.InputX();
   auto* inputY = param.InputY();
   auto* out = param.Out();
@@ -72,12 +73,13 @@ void LogicalAndKernel<CPU, float>::Compute(const LogicalAndParam<CPU>& param) {
 #ifdef LOGICAL_OR_OP
 template <>
-bool LogicalOrKernel<CPU, float>::Init(LogicalOrParam<CPU>* param) {
+bool LogicalOrKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
   return true;
 }
 template <>
-void LogicalOrKernel<CPU, float>::Compute(const LogicalOrParam<CPU>& param) {
+void LogicalOrKernel<CPU, float>::Compute(
+    const LogicalBinaryParam<CPU>& param) {
   auto* inputX = param.InputX();
   auto* inputY = param.InputY();
   auto* out = param.Out();
@@ -88,12 +90,13 @@ void LogicalOrKernel<CPU, float>::Compute(const LogicalOrParam<CPU>& param) {
 #ifdef LOGICAL_NOT_OP
 template <>
-bool LogicalNotKernel<CPU, float>::Init(LogicalNotParam<CPU>* param) {
+bool LogicalNotKernel<CPU, float>::Init(LogicalUnaryParam<CPU>* param) {
   return true;
 }
 template <>
-void LogicalNotKernel<CPU, float>::Compute(const LogicalNotParam<CPU>& param) {
+void LogicalNotKernel<CPU, float>::Compute(
+    const LogicalUnaryParam<CPU>& param) {
   auto* inputX = param.InputX();
   auto* out = param.Out();
   out->mutable_data<bool>();
@@ -103,12 +106,13 @@ void LogicalNotKernel<CPU, float>::Compute(const LogicalNotParam<CPU>& param) {
 #ifdef LOGICAL_XOR_OP
 template <>
-bool LogicalXorKernel<CPU, float>::Init(LogicalXorParam<CPU>* param) {
+bool LogicalXorKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
   return true;
 }
 template <>
-void LogicalXorKernel<CPU, float>::Compute(const LogicalXorParam<CPU>& param) {
+void LogicalXorKernel<CPU, float>::Compute(
+    const LogicalBinaryParam<CPU>& param) {
   auto* inputX = param.InputX();
   auto* inputY = param.InputY();
   auto* out = param.Out();
...
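The `Compute` specializations delegate the element-wise work to helpers such as `BinaryLogicalCompute`, visible in the hunk header with its body collapsed above. A minimal sketch of that binary helper, assuming a functor template parameter supplies the boolean operation (the real implementation in this file may differ):

```cpp
// Minimal sketch (assumed): apply a boolean functor element-wise.
// The caller has already invoked out->mutable_data<bool>() to allocate
// the output, so output->data<bool>() is valid here.
template <typename T, typename Functor>
void BinaryLogicalCompute(const Tensor* inputX, const Tensor* inputY,
                          Tensor* output) {
  Functor func;
  const T* x = inputX->data<T>();
  const T* y = inputY->data<T>();
  bool* out = output->data<bool>();
  for (int i = 0; i < output->numel(); ++i) {
    out[i] = func(x[i], y[i]);
  }
}
```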
@@ -21,19 +21,19 @@ namespace paddle_mobile {
 namespace operators {
 #ifdef LOGICAL_AND_OP
-DECLARE_KERNEL(LogicalAnd, LogicalAndParam);
+DECLARE_KERNEL(LogicalAnd, LogicalBinaryParam);
 #endif
 #ifdef LOGICAL_OR_OP
-DECLARE_KERNEL(LogicalOr, LogicalOrParam);
+DECLARE_KERNEL(LogicalOr, LogicalBinaryParam);
 #endif
 #ifdef LOGICAL_NOT_OP
-DECLARE_KERNEL(LogicalNot, LogicalNotParam);
+DECLARE_KERNEL(LogicalNot, LogicalUnaryParam);
 #endif
 #ifdef LOGICAL_XOR_OP
-DECLARE_KERNEL(LogicalXor, LogicalXorParam);
+DECLARE_KERNEL(LogicalXor, LogicalBinaryParam);
 #endif
 }  // namespace operators
...
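`DECLARE_KERNEL` stamps out the kernel class template that the .cpp file above specializes. Its assumed shape, inferred from those `Init`/`Compute` specializations (the real macro lives in paddle-mobile's framework headers and may differ):

```cpp
// Assumed expansion (inferred from the Init/Compute specializations above;
// not taken from this commit's hunks).
#define DECLARE_KERNEL(KernelClass, KernelParam)                    \
  template <typename DeviceType, typename T>                        \
  class KernelClass##Kernel                                         \
      : public framework::OpKernelBase<DeviceType,                  \
                                       KernelParam<DeviceType>> {   \
   public:                                                          \
    bool Init(KernelParam<DeviceType>* param);                      \
    void Compute(const KernelParam<DeviceType>& param);             \
  };
```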
@@ -23,19 +23,19 @@ namespace paddle_mobile {
 namespace operators {
 #ifdef LOGICAL_AND_OP
-DECLARE_OPERATOR(LogicalAnd, LogicalAndParam, LogicalAndKernel);
+DECLARE_OPERATOR(LogicalAnd, LogicalBinaryParam, LogicalAndKernel);
 #endif
 #ifdef LOGICAL_OR_OP
-DECLARE_OPERATOR(LogicalOr, LogicalOrParam, LogicalOrKernel);
+DECLARE_OPERATOR(LogicalOr, LogicalBinaryParam, LogicalOrKernel);
 #endif
 #ifdef LOGICAL_NOT_OP
-DECLARE_OPERATOR(LogicalNot, LogicalNotParam, LogicalNotKernel);
+DECLARE_OPERATOR(LogicalNot, LogicalUnaryParam, LogicalNotKernel);
 #endif
 #ifdef LOGICAL_XOR_OP
-DECLARE_OPERATOR(LogicalXor, LogicalXorParam, LogicalXorKernel);
+DECLARE_OPERATOR(LogicalXor, LogicalBinaryParam, LogicalXorKernel);
 #endif
 }  // namespace operators
...
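After this commit the four operator declarations differ only in their kernel class: the three binary ops (and, or, xor) all bind `LogicalBinaryParam`, while `LogicalNot` binds `LogicalUnaryParam`. The per-op behavior lives entirely in the kernels.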
@@ -2942,40 +2942,16 @@ class CompareParam : public OpParam {
 };
 #endif  // LESS_THAN_OP
-#ifdef LOGICAL_AND_OP
+#if defined(LOGICAL_AND_OP) || defined(LOGICAL_OR_OP) || defined(LOGICAL_XOR_OP)
 template <typename Dtype>
-class LogicalAndParam : public OpParam {
+class LogicalBinaryParam : public OpParam {
   typedef typename DtypeTensorTrait<Dtype>::gtype GType;
   typedef typename DtypeTensorTrait<Dtype>::rtype RType;
  public:
-  LogicalAndParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                  const AttributeMap &attrs, const Scope &scope) {
-    input_x_ = InputXFrom<GType>(inputs, scope);
-    input_y_ = InputYFrom<GType>(inputs, scope);
-    output_ = OutFrom<GType>(outputs, scope);
-  }
-  const GType *InputX() const { return input_x_; }
-  const GType *InputY() const { return input_y_; }
-  GType *Out() const { return output_; }
- public:
-  GType *input_x_;
-  GType *input_y_;
-  GType *output_;
-};
-#endif  // LOGICAL_AND_OP
-#ifdef LOGICAL_OR_OP
-template <typename Dtype>
-class LogicalOrParam : public OpParam {
-  typedef typename DtypeTensorTrait<Dtype>::gtype GType;
-  typedef typename DtypeTensorTrait<Dtype>::rtype RType;
- public:
-  LogicalOrParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                 const AttributeMap &attrs, const Scope &scope) {
+  LogicalBinaryParam(const VariableNameMap &inputs,
+                     const VariableNameMap &outputs, const AttributeMap &attrs,
+                     const Scope &scope) {
     input_x_ = InputXFrom<GType>(inputs, scope);
     input_y_ = InputYFrom<GType>(inputs, scope);
     output_ = OutFrom<GType>(outputs, scope);
@@ -2990,17 +2966,18 @@ class LogicalOrParam : public OpParam {
   GType *input_y_;
   GType *output_;
 };
-#endif  // LOGICAL_OR_OP
+#endif  // LOGICAL_AND_OP LOGICAL_OR_OP LOGICAL_XOR_OP
 #ifdef LOGICAL_NOT_OP
 template <typename Dtype>
-class LogicalNotParam : public OpParam {
+class LogicalUnaryParam : public OpParam {
   typedef typename DtypeTensorTrait<Dtype>::gtype GType;
   typedef typename DtypeTensorTrait<Dtype>::rtype RType;
  public:
-  LogicalNotParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                  const AttributeMap &attrs, const Scope &scope) {
+  LogicalUnaryParam(const VariableNameMap &inputs,
+                    const VariableNameMap &outputs, const AttributeMap &attrs,
+                    const Scope &scope) {
     input_x_ = InputXFrom<GType>(inputs, scope);
     output_ = OutFrom<GType>(outputs, scope);
   }
@@ -3014,30 +2991,5 @@ class LogicalNotParam : public OpParam {
 };
 #endif  // LOGICAL_NOT_OP
-#ifdef LOGICAL_XOR_OP
-template <typename Dtype>
-class LogicalXorParam : public OpParam {
-  typedef typename DtypeTensorTrait<Dtype>::gtype GType;
-  typedef typename DtypeTensorTrait<Dtype>::rtype RType;
- public:
-  LogicalXorParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                  const AttributeMap &attrs, const Scope &scope) {
-    input_x_ = InputXFrom<GType>(inputs, scope);
-    input_y_ = InputYFrom<GType>(inputs, scope);
-    output_ = OutFrom<GType>(outputs, scope);
-  }
-  const GType *InputX() const { return input_x_; }
-  const GType *InputY() const { return input_y_; }
-  GType *Out() const { return output_; }
- public:
-  GType *input_x_;
-  GType *input_y_;
-  GType *output_;
-};
-#endif  // LOGICAL_XOR_OP
 }  // namespace operators
 }  // namespace paddle_mobile
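Reassembling the hunks above, the merged binary param class reads as follows after the commit (blank lines and indentation are approximate; the content is taken directly from the diff):

```cpp
#if defined(LOGICAL_AND_OP) || defined(LOGICAL_OR_OP) || defined(LOGICAL_XOR_OP)
template <typename Dtype>
class LogicalBinaryParam : public OpParam {
  typedef typename DtypeTensorTrait<Dtype>::gtype GType;
  typedef typename DtypeTensorTrait<Dtype>::rtype RType;

 public:
  LogicalBinaryParam(const VariableNameMap &inputs,
                     const VariableNameMap &outputs, const AttributeMap &attrs,
                     const Scope &scope) {
    input_x_ = InputXFrom<GType>(inputs, scope);
    input_y_ = InputYFrom<GType>(inputs, scope);
    output_ = OutFrom<GType>(outputs, scope);
  }

  const GType *InputX() const { return input_x_; }
  const GType *InputY() const { return input_y_; }
  GType *Out() const { return output_; }

 public:
  GType *input_x_;
  GType *input_y_;
  GType *output_;
};
#endif  // LOGICAL_AND_OP LOGICAL_OR_OP LOGICAL_XOR_OP
```

One class now serves logical_and, logical_or, and logical_xor, which is the usual way to collapse boilerplate param classes that differ only by name.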