Commit 50ab1dd6 authored by zhaojiaying01

merge logical op param

Parent 27459ab1
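In short: the four per-op logical parameter classes carried identical members, so this commit folds them into two shared classes. A condensed map of the rename, using only names that appear in the diff below (the commit also registers the new gru_unit and norm ops):

// Before: one param class per logical op.
//   LogicalAndParam, LogicalOrParam, LogicalXorParam   (two inputs + output)
//   LogicalNotParam                                    (one input + output)
// After: two shared param classes.
//   LogicalBinaryParam  -> logical_and, logical_or, logical_xor
//   LogicalUnaryParam   -> logical_not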
@@ -131,9 +131,12 @@ extern const char *G_OP_TYPE_FUSION_CONV_BN_ADD_RELU;
 extern const char *G_OP_TYPE_FUSION_DWCONV_BN_RELU;
 extern const char *G_OP_TYPE_FUSION_CONV_BN_RELU;
 extern const char *G_OP_TYPE_GRU;
+extern const char *G_OP_TYPE_GRU_UNIT;
 extern const char *G_OP_TYPE_LRN;
 extern const char *G_OP_TYPE_MUL;
 extern const char *G_OP_TYPE_MULTICLASS_NMS;
+extern const char *G_OP_TYPE_NORM;
 extern const char *G_OP_TYPE_POOL2D;
 extern const char *G_OP_TYPE_PRIOR_BOX;
 extern const char *G_OP_TYPE_RELU;
@@ -163,6 +166,10 @@ extern const char *G_OP_TYPE_CAST;
 extern const char *G_OP_TYPE_LOG;
 extern const char *G_OP_TYPE_LOD_RESET;
 extern const char *G_OP_TYPE_LESS_THAN;
+extern const char *G_OP_TYPE_LOGICAL_AND;
+extern const char *G_OP_TYPE_LOGICAL_OR;
+extern const char *G_OP_TYPE_LOGICAL_NOT;
+extern const char *G_OP_TYPE_LOGICAL_XOR;
 extern const char *G_OP_TYPE_QUANTIZE;
 extern const char *G_OP_TYPE_DEQUANTIZE;
@@ -168,6 +168,9 @@ LOAD_FUSION_MATCHER(fusion_conv_bn_relu);
 #ifdef GRU_OP
 LOAD_OP1(gru, CPU);
 #endif
+#ifdef GRU_UNIT_OP
+LOAD_OP1(gru_unit, CPU);
+#endif
 #ifdef FUSION_CONVADDBN_OP
 LOAD_OP2(fusion_conv_add_bn, CPU, FPGA);
 LOAD_FUSION_MATCHER(fusion_conv_add_bn);
@@ -189,6 +192,9 @@ LOAD_OP1(crf_decoding, CPU);
 #ifdef MUL_OP
 LOAD_OP2(mul, CPU, MALI_GPU);
 #endif
+#ifdef NORM_OP
+LOAD_OP1(norm, CPU);
+#endif
 #ifdef RELU_OP
 LOAD_OP2(relu, CPU, MALI_GPU);
 LOAD_OP1(relu6, CPU);
@@ -279,3 +285,15 @@ LOAD_OP1(lod_reset, CPU);
 #ifdef LESS_THAN_OP
 LOAD_OP1(less_than, CPU);
 #endif
+#ifdef LOGICAL_AND_OP
+LOAD_OP1(logical_and, CPU);
+#endif
+#ifdef LOGICAL_OR_OP
+LOAD_OP1(logical_or, CPU);
+#endif
+#ifdef LOGICAL_NOT_OP
+LOAD_OP1(logical_not, CPU);
+#endif
+#ifdef LOGICAL_XOR_OP
+LOAD_OP1(logical_xor, CPU);
+#endif
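Each LOAD_OPn entry above is wrapped in that op's build flag, so an op left out of the build configuration is never compiled or registered. A minimal, self-contained C++ sketch of this flag-gated registration pattern; the registry and the LOAD_OP1_SKETCH macro below are illustrative stand-ins, not paddle-mobile's real LOAD_OP1:

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Illustrative registry; paddle-mobile's actual mechanism differs.
static std::map<std::string, std::function<void()>>& Registry() {
  static std::map<std::string, std::function<void()>> r;
  return r;
}

// Stand-in for LOAD_OP1(op, CPU): record a runnable kernel under the op name.
#define LOAD_OP1_SKETCH(op) \
  Registry()[#op] = [] { std::cout << "running " #op "\n"; }

int main() {
#ifdef LOGICAL_AND_OP  // defined (or not) by the build system
  LOAD_OP1_SKETCH(logical_and);
#endif
#ifdef LOGICAL_NOT_OP
  LOAD_OP1_SKETCH(logical_not);
#endif
  std::cout << Registry().size() << " ops registered\n";
  return 0;
}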
@@ -56,12 +56,13 @@ void BinaryLogicalCompute(const Tensor* inputX, const Tensor* inputY,
 #ifdef LOGICAL_AND_OP
 template <>
-bool LogicalAndKernel<CPU, float>::Init(LogicalAndParam<CPU>* param) {
+bool LogicalAndKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
   return true;
 }
 
 template <>
-void LogicalAndKernel<CPU, float>::Compute(const LogicalAndParam<CPU>& param) {
+void LogicalAndKernel<CPU, float>::Compute(
+    const LogicalBinaryParam<CPU>& param) {
   auto* inputX = param.InputX();
   auto* inputY = param.InputY();
   auto* out = param.Out();
@@ -72,12 +73,13 @@ void LogicalAndKernel<CPU, float>::Compute(const LogicalAndParam<CPU>& param) {
 #ifdef LOGICAL_OR_OP
 template <>
-bool LogicalOrKernel<CPU, float>::Init(LogicalOrParam<CPU>* param) {
+bool LogicalOrKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
   return true;
 }
 
 template <>
-void LogicalOrKernel<CPU, float>::Compute(const LogicalOrParam<CPU>& param) {
+void LogicalOrKernel<CPU, float>::Compute(
+    const LogicalBinaryParam<CPU>& param) {
   auto* inputX = param.InputX();
   auto* inputY = param.InputY();
   auto* out = param.Out();
@@ -88,12 +90,13 @@ void LogicalOrKernel<CPU, float>::Compute(const LogicalOrParam<CPU>& param) {
 #ifdef LOGICAL_NOT_OP
 template <>
-bool LogicalNotKernel<CPU, float>::Init(LogicalNotParam<CPU>* param) {
+bool LogicalNotKernel<CPU, float>::Init(LogicalUnaryParam<CPU>* param) {
   return true;
 }
 
 template <>
-void LogicalNotKernel<CPU, float>::Compute(const LogicalNotParam<CPU>& param) {
+void LogicalNotKernel<CPU, float>::Compute(
+    const LogicalUnaryParam<CPU>& param) {
   auto* inputX = param.InputX();
   auto* out = param.Out();
   out->mutable_data<bool>();
@@ -103,12 +106,13 @@ void LogicalNotKernel<CPU, float>::Compute(const LogicalNotParam<CPU>& param) {
 #ifdef LOGICAL_XOR_OP
 template <>
-bool LogicalXorKernel<CPU, float>::Init(LogicalXorParam<CPU>* param) {
+bool LogicalXorKernel<CPU, float>::Init(LogicalBinaryParam<CPU>* param) {
   return true;
 }
 
 template <>
-void LogicalXorKernel<CPU, float>::Compute(const LogicalXorParam<CPU>& param) {
+void LogicalXorKernel<CPU, float>::Compute(
+    const LogicalBinaryParam<CPU>& param) {
   auto* inputX = param.InputX();
   auto* inputY = param.InputY();
   auto* out = param.Out();
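The hunk header above shows the shared helper BinaryLogicalCompute(const Tensor* inputX, const Tensor* inputY, ...), which the four kernels delegate to. A standalone sketch of that elementwise pattern; MiniTensor is a stand-in for paddle_mobile's Tensor, and the functor names are hypothetical, so this is not the verbatim implementation:

#include <cstddef>
#include <iostream>
#include <vector>

// MiniTensor stands in for paddle_mobile's Tensor; only what the sketch needs.
struct MiniTensor {
  std::vector<unsigned char> data;  // bool-like elements
};

struct AndFunctor {
  bool operator()(bool x, bool y) const { return x && y; }
};
struct XorFunctor {
  bool operator()(bool x, bool y) const { return x != y; }
};

template <typename F>
void BinaryLogicalCompute(const MiniTensor* inputX, const MiniTensor* inputY,
                          MiniTensor* out) {
  F func;
  out->data.resize(inputX->data.size());
  for (std::size_t i = 0; i < inputX->data.size(); ++i) {
    // Elementwise combine, mirroring the out->mutable_data<bool>() usage above.
    out->data[i] = func(inputX->data[i] != 0, inputY->data[i] != 0);
  }
}

int main() {
  MiniTensor x{{1, 0, 1}}, y{{1, 1, 0}}, out;
  BinaryLogicalCompute<XorFunctor>(&x, &y, &out);
  for (auto v : out.data) std::cout << int(v) << ' ';  // prints: 0 1 1
  std::cout << '\n';
}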
@@ -21,19 +21,19 @@ namespace paddle_mobile {
 namespace operators {
 
 #ifdef LOGICAL_AND_OP
-DECLARE_KERNEL(LogicalAnd, LogicalAndParam);
+DECLARE_KERNEL(LogicalAnd, LogicalBinaryParam);
 #endif
 
 #ifdef LOGICAL_OR_OP
-DECLARE_KERNEL(LogicalOr, LogicalOrParam);
+DECLARE_KERNEL(LogicalOr, LogicalBinaryParam);
 #endif
 
 #ifdef LOGICAL_NOT_OP
-DECLARE_KERNEL(LogicalNot, LogicalNotParam);
+DECLARE_KERNEL(LogicalNot, LogicalUnaryParam);
 #endif
 
 #ifdef LOGICAL_XOR_OP
-DECLARE_KERNEL(LogicalXor, LogicalXorParam);
+DECLARE_KERNEL(LogicalXor, LogicalBinaryParam);
 #endif
 
 }  // namespace operators
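DECLARE_KERNEL pairs a kernel name with a param type, which is exactly why the merge works: the kernel only names the param type in its Init/Compute signatures, so several kernels can share one. A hypothetical expansion of DECLARE_KERNEL(LogicalAnd, LogicalBinaryParam); paddle-mobile's real macro and base class may differ, and OpKernelStub is a stand-in so the sketch is self-contained:

// Hypothetical expansion; matches the Init/Compute signatures in the
// kernel .cpp above, but the real macro is not reproduced here.
template <typename DeviceType, typename P>
class OpKernelStub {};  // stand-in for the framework's kernel base class

template <typename Dtype>
class LogicalBinaryParam;  // defined in op_param.h

template <typename DeviceType, typename T>
class LogicalAndKernel
    : public OpKernelStub<DeviceType, LogicalBinaryParam<DeviceType>> {
 public:
  bool Init(LogicalBinaryParam<DeviceType>* param);
  void Compute(const LogicalBinaryParam<DeviceType>& param);
};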
@@ -23,19 +23,19 @@ namespace paddle_mobile {
 namespace operators {
 
 #ifdef LOGICAL_AND_OP
-DECLARE_OPERATOR(LogicalAnd, LogicalAndParam, LogicalAndKernel);
+DECLARE_OPERATOR(LogicalAnd, LogicalBinaryParam, LogicalAndKernel);
 #endif
 
 #ifdef LOGICAL_OR_OP
-DECLARE_OPERATOR(LogicalOr, LogicalOrParam, LogicalOrKernel);
+DECLARE_OPERATOR(LogicalOr, LogicalBinaryParam, LogicalOrKernel);
 #endif
 
 #ifdef LOGICAL_NOT_OP
-DECLARE_OPERATOR(LogicalNot, LogicalNotParam, LogicalNotKernel);
+DECLARE_OPERATOR(LogicalNot, LogicalUnaryParam, LogicalNotKernel);
 #endif
 
 #ifdef LOGICAL_XOR_OP
-DECLARE_OPERATOR(LogicalXor, LogicalXorParam, LogicalXorKernel);
+DECLARE_OPERATOR(LogicalXor, LogicalBinaryParam, LogicalXorKernel);
 #endif
 
 }  // namespace operators
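DECLARE_OPERATOR binds all three pieces: operator name, param class, kernel class. A hypothetical expansion of DECLARE_OPERATOR(LogicalNot, LogicalUnaryParam, LogicalNotKernel), again with a stand-in base so it compiles on its own; the real macro may differ:

// Hypothetical expansion; illustrative only.
template <typename DeviceType, typename ParamType, typename KernelType>
class OperatorStub {};  // stand-in for the framework's operator base class

template <typename Dtype>
class LogicalUnaryParam;  // defined in op_param.h
template <typename DeviceType, typename T>
class LogicalNotKernel;   // declared via DECLARE_KERNEL above

template <typename DeviceType, typename T>
class LogicalNotOp
    : public OperatorStub<DeviceType, LogicalUnaryParam<DeviceType>,
                          LogicalNotKernel<DeviceType, T>> {
 public:
  void InferShape() const;  // each op still declares its own shape logic
};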
@@ -2942,40 +2942,16 @@ class CompareParam : public OpParam {
 };
 #endif  // LESS_THAN_OP
 
-#ifdef LOGICAL_AND_OP
+#if defined(LOGICAL_AND_OP) || defined(LOGICAL_OR_OP) || defined(LOGICAL_XOR_OP)
 template <typename Dtype>
-class LogicalAndParam : public OpParam {
+class LogicalBinaryParam : public OpParam {
   typedef typename DtypeTensorTrait<Dtype>::gtype GType;
   typedef typename DtypeTensorTrait<Dtype>::rtype RType;
 
  public:
-  LogicalAndParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                  const AttributeMap &attrs, const Scope &scope) {
-    input_x_ = InputXFrom<GType>(inputs, scope);
-    input_y_ = InputYFrom<GType>(inputs, scope);
-    output_ = OutFrom<GType>(outputs, scope);
-  }
-
-  const GType *InputX() const { return input_x_; }
-  const GType *InputY() const { return input_y_; }
-  GType *Out() const { return output_; }
-
- public:
-  GType *input_x_;
-  GType *input_y_;
-  GType *output_;
-};
-#endif  // LOGICAL_AND_OP
-
-#ifdef LOGICAL_OR_OP
-template <typename Dtype>
-class LogicalOrParam : public OpParam {
-  typedef typename DtypeTensorTrait<Dtype>::gtype GType;
-  typedef typename DtypeTensorTrait<Dtype>::rtype RType;
-
- public:
-  LogicalOrParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                 const AttributeMap &attrs, const Scope &scope) {
+  LogicalBinaryParam(const VariableNameMap &inputs,
+                     const VariableNameMap &outputs, const AttributeMap &attrs,
+                     const Scope &scope) {
     input_x_ = InputXFrom<GType>(inputs, scope);
     input_y_ = InputYFrom<GType>(inputs, scope);
     output_ = OutFrom<GType>(outputs, scope);
@@ -2990,17 +2966,18 @@ class LogicalOrParam : public OpParam {
   GType *input_y_;
   GType *output_;
 };
-#endif  // LOGICAL_OR_OP
+#endif  // LOGICAL_AND_OP LOGICAL_OR_OP LOGICAL_XOR_OP
 
 #ifdef LOGICAL_NOT_OP
 template <typename Dtype>
-class LogicalNotParam : public OpParam {
+class LogicalUnaryParam : public OpParam {
   typedef typename DtypeTensorTrait<Dtype>::gtype GType;
   typedef typename DtypeTensorTrait<Dtype>::rtype RType;
 
  public:
-  LogicalNotParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                  const AttributeMap &attrs, const Scope &scope) {
+  LogicalUnaryParam(const VariableNameMap &inputs,
+                    const VariableNameMap &outputs, const AttributeMap &attrs,
+                    const Scope &scope) {
     input_x_ = InputXFrom<GType>(inputs, scope);
     output_ = OutFrom<GType>(outputs, scope);
   }
@@ -3014,30 +2991,5 @@ class LogicalNotParam : public OpParam {
 };
 #endif  // LOGICAL_NOT_OP
 
-#ifdef LOGICAL_XOR_OP
-template <typename Dtype>
-class LogicalXorParam : public OpParam {
-  typedef typename DtypeTensorTrait<Dtype>::gtype GType;
-  typedef typename DtypeTensorTrait<Dtype>::rtype RType;
-
- public:
-  LogicalXorParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                  const AttributeMap &attrs, const Scope &scope) {
-    input_x_ = InputXFrom<GType>(inputs, scope);
-    input_y_ = InputYFrom<GType>(inputs, scope);
-    output_ = OutFrom<GType>(outputs, scope);
-  }
-
-  const GType *InputX() const { return input_x_; }
-  const GType *InputY() const { return input_y_; }
-  GType *Out() const { return output_; }
-
- public:
-  GType *input_x_;
-  GType *input_y_;
-  GType *output_;
-};
-#endif  // LOGICAL_XOR_OP
 
 }  // namespace operators
 }  // namespace paddle_mobile
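The net effect, reduced to a runnable toy example: kernels that read identical fields can all be fed by one shared param type, which is the whole justification for collapsing the four classes into two. All names below are illustrative, not paddle-mobile's:

#include <iostream>

// BinaryParam plays the role of LogicalBinaryParam: one struct, three kernels.
struct BinaryParam {
  bool x, y;
  bool* out;
};

void AndKernel(const BinaryParam& p) { *p.out = p.x && p.y; }
void OrKernel(const BinaryParam& p) { *p.out = p.x || p.y; }
void XorKernel(const BinaryParam& p) { *p.out = p.x != p.y; }

int main() {
  bool result;
  BinaryParam p{true, false, &result};
  AndKernel(p); std::cout << "and: " << result << '\n';  // and: 0
  OrKernel(p);  std::cout << "or: "  << result << '\n';  // or: 1
  XorKernel(p); std::cout << "xor: " << result << '\n';  // xor: 1
}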