提交 7213d3ba 编写于 作者: H hjchen2

Remove duplicate macro definitions and fix some code style issues

上级 8d8dbf47
......@@ -150,30 +150,6 @@ class OpKernelBase {
#endif
};
// Generates an operator class `<OpName>Op` deriving from
// framework::OperatorWithKernel, parameterized on DeviceType and T and wired
// to the given `OpParam` / `OpKernel` templates. The generated constructor
// simply forwards (type, inputs, outputs, attrs, scope) to the base class;
// InferShape() is only declared here and must be defined per operator.
// NOTE: no comments may appear on the continuation lines below — a `//`
// before a trailing backslash would swallow the next line.
#define DECLARE_OPERATOR(OpName, OpParam, OpKernel) \
template <typename DeviceType, typename T> \
class OpName##Op : public framework::OperatorWithKernel< \
DeviceType, OpParam<DeviceType>, \
operators::OpKernel<DeviceType, T>> { \
public: \
OpName##Op(const std::string &type, const VariableNameMap &inputs, \
const VariableNameMap &outputs, \
const framework::AttributeMap &attrs, \
std::shared_ptr<framework::Scope> scope) \
: framework::OperatorWithKernel<DeviceType, OpParam<DeviceType>, \
operators::OpKernel<DeviceType, T>>( \
type, inputs, outputs, attrs, scope) {} \
\
void InferShape() const override; \
};
// Expands to a constructor for `cls` that forwards all five arguments to
// `parent_cls<Dtype, T>`. Note `Dtype` and `T` are not macro parameters —
// they must be template parameters already in scope where this is expanded.
#define DEFINE_OP_CONSTRUCTOR(cls, parent_cls) \
cls(const std::string &type, const ::paddle_mobile::VariableNameMap &inputs, \
const ::paddle_mobile::VariableNameMap &outputs, \
const ::paddle_mobile::framework::AttributeMap &attrs, \
std::shared_ptr<::paddle_mobile::framework::Scope> scope) \
: parent_cls<Dtype, T>(type, inputs, outputs, attrs, scope) {}
class FusionOpMatcher {
public:
FusionOpMatcher() {}
......@@ -198,5 +174,38 @@ class FusionOpMatcher {
std::shared_ptr<OpDesc> new_opdesc_;
};
// Generates an operator class `<OpName>Op` deriving from
// framework::OperatorWithKernel, bound to the given `OpParam` and `OpKernel`
// class templates. The constructor forwards (type, inputs, outputs, attrs,
// scope) verbatim to the base; InferShape() is declared but intentionally
// left undefined so each operator supplies its own implementation.
#define DECLARE_OPERATOR(OpName, OpParam, OpKernel) \
template <typename DeviceType, typename T> \
class OpName##Op : public framework::OperatorWithKernel< \
DeviceType, OpParam<DeviceType>, \
operators::OpKernel<DeviceType, T>> { \
public: \
OpName##Op(const std::string &type, const VariableNameMap &inputs, \
const VariableNameMap &outputs, \
const framework::AttributeMap &attrs, \
std::shared_ptr<framework::Scope> scope) \
: framework::OperatorWithKernel<DeviceType, OpParam<DeviceType>, \
operators::OpKernel<DeviceType, T>>( \
type, inputs, outputs, attrs, scope) {} \
\
void InferShape() const override; \
};
// Generates a kernel class `<OpName>Kernel` (note the `##` paste appends the
// "Kernel" suffix) deriving from framework::OpKernelBase. Only declares the
// Init/Compute pair; definitions are provided per device/type elsewhere.
#define DECLARE_KERNEL(OpName, OpParam) \
template <typename DeviceType, typename T> \
class OpName##Kernel \
: public framework::OpKernelBase<DeviceType, OpParam<DeviceType>> { \
public: \
bool Init(OpParam<DeviceType> *param); \
void Compute(const OpParam<DeviceType> &param); \
};
// Expands to a forwarding constructor for `cls`: all five arguments are
// passed through to `parent_cls<Dtype, T>`. `Dtype` and `T` are deliberately
// not macro parameters — the expansion site must have them in scope as
// template parameters of the enclosing class.
#define DEFINE_OP_CONSTRUCTOR(cls, parent_cls) \
cls(const std::string &type, const ::paddle_mobile::VariableNameMap &inputs, \
const ::paddle_mobile::VariableNameMap &outputs, \
const ::paddle_mobile::framework::AttributeMap &attrs, \
std::shared_ptr<::paddle_mobile::framework::Scope> scope) \
: parent_cls<Dtype, T>(type, inputs, outputs, attrs, scope) {}
} // namespace framework
} // namespace paddle_mobile
......@@ -20,15 +20,6 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
// Local (operators-namespace) copy of DECLARE_KERNEL: generates
// `<OpName>Kernel` deriving from framework::OpKernelBase with the usual
// Init/Compute declarations. This per-file copy is what the commit removes
// in favor of the single framework-level definition.
#define DECLARE_KERNEL(OpName, Param) \
template <typename DeviceType, typename T> \
class OpName##Kernel \
: public framework::OpKernelBase<DeviceType, Param<DeviceType>> { \
public: \
bool Init(Param<DeviceType> *param); \
void Compute(const Param<DeviceType> &param); \
};
#ifdef RELU_OP
DECLARE_KERNEL(Relu, ReluParam);
DECLARE_KERNEL(Relu6, ReluParam);
......
......@@ -66,8 +66,9 @@ void SequencePoolImpl(const framework::LoDTensor &input,
memcpy(out_ptr, in_ptr, width * sizeof(float));
in_ptr += width;
int remain_h = height - 1;
int remain_w_start = 0;
#ifdef __ARM_NEON__
int remain_w_start = width & 0xfffc;
remain_w_start = width & 0xfffc;
#endif // __ARM_NEON__
for (int h = 0; h < remain_h; ++h) {
#ifdef __ARM_NEON__
......@@ -124,9 +125,10 @@ void SequencePoolImpl<SUM, float>(const framework::LoDTensor &input,
memcpy(out_ptr, in_ptr, width * sizeof(float));
in_ptr += width;
int remain_h = height - 1;
int remain_w_start = 0;
#ifdef __ARM_NEON__
int loop_w = width >> 2;
int remain_w_start = width & 0xfffc;
remain_w_start = width & 0xfffc;
#endif // __ARM_NEON__
for (int h = 0; h < remain_h; ++h) {
#ifdef __ARM_NEON__
......
......@@ -20,37 +20,28 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
// Removed local variant of DECLARE_KERNEL: uses `KernelClass` verbatim as
// the class name (no "Kernel" suffix pasted on), so call sites here had to
// pass names like FusionDequantBNKernel themselves. Replaced by the shared
// suffix-appending macro, which is why the invocations below drop "Kernel".
#define DECLARE_KERNEL(KernelClass, KernelParam) \
template <typename DeviceType, typename T> \
class KernelClass \
: public framework::OpKernelBase<DeviceType, KernelParam<DeviceType>> { \
public: \
bool Init(KernelParam<DeviceType> *param); \
void Compute(const KernelParam<DeviceType> &param); \
};
#ifdef FUSION_DEQUANT_BN_OP
DECLARE_KERNEL(FusionDequantBNKernel, FusionDequantBNParam);
DECLARE_KERNEL(FusionDequantBN, FusionDequantBNParam);
#endif
#ifdef FUSION_DEQUANT_BN_RELU_OP
DECLARE_KERNEL(FusionDequantBNReluKernel, FusionDequantBNParam);
DECLARE_KERNEL(FusionDequantBNRelu, FusionDequantBNParam);
#endif
#ifdef FUSION_DEQUANT_ADD_BN_OP
DECLARE_KERNEL(FusionDequantAddBNKernel, FusionDequantAddBNParam);
DECLARE_KERNEL(FusionDequantAddBN, FusionDequantAddBNParam);
#endif
#ifdef FUSION_DEQUANT_ADD_BN_RELU_OP
DECLARE_KERNEL(FusionDequantAddBNReluKernel, FusionDequantAddBNParam);
DECLARE_KERNEL(FusionDequantAddBNRelu, FusionDequantAddBNParam);
#endif
#ifdef FUSION_DEQUANT_ADD_BN_QUANT_OP
DECLARE_KERNEL(FusionDequantAddBNQuantKernel, FusionDequantAddBNQuantParam);
DECLARE_KERNEL(FusionDequantAddBNQuant, FusionDequantAddBNQuantParam);
#endif
#ifdef FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
DECLARE_KERNEL(FusionDequantAddBNReluQuantKernel, FusionDequantAddBNQuantParam);
DECLARE_KERNEL(FusionDequantAddBNReluQuant, FusionDequantAddBNQuantParam);
#endif
} // namespace operators
......
......@@ -20,15 +20,6 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
// Removed local copy of DECLARE_KERNEL (suffix-pasting flavor): generates
// `<KernelClass>Kernel` deriving from framework::OpKernelBase, declaring the
// Init/Compute pair. Duplicates the shared framework-level macro, hence its
// deletion in this commit.
#define DECLARE_KERNEL(KernelClass, KernelParam) \
template <typename DeviceType, typename T> \
class KernelClass##Kernel \
: public framework::OpKernelBase<DeviceType, KernelParam<DeviceType>> { \
public: \
bool Init(KernelParam<DeviceType> *param); \
void Compute(const KernelParam<DeviceType> &param); \
};
#ifdef TOP_K_OP
DECLARE_KERNEL(TopK, TopKParam)
#endif // TOP_K_OP
......
......@@ -20,25 +20,16 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
// Removed local variant of DECLARE_KERNEL: takes the full class name
// verbatim (no pasted "Kernel" suffix), so callers here passed e.g.
// SequencePoolKernel directly. Superseded by the shared suffix-appending
// macro; the invocations below change accordingly.
#define DECLARE_KERNEL(KernelClass, KernelParam) \
template <typename DeviceType, typename T> \
class KernelClass \
: public framework::OpKernelBase<DeviceType, KernelParam<DeviceType>> { \
public: \
bool Init(KernelParam<DeviceType> *param); \
void Compute(const KernelParam<DeviceType> &param); \
};
#ifdef SEQUENCE_EXPAND_OP
DECLARE_KERNEL(SequenceExpandKernel, SequenceExpandParam);
DECLARE_KERNEL(SequenceExpand, SequenceExpandParam);
#endif // SEQUENCE_EXPAND_OP
#ifdef SEQUENCE_POOL_OP
DECLARE_KERNEL(SequencePoolKernel, SequencePoolParam);
DECLARE_KERNEL(SequencePool, SequencePoolParam);
#endif // SEQUENCE_POOL_OP
#ifdef SEQUENCE_SOFTMAX_OP
DECLARE_KERNEL(SequenceSoftmaxKernel, SoftmaxParam);
DECLARE_KERNEL(SequenceSoftmax, SoftmaxParam);
#endif // SEQUENCE_SOFTMAX_OP
} // namespace operators
......
......@@ -43,7 +43,8 @@ int main(int argc, char* argv[]) {
std::shared_ptr<paddle_mobile::framework::Tensor> output;
std::vector<int64_t> dims{1, 3, 224, 224};
if (feed_shape) {
sscanf(feed_shape, "%d,%d,%d,%d", &dims[0], &dims[1], &dims[2], &dims[3]);
sscanf(feed_shape, "%ld,%ld,%ld,%ld", &dims[0], &dims[1], &dims[2],
&dims[3]);
}
std::cout << "feed shape: [" << dims[0] << ", " << dims[1] << ", "
<< dims[2] << ", " << dims[3] << "]\n";
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册