diff --git a/src/framework/operator.h b/src/framework/operator.h
index e8974120c9c4a840244af7b2c0d8ef7bf1c93e3e..deb573571f8ca0184da0714de54ee98655e78921 100644
--- a/src/framework/operator.h
+++ b/src/framework/operator.h
@@ -150,30 +150,6 @@ class OpKernelBase {
 #endif
 };
 
-#define DECLARE_OPERATOR(OpName, OpParam, OpKernel)                          \
-  template <typename DeviceType, typename T>                                 \
-  class OpName##Op : public framework::OperatorWithKernel<                   \
-                         DeviceType, OpParam<DeviceType>,                    \
-                         operators::OpKernel<DeviceType, T>> {               \
-   public:                                                                   \
-    OpName##Op(const std::string &type, const VariableNameMap &inputs,       \
-               const VariableNameMap &outputs,                               \
-               const framework::AttributeMap &attrs,                         \
-               std::shared_ptr<Scope> scope)                                 \
-        : framework::OperatorWithKernel<DeviceType, OpParam<DeviceType>,     \
-                                        operators::OpKernel<DeviceType, T>>( \
-              type, inputs, outputs, attrs, scope) {}                        \
-                                                                              \
-    void InferShape() const override;                                        \
-  };
-
-#define DEFINE_OP_CONSTRUCTOR(cls, parent_cls)                                 \
-  cls(const std::string &type, const ::paddle_mobile::VariableNameMap &inputs, \
-      const ::paddle_mobile::VariableNameMap &outputs,                         \
-      const ::paddle_mobile::framework::AttributeMap &attrs,                   \
-      std::shared_ptr<::paddle_mobile::framework::Scope> scope)                \
-      : parent_cls(type, inputs, outputs, attrs, scope) {}
-
 class FusionOpMatcher {
  public:
   FusionOpMatcher() {}
@@ -198,5 +174,38 @@ class FusionOpMatcher {
   std::shared_ptr<OpDesc> new_opdesc_;
 };
 
+#define DECLARE_OPERATOR(OpName, OpParam, OpKernel)                          \
+  template <typename DeviceType, typename T>                                 \
+  class OpName##Op : public framework::OperatorWithKernel<                   \
+                         DeviceType, OpParam<DeviceType>,                    \
+                         operators::OpKernel<DeviceType, T>> {               \
+   public:                                                                   \
+    OpName##Op(const std::string &type, const VariableNameMap &inputs,       \
+               const VariableNameMap &outputs,                               \
+               const framework::AttributeMap &attrs,                         \
+               std::shared_ptr<Scope> scope)                                 \
+        : framework::OperatorWithKernel<DeviceType, OpParam<DeviceType>,     \
+                                        operators::OpKernel<DeviceType, T>>( \
+              type, inputs, outputs, attrs, scope) {}                        \
+                                                                              \
+    void InferShape() const override;                                        \
+  };
+
+#define DECLARE_KERNEL(OpName, OpParam)                                   \
+  template <typename DeviceType, typename T>                              \
+  class OpName##Kernel                                                    \
+      : public framework::OpKernelBase<DeviceType, OpParam<DeviceType>> { \
+   public:                                                                \
+    bool Init(OpParam<DeviceType> *param);                                \
+    void Compute(const OpParam<DeviceType> &param);                       \
+  };
+
+#define DEFINE_OP_CONSTRUCTOR(cls, parent_cls)                                 \
+  cls(const std::string &type, const ::paddle_mobile::VariableNameMap &inputs, \
+      const ::paddle_mobile::VariableNameMap &outputs,                         \
+      const ::paddle_mobile::framework::AttributeMap &attrs,                   \
+      std::shared_ptr<::paddle_mobile::framework::Scope> scope)                \
+      : parent_cls(type, inputs, outputs, attrs, scope) {}
+
 }  // namespace framework
 }  // namespace paddle_mobile
diff --git a/src/operators/kernel/activation_kernel.h b/src/operators/kernel/activation_kernel.h
index f91ef705c3bdee48b6c4c85c7fc51209746b7f15..9eaf3fd96737a6bad3448160a41d523965008943 100644
--- a/src/operators/kernel/activation_kernel.h
+++ b/src/operators/kernel/activation_kernel.h
@@ -20,15 +20,6 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace operators {
 
-#define DECLARE_KERNEL(OpName, Param)                                    \
-  template <typename DeviceType, typename T>                             \
-  class OpName##Kernel                                                   \
-      : public framework::OpKernelBase<DeviceType, Param<DeviceType>> {  \
-   public:                                                               \
-    bool Init(Param<DeviceType> *param);                                 \
-    void Compute(const Param<DeviceType> &param);                        \
-  };
-
 #ifdef RELU_OP
 DECLARE_KERNEL(Relu, ReluParam);
 DECLARE_KERNEL(Relu6, ReluParam);
diff --git a/src/operators/kernel/arm/sequence_pool_kernel.cpp b/src/operators/kernel/arm/sequence_pool_kernel.cpp
index f4e28a0ffbbc13428bd8b4643aaae915f14539bc..352158b973050c99555a82c0d0f02c318b7702ac 100644
--- a/src/operators/kernel/arm/sequence_pool_kernel.cpp
+++ b/src/operators/kernel/arm/sequence_pool_kernel.cpp
@@ -66,8 +66,9 @@ void SequencePoolImpl(const framework::LoDTensor &input,
     memcpy(out_ptr, in_ptr, width * sizeof(float));
     in_ptr += width;
     int remain_h = height - 1;
+    int remain_w_start = 0;
 #ifdef __ARM_NEON__
-    int remain_w_start = width & 0xfffc;
+    remain_w_start = width & 0xfffc;
 #endif  // __ARM_NEON__
     for (int h = 0; h < remain_h; ++h) {
 #ifdef __ARM_NEON__
@@ -124,9 +125,10 @@ void SequencePoolImpl(const framework::LoDTensor &input,
     memcpy(out_ptr, in_ptr, width * sizeof(float));
     in_ptr += width;
     int remain_h = height - 1;
+    int remain_w_start = 0;
 #ifdef __ARM_NEON__
     int loop_w = width >> 2;
-    int remain_w_start = width & 0xfffc;
+    remain_w_start = width & 0xfffc;
 #endif  // __ARM_NEON__
     for (int h = 0; h < remain_h; ++h) {
 #ifdef __ARM_NEON__
diff --git a/src/operators/kernel/dequant_bn_kernel.h b/src/operators/kernel/dequant_bn_kernel.h
index abf2c68e8a965e9741520731c5c34beca8777299..cf759bf69cc959759d770b685cffaef25ac24386 100644
--- a/src/operators/kernel/dequant_bn_kernel.h
+++ b/src/operators/kernel/dequant_bn_kernel.h
@@ -20,37 +20,28 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace operators {
 
-#define DECLARE_KERNEL(KernelClass, KernelParam)                              \
-  template <typename DeviceType, typename T>                                  \
-  class KernelClass                                                           \
-      : public framework::OpKernelBase<DeviceType, KernelParam<DeviceType>> { \
-   public:                                                                    \
-    bool Init(KernelParam<DeviceType> *param);                                \
-    void Compute(const KernelParam<DeviceType> &param);                       \
-  };
-
 #ifdef FUSION_DEQUANT_BN_OP
-DECLARE_KERNEL(FusionDequantBNKernel, FusionDequantBNParam);
+DECLARE_KERNEL(FusionDequantBN, FusionDequantBNParam);
 #endif
 
 #ifdef FUSION_DEQUANT_BN_RELU_OP
-DECLARE_KERNEL(FusionDequantBNReluKernel, FusionDequantBNParam);
+DECLARE_KERNEL(FusionDequantBNRelu, FusionDequantBNParam);
 #endif
 
 #ifdef FUSION_DEQUANT_ADD_BN_OP
-DECLARE_KERNEL(FusionDequantAddBNKernel, FusionDequantAddBNParam);
+DECLARE_KERNEL(FusionDequantAddBN, FusionDequantAddBNParam);
 #endif
 
 #ifdef FUSION_DEQUANT_ADD_BN_RELU_OP
-DECLARE_KERNEL(FusionDequantAddBNReluKernel, FusionDequantAddBNParam);
+DECLARE_KERNEL(FusionDequantAddBNRelu, FusionDequantAddBNParam);
 #endif
 
 #ifdef FUSION_DEQUANT_ADD_BN_QUANT_OP
-DECLARE_KERNEL(FusionDequantAddBNQuantKernel, FusionDequantAddBNQuantParam);
+DECLARE_KERNEL(FusionDequantAddBNQuant, FusionDequantAddBNQuantParam);
 #endif
 
 #ifdef FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
-DECLARE_KERNEL(FusionDequantAddBNReluQuantKernel, FusionDequantAddBNQuantParam);
+DECLARE_KERNEL(FusionDequantAddBNReluQuant, FusionDequantAddBNQuantParam);
 #endif
 
 }  // namespace operators
diff --git a/src/operators/kernel/kernels.h b/src/operators/kernel/kernels.h
index 2c76ba19f726d8c974e32b0482c32feec130ade4..7b57d50668861416f341248b40715432a501bb35 100644
--- a/src/operators/kernel/kernels.h
+++ b/src/operators/kernel/kernels.h
@@ -20,15 +20,6 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace operators {
 
-#define DECLARE_KERNEL(KernelClass, KernelParam)                               \
-  template <typename DeviceType, typename T>                                   \
-  class KernelClass##Kernel                                                    \
-      : public framework::OpKernelBase<DeviceType, KernelParam<DeviceType>> {  \
-   public:                                                                     \
-    bool Init(KernelParam<DeviceType> *param);                                 \
-    void Compute(const KernelParam<DeviceType> &param);                        \
-  };
-
 #ifdef TOP_K_OP
 DECLARE_KERNEL(TopK, TopKParam)
 #endif  // TOP_K_OP
diff --git a/src/operators/kernel/sequence_kernels.h b/src/operators/kernel/sequence_kernels.h
index 7884d0d475949c8a54b0ecc08fb578807ca2e2d2..ccee8c521690888257092c7c457534ae2149d9d0 100644
--- a/src/operators/kernel/sequence_kernels.h
+++ b/src/operators/kernel/sequence_kernels.h
@@ -20,25 +20,16 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace operators {
 
-#define DECLARE_KERNEL(KernelClass, KernelParam)                              \
-  template <typename DeviceType, typename T>                                  \
-  class KernelClass                                                           \
-      : public framework::OpKernelBase<DeviceType, KernelParam<DeviceType>> { \
-   public:                                                                    \
-    bool Init(KernelParam<DeviceType> *param);                                \
-    void Compute(const KernelParam<DeviceType> &param);                       \
-  };
-
 #ifdef SEQUENCE_EXPAND_OP
-DECLARE_KERNEL(SequenceExpandKernel, SequenceExpandParam);
+DECLARE_KERNEL(SequenceExpand, SequenceExpandParam);
 #endif  // SEQUENCE_EXPAND_OP
 
 #ifdef SEQUENCE_POOL_OP
-DECLARE_KERNEL(SequencePoolKernel, SequencePoolParam);
+DECLARE_KERNEL(SequencePool, SequencePoolParam);
 #endif  // SEQUENCE_POOL_OP
 
 #ifdef SEQUENCE_SOFTMAX_OP
-DECLARE_KERNEL(SequenceSoftmaxKernel, SoftmaxParam);
+DECLARE_KERNEL(SequenceSoftmax, SoftmaxParam);
 #endif  // SEQUENCE_SOFTMAX_OP
 
 }  // namespace operators
diff --git a/test/net/test_benchmark.cpp b/test/net/test_benchmark.cpp
index a95d6c850e80216f9996bc8d582c0646eea1b78b..31a0850c4d531d13f7960d9857b3721ee69c6d27 100644
--- a/test/net/test_benchmark.cpp
+++ b/test/net/test_benchmark.cpp
@@ -43,7 +43,8 @@ int main(int argc, char* argv[]) {
   std::shared_ptr<Tensor> output;
   std::vector<int64_t> dims{1, 3, 224, 224};
   if (feed_shape) {
-    sscanf(feed_shape, "%d,%d,%d,%d", &dims[0], &dims[1], &dims[2], &dims[3]);
+    sscanf(feed_shape, "%ld,%ld,%ld,%ld", &dims[0], &dims[1], &dims[2],
+           &dims[3]);
   }
   std::cout << "feed shape: [" << dims[0] << ", " << dims[1] << ", "
             << dims[2] << ", " << dims[3] << "]\n";
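
For reference, here is a minimal, self-contained sketch of how the consolidated DECLARE_KERNEL macro from src/framework/operator.h is meant to be used after this change. The SequencePool names follow the diff; the CPU tag, the simplified OpKernelBase, and SequencePoolParam below are illustrative stand-ins, not the real paddle_mobile definitions.

// Illustrative stand-ins for the framework types; only the DECLARE_KERNEL
// macro itself mirrors the one added to operator.h in this patch.
#include <iostream>

struct CPU {};

template <typename DeviceType>
struct SequencePoolParam {
  int pool_type = 0;  // placeholder field
};

namespace framework {
template <typename Dtype, typename P>
class OpKernelBase {
 public:
  virtual bool Init(P *param) { return true; }
  virtual void Compute(const P &param) = 0;
  virtual ~OpKernelBase() = default;
};
}  // namespace framework

// Same shape as the unified macro: it appends "Kernel" to the first argument,
// which is why call sites now pass SequencePool rather than SequencePoolKernel.
#define DECLARE_KERNEL(OpName, OpParam)                                   \
  template <typename DeviceType, typename T>                              \
  class OpName##Kernel                                                    \
      : public framework::OpKernelBase<DeviceType, OpParam<DeviceType>> { \
   public:                                                                \
    bool Init(OpParam<DeviceType> *param);                                \
    void Compute(const OpParam<DeviceType> &param);                       \
  };

DECLARE_KERNEL(SequencePool, SequencePoolParam);

// Device-specific definitions, in the way an arm/*.cpp file would supply them.
template <>
bool SequencePoolKernel<CPU, float>::Init(SequencePoolParam<CPU> *param) {
  return param != nullptr;
}

template <>
void SequencePoolKernel<CPU, float>::Compute(
    const SequencePoolParam<CPU> &param) {
  std::cout << "pool_type = " << param.pool_type << "\n";
}

int main() {
  SequencePoolParam<CPU> param;
  SequencePoolKernel<CPU, float> kernel;
  if (kernel.Init(&param)) {
    kernel.Compute(param);
  }
  return 0;
}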