diff --git a/lite/backends/arm/math/elementwise.cc b/lite/backends/arm/math/elementwise.cc
index 658a235d621426a04647f1c5802147836232be89..080ade3d41e2c23cc41b7ed39457d8e7851f46c7 100644
--- a/lite/backends/arm/math/elementwise.cc
+++ b/lite/backends/arm/math/elementwise.cc
@@ -59,15 +59,6 @@ void elementwise_add(const int32_t* dinx,
   naive_elementwise_op(dinx, diny, dout, num, naive_add);
 }
 
-// todo: use arm intrinsics
-template <>
-void elementwise_add(const int64_t* dinx,
-                     const int64_t* diny,
-                     int64_t* dout,
-                     int num) {
-  naive_elementwise_op(dinx, diny, dout, num, naive_add);
-}
-
 template <>
 void elementwise_add(const float* dinx,
                      const float* diny,
@@ -261,18 +252,6 @@ void elementwise_add_broadcast(const int32_t* dinx,
       dinx, diny, dout, batch, channels, num, naive_add);
 }
 
-// todo: use arm intrinsics
-template <>
-void elementwise_add_broadcast(const int64_t* dinx,
-                               const int64_t* diny,
-                               int64_t* dout,
-                               int batch,
-                               int channels,
-                               int num) {
-  naive_elementwise_op_broadcast(
-      dinx, diny, dout, batch, channels, num, naive_add);
-}
-
 template <>
 void elementwise_add_broadcast(const float* dinx,
                                const float* diny,
@@ -494,15 +473,6 @@ void elementwise_sub(const int32_t* dinx,
   naive_elementwise_op(dinx, diny, dout, num, naive_sub);
 }
 
-// todo: use arm intrinsics
-template <>
-void elementwise_sub(const int64_t* dinx,
-                     const int64_t* diny,
-                     int64_t* dout,
-                     int num) {
-  naive_elementwise_op(dinx, diny, dout, num, naive_sub);
-}
-
 template <>
 void elementwise_sub(const float* dinx,
                      const float* diny,
@@ -615,18 +585,6 @@ void elementwise_sub_broadcast(const int32_t* dinx,
       dinx, diny, dout, batch, channels, num, naive_sub);
 }
 
-// todo: use arm intrinsics
-template <>
-void elementwise_sub_broadcast(const int64_t* dinx,
-                               const int64_t* diny,
-                               int64_t* dout,
-                               int batch,
-                               int channels,
-                               int num) {
-  naive_elementwise_op_broadcast(
-      dinx, diny, dout, batch, channels, num, naive_sub);
-}
-
 template <>
 void elementwise_sub_broadcast(const float* dinx,
                                const float* diny,
diff --git a/lite/kernels/arm/elementwise_compute.cc b/lite/kernels/arm/elementwise_compute.cc
index 42cfa2471564ff15480af78944ed97416a5f93ca..f207c8cfe1ba54b941587e034a79a45a883918c8 100644
--- a/lite/kernels/arm/elementwise_compute.cc
+++ b/lite/kernels/arm/elementwise_compute.cc
@@ -398,16 +398,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
 
-using elementwise_add_int64_t =
-    paddle::lite::kernels::arm::ElementwiseAddCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_add, kARM, kInt64, kNCHW, elementwise_add_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
-
 REGISTER_LITE_KERNEL(
     fusion_elementwise_add_activation,
     kARM,
@@ -439,16 +429,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
 
-using elementwise_sub_int64_t =
-    paddle::lite::kernels::arm::ElementwiseSubCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_sub, kARM, kInt64, kNCHW, elementwise_sub_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
-
 REGISTER_LITE_KERNEL(
     fusion_elementwise_sub_activation,
     kARM,
@@ -479,16 +459,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
 
-using elementwise_mul_int64_t =
-    paddle::lite::kernels::arm::ElementwiseMulCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_mul, kARM, kInt64, kNCHW, elementwise_mul_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
-
 using fusion_elementwise_mul_activation_float_t = paddle::lite::kernels::arm::
     ElementwiseMulActivationCompute<float, PRECISION(kFloat)>;
 REGISTER_LITE_KERNEL(fusion_elementwise_mul_activation,
@@ -558,17 +528,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
 
-using elementwise_div_int64_t =
-    paddle::lite::kernels::arm::ElementwiseDivCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-
-REGISTER_LITE_KERNEL(
-    elementwise_div, kARM, kInt64, kNCHW, elementwise_div_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
-
 REGISTER_LITE_KERNEL(
     fusion_elementwise_div_activation,
     kARM,