From ba6c936fd41fa8c9fddc9d3325333d2cd89e61ca Mon Sep 17 00:00:00 2001
From: zhangwen31
Date: Wed, 23 Sep 2020 07:54:10 +0000
Subject: [PATCH] [arm][kernel] fix: remove unused kernels to reduce build size for CI. test=develop

---
 lite/backends/arm/math/elementwise.cc   | 42 -------------------------
 lite/kernels/arm/elementwise_compute.cc | 41 ------------------------
 2 files changed, 83 deletions(-)

diff --git a/lite/backends/arm/math/elementwise.cc b/lite/backends/arm/math/elementwise.cc
index 658a235d62..080ade3d41 100644
--- a/lite/backends/arm/math/elementwise.cc
+++ b/lite/backends/arm/math/elementwise.cc
@@ -59,15 +59,6 @@ void elementwise_add(const int32_t* dinx,
   naive_elementwise_op(dinx, diny, dout, num, naive_add);
 }
 
-// todo: use arm intrinsics
-template <>
-void elementwise_add(const int64_t* dinx,
-                     const int64_t* diny,
-                     int64_t* dout,
-                     int num) {
-  naive_elementwise_op(dinx, diny, dout, num, naive_add);
-}
-
 template <>
 void elementwise_add(const float* dinx,
                      const float* diny,
@@ -261,18 +252,6 @@ void elementwise_add_broadcast(const int32_t* dinx,
       dinx, diny, dout, batch, channels, num, naive_add);
 }
 
-// todo: use arm intrinsics
-template <>
-void elementwise_add_broadcast(const int64_t* dinx,
-                               const int64_t* diny,
-                               int64_t* dout,
-                               int batch,
-                               int channels,
-                               int num) {
-  naive_elementwise_op_broadcast(
-      dinx, diny, dout, batch, channels, num, naive_add);
-}
-
 template <>
 void elementwise_add_broadcast(const float* dinx,
                                const float* diny,
@@ -494,15 +473,6 @@ void elementwise_sub(const int32_t* dinx,
   naive_elementwise_op(dinx, diny, dout, num, naive_sub);
 }
 
-// todo: use arm intrinsics
-template <>
-void elementwise_sub(const int64_t* dinx,
-                     const int64_t* diny,
-                     int64_t* dout,
-                     int num) {
-  naive_elementwise_op(dinx, diny, dout, num, naive_sub);
-}
-
 template <>
 void elementwise_sub(const float* dinx,
                      const float* diny,
@@ -615,18 +585,6 @@ void elementwise_sub_broadcast(const int32_t* dinx,
       dinx, diny, dout, batch, channels, num, naive_sub);
 }
 
-// todo: use arm intrinsics
-template <>
-void elementwise_sub_broadcast(const int64_t* dinx,
-                               const int64_t* diny,
-                               int64_t* dout,
-                               int batch,
-                               int channels,
-                               int num) {
-  naive_elementwise_op_broadcast(
-      dinx, diny, dout, batch, channels, num, naive_sub);
-}
-
 template <>
 void elementwise_sub_broadcast(const float* dinx,
                                const float* diny,
diff --git a/lite/kernels/arm/elementwise_compute.cc b/lite/kernels/arm/elementwise_compute.cc
index 42cfa24715..f207c8cfe1 100644
--- a/lite/kernels/arm/elementwise_compute.cc
+++ b/lite/kernels/arm/elementwise_compute.cc
@@ -398,16 +398,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
 
-using elementwise_add_int64_t =
-    paddle::lite::kernels::arm::ElementwiseAddCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_add, kARM, kInt64, kNCHW, elementwise_add_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
-
 REGISTER_LITE_KERNEL(
     fusion_elementwise_add_activation,
     kARM,
@@ -439,16 +429,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
 
-using elementwise_sub_int64_t =
-    paddle::lite::kernels::arm::ElementwiseSubCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_sub, kARM, kInt64, kNCHW, elementwise_sub_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
-
 REGISTER_LITE_KERNEL(
     fusion_elementwise_sub_activation,
     kARM,
@@ -479,16 +459,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
 
-using elementwise_mul_int64_t =
-    paddle::lite::kernels::arm::ElementwiseMulCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_mul, kARM, kInt64, kNCHW, elementwise_mul_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
-
 using fusion_elementwise_mul_activation_float_t = paddle::lite::kernels::arm::
     ElementwiseMulActivationCompute<float, PRECISION(kFloat)>;
 REGISTER_LITE_KERNEL(fusion_elementwise_mul_activation,
@@ -558,17 +528,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
 
-using elementwise_div_int64_t =
-    paddle::lite::kernels::arm::ElementwiseDivCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-
-REGISTER_LITE_KERNEL(
-    elementwise_div, kARM, kInt64, kNCHW, elementwise_div_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
-
 REGISTER_LITE_KERNEL(
     fusion_elementwise_div_activation,
     kARM,
-- 
GitLab
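Note: the math specializations deleted above all forwarded to the scalar
fallbacks naive_elementwise_op / naive_elementwise_op_broadcast rather than
to NEON code, as the removed "// todo: use arm intrinsics" comments indicate,
so no vectorized path is lost; after this patch, int64 elementwise ops simply
have no registered ARM kernel. For context, here is a minimal sketch of such
a fallback. It is illustrative only, not Paddle-Lite's actual implementation:
the function-pointer operator signature and the (batch, channels, num)
broadcast layout are assumptions inferred from the call sites visible in the
diff.

#include <cstdint>

// Scalar operators, named after the naive_add/naive_sub passed in the diff.
template <typename T>
T naive_add(T a, T b) {
  return a + b;
}

template <typename T>
T naive_sub(T a, T b) {
  return a - b;
}

// Flat case: apply op element by element across num entries. The deleted
// elementwise_add/elementwise_sub int64_t specializations forwarded here.
template <typename T>
void naive_elementwise_op(
    const T* dinx, const T* diny, T* dout, int num, T (*op)(T, T)) {
  for (int i = 0; i < num; ++i) {
    dout[i] = op(dinx[i], diny[i]);
  }
}

// Broadcast case (assumed layout): x is batch x channels x num contiguous;
// y holds one value per channel, reused across the batch and num axes.
template <typename T>
void naive_elementwise_op_broadcast(const T* dinx,
                                    const T* diny,
                                    T* dout,
                                    int batch,
                                    int channels,
                                    int num,
                                    T (*op)(T, T)) {
  for (int b = 0; b < batch; ++b) {
    for (int c = 0; c < channels; ++c) {
      const T* din_ptr = dinx + (b * channels + c) * num;
      T* dout_ptr = dout + (b * channels + c) * num;
      const T diny_val = diny[c];
      for (int i = 0; i < num; ++i) {
        dout_ptr[i] = op(din_ptr[i], diny_val);
      }
    }
  }
}

A call such as naive_elementwise_op(x, y, out, n, naive_add) with int64_t
pointers then reproduces what the deleted elementwise_add<int64_t>
specialization did.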