Commit ba6c936f authored by zhangwen31

[arm][kernel] fix: remove unused int64 elementwise kernels to reduce build size for CI. test=develop

Parent 9ee77a23
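This commit deletes the int64_t specializations of the ARM elementwise add/sub math functions and their broadcast variants, together with the int64 kernel registrations for elementwise add/sub/mul/div. Each deleted specialization only delegates to a generic naive helper, so removing them shrinks the binary without touching the other types. Below is a minimal sketch of those helpers, inferred purely from the call sites visible in this diff; the bodies and the broadcast semantics are assumptions, not the actual PaddleLite source.

// Sketch (assumption): naive helpers the deleted specializations delegate to,
// reconstructed from the call sites in this diff.
#include <cstdint>

template <typename T>
T naive_add(T a, T b) {
  return a + b;
}

template <typename T>
T naive_sub(T a, T b) {
  return a - b;
}

// Plain elementwise loop: dout[i] = op(dinx[i], diny[i]) over `num` elements.
template <typename T>
void naive_elementwise_op(
    const T* dinx, const T* diny, T* dout, int num, T (*op)(T, T)) {
  for (int i = 0; i < num; ++i) {
    dout[i] = op(dinx[i], diny[i]);
  }
}

// Broadcast variant (assumed semantics): y holds one value per channel and is
// broadcast across each (batch, channel) slice of `num` inner elements.
template <typename T>
void naive_elementwise_op_broadcast(const T* dinx,
                                    const T* diny,
                                    T* dout,
                                    int batch,
                                    int channels,
                                    int num,
                                    T (*op)(T, T)) {
  for (int b = 0; b < batch; ++b) {
    for (int c = 0; c < channels; ++c) {
      const T* din_ptr = dinx + (b * channels + c) * num;
      T* dout_ptr = dout + (b * channels + c) * num;
      const T y = diny[c];
      for (int i = 0; i < num; ++i) {
        dout_ptr[i] = op(din_ptr[i], y);
      }
    }
  }
}

Since these generic fallbacks remain in the tree, dropping the int64_t specializations loses no functionality for the other types; after this commit, however, int64 elementwise ops simply have no registered ARM kernel, so a graph needing them would have to be served at a different precision or by another target.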
@@ -59,15 +59,6 @@ void elementwise_add<int32_t>(const int32_t* dinx,
   naive_elementwise_op<int32_t>(dinx, diny, dout, num, naive_add<int32_t>);
 }
-// todo: use arm intrinsics
-template <>
-void elementwise_add<int64_t>(const int64_t* dinx,
-                              const int64_t* diny,
-                              int64_t* dout,
-                              int num) {
-  naive_elementwise_op<int64_t>(dinx, diny, dout, num, naive_add<int64_t>);
-}
 template <>
 void elementwise_add<float>(const float* dinx,
                             const float* diny,
@@ -261,18 +252,6 @@ void elementwise_add_broadcast<int32_t>(const int32_t* dinx,
       dinx, diny, dout, batch, channels, num, naive_add<int32_t>);
 }
-// todo: use arm intrinsics
-template <>
-void elementwise_add_broadcast<int64_t>(const int64_t* dinx,
-                                        const int64_t* diny,
-                                        int64_t* dout,
-                                        int batch,
-                                        int channels,
-                                        int num) {
-  naive_elementwise_op_broadcast<int64_t>(
-      dinx, diny, dout, batch, channels, num, naive_add<int64_t>);
-}
 template <>
 void elementwise_add_broadcast<float>(const float* dinx,
                                       const float* diny,
@@ -494,15 +473,6 @@ void elementwise_sub<int32_t>(const int32_t* dinx,
   naive_elementwise_op<int32_t>(dinx, diny, dout, num, naive_sub<int32_t>);
 }
-// todo: use arm intrinsics
-template <>
-void elementwise_sub<int64_t>(const int64_t* dinx,
-                              const int64_t* diny,
-                              int64_t* dout,
-                              int num) {
-  naive_elementwise_op<int64_t>(dinx, diny, dout, num, naive_sub<int64_t>);
-}
 template <>
 void elementwise_sub<float>(const float* dinx,
                             const float* diny,
@@ -615,18 +585,6 @@ void elementwise_sub_broadcast<int32_t>(const int32_t* dinx,
       dinx, diny, dout, batch, channels, num, naive_sub<int32_t>);
 }
-// todo: use arm intrinsics
-template <>
-void elementwise_sub_broadcast<int64_t>(const int64_t* dinx,
-                                        const int64_t* diny,
-                                        int64_t* dout,
-                                        int batch,
-                                        int channels,
-                                        int num) {
-  naive_elementwise_op_broadcast<int64_t>(
-      dinx, diny, dout, batch, channels, num, naive_sub<int64_t>);
-}
 template <>
 void elementwise_sub_broadcast<float>(const float* dinx,
                                       const float* diny,
@@ -398,16 +398,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
-using elementwise_add_int64_t =
-    paddle::lite::kernels::arm::ElementwiseAddCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_add, kARM, kInt64, kNCHW, elementwise_add_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
 REGISTER_LITE_KERNEL(
     fusion_elementwise_add_activation,
     kARM,
@@ -439,16 +429,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
-using elementwise_sub_int64_t =
-    paddle::lite::kernels::arm::ElementwiseSubCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_sub, kARM, kInt64, kNCHW, elementwise_sub_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
 REGISTER_LITE_KERNEL(
     fusion_elementwise_sub_activation,
     kARM,
@@ -479,16 +459,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
-using elementwise_mul_int64_t =
-    paddle::lite::kernels::arm::ElementwiseMulCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_mul, kARM, kInt64, kNCHW, elementwise_mul_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
 using fusion_elementwise_mul_activation_float_t = paddle::lite::kernels::arm::
     ElementwiseMulActivationCompute<float, PRECISION(kFloat)>;
 REGISTER_LITE_KERNEL(fusion_elementwise_mul_activation,
@@ -558,17 +528,6 @@ REGISTER_LITE_KERNEL(
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
     .Finalize();
-using elementwise_div_int64_t =
-    paddle::lite::kernels::arm::ElementwiseDivCompute<int64_t,
-                                                      PRECISION(kInt64)>;
-REGISTER_LITE_KERNEL(
-    elementwise_div, kARM, kInt64, kNCHW, elementwise_div_int64_t, def)
-    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))})
-    .Finalize();
 REGISTER_LITE_KERNEL(
     fusion_elementwise_div_activation,
     kARM,