From 644d97f5da20f101136c3adadd3645376a49d6ba Mon Sep 17 00:00:00 2001 From: chenjiaoAngel Date: Fri, 8 May 2020 11:30:56 +0800 Subject: [PATCH] fix format. test=develop --- lite/backends/arm/math/gemm_prepacked_int8.cc | 63 +++++++++++-------- lite/backends/arm/math/gemv_arm_int8.cc | 18 +++--- lite/tests/math/gemm_int8_compute_test.cc | 1 + 3 files changed, 46 insertions(+), 36 deletions(-) diff --git a/lite/backends/arm/math/gemm_prepacked_int8.cc b/lite/backends/arm/math/gemm_prepacked_int8.cc index f45ad27415..d3150ac58d 100644 --- a/lite/backends/arm/math/gemm_prepacked_int8.cc +++ b/lite/backends/arm/math/gemm_prepacked_int8.cc @@ -4252,18 +4252,18 @@ void gemm_prepack_int8(const int8_t* A_packed, } #else gemm_prepack_oth_int8(A_packed, - B, - bias, - C, - M, - N, - K, - is_bias, - flag_act, - is_transB, - scale, - alpha, - ctx); + B, + bias, + C, + M, + N, + K, + is_bias, + flag_act, + is_transB, + scale, + alpha, + ctx); #endif } @@ -4319,22 +4319,33 @@ void gemm_prepack_int8(const int8_t* A_packed, ctx); } else { gemm_prepack_oth_int8(A_packed, - B, - bias, - C, - M, - N, - K, - is_bias, - flag_act, - is_transB, - scale, - alpha, - ctx); + B, + bias, + C, + M, + N, + K, + is_bias, + flag_act, + is_transB, + scale, + alpha, + ctx); } #else - gemm_prepack_oth_int8( - A_packed, B, bias, C, M, N, K, is_bias, flag_act, is_transB, scale, alpha, ctx); + gemm_prepack_oth_int8(A_packed, + B, + bias, + C, + M, + N, + K, + is_bias, + flag_act, + is_transB, + scale, + alpha, + ctx); #endif } diff --git a/lite/backends/arm/math/gemv_arm_int8.cc b/lite/backends/arm/math/gemv_arm_int8.cc index 86f7f47d8d..f9843a787d 100644 --- a/lite/backends/arm/math/gemv_arm_int8.cc +++ b/lite/backends/arm/math/gemv_arm_int8.cc @@ -65,17 +65,15 @@ inline void write_gemv_out(const int* in, vout1 = vmaxq_f32(vout1, vzero); vout0 = vminq_f32(vout0, vsix); vout1 = vminq_f32(vout1, vsix); + } else if (act == lite_api::ActivationType::kLeakyRelu) { + float32x4_t valpha = vdupq_n_f32(alpha); 
+ uint32x4_t maska = vcgeq_f32(vout0, vzero); + uint32x4_t maskb = vcgeq_f32(vout1, vzero); + float32x4_t suma = vmulq_f32(vout0, valpha); + float32x4_t sumb = vmulq_f32(vout1, valpha); + vout0 = vbslq_f32(maska, vout0, suma); + vout1 = vbslq_f32(maskb, vout1, sumb); } - vout0 = vmaxq_f32(vout0, vzero); - vout1 = vmaxq_f32(vout1, vzero); - } else if (act == lite_api::ActivationType::kLeakyRelu) { - float32x4_t valpha = vdupq_n_f32(alpha); - uint32x4_t maska = vcgeq_f32(vout0, vzero); - uint32x4_t maskb = vcgeq_f32(vout1, vzero); - float32x4_t suma = vmulq_f32(vout0, valpha); - float32x4_t sumb = vmulq_f32(vout1, valpha); - vout0 = vbslq_f32(maska, vout0, suma); - vout1 = vbslq_f32(maskb, vout1, sumb); } vst1q_f32(out, vout0); vst1q_f32(out + 4, vout1); diff --git a/lite/tests/math/gemm_int8_compute_test.cc b/lite/tests/math/gemm_int8_compute_test.cc index 5c5a19a95c..ef8e59261b 100644 --- a/lite/tests/math/gemm_int8_compute_test.cc +++ b/lite/tests/math/gemm_int8_compute_test.cc @@ -23,6 +23,7 @@ #include "lite/core/profile/timer.h" #include "lite/core/tensor.h" #include "lite/tests/utils/tensor_utils.h" +#include "lite/operators/op_params.h" typedef paddle::lite::Tensor Tensor; using paddle::lite::profile::Timer; -- GitLab