From cb44f3572c9f57a7016c700d5f39396b8d135933 Mon Sep 17 00:00:00 2001
From: eclipsess
Date: Fri, 29 Jun 2018 18:11:30 +0800
Subject: [PATCH] format

---
 src/common/types.cpp                          |  3 +--
 src/operators/fusion_conv_add_bn_relu_op.h    | 16 ++++++------
 .../central-arm-func/batchnorm_arm_func.h     | 15 ++++++-----
 .../central-arm-func/conv_add_bn_relu_func.h  | 11 ++++----
 .../kernel/mali/batchnorm_kernel.cpp          |  2 +-
 src/operators/kernel/mali/conv_kernel.cpp     |  2 +-
 .../kernel/mali/elementwise_add_kernel.cpp    |  3 ++-
 src/operators/math/depthwise_conv_3x3.cpp     |  2 +-
 src/operators/math/depthwiseconv3x3s1p1.cpp   | 25 +++++++++++--------
 src/operators/op_param.h                      |  6 ++---
 10 files changed, 45 insertions(+), 40 deletions(-)

diff --git a/src/common/types.cpp b/src/common/types.cpp
index d1647ab235..1611a919ff 100644
--- a/src/common/types.cpp
+++ b/src/common/types.cpp
@@ -60,8 +60,7 @@ std::unordered_map<
     {G_OP_TYPE_TRANSPOSE, {{"X"}, {"Out"}}},
     {G_OP_TYPE_BOX_CODER,
      {{"PriorBox", "PriorBoxVar", "TargetBox"}, {"OutputBox"}}},
-    {G_OP_TYPE_FUSION_CONV_ADD_BN_RELU,
-     {{"Input"}, {"Out"}}},
+    {G_OP_TYPE_FUSION_CONV_ADD_BN_RELU, {{"Input"}, {"Out"}}},
     {G_OP_TYPE_PRIOR_BOX, {{"Image", "Input"}, {"Boxes", "Variances"}}},
     {G_OP_TYPE_MULTICLASS_NMS, {{"BBoxes", "Scores"}, {"Out"}}},
     {G_OP_TYPE_FC, {{"X", "Y", "Z"}, {"Out"}}},
diff --git a/src/operators/fusion_conv_add_bn_relu_op.h b/src/operators/fusion_conv_add_bn_relu_op.h
index 0091e30f58..0f2eabd7c5 100644
--- a/src/operators/fusion_conv_add_bn_relu_op.h
+++ b/src/operators/fusion_conv_add_bn_relu_op.h
@@ -42,12 +42,14 @@ class FusionConvAddBNReluMatcher : public framework::FusionOpMatcher {
             std::vector> *removed_nodes) {
     vector> origin_descs =
         node->OpDescs(node_.Depth());
-    node->Folder(node_.Depth(), Type(),
-                 { {G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Y"}}},
-                   {G_OP_TYPE_BATCHNORM, {{"Scale", "Scale"},
-                                          {"Mean", "Mean"},
-                                          {"Bias", "Bias"},
-                                          {"Variance", "Variance"}}}}, removed_nodes);
+    node->Folder(node_.Depth(), Type(),
+                 {{G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Y"}}},
+                  {G_OP_TYPE_BATCHNORM,
+                   {{"Scale", "Scale"},
+                    {"Mean", "Mean"},
+                    {"Bias", "Bias"},
+                    {"Variance", "Variance"}}}},
+                 removed_nodes);
   }
 
   std::string Type() { return G_OP_TYPE_FUSION_CONV_ADD_BN_RELU; }
@@ -78,7 +80,7 @@ class FusionConvAddBNReluOp
 
 #ifdef PADDLE_MOBILE_CPU
 //#ifndef FUSION_CONV_ADD_BN_RELU_REGISTER
-//static framework::FusionOpRegistrar fusion_conv_add_bn_relu_registrar(
+// static framework::FusionOpRegistrar fusion_conv_add_bn_relu_registrar(
 //    new FusionConvAddBNReluMatcher());
 //#define FUSION_CONV_ADD_BN_RELU_REGISTER
 //#endif
diff --git a/src/operators/kernel/central-arm-func/batchnorm_arm_func.h b/src/operators/kernel/central-arm-func/batchnorm_arm_func.h
index 5813f39471..0445326c04 100644
--- a/src/operators/kernel/central-arm-func/batchnorm_arm_func.h
+++ b/src/operators/kernel/central-arm-func/batchnorm_arm_func.h
@@ -53,7 +53,7 @@ void BatchnormCompute(const BatchNormParam &param) {
                         "C must equal to variance.numel()");
 
   int HXW = H * W;
-  if (0&&HXW > 32) {
+  if (0 && HXW > 32) {
     int NXC = N * C;
     float *inv_std_ptr = new float[NXC * 4];
     float *volatile new_scale_ptr = new float[NXC * 4];
@@ -222,15 +222,14 @@ void BatchnormCompute(const BatchNormParam &param) {
           }
         }
       }
-
     }
   }
-//  for(int i = 0; i < new_scale.numel(); i++){
-//    std::cout << "new_scale " << new_scale_ptr[i] <data();
-  for (int c = 0; c < output_matrix_shape[0]; c++){
     //    int start = c * output_matrix_shape[1];
-    for (int j = 0; j < output_matrix_shape[1]; j++){
-      //    output_ptr[start + j] = output_ptr[start +j]*new_scale_ptr[c]+new_bias_ptr[c];
      //    output_ptr[start + j] = output_ptr[start+j]< 0 ? 0 : output_ptr[start +j];
+    for (int j = 0; j < output_matrix_shape[1]; j++) {
+      // output_ptr[start + j] = output_ptr[start
+      // +j]*new_scale_ptr[c]+new_bias_ptr[c]; output_ptr[start + j] =
+      // output_ptr[start+j]< 0 ? 0 : output_ptr[start +j];
     }
   }
 }
diff --git a/src/operators/kernel/mali/batchnorm_kernel.cpp b/src/operators/kernel/mali/batchnorm_kernel.cpp
index 0447361c0b..ebd94e166e 100644
--- a/src/operators/kernel/mali/batchnorm_kernel.cpp
+++ b/src/operators/kernel/mali/batchnorm_kernel.cpp
@@ -128,7 +128,7 @@ class AclBatchNormOp : public acl::ACLOperator {
 };
 
 template <>
-bool BatchNormKernel::Init(BatchNormParam *param) const {
+bool BatchNormKernel::Init(BatchNormParam* param) const {
   AclBatchNormOp* acl_op =
       reinterpret_cast*>(this->GetAclOp());
   if (acl_op == nullptr) {
diff --git a/src/operators/kernel/mali/conv_kernel.cpp b/src/operators/kernel/mali/conv_kernel.cpp
index 687a53fbee..61e1e53dc7 100644
--- a/src/operators/kernel/mali/conv_kernel.cpp
+++ b/src/operators/kernel/mali/conv_kernel.cpp
@@ -195,7 +195,7 @@ class AclConvOp : public acl::ACLOperator {
 };
 
 template <>
-bool ConvKernel::Init(ConvParam *param) const {
+bool ConvKernel::Init(ConvParam* param) const {
   AclConvOp* acl_op =
       reinterpret_cast*>(this->GetAclOp());
   if (acl_op == nullptr) {
diff --git a/src/operators/kernel/mali/elementwise_add_kernel.cpp b/src/operators/kernel/mali/elementwise_add_kernel.cpp
index 80d660da35..a5471341ab 100644
--- a/src/operators/kernel/mali/elementwise_add_kernel.cpp
+++ b/src/operators/kernel/mali/elementwise_add_kernel.cpp
@@ -27,7 +27,8 @@ struct AddFunctor {
 };
 
 template <>
-bool ElementwiseAddKernel::Init(ElementwiseAddParam *param) const {
+bool ElementwiseAddKernel::Init(
+    ElementwiseAddParam *param) const {
   return true;
 }
 
diff --git a/src/operators/math/depthwise_conv_3x3.cpp b/src/operators/math/depthwise_conv_3x3.cpp
index fd65383b89..1f61eb0dd2 100644
--- a/src/operators/math/depthwise_conv_3x3.cpp
+++ b/src/operators/math/depthwise_conv_3x3.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
 #include "operators/math/depthwise_conv_3x3.h"
-#include 
+#include 
 
 namespace paddle_mobile {
 namespace operators {
diff --git a/src/operators/math/depthwiseconv3x3s1p1.cpp b/src/operators/math/depthwiseconv3x3s1p1.cpp
index ae327c6885..c03d1a0b74 100644
--- a/src/operators/math/depthwiseconv3x3s1p1.cpp
+++ b/src/operators/math/depthwiseconv3x3s1p1.cpp
@@ -66,10 +66,11 @@ void DepthwiseConv3x3s1p1(const Tensor *input, Tensor filter, Tensor *output,
       float w21 = filter_data_tmp[7];
       float w22 = filter_data_tmp[8];
 
-      output_data[0] =(w11 * input_data[0] + w12 * input_data[1] + w21 * input_data[l] +
-                       w22 * input_data[l + 1] + bias_data[j]) *
-                      newscale_data[j] +
-                      newbias_data[j];
+      output_data[0] =
+          (w11 * input_data[0] + w12 * input_data[1] + w21 * input_data[l] +
+           w22 * input_data[l + 1] + bias_data[j]) *
+              newscale_data[j] +
+          newbias_data[j];
       output_data[l - 1] =
           (w10 * input_data[l - 2] + w11 * input_data[l - 1] +
            w20 * input_data[2 * l - 2] + w21 * input_data[2 * l - 1] + bias_data[j]) *
@@ -88,11 +89,13 @@
            w11 * input_data[l * l - 1] + bias_data[j]) *
               newscale_data[j] +
           newbias_data[j];
-      if(if_relu){
+      if (if_relu) {
         output_data[0] = output_data[0] < 0 ? 0 : output_data[0];
-        output_data[l-1] = output_data[l-1] < 0 ? 0 : output_data[l-1];
-        output_data[(l-1)*l] = output_data[(l-1)*l] < 0 ? 0 : output_data[(l-1)*l];
-        output_data[l * l - 1] = output_data[l * l - 1] < 0 ? 0 : output_data[l * l - 1];
+        output_data[l - 1] = output_data[l - 1] < 0 ? 0 : output_data[l - 1];
+        output_data[(l - 1) * l] =
+            output_data[(l - 1) * l] < 0 ? 0 : output_data[(l - 1) * l];
+        output_data[l * l - 1] =
+            output_data[l * l - 1] < 0 ? 0 : output_data[l * l - 1];
       }
       for (int i = 1; i < l - 1; ++i) {
         output_data[i * l] =
@@ -111,9 +114,10 @@
              w21 * input_data[i * l + l - 1 + l] + bias_data[j]) *
                 newscale_data[j] +
             newbias_data[j];
-        if(if_relu){
+        if (if_relu) {
           output_data[i * l] = output_data[i * l] < 0 ? 0 : output_data[i * l];
-          output_data[i * l + l - 1] = output_data[i * l + l - 1] < 0 ? 0 : output_data[i * l + l - 1];
+          output_data[i * l + l - 1] =
+              output_data[i * l + l - 1] < 0 ? 0 : output_data[i * l + l - 1];
         }
       }
 
@@ -332,7 +336,6 @@
         filter_data_tmp += 9;
       }
     }
-
   }
 }  // namespace math
 }  // namespace operators
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index 7374d65daf..46014b4822 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -823,9 +823,9 @@ class FusionConvAddParam : public OpParam {
 
   const int &Groups() const { return groups; }
 
-  void Set(Tensor *t) {t_ = t;}
+  void Set(Tensor *t) { t_ = t; }
 
-  const Tensor *Get() const {return t_;}
+  const Tensor *Get() const { return t_; }
 
  protected:
   Tensor *bias_;
@@ -837,7 +837,7 @@ class FusionConvAddParam : public OpParam {
   vector paddings_;
   vector dilations_;
   int groups;
-  Tensor *t_;
+  Tensor *t_;
 };
 
 Print &operator<<(Print &printer, const FusionConvAddParam &conv_param);
--
GitLab
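
A note on the fused batch-norm math this patch touches: BatchnormCompute, the conv+add+BN+ReLU fusion, and DepthwiseConv3x3s1p1 all apply a precomputed per-channel scale and bias to each output element (see inv_std_ptr / new_scale_ptr / new_bias_ptr above and the newscale_data[j] / newbias_data[j] uses in the depthwise kernel). A minimal C++ sketch of that precomputation, assuming the standard inference-time batch-norm folding; FoldBatchNorm and its parameter names are illustrative only and do not come from the repository:

#include <cmath>
#include <cstddef>
#include <vector>

// Folds batch-norm statistics into a per-channel affine transform so that
//   y = x * new_scale[c] + new_bias[c]
// reproduces batch-norm at inference time, with
//   new_scale[c] = scale[c] / sqrt(variance[c] + epsilon)
//   new_bias[c]  = bias[c] - mean[c] * new_scale[c]
void FoldBatchNorm(const std::vector<float> &scale,
                   const std::vector<float> &bias,
                   const std::vector<float> &mean,
                   const std::vector<float> &variance, float epsilon,
                   std::vector<float> *new_scale,
                   std::vector<float> *new_bias) {
  const std::size_t C = scale.size();
  new_scale->resize(C);
  new_bias->resize(C);
  for (std::size_t c = 0; c < C; ++c) {
    const float inv_std = 1.0f / std::sqrt(variance[c] + epsilon);
    (*new_scale)[c] = scale[c] * inv_std;
    (*new_bias)[c] = bias[c] - mean[c] * (*new_scale)[c];
  }
}

With the statistics folded this way, the fused kernels only need one multiply-add per output element (plus the optional ReLU clamp) instead of a separate batch-norm pass over the feature map.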