diff --git a/src/operators/kernel/central-arm-func/conv_add_bn_relu_func.h b/src/operators/kernel/central-arm-func/conv_add_bn_relu_func.h index bf96a2d46fd96516743127b71db57496e35b8a77..a2604ebd49789ba06c85bbbf8ba4aef0d737ac7d 100644 --- a/src/operators/kernel/central-arm-func/conv_add_bn_relu_func.h +++ b/src/operators/kernel/central-arm-func/conv_add_bn_relu_func.h @@ -17,11 +17,10 @@ limitations under the License. */ #pragma once #include "operators/math/depthwise_conv_3x3.h" #include "operators/op_param.h" + namespace paddle_mobile { namespace operators { - -template -void ConvAddBNReluCompute(const FusionConvAddBNReluParam ¶m) { +void ConvAddBNReluBasic(const FusionConvAddBNReluParam ¶m) { const Tensor *input = param.Input(); Tensor filter = *param.Filter(); Tensor bias = *param.Bias(); @@ -30,105 +29,121 @@ void ConvAddBNReluCompute(const FusionConvAddBNReluParam ¶m) { auto new_bias_ptr = new_bias.data(); auto new_scale_ptr = new_scale.data(); int axis = param.Axis(); + Tensor *output = param.Output(); + math::expand_bias(bias, axis, output->dims()); + output->ShareDataWith(bias); int groups = param.Groups(); std::vector strides = param.Strides(); std::vector paddings = param.Paddings(); std::vector dilations = param.Dilations(); - Tensor *output = param.Output(); + + const int batch_size = static_cast(input->dims()[0]); + std::vector filter_shape_vec(framework::vectorize(filter.dims())); - if (filter_shape_vec[2] == 3 && strides[0] == 1 && groups > 1) { - math::DepthwiseConvAddBNRelu3x3s1p1(input, filter, output, &bias, 1, - &new_scale, &new_bias, 1, 1); - } else { - const int batch_size = static_cast(input->dims()[0]); - - math::expand_bias(bias, axis, output->dims()); - output->ShareDataWith(bias); - - std::vector output_shape_vec(framework::vectorize(output->dims())); - size_t data_dim = filter_shape_vec.size() - 2; - std::vector col_shape_vec(1 + 2 * data_dim); - col_shape_vec[0] = input->dims()[1] / groups; - for (size_t j = 0; j < data_dim; ++j) { - 
col_shape_vec[j + 1] = filter_shape_vec[j + 2]; - col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2]; - } - framework::DDim col_shape(framework::make_ddim(col_shape_vec)); - - framework::DDim col_matrix_shape = - framework::flatten_to_2d(col_shape, data_dim + 1); - - bool is_expand = - math::IsExpand(filter_shape_vec, strides, paddings, dilations); - Tensor col; - Tensor col_matrix; - if (is_expand) { - col.mutable_data(col_shape); - col_matrix.ShareDataWith(col); - col_matrix.Resize(col_matrix_shape); - } + std::vector output_shape_vec(framework::vectorize(output->dims())); + size_t data_dim = filter_shape_vec.size() - 2; + std::vector col_shape_vec(1 + 2 * data_dim); + col_shape_vec[0] = input->dims()[1] / groups; + for (size_t j = 0; j < data_dim; ++j) { + col_shape_vec[j + 1] = filter_shape_vec[j + 2]; + col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2]; + } + framework::DDim col_shape(framework::make_ddim(col_shape_vec)); + + framework::DDim col_matrix_shape = + framework::flatten_to_2d(col_shape, data_dim + 1); + + bool is_expand = + math::IsExpand(filter_shape_vec, strides, paddings, dilations); + Tensor col; + Tensor col_matrix; + if (is_expand) { + col.mutable_data(col_shape); + col_matrix.ShareDataWith(col); + col_matrix.Resize(col_matrix_shape); + } - framework::DDim input_shape = framework::slice_ddim( - input->dims(), 1, static_cast(input->dims().size())); - - framework::DDim filter_matrix_shape = {filter.dims()[0], - filter.numel() / filter.dims()[0]}; - filter.Resize(filter_matrix_shape); - framework::DDim output_matrix_shape = { - output->dims()[1], - output->numel() / (output->dims()[0] * output->dims()[1])}; - - // convolution operator: im2col(or vol2col) + gemm - int in_step = static_cast(input->dims()[1]) / groups; - int out_step = static_cast(output->dims()[1]) / groups; - - math::Vol2ColFunctor vol2col; - math::Im2ColFunctor im2col; - - for (int i = 0; i < batch_size; i++) { - Tensor in_batch = input->Slice(i, i + 
1).Resize(input_shape); - Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape); - - for (int g = 0; g < groups; g++) { - Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); - - if (!is_expand) { - col.ShareDataWith(in_slice); - col_matrix.ShareDataWith(col); - col_matrix.Resize(col_matrix_shape); - } else if (data_dim == 2U) { - // im2col - im2col(in_slice, dilations, strides, - std::vector{paddings[0], paddings[1], paddings[0], - paddings[1]}, - &col); - } else if (data_dim == 3U) { - // vol2col - vol2col(in_slice, dilations, strides, paddings, &col); - } - - // gemm - Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); - Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); - math::matmul(filter_slice, false, col_matrix, false, - static_cast(1), &out_slice, - static_cast(1), false); + framework::DDim input_shape = framework::slice_ddim( + input->dims(), 1, static_cast(input->dims().size())); + + framework::DDim filter_matrix_shape = {filter.dims()[0], + filter.numel() / filter.dims()[0]}; + filter.Resize(filter_matrix_shape); + framework::DDim output_matrix_shape = { + output->dims()[1], + output->numel() / (output->dims()[0] * output->dims()[1])}; + + // convolution operator: im2col(or vol2col) + gemm + int in_step = static_cast(input->dims()[1]) / groups; + int out_step = static_cast(output->dims()[1]) / groups; + + math::Vol2ColFunctor vol2col; + math::Im2ColFunctor im2col; + + for (int i = 0; i < batch_size; i++) { + Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape); + Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape); + + for (int g = 0; g < groups; g++) { + Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); + + if (!is_expand) { + col.ShareDataWith(in_slice); + col_matrix.ShareDataWith(col); + col_matrix.Resize(col_matrix_shape); + } else if (data_dim == 2U) { + // im2col + im2col(in_slice, dilations, strides, + std::vector{paddings[0], 
paddings[1], paddings[0], + paddings[1]}, + &col); + } else if (data_dim == 3U) { + // vol2col + vol2col(in_slice, dilations, strides, paddings, &col); } + // gemm + Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); + Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); + math::matmul(filter_slice, false, col_matrix, false, + static_cast(1), &out_slice, + static_cast(1)); } - - auto output_ptr = output->data(); - for (int c = 0; c < output_matrix_shape[0]; c++) { - int start = c * output_matrix_shape[1]; - for (int j = 0; j < output_matrix_shape[1]; j++) { - output_ptr[start + j] = - output_ptr[start + j] * new_scale_ptr[c] + new_bias_ptr[c]; - output_ptr[start + j] = - output_ptr[start + j] < 0 ? 0 : output_ptr[start + j]; - } + } + /// todo : use neon in special case instead of 2for(300ms) + auto output_ptr = output->data(); + for (int c = 0; c < output_matrix_shape[0]; c++) { + int start = c * output_matrix_shape[1]; + for (int j = 0; j < output_matrix_shape[1]; j++) { + output_ptr[start + j] = + output_ptr[start + j] * new_scale_ptr[c] + new_bias_ptr[c]; + output_ptr[start + j] = + output_ptr[start + j] < 0 ? 
0 : output_ptr[start + j]; } } } +template +void ConvAddBNReluCompute(const FusionConvAddBNReluParam ¶m) { + Tensor Bias; + Bias.mutable_data({param.Groups()}); + if (param.Groups() == param.Input()->dims()[1] && + param.Input()->dims()[1] == param.Output()->dims()[1] && + param.Filter()->dims()[2] == param.Filter()->dims()[3] && + param.Filter()->dims()[2] == 3 && param.Strides()[0] == 1) { + math::DepthwiseConvAddBNRelu3x3s1p1( + param.Input(), param.Filter(), param.Output(), &Bias, 1, + param.NewScale(), param.NewBias(), 1, 1); + } else if (0 && param.Groups() == param.Input()->dims()[1] && + param.Input()->dims()[1] == param.Output()->dims()[1] && + param.Filter()->dims()[2] == param.Filter()->dims()[3] && + param.Filter()->dims()[2] == 3 && param.Strides()[0] == 2) { + math::DepthwiseConv3x3(param.Input(), param.Strides(), param.Paddings(), + param.Filter(), &Bias, param.Output(), false); + } else { + ConvAddBNReluBasic(param); + } +} + } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/math/depthwise_conv_3x3.cpp b/src/operators/math/depthwise_conv_3x3.cpp index 7d85ed1b6b680a3ce3fa8ce6061fe387fbb2f298..f7900f4696d84b4d52034319dc147010e2e861bc 100644 --- a/src/operators/math/depthwise_conv_3x3.cpp +++ b/src/operators/math/depthwise_conv_3x3.cpp @@ -508,12 +508,13 @@ void DepthwiseConv3x3s1p1(const Tensor *input, const Tensor *filter, } } -void DepthwiseConvAddBNRelu3x3s1p1(const Tensor *input, Tensor filter, +void DepthwiseConvAddBNRelu3x3s1p1(const Tensor *input, const Tensor *filter, Tensor *output, Tensor *bias, bool if_bias, - Tensor *new_scale, Tensor *new_bias, - bool if_bn, bool if_relu) { + const Tensor *new_scale, + const Tensor *new_bias, bool if_bn, + bool if_relu) { const float *input_data = input->data(); - const float *filter_data = filter.data(); + const float *filter_data = filter->data(); float *output_data = output->data(); const float *bias_data = bias->data(); const float *newscale_data = new_scale->data(); 
diff --git a/src/operators/math/depthwise_conv_3x3.h b/src/operators/math/depthwise_conv_3x3.h
index 44299295eebad6a90fd994cf74589c09a3573aee..a0beb479926902a71b7e06128aa8cecdd5443196 100644
--- a/src/operators/math/depthwise_conv_3x3.h
+++ b/src/operators/math/depthwise_conv_3x3.h
@@ -32,10 +32,11 @@ void DepthwiseConv3x3(const Tensor *input, vector<int> strides,
                       Tensor *output, bool if_bias);
 void DepthwiseConv3x3s1p1(const Tensor *input, const Tensor *filter,
                           Tensor *output, Tensor *bias, bool if_bias);
-void DepthwiseConvAddBNRelu3x3s1p1(const Tensor *input, Tensor filter,
+void DepthwiseConvAddBNRelu3x3s1p1(const Tensor *input, const Tensor *filter,
                                    Tensor *output, Tensor *bias, bool if_bias,
-                                   Tensor *new_scale, Tensor *new_bias,
-                                   bool if_bn, bool if_relu);
+                                   const Tensor *new_scale,
+                                   const Tensor *new_bias, bool if_bn,
+                                   bool if_relu);
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle_mobile