/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef CONV_OP

#pragma once
#include <vector>
#include "operators/kernel/central-arm-func/conv_arm_int8.h"
#include "operators/math/conv_func.h"
#include "operators/math/depthwise_conv_3x3.h"
#include "operators/math/im2col.h"
#include "operators/math/math_function.h"
#include "operators/math/pad.h"
#include "operators/math/vol2col.h"
#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

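// ConvBasic implements the generic float convolution as im2col (2-D) or
// vol2col (3-D) followed by a GEMM, processing one batch image and one
// filter group at a time.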
inline void ConvBasic(const ConvParam<CPU> &param) {
  const Tensor *input = param.Input();
  Tensor filter = *param.Filter();
  Tensor *output = param.Output();
  output->mutable_data<float>();
  int groups = param.Groups();
  std::vector<int> strides = param.Strides();
  std::vector<int> paddings = param.Paddings();
  std::vector<int> dilations = param.Dilations();

  const int batch_size = static_cast<int>(input->dims()[0]);

  std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));

  std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));
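  // The unrolled column buffer is laid out as {C/groups, kH, kW, oH, oW}
  // for 2-D convolution (one extra spatial dimension each for 3-D).
  // For example, a 3x3 filter over an 8-channel input with groups = 2 and
  // a 5x5 output gives col_shape = {4, 3, 3, 5, 5} and
  // col_matrix_shape = {36, 25}.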
  size_t data_dim = filter_shape_vec.size() - 2;
  std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
  col_shape_vec[0] = input->dims()[1] / groups;
  for (size_t j = 0; j < data_dim; ++j) {
    col_shape_vec[j + 1] = filter_shape_vec[j + 2];
    col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
  }
  framework::DDim col_shape(framework::make_ddim(col_shape_vec));

  framework::DDim col_matrix_shape =
      framework::flatten_to_2d(col_shape, data_dim + 1);

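  // is_expand is false only when the filter window never has to be
  // materialized (a 1x1 filter with unit stride, zero padding, and unit
  // dilation); in that case the input slice is reused as the column
  // matrix directly in the loop below.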
  bool is_expand =
      math::IsExpand(filter_shape_vec, strides, paddings, dilations);
  Tensor col;
  Tensor col_matrix;
  if (is_expand) {
    col.mutable_data<float>(col_shape);
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);
  }

  framework::DDim input_shape = framework::slice_ddim(
      input->dims(), 1, static_cast<int>(input->dims().size()));

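  // Flatten filter and output to 2-D matrices so each group reduces to a
  // single GEMM: the filter becomes {O, (C/groups) * kH * kW} and each
  // output image becomes {O, oH * oW}.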
  framework::DDim filter_matrix_shape = {filter.dims()[0],
                                         filter.numel() / filter.dims()[0]};
  filter.Resize(filter_matrix_shape);
  framework::DDim output_matrix_shape = {
      output->dims()[1],
      output->numel() / (output->dims()[0] * output->dims()[1])};

  // convolution operator: im2col(or vol2col) + gemm
  // Number of input/output channels handled by each group.
  int in_step = static_cast<int>(input->dims()[1]) / groups;
  int out_step = static_cast<int>(output->dims()[1]) / groups;

  math::Vol2ColFunctor<CPU, float> vol2col;
  math::Im2ColFunctor<math::ColFormat::kCFO, CPU, float> im2col;

  for (int i = 0; i < batch_size; i++) {
    Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
    Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape);

    for (int g = 0; g < groups; g++) {
      Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

      if (!is_expand) {
        col.ShareDataWith(in_slice);
        col_matrix.ShareDataWith(col);
        col_matrix.Resize(col_matrix_shape);
      } else if (data_dim == 2U) {
        // im2col
        im2col(in_slice, dilations, strides,
               std::vector<int>{paddings[0], paddings[1], paddings[0],
                                paddings[1]},
               &col);
      } else if (data_dim == 3U) {
        // vol2col
        vol2col(in_slice, dilations, strides, paddings, &col);
      }

      // gemm: out_slice {O/groups, oH*oW} =
      //   filter_slice {O/groups, K} * col_matrix {K, oH*oW},
      //   where K = (C/groups) * kH * kW
      Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
      Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
      math::matmul<float>(filter_slice, false, col_matrix, false,
                          static_cast<float>(1), &out_slice,
                          static_cast<float>(0));
    }
  }
}

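// ConvCompute_int8 runs convolution on int8 input, producing int32 output,
// by dispatching to specialized ARM kernels selected by kernel size and
// stride.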
inline void ConvCompute_int8(const ConvParam<CPU> &param) {
  typedef void (*ConvFunc)(const Tensor &input, const Tensor &kernel,
                           Tensor *output);
  static ConvFunc conv_funcs_table[7][5] = {
      {0, 0, 0, 0, 0},               // k = 1
      {0, 0, 0, 0, 0},               // k = 2
      {conv3x3s1_int8, 0, 0, 0, 0},  // k = 3
      {0, 0, 0, 0, 0},               // k = 4
      {conv5x5s1_int8, 0, 0, 0, 0},  // k = 5
      {0, 0, 0, 0, 0},               // k = 6
      {0, 0, 0, 0, 0},               // k = 7
  };
  const Tensor *input = param.Input();
  Tensor *filter = param.Filter();
  Tensor *output = param.Output();
  output->mutable_data<int32_t>();
  int groups = param.Groups();
  std::vector<int> strides = param.Strides();
  std::vector<int> paddings = param.Paddings();
  std::vector<int> dilations = param.Dilations();
  int kernel_h = static_cast<int>(filter->dims()[2]);
  int kernel_w = static_cast<int>(filter->dims()[3]);
  const int batch_size = static_cast<int>(input->dims()[0]);
  math::PadFunctor<CPU, int8_t> pad;

  Tensor input_pad;
  for (int i = 0; i < batch_size; ++i) {
    Tensor in_batch = input->Slice(i, i + 1);
    Tensor out_batch = output->Slice(i, i + 1);
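    // The specialized kernels take no padding arguments, so any nonzero
    // padding is applied explicitly into input_pad first.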
    if (paddings[0] == 0 && paddings[1] == 0) {
      input_pad = in_batch;
    } else {
      framework::DDim pad_shape = in_batch.dims();
      pad_shape[2] += 2 * paddings[0];
      pad_shape[3] += 2 * paddings[1];
      input_pad.mutable_data<int8_t>(pad_shape);
      pad(in_batch, paddings[0], paddings[1], &input_pad);
    }

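    // Specialized kernels exist only for square kernels (k < 8) with equal
    // strides (< 6), groups == 1, and dilation 1. The table is indexed by
    // [k - 1][stride - 1], e.g. a 3x3 stride-1 kernel maps to
    // conv_funcs_table[2][0].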
    if (strides[1] == strides[0] && strides[1] < 6 && kernel_h == kernel_w &&
        kernel_h < 8 && groups == 1 && dilations[0] == dilations[1] &&
        dilations[1] == 1) {
      ConvFunc conv_func = conv_funcs_table[kernel_h - 1][strides[0] - 1];
      if (conv_func) {
        conv_func(input_pad, *filter, &out_batch);
      } else {
        // TODO(hjchen2)
      }
    } else {
      // TODO(hjchen2)
    }
  }
}

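// Entry point: int8 input takes the int8 path; float input uses the
// depthwise 3x3 fast paths when the shape allows, otherwise ConvBasic.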
template <typename P>
void ConvCompute(const ConvParam<CPU> &param) {
  if (param.Input()->type() == typeid(int8_t)) {
    ConvCompute_int8(param);
  } else {
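    // Depthwise case: one group per input channel with a square 3x3
    // filter. Stride 1 uses the s1p1 kernel; other strides use the
    // general 3x3 depthwise implementation.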
    if (param.Groups() == param.Input()->dims()[1] &&
        param.Input()->dims()[1] == param.Output()->dims()[1] &&
        param.Filter()->dims()[2] == param.Filter()->dims()[3] &&
        param.Filter()->dims()[2] == 3 && param.Strides()[0] == 1) {
      math::DepthwiseConv3x3s1p1(param.Input(), param.Filter(), param.Output(),
                                 nullptr, false);
    } else if (param.Groups() == param.Input()->dims()[1] &&
               param.Input()->dims()[1] == param.Output()->dims()[1] &&
               param.Filter()->dims()[2] == param.Filter()->dims()[3] &&
               param.Filter()->dims()[2] == 3) {
      math::DepthwiseConv3x3(param.Input(), param.Strides(), param.Paddings(),
                             param.Filter(), nullptr, param.Output(), false);
    } else {
      ConvBasic(param);
    }
  }
}

}  // namespace operators
}  // namespace paddle_mobile

#endif