/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Base convolution operator definitions, for other conv-like
// operators to reuse the implementation.
inline int OutputSize(int input_size, int filter_size, int dilation,
                      int padding_up, int padding_down, int stride) {
  int output_size = (input_size + padding_up + padding_down -
                     (dilation * (filter_size - 1) + 1)) /
                        stride +
                    1;
  return output_size;
}
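// For example (hypothetical values): input_size = 5, filter_size = 3,
// dilation = 1, padding_up = padding_down = 1, and stride = 1 give an
// effective filter extent of dilation * (filter_size - 1) + 1 = 3, so
// output_size = (5 + 1 + 1 - 3) / 1 + 1 = 5 (a "same" convolution).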

// Define Op classes in .h file so that other conv
// operator implementations can reuse the code.
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DOpMaker(framework::OpProto* proto,
                framework::OpAttrChecker* op_checker);
};

class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DOpMaker(framework::OpProto* proto,
                framework::OpAttrChecker* op_checker);
};

class ConvOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;
};

class ConvOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;
};

template <typename Place, typename T>
class GemmConvKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped in the calculations,
    // so we make a local copy here (via assignment)
    // to avoid modifying the variable in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");
    output->mutable_data<T>(context.GetPlace());

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    int groups = context.Attr<int>("groups");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    filter_shape_vec.erase(filter_shape_vec.begin(),
                           filter_shape_vec.begin() + 2);

    // output_shape_vec: {o_h, o_w} or {o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));
    output_shape_vec.erase(output_shape_vec.begin(),
                           output_shape_vec.begin() + 2);

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    std::vector<int64_t> col_shape_vec;
    col_shape_vec.push_back(input->dims()[1] / groups);
    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(),
                         filter_shape_vec.end());
    col_shape_vec.insert(col_shape_vec.end(), output_shape_vec.begin(),
                         output_shape_vec.end());
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));
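    // As a concrete illustration (hypothetical sizes): with i_c = 8,
    // groups = 2, a 3x3 filter, and a 32x32 output, col_shape is
    // {4, 3, 3, 32, 32}.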

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d *
    // o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1);
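    // Continuing the hypothetical example above: collapsing the first
    // filter_shape_vec.size() + 1 = 3 dimensions of {4, 3, 3, 32, 32}
    // gives col_matrix_shape = {4 * 3 * 3, 32 * 32} = {36, 1024}.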

    Tensor col;
    col.mutable_data<T>(col_shape, context.GetPlace());
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output->dims()[1],
        output->numel() / (output->dims()[0] * output->dims()[1])};

    // convolution operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output->dims()[1]) / groups;
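    // Per group, the forward gemm below computes
    //   filter_slice (o_c/g, i_c/g * k_h * k_w)
    //     x col_matrix (i_c/g * k_h * k_w, o_h * o_w)
    //     = out_slice  (o_c/g, o_h * o_w).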

    for (int i = 0; i < batch_size; i++) {
      Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
      Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape);
      for (int g = 0; g < groups; g++) {
        Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

        if (filter_shape_vec.size() == 2) {
          // im2col
          math::Im2ColFunctor<math::ColFormat::kCFO, Place, T> im2col;
          im2col(context.device_context(), in_slice, col, dilations[0],
                 dilations[1], strides[0], strides[1], paddings[0], paddings[0],
                 paddings[1], paddings[1]);
        } else if (filter_shape_vec.size() == 3) {
          // vol2col
          math::Vol2ColFunctor<Place, T> vol2col;
          vol2col(context.device_context(), in_slice, col, strides[0],
                  strides[1], strides[2], paddings[0], paddings[1],
                  paddings[2]);
        }
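        // col now holds the unfolded input patches of this slice,
        // one column per output spatial location.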

        // gemm
        Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
        Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
        math::matmul<Place, T>(context.device_context(), filter_slice, false,
                               col_matrix, false, T(1.0), &out_slice, T(0.0));
      }
    }
  }
};

template <typename Place, typename T>
class GemmConvGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));
    // The filter and filter_grad will be reshaped in the calculations,
    // so we make a local copy of the filter here (via assignment)
    // to avoid modifying the variable in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");

    if (!input_grad && !filter_grad) return;

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    int groups = context.Attr<int>("groups");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    filter_shape_vec.erase(filter_shape_vec.begin(),
                           filter_shape_vec.begin() + 2);

    // output_shape_vec: {o_h, o_w} or {o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(
        framework::vectorize(output_grad->dims()));
    output_shape_vec.erase(output_shape_vec.begin(),
                           output_shape_vec.begin() + 2);

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    std::vector<int64_t> col_shape_vec;
    col_shape_vec.push_back(input->dims()[1] / groups);
    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(),
                         filter_shape_vec.end());
    col_shape_vec.insert(col_shape_vec.end(), output_shape_vec.begin(),
                         output_shape_vec.end());
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w)
    // or
    // (i_c/g * k_d * k_h * k_w, o_d * o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1);

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output_grad->dims()[1],
        output_grad->numel() /
            (output_grad->dims()[0] * output_grad->dims()[1])};

    // convolution backward input operator:  gemm + col2im(or col2vol)
    // convolution backward weight operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output_grad->dims()[1]) / groups;

    Tensor col;
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col.mutable_data<T>(col_shape, context.GetPlace());
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    math::SetConstant<Place, T> set_zero;

    if (input_grad) {
      input_grad->mutable_data<T>(context.GetPlace());
      set_zero(context.device_context(), input_grad, static_cast<T>(0));

      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_grad_batch = input_grad->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // gemm
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
          math::matmul<Place, T>(context.device_context(), filter_slice, true,
                                 out_grad_slice, false, T(1.0), &col_matrix,
                                 T(0.0));
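          // With filter_slice transposed, this gemm computes
          //   filter_slice^T  (i_c/g * k_h * k_w, o_c/g)
          //     x out_grad_slice (o_c/g, o_h * o_w)
          //     = col_matrix     (i_c/g * k_h * k_w, o_h * o_w),
          // which col2im (or col2vol) folds back into in_grad_slice below.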
          // col2im
          Tensor in_grad_slice =
              in_grad_batch.Slice(g * in_step, (g + 1) * in_step);

          if (filter_shape_vec.size() == 2) {
            math::Col2ImFunctor<math::ColFormat::kCFO, Place, T> col2im;
            col2im(context.device_context(), in_grad_slice, col, dilations[0],
                   dilations[1], strides[0], strides[1], paddings[0],
                   paddings[0], paddings[1], paddings[1]);

          } else if (filter_shape_vec.size() == 3) {
            math::Col2VolFunctor<Place, T> col2vol;
            col2vol(context.device_context(), in_grad_slice, col, strides[0],
                    strides[1], strides[2], paddings[0], paddings[1],
                    paddings[2]);
          }
        }
      }
    }

    if (filter_grad) {
      filter_grad->mutable_data<T>(context.GetPlace());
      Tensor filter_grad_ = *filter_grad;
      filter_grad_.Resize(filter_matrix_shape);
      set_zero(context.device_context(), filter_grad, static_cast<T>(0));

      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // im2col
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

          if (filter_shape_vec.size() == 2) {
            math::Im2ColFunctor<math::ColFormat::kCFO, Place, T> im2col;
            im2col(context.device_context(), in_slice, col, dilations[0],
                   dilations[1], strides[0], strides[1], paddings[0],
                   paddings[0], paddings[1], paddings[1]);
          } else if (filter_shape_vec.size() == 3) {
            math::Vol2ColFunctor<Place, T> vol2col;
            vol2col(context.device_context(), in_slice, col, strides[0],
                    strides[1], strides[2], paddings[0], paddings[1],
                    paddings[2]);
          }

          // gemm
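          // Note that matmul is called with beta = T(1.0) below, so the
          // filter gradient accumulates over all batch samples (the buffer
          // was zero-initialized by set_zero above).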
          Tensor filter_grad_slice =
              filter_grad_.Slice(g * out_step, (g + 1) * out_step);
          math::matmul<Place, T>(context.device_context(), out_grad_slice,
                                 false, col_matrix, true, T(1.0),
                                 &filter_grad_slice, T(1.0));
        }
      }
    }
  }
};
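
// A minimal sketch (an assumption, not part of this header) of how these
// kernels would typically be registered in the corresponding conv_op.cc,
// assuming the operator names "conv2d" and "conv2d_grad":
//
//   namespace ops = paddle::operators;
//   REGISTER_OP(conv2d, ops::ConvOp, ops::Conv2DOpMaker, conv2d_grad,
//               ops::ConvOpGrad);
//   REGISTER_OP_CPU_KERNEL(
//       conv2d, ops::GemmConvKernel<paddle::platform::CPUPlace, float>);
//   REGISTER_OP_CPU_KERNEL(
//       conv2d_grad,
//       ops::GemmConvGradKernel<paddle::platform::CPUPlace, float>);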
}  // namespace operators
}  // namespace paddle