/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Base convolution operator definitions for other conv
// like operators to reuse the implementation.
inline int OutputSize(int input_size, int filter_size, int dilation,
                      int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  const int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
  return output_size;
}
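
// Worked example of the dilated output-size formula above:
// input_size = 5, filter_size = 3, dilation = 2, padding = 1, stride = 2
//   dkernel     = 2 * (3 - 1) + 1         = 5  (effective kernel extent)
//   output_size = (5 + 2 * 1 - 5) / 2 + 1 = 2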
inline bool IsExpand(const std::vector<int64_t>& filter_dim,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations) {
  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
  for (size_t j = 0; j < strides.size(); ++j) {
    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
    strides_1 = strides_1 && (strides[j] == 1);
    padding_0 = padding_0 && (paddings[j] == 0);
    dilation_1 = dilation_1 && (dilations[j] == 1);
  }
  return !(filter_1 && strides_1 && padding_0 && dilation_1);
}
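
// When the filter is 1x1 (or 1x1x1) with stride 1, padding 0, and dilation 1,
// im2col/vol2col is the identity mapping, so the kernels below skip the
// expansion and use the input slice directly as the column matrix.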

// Define Op classes in .h file so that other conv
// operator implementations can reuse the code.
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker);
};

class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker);
};

class ConvOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};

class ConvOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};
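
// Illustrative sketch of how a corresponding conv_op.cc might register these
// classes (the exact macro names and arguments depend on the framework
// version, so treat this as an assumption, not the actual registration code):
//
//   REGISTER_OP(conv2d, ops::ConvOp, ops::Conv2DOpMaker, conv2d_grad,
//               ops::ConvOpGrad);
//   REGISTER_OP_CPU_KERNEL(
//       conv2d,
//       ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>);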

template <typename DeviceContext, typename T>
class GemmConvKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped in the calculations,
    // so here we use an assignment operation
    // that avoids modifying the variable in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");
    output->mutable_data<T>(context.GetPlace());

    int groups = context.Attr<int>("groups");
    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    // output_shape_vec: {o_n, o_c, o_h, o_w} or {o_n, o_c, o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = input->dims()[1] / groups;
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
    }
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d *
    // o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, data_dim + 1);

    bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations);
    Tensor col;
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    if (is_expand) {
      col.mutable_data<T>(col_shape, context.GetPlace());
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);
    }

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output->dims()[1],
        output->numel() / (output->dims()[0] * output->dims()[1])};

    // convolution operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output->dims()[1]) / groups;
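    // Per group, in_step input channels produce out_step output channels;
    // the loops below iterate over batch items and groups, expanding each
    // input slice with im2col/vol2col and multiplying by its filter slice.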

    math::Vol2ColFunctor<DeviceContext, T> vol2col;
    math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col;

    auto& dev_ctx = context.template device_context<DeviceContext>();
    for (int i = 0; i < batch_size; i++) {
      Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
      Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape);

      for (int g = 0; g < groups; g++) {
        Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

        if (!is_expand) {
          col.ShareDataWith(in_slice);
          col_matrix.ShareDataWith(col);
          col_matrix.Resize(col_matrix_shape);
        } else if (data_dim == 2U) {
          // im2col
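          // (the 4-element vector expands the {pad_h, pad_w} paddings to
          //  {up, left, down, right} as consumed by the 2-D im2col functor)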
          im2col(dev_ctx, in_slice, dilations, strides,
                 std::vector<int>{paddings[0], paddings[1], paddings[0],
                                  paddings[1]},
                 &col);
        } else if (data_dim == 3U) {
          // vol2col
          vol2col(dev_ctx, in_slice, dilations, strides, paddings, &col);
        }

        // gemm
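        // out_slice(o_c/g, o_h * o_w) =
        //     filter_slice(o_c/g, i_c/g * k_h * k_w)
        //     * col_matrix(i_c/g * k_h * k_w, o_h * o_w)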
        Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
        Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
        math::matmul<DeviceContext, T>(dev_ctx, filter_slice, false, col_matrix,
                                       false, T(1.0), &out_slice, T(0.0));
      }
    }
  }
};

template <typename DeviceContext, typename T>
class GemmConvGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));
    // The filter and filter_grad will be reshaped in the calculations,
    // so here we use an assignment operation
    // that avoids modifying the variable in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");

    if (!input_grad && !filter_grad) return;

    int groups = context.Attr<int>("groups");
    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    // output_shape_vec: {o_n, o_c, o_h, o_w} or {o_n, o_c, o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(
        framework::vectorize(output_grad->dims()));

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = input->dims()[1] / groups;
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
    }
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w)
    // or
    // (i_c/g * k_d * k_h * k_w, o_d * o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, data_dim + 1);

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output_grad->dims()[1],
        output_grad->numel() /
            (output_grad->dims()[0] * output_grad->dims()[1])};

    // convolution backward input operator:  gemm + col2im(or col2vol)
    // convolution backward weight operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output_grad->dims()[1]) / groups;
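    // Concretely, with Y = W * im2col(X) per group, the gradients below are
    //   dX = col2im(W^T * dY)                            (input gradient)
    //   dW += dY * im2col(X)^T, accumulated over batches (weight gradient)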

    bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations);
    Tensor col;
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    if (is_expand) {
      col.mutable_data<T>(col_shape, context.GetPlace());
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);
    }

    math::SetConstant<DeviceContext, T> set_zero;
    auto& dev_ctx = context.template device_context<DeviceContext>();

    if (input_grad) {
      input_grad->mutable_data<T>(context.GetPlace());

      // If is_expand is false, set_zero is unnecessary because
      // math::matmul (called with beta = 0) overwrites input_grad.
      if (is_expand) {
        set_zero(dev_ctx, input_grad, static_cast<T>(0));
      }
      math::Col2VolFunctor<DeviceContext, T> col2vol;
      math::Col2ImFunctor<math::ColFormat::kCFO, DeviceContext, T> col2im;

      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_grad_batch = input_grad->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // gemm
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);

          Tensor in_grad_slice =
              in_grad_batch.Slice(g * in_step, (g + 1) * in_step);

          if (!is_expand) {
            col_matrix.ShareDataWith(in_grad_slice);
            col_matrix.Resize(col_matrix_shape);
          }
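          // col_matrix = filter_slice^T * out_grad_slice:
          // (i_c/g * k_h * k_w, o_h * o_w) =
          //     (o_c/g, i_c/g * k_h * k_w)^T * (o_c/g, o_h * o_w)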
          math::matmul<DeviceContext, T>(dev_ctx, filter_slice, true,
                                         out_grad_slice, false, T(1.0),
                                         &col_matrix, T(0.0));

          if (is_expand && data_dim == 2U) {
            col2im(dev_ctx, col, dilations, strides,
                   std::vector<int>{paddings[0], paddings[1], paddings[0],
                                    paddings[1]},
                   &in_grad_slice);
          } else if (is_expand && data_dim == 3U) {
            col2vol(dev_ctx, col, dilations, strides, paddings, &in_grad_slice);
          }
        }
      }
    }

    if (filter_grad) {
      filter_grad->mutable_data<T>(context.GetPlace());
      Tensor filter_grad_ = *filter_grad;
      filter_grad_.Resize(filter_matrix_shape);
      set_zero(dev_ctx, filter_grad, static_cast<T>(0));
      math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col;
      math::Vol2ColFunctor<DeviceContext, T> vol2col;
      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // im2col
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

          if (!is_expand) {
            col.ShareDataWith(in_slice);
            col_matrix.ShareDataWith(col);
            col_matrix.Resize(col_matrix_shape);
          } else if (data_dim == 2U) {
            im2col(dev_ctx, in_slice, dilations, strides,
                   std::vector<int>{paddings[0], paddings[1], paddings[0],
                                    paddings[1]},
                   &col);
          } else if (data_dim == 3U) {
            vol2col(dev_ctx, in_slice, dilations, strides, paddings, &col);
          }

          // gemm
          Tensor filter_grad_slice =
              filter_grad_.Slice(g * out_step, (g + 1) * out_step);
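          // filter_grad_slice += out_grad_slice * col_matrix^T;
          // beta = 1 so contributions from each batch item accumulate.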
          math::matmul<DeviceContext, T>(dev_ctx, out_grad_slice, false,
                                         col_matrix, true, T(1.0),
                                         &filter_grad_slice, T(1.0));
        }
      }
    }
  }
};
}  // namespace operators
}  // namespace paddle