/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Base convolution operator definitions for other conv
// like operators to reuse the implementation.
inline int OutputSize(int input_size, int filter_size, int dilation,
                      int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  const int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
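  // e.g. input_size = 7, filter_size = 3, dilation = 2, padding = 0,
  // stride = 1: dkernel = 2 * (3 - 1) + 1 = 5, output_size = (7 - 5) / 1 + 1 = 3.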
  return output_size;
}
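
// The im2col/vol2col expansion can be skipped only for 1x1 (or 1x1x1) filters
// with stride 1, padding 0, and dilation 1; in that case the unrolled column
// buffer is identical to the input slice, so the kernels below just share the
// input's memory instead of copying it (see the !is_expand paths).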
inline bool IsExpand(std::vector<int64_t>& filter_dim,
                     std::vector<int>& strides, std::vector<int>& paddings,
                     std::vector<int>& dilations) {
  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
  for (size_t j = 0; j < strides.size(); ++j) {
    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
    strides_1 = strides_1 && (strides[j] == 1);
    padding_0 = padding_0 && (paddings[j] == 0);
    dilation_1 = dilation_1 && (dilations[j] == 1);
  }
  return !(filter_1 && strides_1 && padding_0 && dilation_1);
}

// Define Op classes in .h file so that other conv
// operator implementations can reuse the code.
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker);
};

class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker);
};

class ConvOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;
};

class ConvOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;
};

template <typename DeviceContext, typename T>
class GemmConvKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped in the calculations,
    // so here use an assignment operation,
    // that avoids modifying the variable in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");
    output->mutable_data<T>(context.GetPlace());

    int groups = context.Attr<int>("groups");
    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    // output_shape_vec: {o_n, o_c, o_h, o_w} or {o_n, o_c, o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = input->dims()[1] / groups;
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
    }
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d *
    // o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, data_dim + 1);

    bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations);
    Tensor col;
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    if (is_expand) {
      col.mutable_data<T>(col_shape, context.GetPlace());
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);
    }

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output->dims()[1],
        output->numel() / (output->dims()[0] * output->dims()[1])};

    // convolution operator: im2col(or vol2col) + gemm
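    // Per group, the gemm below computes out_slice (o_c/g, o_h * o_w) =
    // filter_slice (o_c/g, i_c/g * k_h * k_w) * col_matrix
    // (i_c/g * k_h * k_w, o_h * o_w); the 3-D case folds k_d and o_d in as well.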
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output->dims()[1]) / groups;

    math::Vol2ColFunctor<DeviceContext, T> vol2col;
    math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col;

    auto& dev_ctx = context.template device_context<DeviceContext>();
    for (int i = 0; i < batch_size; i++) {
      Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
      Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape);

      for (int g = 0; g < groups; g++) {
        Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

        if (!is_expand) {
          col.ShareDataWith(in_slice);
          col_matrix.ShareDataWith(col);
          col_matrix.Resize(col_matrix_shape);
        } else if (data_dim == 2U) {
          // im2col
          im2col(dev_ctx, in_slice, dilations, strides,
                 std::vector<int>{paddings[0], paddings[1], paddings[0],
                                  paddings[1]},
                 &col);
        } else if (data_dim == 3U) {
          // vol2col
          vol2col(dev_ctx, in_slice, dilations, strides, paddings, &col);
        }

        // gemm
        Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
        Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
        math::matmul<DeviceContext, T>(dev_ctx, filter_slice, false, col_matrix,
                                       false, T(1.0), &out_slice, T(0.0));
      }
    }
  }
};

template <typename DeviceContext, typename T>
class GemmConvGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));
    // The filter and filter_grad will be reshaped in the calculations,
    // so here use an assignment operation,
    // that avoids modifying the variable in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");

    if (!input_grad && !filter_grad) return;

    int groups = context.Attr<int>("groups");
    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    // output_shape_vec: {o_n, o_c, o_h, o_w} or {o_n, o_c, o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(
        framework::vectorize(output_grad->dims()));

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = input->dims()[1] / groups;
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
    }
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w)
    // or
    // (i_c/g * k_d * k_h * k_w, o_d * o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, data_dim + 1);

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output_grad->dims()[1],
        output_grad->numel() /
            (output_grad->dims()[0] * output_grad->dims()[1])};

    // convolution backward input operator:  gemm + col2im(or col2vol)
    // convolution backward weight operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output_grad->dims()[1]) / groups;
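    // Per group, backward input computes col_matrix = filter_slice^T *
    // out_grad_slice and folds it back with col2im/col2vol; backward weight
    // computes filter_grad_slice += out_grad_slice * col_matrix^T, summed
    // over the batch.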
    bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations);
    Tensor col;
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    if (is_expand) {
      col.mutable_data<T>(col_shape, context.GetPlace());
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);
    }

    math::SetConstant<DeviceContext, T> set_zero;
    auto& dev_ctx = context.template device_context<DeviceContext>();

    if (input_grad) {
      input_grad->mutable_data<T>(context.GetPlace());

      // if is_expand is false, the operation of set_zero is unnecessary,
      // because math::matmul will reset input_grad.
      if (is_expand) {
        set_zero(dev_ctx, input_grad, static_cast<T>(0));
      }
      math::Col2VolFunctor<DeviceContext, T> col2vol;
      math::Col2ImFunctor<math::ColFormat::kCFO, DeviceContext, T> col2im;

      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_grad_batch = input_grad->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // gemm
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);

          Tensor in_grad_slice =
              in_grad_batch.Slice(g * in_step, (g + 1) * in_step);

          if (!is_expand) {
            col_matrix.ShareDataWith(in_grad_slice);
            col_matrix.Resize(col_matrix_shape);
          }
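          // gemm: col_matrix = filter_slice^T * out_grad_slice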
          math::matmul<DeviceContext, T>(dev_ctx, filter_slice, true,
                                         out_grad_slice, false, T(1.0),
                                         &col_matrix, T(0.0));

          if (is_expand && data_dim == 2U) {
            col2im(dev_ctx, col, dilations, strides,
                   std::vector<int>{paddings[0], paddings[1], paddings[0],
                                    paddings[1]},
                   &in_grad_slice);
          } else if (is_expand && data_dim == 3U) {
            col2vol(dev_ctx, col, dilations, strides, paddings, &in_grad_slice);
          }
        }
      }
    }

    if (filter_grad) {
      filter_grad->mutable_data<T>(context.GetPlace());
      Tensor filter_grad_ = *filter_grad;
      filter_grad_.Resize(filter_matrix_shape);
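      // filter_grad accumulates over the batch (gemm beta is T(1.0) below),
      // so it must be zeroed once up front.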
      set_zero(dev_ctx, filter_grad, static_cast<T>(0));
      math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col;
      math::Vol2ColFunctor<DeviceContext, T> vol2col;
      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // im2col
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

          if (!is_expand) {
            col.ShareDataWith(in_slice);
            col_matrix.ShareDataWith(col);
            col_matrix.Resize(col_matrix_shape);
          } else if (data_dim == 2U) {
            im2col(dev_ctx, in_slice, dilations, strides,
                   std::vector<int>{paddings[0], paddings[1], paddings[0],
                                    paddings[1]},
                   &col);
          } else if (data_dim == 3U) {
            vol2col(dev_ctx, in_slice, dilations, strides, paddings, &col);
          }

          // gemm
          Tensor filter_grad_slice =
              filter_grad_.Slice(g * out_step, (g + 1) * out_step);
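          // beta = T(1.0): accumulate this batch's contribution into
          // filter_grad_slice.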
          math::matmul<DeviceContext, T>(dev_ctx, out_grad_slice, false,
                                         col_matrix, true, T(1.0),
                                         &filter_grad_slice, T(1.0));
        }
      }
    }
  }
};
}  // namespace operators
}  // namespace paddle