/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Base convolution operator definitions for other conv-like
// operators to reuse the implementation.
inline int OutputSize(int input_size, int filter_size, int padding,
                      int stride) {
  int output_size = (input_size - filter_size + 2 * padding) / stride + 1;
  return output_size;
}
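// A quick check of the formula with assumed values: input_size = 7,
// filter_size = 3, padding = 1, stride = 2 gives
// (7 - 3 + 2 * 1) / 2 + 1 = 4 output elements along that dimension.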

// Define Op classes in .h file so that other conv
// operator implementations can reuse the code.
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DOpMaker(framework::OpProto* proto,
                framework::OpAttrChecker* op_checker);
};

class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DOpMaker(framework::OpProto* proto,
                framework::OpAttrChecker* op_checker);
};

class ConvOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;
};

class ConvOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;
};

template <typename Place, typename T>
class GemmConvKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped in the calculations,
    // so we use an assignment operation here
    // to avoid modifying the variable in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");
    output->mutable_data<T>(context.GetPlace());

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    int groups = context.Attr<int>("groups");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    filter_shape_vec.erase(filter_shape_vec.begin(),
                           filter_shape_vec.begin() + 2);

    // output_shape_vec: {o_h, o_w} or {o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));
    output_shape_vec.erase(output_shape_vec.begin(),
                           output_shape_vec.begin() + 2);

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    std::vector<int64_t> col_shape_vec;
    col_shape_vec.push_back(input->dims()[1] / groups);
    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(),
                         filter_shape_vec.end());
    col_shape_vec.insert(col_shape_vec.end(), output_shape_vec.begin(),
                         output_shape_vec.end());
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d *
    // o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1);
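    // For example (assumed values): with input {N, 6, i_h, i_w}, groups = 2,
    // a 3x3 filter, and an 8x8 output, col_shape is {3, 3, 3, 8, 8} and
    // col_matrix_shape is {3 * 3 * 3, 8 * 8} = {27, 64}.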

    Tensor col;
    col.mutable_data<T>(col_shape, context.GetPlace());
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);
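    // e.g. (assumed values) a {4, 3, 3, 3} filter flattens to a {4, 27}
    // matrix, one row of weights per output channel.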

    framework::DDim output_matrix_shape = {
        output->dims()[1],
        output->numel() / (output->dims()[0] * output->dims()[1])};

    // convolution operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output->dims()[1]) / groups;
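    // e.g. (assumed values) i_c = 6, o_c = 4, groups = 2 gives in_step = 3
    // and out_step = 2: each group maps 3 input channels to 2 output
    // channels independently.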

    for (int i = 0; i < batch_size; i++) {
      Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
      Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape);
      for (int g = 0; g < groups; g++) {
        Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

        if (filter_shape_vec.size() == 2) {
          // im2col
          math::Im2ColFunctor<math::ColFormat::kCFO, Place, T> im2col;
          im2col(context.device_context(), in_slice, col, strides[0],
                 strides[1], paddings[0], paddings[0], paddings[1],
                 paddings[1]);
        } else if (filter_shape_vec.size() == 3) {
          // vol2col
          math::Vol2ColFunctor<Place, T> vol2col;
          vol2col(context.device_context(), in_slice, col, strides[0],
                  strides[1], strides[2], paddings[0], paddings[1],
                  paddings[2]);
        }
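        // The unfolding above (im2col for 2-D, vol2col for 3-D) lays in_slice
        // out so that each output position owns one column of col_matrix,
        // holding the i_c/g * k_h * k_w (or i_c/g * k_d * k_h * k_w) input
        // values under its filter window; the convolution then reduces to a
        // single gemm per group below.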

        // gemm
        Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
        Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
        math::matmul<Place, T>(context.device_context(), filter_slice, false,
                               col_matrix, false, T(1.0), &out_slice, T(0.0));
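        // Shapes in the gemm above (2-D case):
        //   filter_slice (o_c/g, i_c/g * k_h * k_w)
        //   * col_matrix (i_c/g * k_h * k_w, o_h * o_w)
        //   = out_slice  (o_c/g, o_h * o_w)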
      }
    }
  }
};

template <typename Place, typename T>
class GemmConvGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));
    // The filter and filter_grad will be reshaped in the calculations,
    // so we use an assignment operation here
    // to avoid modifying the variables in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");

    if (!input_grad && !filter_grad) return;

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    int groups = context.Attr<int>("groups");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    filter_shape_vec.erase(filter_shape_vec.begin(),
                           filter_shape_vec.begin() + 2);

    // output_shape_vec: {o_h, o_w} or {o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(
        framework::vectorize(output_grad->dims()));
    output_shape_vec.erase(output_shape_vec.begin(),
                           output_shape_vec.begin() + 2);

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    std::vector<int64_t> col_shape_vec;
    col_shape_vec.push_back(input->dims()[1] / groups);
    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(),
                         filter_shape_vec.end());
    col_shape_vec.insert(col_shape_vec.end(), output_shape_vec.begin(),
                         output_shape_vec.end());
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w)
    // or
    // (i_c/g * k_d * k_h * k_w, o_d * o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1);

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output_grad->dims()[1],
        output_grad->numel() /
            (output_grad->dims()[0] * output_grad->dims()[1])};

    // convolution backward input operator:  gemm + col2im(or col2vol)
    // convolution backward weight operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output_grad->dims()[1]) / groups;

    Tensor col;
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col.mutable_data<T>(col_shape, context.GetPlace());
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    math::SetConstant<Place, T> set_zero;

    if (input_grad) {
      input_grad->mutable_data<T>(context.GetPlace());
      set_zero(context.device_context(), input_grad, static_cast<T>(0));

      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_grad_batch = input_grad->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // gemm
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
          math::matmul<Place, T>(context.device_context(), filter_slice, true,
                                 out_grad_slice, false, T(1.0), &col_matrix,
                                 T(0.0));
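          // With the transpose flag set on filter_slice, the gemm above
          // computes
          //   filter_slice^T (i_c/g * k_h * k_w, o_c/g)
          //   * out_grad_slice (o_c/g, o_h * o_w)
          //   = col_matrix (i_c/g * k_h * k_w, o_h * o_w),
          // which col2im (or col2vol) below scatters back into in_grad_slice.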
          // col2im
          Tensor in_grad_slice =
              in_grad_batch.Slice(g * in_step, (g + 1) * in_step);

          if (filter_shape_vec.size() == 2) {
            math::Col2ImFunctor<math::ColFormat::kCFO, Place, T> col2im;
            col2im(context.device_context(), in_grad_slice, col, strides[0],
                   strides[1], paddings[0], paddings[0], paddings[1],
                   paddings[1]);

          } else if (filter_shape_vec.size() == 3) {
            math::Col2VolFunctor<Place, T> col2vol;
            col2vol(context.device_context(), in_grad_slice, col, strides[0],
                    strides[1], strides[2], paddings[0], paddings[1],
                    paddings[2]);
          }
        }
      }
    }

    if (filter_grad) {
      filter_grad->mutable_data<T>(context.GetPlace());
      Tensor filter_grad_ = *filter_grad;
      filter_grad_.Resize(filter_matrix_shape);
      set_zero(context.device_context(), filter_grad, static_cast<T>(0));

      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // im2col
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

          if (filter_shape_vec.size() == 2) {
            math::Im2ColFunctor<math::ColFormat::kCFO, Place, T> im2col;
            im2col(context.device_context(), in_slice, col, strides[0],
                   strides[1], paddings[0], paddings[0], paddings[1],
                   paddings[1]);
          } else if (filter_shape_vec.size() == 3) {
            math::Vol2ColFunctor<Place, T> vol2col;
            vol2col(context.device_context(), in_slice, col, strides[0],
                    strides[1], strides[2], paddings[0], paddings[1],
                    paddings[2]);
          }

          // gemm
          Tensor filter_grad_slice =
              filter_grad_.Slice(g * out_step, (g + 1) * out_step);
          math::matmul<Place, T>(context.device_context(), out_grad_slice,
                                 false, col_matrix, true, T(1.0),
                                 &filter_grad_slice, T(1.0));
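          // beta = T(1.0) in the call above accumulates
          // d_out * col^T into filter_grad_slice, summing every batch's
          // contribution across the loop over i.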
        }
      }
    }
  }
};
}  // namespace operators
}  // namespace paddle