conv_transpose_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using DDim = framework::DDim;

// Define Op classes in .h file so that other conv transpose
// operator implementations can reuse the code.
class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DTransposeOpMaker(framework::OpProto* proto,
                         framework::OpAttrChecker* op_checker);
};

class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DTransposeOpMaker(framework::OpProto* proto,
                         framework::OpAttrChecker* op_checker);
};
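
// Note: the maker constructors above are implemented in the .cc file. They
// are expected to declare the Input/Filter inputs, the Output output, and
// attributes such as "strides" and "paddings", which the kernels below read.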

class ConvTransposeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override;
};

class ConvTransposeOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override;
};

template <typename Place, typename T>
class GemmConvTransposeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped, so it should not be a constant pointer
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    // TODO(Zhuoyuan): Paddings can be added in the future.
    // groups will always be disabled in conv2d transpose.

    int dilation_d = 1;
    int dilation_h = 1;
    int dilation_w = 1;

    const int batch_size = static_cast<int>(input->dims()[0]);

    // input_shape_vec: {h, w} or {d, h, w}
    std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
    input_shape_vec.erase(input_shape_vec.begin(), input_shape_vec.begin() + 2);

    // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());
    filter_shape_vec.erase(filter_shape_vec.begin(),
                           filter_shape_vec.begin() + 2);

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
    std::vector<int64_t> col_shape_vec;
    col_shape_vec.push_back(output->dims()[1]);
    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(),
                         filter_shape_vec.end());
    col_shape_vec.insert(col_shape_vec.end(), input_shape_vec.begin(),
                         input_shape_vec.end());
    DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1);
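
    // A worked example with hypothetical sizes: for a 2-D transpose with
    // input (n, m, h, w) = (1, 4, 5, 5) and filter (m, c, k_h, k_w) =
    // (4, 8, 3, 3), col_shape is (8, 3, 3, 5, 5), and flattening its first
    // filter_shape_vec.size() + 1 = 3 dims gives col_matrix_shape =
    // (8 * 3 * 3, 5 * 5) = (72, 25).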

    Tensor col;
    col.mutable_data<T>(col_shape, context.GetPlace());
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
    DDim output_shape =
        framework::slice_ddim(output->dims(), 1, output->dims().size());
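
    // With zero padding and unit dilation (as hardcoded above), the output
    // spatial size computed by InferShape is expected to satisfy
    // o_h = (h - 1) * stride_h + k_h, and likewise for w (and d in 3-D).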

    // input matrix size: (m, h * w) or (m, d * h * w)
    DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

    // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w)
    DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
    filter.Resize(filter_matrix_shape);

    output->mutable_data<T>(context.GetPlace());
    math::SetConstant<Place, T> set_zero;
    set_zero(context.device_context(), output, static_cast<T>(0));

    // convolution transpose: gemm + col2im or col2vol (similar to conv-backward
    // on input)
    for (int i = 0; i < batch_size; i++) {
      // batch with size (m, h * w) or (m, d * h * w)
      Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);

      // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
      Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape);

      // col_matrix = filter * input_batch
      // of shape (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
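      // The `true` flag transposes the filter operand, so the product is
      // filter^T (c * k_h * k_w, m) * input_batch (m, h * w).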
      math::matmul<Place, T>(context.device_context(), filter, true,
                             input_batch, false, static_cast<T>(1.0),
                             &col_matrix, static_cast<T>(0.0));

      if (filter_shape_vec.size() == 2) {
        // col2im: col_matrix -> output_batch
        // from (c * k_h * k_w, h * w) to (c, o_h, o_w)
        math::Col2ImFunctor<math::ColFormat::kCFO, Place, T> col2im;

        col2im(context.device_context(), output_batch, col, dilation_h,
               dilation_w, strides[0], strides[1], 0, 0, 0, 0);
      } else if (filter_shape_vec.size() == 3) {
        // col2vol: col_matrix -> output_batch
        // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w)
        math::Col2VolFunctor<Place, T> col2vol;
        col2vol(context.device_context(), output_batch, col, dilation_d,
                dilation_h, dilation_w, strides[0], strides[1], strides[2], 0,
                0, 0);
      }
    }
  }
};
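
// A minimal registration sketch for the 2-D case, assuming the usual
// REGISTER_OP/REGISTER_OP_CPU_KERNEL macros; the real registration lives
// in conv_transpose_op.cc and may differ:
//
//   namespace ops = paddle::operators;
//   REGISTER_OP(conv2d_transpose, ops::ConvTransposeOp,
//               ops::Conv2DTransposeOpMaker, conv2d_transpose_grad,
//               ops::ConvTransposeOpGrad);
//   REGISTER_OP_CPU_KERNEL(
//       conv2d_transpose,
//       ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>);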

template <typename Place, typename T>
class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    // For the filter, we do not use a const pointer because we will reshape
    // it, but we should avoid modifying its value.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));

    if ((!input_grad) && (!filter_grad)) return;

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    // Note: paddings and groups are not supported in conv transpose.
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");

    int dilation_d = 1;
    int dilation_h = 1;
    int dilation_w = 1;

    const int batch_size = static_cast<int>(input->dims()[0]);

    // input_shape_vec: {h, w} or {d, h, w}
    std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
    input_shape_vec.erase(input_shape_vec.begin(), input_shape_vec.begin() + 2);

    // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());
    filter_shape_vec.erase(filter_shape_vec.begin(),
                           filter_shape_vec.begin() + 2);

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
    std::vector<int64_t> col_shape_vec;
    col_shape_vec.push_back(output_grad->dims()[1]);
    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(),
                         filter_shape_vec.end());
    col_shape_vec.insert(col_shape_vec.end(), input_shape_vec.begin(),
                         input_shape_vec.end());
    DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1);

    // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
    DDim output_shape = framework::slice_ddim(output_grad->dims(), 1,
                                              output_grad->dims().size());

    // input matrix size: (m, h * w) or (m, d * h * w)
    DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

    // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w)
    DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
    filter.Resize(filter_matrix_shape);

    // convolution transpose grad on input:
    // im2col + gemm (similar to conv-forward)
    // only compute the gradients that are actually requested
    if (input_grad || filter_grad) {
      Tensor col;
      col.mutable_data<T>(col_shape, context.GetPlace());
      // col_matrix shares the same piece of data with col,
      // but will be reshaped into a two-dimensional matrix shape
      // to call the matrix multiplication interface.
      Tensor col_matrix;
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);

      Tensor filter_grad_;
      math::SetConstant<Place, T> set_zero;

      if (input_grad) {
        input_grad->mutable_data<T>(context.GetPlace());
        set_zero(context.device_context(), input_grad, static_cast<T>(0));
      }
      if (filter_grad) {  // filter size (m, c, k_h, k_w)
        filter_grad->mutable_data<T>(context.GetPlace());
        set_zero(context.device_context(), filter_grad, static_cast<T>(0));
        filter_grad_ = *filter_grad;
        filter_grad_.Resize(filter_matrix_shape);
      }

      for (int i = 0; i < batch_size; i++) {
        // output_grad batch with size (c, o_h, o_w) or (c, o_d, o_h, o_w)
        Tensor output_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_shape);

        if (filter_shape_vec.size() == 2) {
          // im2col: dy -> col_matrix
          // from (c, o_h, o_w) to (c * k_h * k_w, h * w)
          math::Im2ColFunctor<math::ColFormat::kCFO, Place, T> im2col;
          im2col(context.device_context(), output_grad_batch, col, dilation_h,
                 dilation_w, strides[0], strides[1], paddings[0], paddings[0],
                 paddings[1], paddings[1]);
        } else if (filter_shape_vec.size() == 3) {
          // vol2col: dy -> col_matrix
          // from (c, o_d, o_h, o_w) to (c * k_d * k_h * k_w, d * h * w)
          math::Vol2ColFunctor<Place, T> vol2col;
          vol2col(context.device_context(), output_grad_batch, col, dilation_d,
                  dilation_h, dilation_w, strides[0], strides[1], strides[2],
                  paddings[0], paddings[1], paddings[2]);
        }

        if (input_grad) {
          // batch with size (m, h * w) or (m, d * h * w)
          Tensor input_grad_batch =
              input_grad->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: dx = filter * dy
          // (m, c * k_h * k_w) * (c * k_h * k_w, h * w) -> (m, h * w)
          // or
          // (m, c * k_d * k_h * k_w) * (c * k_d * k_h * k_w, d * h * w)
          // -> (m, d * h * w)
          math::matmul<Place, T>(context.device_context(), filter, false,
                                 col_matrix, false, static_cast<T>(1.0),
                                 &input_grad_batch, static_cast<T>(0.0));
        }
        if (filter_grad) {
          // input batch
          Tensor in_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: d_filter = x * dy^T
          // (m, h * w) * (h * w, c * k_h * k_w) -> (m, c * k_h * k_w)
          // or
          // (m, d * h * w) * (d * h * w, c * k_d * k_h * k_w)
          // -> (m, c * k_d * k_h * k_w)
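          // beta = 1.0 in the matmul below accumulates d_filter across the
          // batch loop instead of overwriting it each iteration.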
          math::matmul<Place, T>(context.device_context(), in_batch, false,
                                 col_matrix, true, static_cast<T>(1.0),
                                 &filter_grad_, static_cast<T>(1.0));
        }
      }
    }
  }
};
}  // namespace operators
}  // namespace paddle