/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using DDim = framework::DDim;

// Define Op classes in .h file so that other conv transpose
// operator implementations can reuse the code.
class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DTransposeOpMaker(framework::OpProto* proto,
                         framework::OpAttrChecker* op_checker);
};

class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DTransposeOpMaker(framework::OpProto* proto,
                         framework::OpAttrChecker* op_checker);
};

class ConvTransposeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;
};

class ConvTransposeOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;
};

template <typename Place, typename T>
class GemmConvTransposeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped, so it should not be a constant pointer.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    // TODO(Zhuoyuan): Paddings can be added in the future.
    // Groups will always be disabled in conv2d transpose.

    const int batch_size = static_cast<int>(input->dims()[0]);

    // input_shape_vec: {n, m, h, w} or {n, m, d, h, w}
    std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
    // filter_shape_vec: {k_o, k_c, k_h, k_w} or {k_o, k_c, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = output->dims()[1];
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2];
    }
    DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape = framework::flatten_to_2d(col_shape, data_dim + 1);
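    // A concrete 2-D trace with hypothetical sizes: input (n, m, h, w) =
    // (1, 2, 4, 4) and filter (m, c, k_h, k_w) = (2, 3, 3, 3) give
    // col_shape = {3, 3, 3, 4, 4} and col_matrix_shape = (3 * 3 * 3, 4 * 4)
    // = (27, 16).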

    Tensor col;
    col.mutable_data<T>(col_shape, context.GetPlace());
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
    DDim output_shape =
        framework::slice_ddim(output->dims(), 1, output->dims().size());

    // input matrix size: (m, h * w) or (m, d * h * w),
    // where m is the number of input channels
    DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

    // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w)
    DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
    filter.Resize(filter_matrix_shape);

    output->mutable_data<T>(context.GetPlace());
    math::SetConstant<Place, T> set_zero;
    set_zero(context.device_context(), output, static_cast<T>(0));

    math::Col2ImFunctor<math::ColFormat::kCFO, Place, T> col2im;
    math::Col2VolFunctor<Place, T> col2vol;
    std::vector<int> dilations({1, 1, 1});
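    // Dilation is not exposed as an attribute in this kernel; it is fixed
    // at 1 in every dimension (the 2-D path uses only the first two
    // entries).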

    // convolution transpose: gemm + col2im (or col2vol), similar to
    // conv backward on input
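    // Per sample: (1) a gemm scatters the input through the filter into the
    // column buffer; (2) col2im / col2vol folds overlapping columns into
    // the output, summing where patches overlap.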
    for (int i = 0; i < batch_size; i++) {
      // batch with size (m, h * w) or (m, d * h * w)
      Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);

      // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
      Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape);

      // col_matrix = filter^T * input_batch
      // of shape (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
      math::matmul<Place, T>(context.device_context(), filter, true,
                             input_batch, false, static_cast<T>(1.0),
                             &col_matrix, static_cast<T>(0.0));

      if (data_dim == 2U) {
        // col2im: col_matrix -> output_batch
        // from (c * k_h * k_w, h * w) to (c, o_h, o_w)
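        // where o_h = (h - 1) * stride_h - 2 * pad_h + k_h (o_w likewise,
        // given the unit dilations above)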
        col2im(context.device_context(), col,
               std::vector<int>{dilations[0], dilations[1]}, strides,
               std::vector<int>{paddings[0], paddings[1], paddings[0],
                                paddings[1]},
               &output_batch);
      } else if (data_dim == 3U) {
        // col2vol: col_matrix -> output_batch
        // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w)
        col2vol(context.device_context(), col, dilations, strides, paddings,
                &output_batch);
      }
    }
  }
};

template <typename Place, typename T>
class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    // For filter, we do not use a const pointer because we will reshape it,
    // but we should avoid modifying its value.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));

    // Nothing to compute if neither gradient is requested.
    if ((!input_grad) && (!filter_grad)) return;

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // input_shape_vec: {n, m, h, w} or {n, m, d, h, w}
    std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
    // filter_shape_vec: {k_o, k_c, k_h, k_w} or {k_o, k_c, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = output_grad->dims()[1];
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2];
    }
    DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape = framework::flatten_to_2d(col_shape, data_dim + 1);

    // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
    DDim output_shape = framework::slice_ddim(output_grad->dims(), 1,
                                              output_grad->dims().size());

    // input matrix size: (m, h * w) or (m, d * h * w)
    DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

    // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w)
    DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
    filter.Resize(filter_matrix_shape);

    // convolution transpose grad on input:
    // im2col + gemm (similar to conv forward),
    // computed only when some gradient is required.
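    // Per sample: (1) im2col / vol2col lowers the output gradient into the
    // column buffer; (2) a gemm with the filter yields the input gradient,
    // and a gemm of the input against the column buffer accumulates the
    // filter gradient.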
    if (input_grad || filter_grad) {
      Tensor col;
      col.mutable_data<T>(col_shape, context.GetPlace());
      // col_matrix shares the same piece of data with col,
      // but will be reshaped into a two-dimensional matrix shape
      // to call the matrix multiplication interface.
      Tensor col_matrix;
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);

      Tensor filter_grad_;
      math::SetConstant<Place, T> set_zero;

      math::Im2ColFunctor<math::ColFormat::kCFO, Place, T> im2col;
      math::Vol2ColFunctor<Place, T> vol2col;
      std::vector<int> dilations({1, 1, 1});

      if (input_grad) {
        input_grad->mutable_data<T>(context.GetPlace());
        set_zero(context.device_context(), input_grad, static_cast<T>(0));
      }
      if (filter_grad) {  // filter size (m, c, k_h, k_w)
        filter_grad->mutable_data<T>(context.GetPlace());
        set_zero(context.device_context(), filter_grad, static_cast<T>(0));
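        // filter_grad_ below is a matrix-shaped view that shares memory
        // with filter_grad, so accumulating into it updates filter_grad
        // in place.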
        filter_grad_ = *filter_grad;
        filter_grad_.Resize(filter_matrix_shape);
      }

      for (int i = 0; i < batch_size; i++) {
        // output_grad batch with size (c, o_h, o_w) or (c, o_d, o_h, o_w)
        Tensor output_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_shape);

        if (data_dim == 2U) {
          // im2col: dy -> col_matrix
          // from (c, o_h, o_w) to (c * k_h * k_w, h * w)
          im2col(context.device_context(), output_grad_batch,
                 std::vector<int>{dilations[0], dilations[1]}, strides,
                 std::vector<int>{paddings[0], paddings[1], paddings[0],
                                  paddings[1]},
                 &col);
        } else if (data_dim == 3U) {
          // vol2col: dy -> col_matrix
          // from (c, o_d, o_h, o_w) to (c * k_d * k_h * k_w, d * h * w)
          vol2col(context.device_context(), output_grad_batch, dilations,
                  strides, paddings, &col);
        }

        if (input_grad) {
          // batch with size (m, h * w) or (m, d * h * w)
          Tensor input_grad_batch =
              input_grad->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: dx = filter * dy
          // (m, c * k_h * k_w) * (c * k_h * k_w, h * w) -> (m, h * w)
          // or
          // (m, c * k_d * k_h * k_w) * (c * k_d * k_h * k_w, d * h * w)
          // -> (m, d * h * w)
          math::matmul<Place, T>(context.device_context(), filter, false,
                                 col_matrix, false, static_cast<T>(1.0),
                                 &input_grad_batch, static_cast<T>(0.0));
        }
        if (filter_grad) {
          // input batch
          Tensor in_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: d_filter = x * dy^T (dy in its im2col / vol2col form)
          // (m, h * w) * (h * w, c * k_h * k_w) -> (m, c * k_h * k_w)
          // or
          // (m, d * h * w) * (d * h * w, c * k_d * k_h * k_w)
          // -> (m, c * k_d * k_h * k_w)
          math::matmul<Place, T>(context.device_context(), in_batch, false,
                                 col_matrix, true, static_cast<T>(1.0),
                                 &filter_grad_, static_cast<T>(1.0));
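          // The final matmul argument is beta == 1.0, so each batch's
          // contribution accumulates into filter_grad_ (zero-initialized
          // before the loop).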
        }
      }
    }
  }
};
}  // namespace operators
}  // namespace paddle