/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using DDim = framework::DDim;

// Define Op classes in .h file so that other conv transpose
// operator implementations can reuse the code.
class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DTransposeOpMaker(framework::OpProto* proto,
                         framework::OpAttrChecker* op_checker);
};

class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DTransposeOpMaker(framework::OpProto* proto,
                         framework::OpAttrChecker* op_checker);
};

class ConvTransposeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;
};

class ConvTransposeOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;
};

template <typename DeviceContext, typename T>
class GemmConvTransposeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped, so it should not be a constant pointer
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    // groups will always be disabled in conv transpose.

    const int batch_size = static_cast<int>(input->dims()[0]);

    // input_shape_vec: {n, c, h, w} or {n, c, d, h, w}
    std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
    // filter_shape_vec: {k_o, k_c, k_h, k_w} or {k_o, k_c, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = output->dims()[1];
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2];
    }
    DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape = framework::flatten_to_2d(col_shape, data_dim + 1);
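    // Illustrative numbers (assumed for this comment only): input
    // {n, m, h, w} = {1, 3, 5, 5} with filter {m, c, k_h, k_w} = {3, 6, 3, 3}
    // gives col_shape = {6, 3, 3, 5, 5} and
    // col_matrix_shape = {6 * 3 * 3, 5 * 5} = {54, 25}.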

    Tensor col;
    col.mutable_data<T>(col_shape, context.GetPlace());
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);
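    // Resize only rewrites the shape metadata; the buffer is still the one
    // allocated for col above, so no data is copied.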

    // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
    DDim output_shape =
        framework::slice_ddim(output->dims(), 1, output->dims().size());
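    // The output spatial sizes themselves are fixed by
    // ConvTransposeOp::InferShape (implemented in the .cc file); for a
    // transposed convolution they follow the usual relation
    //   o_h = (h - 1) * stride_h - 2 * padding_h + k_h,
    // e.g. h = 5, stride = 1, padding = 0, k_h = 3 gives o_h = 7.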

    // input matrix size: (m, h * w) or (m, d * h * w)
    DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

    // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w)
    DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
    filter.Resize(filter_matrix_shape);
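    // With filter viewed as (m, c * k_h * k_w), the gemm in the loop below
    // computes filter^T * input_batch:
    //   (c * k_h * k_w, m) x (m, h * w) -> (c * k_h * k_w, h * w),
    // which is exactly col_matrix_shape.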

    output->mutable_data<T>(context.GetPlace());
    math::SetConstant<DeviceContext, T> set_zero;
    auto& dev_ctx = context.template device_context<DeviceContext>();
    set_zero(dev_ctx, output, static_cast<T>(0));

    math::Col2ImFunctor<math::ColFormat::kCFO, DeviceContext, T> col2im;
    math::Col2VolFunctor<DeviceContext, T> col2vol;
    std::vector<int> dilations({1, 1, 1});
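    // Dilation is not exposed as an attribute of this op, so it is fixed
    // to 1 in every spatial dimension here.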

    // convolution transpose: gemm + col2im or col2vol (similar to conv-backward
    // on input)
    for (int i = 0; i < batch_size; i++) {
      // batch with size (m, h * w) or (m, d * h * w)
      Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);

      // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
      Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape);

      // col_matrix = filter^T * input_batch
      // of shape (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
      math::matmul<DeviceContext, T>(dev_ctx, filter, true, input_batch, false,
                                     static_cast<T>(1.0), &col_matrix,
                                     static_cast<T>(0.0));

      if (data_dim == 2U) {
        // col2im: col_matrix -> dy
        // from (c * k_h * k_w, h * w) to (c, o_h, o_w)
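        // The two-element paddings attribute is expanded into the
        // symmetric four-element form that the 2-D functor expects.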
        col2im(dev_ctx, col, std::vector<int>{dilations[0], dilations[1]},
               strides, std::vector<int>{paddings[0], paddings[1], paddings[0],
                                         paddings[1]},
               &output_batch);
      } else if (data_dim == 3U) {
        // col2vol: col_matrix -> dy
        // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w)
        col2vol(dev_ctx, col, dilations, strides, paddings, &output_batch);
      }
    }
  }
};
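
// A minimal registration sketch, assuming the usual kernel-registration
// macro; the real registrations live in conv_transpose_op.cc /
// conv_transpose_op.cu and may list more data types:
//
//   REGISTER_OP_CPU_KERNEL(
//       conv2d_transpose,
//       ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext,
//                                    float>);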

template <typename DeviceContext, typename T>
class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    // For filter, we do not use a const pointer because we will reshape it,
    // but we should avoid modifying its value.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));

    if ((!input_grad) && (!filter_grad)) return;

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // input_shape_vec: {n, c, h, w} or {n, c, d, h, w}
    std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
    // filter_shape_vec: {k_o, k_c, k_h, k_w} or {k_o, k_c, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = output_grad->dims()[1];
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2];
    }
    DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape = framework::flatten_to_2d(col_shape, data_dim + 1);

    // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
    DDim output_shape = framework::slice_ddim(output_grad->dims(), 1,
                                              output_grad->dims().size());

    // input matrix size: (m, h * w) or (m, d * h * w)
    DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

    // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w)
    DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
    filter.Resize(filter_matrix_shape);

    // convolution transpose grad on input:
    // im2col + gemm (similar to conv-forward)
    // (run only when the input or the filter actually needs a gradient)
    auto& dev_ctx = context.template device_context<DeviceContext>();
    if (input_grad || filter_grad) {
      Tensor col;
      col.mutable_data<T>(col_shape, context.GetPlace());
      // col_matrix shares the same piece of data with col,
      // but will be reshaped into a two-dimensional matrix shape
      // to call the matrix multiplication interface.
      Tensor col_matrix;
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);

      Tensor filter_grad_;
      math::SetConstant<DeviceContext, T> set_zero;

      math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col;
      math::Vol2ColFunctor<DeviceContext, T> vol2col;
      std::vector<int> dilations({1, 1, 1});

      if (input_grad) {
        input_grad->mutable_data<T>(context.GetPlace());
      }
      if (filter_grad) {  // filter size (m, c, k_h, k_w)
        filter_grad->mutable_data<T>(context.GetPlace());
        set_zero(dev_ctx, filter_grad, static_cast<T>(0));
        filter_grad_ = *filter_grad;
        filter_grad_.Resize(filter_matrix_shape);
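        // filter_grad_ is a shallow copy: it aliases filter_grad's buffer
        // and only carries the 2-D matrix view used by the gemm below.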
      }

      for (int i = 0; i < batch_size; i++) {
        // batch with size (c, o_h, o_w) or (c, o_d, o_h, o_w)
        Tensor output_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_shape);

        if (data_dim == 2U) {
          // im2col: dy -> col_matrix
          // from (c, o_h, o_w) to (c * k_h * k_w, h * w)
          im2col(dev_ctx, output_grad_batch,
                 std::vector<int>{dilations[0], dilations[1]}, strides,
                 std::vector<int>{paddings[0], paddings[1], paddings[0],
                                  paddings[1]},
                 &col);
        } else if (data_dim == 3U) {
          // vol2col: dy -> col_matrix
          // from (c, o_d, o_h, o_w) to (c * k_d * k_h * k_w, d * h * w)
          vol2col(dev_ctx, output_grad_batch, dilations, strides, paddings,
                  &col);
        }

        if (input_grad) {
          // batch with size (m, h * w) or (m, d * h * w)
          Tensor input_grad_batch =
              input_grad->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: dx = filter * dy
          // (m, c * k_h * k_w) * (c * k_h * k_w, h * w) -> (m, h * w)
          // or
          // (m, c * k_d * k_h * k_w) * (c * k_d * k_h * k_w, d * h * w)
          // -> (m, d * h * w)
          math::matmul<DeviceContext, T>(
              dev_ctx, filter, false, col_matrix, false, static_cast<T>(1.0),
              &input_grad_batch, static_cast<T>(0.0));
        }
        if (filter_grad) {
          // input batch
          Tensor in_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: d_filter = x * dy^T
          // (m, h * w) * (h * w, c * k_h * k_w) -> (m, c * k_h * k_w)
          // or
          // (m, d * h * w) * (d * h * w, c * k_d * k_h * k_w) -> (m, c * k_d *
          // k_h * k_w)
          math::matmul<DeviceContext, T>(dev_ctx, in_batch, false, col_matrix,
                                         true, static_cast<T>(1.0),
                                         &filter_grad_, static_cast<T>(1.0));
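          // beta == 1.0 in the call above accumulates each batch's
          // contribution into filter_grad_, which is why filter_grad was
          // zero-initialized before the loop.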
        }
      }
    }
  }
};
}  // namespace operators
}  // namespace paddle