/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using DDim = framework::DDim;

// Define Op classes in .h file so that other conv transpose
// operator implementations can reuse the code.
class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DTransposeOpMaker(framework::OpProto* proto,
                         framework::OpAttrChecker* op_checker);
};

class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DTransposeOpMaker(framework::OpProto* proto,
                         framework::OpAttrChecker* op_checker);
};

class ConvTransposeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;
};
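
// Note: the InferShape implementations live in the corresponding .cc file.
// For a transposed convolution without padding (paddings are currently
// disabled, see the kernels below), the output spatial size follows the
// usual relation, e.g. for height: o_h = (h - 1) * stride_h + k_h.
// A worked example with hypothetical sizes: h = 4, stride_h = 2, k_h = 3
// gives o_h = (4 - 1) * 2 + 3 = 9.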

class ConvTransposeOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;
};

template <typename Place, typename T>
class GemmConvTransposeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped, so it should not be a const pointer.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    // Currently, paddings and groups are not supported in conv transpose.
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    // TODO(Zhuoyuan): Paddings can be added in the future.
    // Groups are always disabled in conv2d_transpose.

    const int batch_size = static_cast<int>(input->dims()[0]);

    // input_shape_vec: {h, w} or {d, h, w}
    std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
    input_shape_vec.erase(input_shape_vec.begin(), input_shape_vec.begin() + 2);

    // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());
    filter_shape_vec.erase(filter_shape_vec.begin(),
                           filter_shape_vec.begin() + 2);

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
    std::vector<int64_t> col_shape_vec;
    col_shape_vec.push_back(output->dims()[1]);
    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(),
                         filter_shape_vec.end());
    col_shape_vec.insert(col_shape_vec.end(), input_shape_vec.begin(),
                         input_shape_vec.end());
    DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1);
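
    // A worked example with hypothetical sizes: input (n, m, h, w) =
    // (2, 3, 4, 4) and filter (m, c, k_h, k_w) = (3, 6, 3, 3) give
    // col_shape = {6, 3, 3, 4, 4} and
    // col_matrix_shape = (6 * 3 * 3, 4 * 4) = (54, 16).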

    Tensor col;
    col.mutable_data<T>(col_shape, context.GetPlace());
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
    DDim output_shape =
        framework::slice_ddim(output->dims(), 1, output->dims().size());

    // input matrix size: (m, h * w) or (m, d * h * w)
    DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

    // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w)
    DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
    filter.Resize(filter_matrix_shape);

    output->mutable_data<T>(context.GetPlace());
    math::SetConstant<Place, T> set_zero;
    set_zero(context.device_context(), output, static_cast<T>(0));
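    // Note: col2im/col2vol below accumulate overlapping patches into the
    // output, so the output must be zero-initialized first (set_zero above).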

    math::Col2ImFunctor<math::ColFormat::kCFO, Place, T> col2im;
    math::Col2VolFunctor<Place, T> col2vol;
    std::vector<int> dilations({1, 1, 1});

    // convolution transpose: gemm + col2im or col2vol (similar to conv-backward
    // on input)
    for (int i = 0; i < batch_size; i++) {
      // batch with size (m, h * w) or (m, d * h * w)
      Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);

      // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
      Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape);

      // col_matrix = filter^T * input_batch
      // of shape (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
      math::matmul<Place, T>(context.device_context(), filter, true,
                             input_batch, false, static_cast<T>(1.0),
                             &col_matrix, static_cast<T>(0.0));
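
      // Continuing the hypothetical example above: filter^T (54, 3) times
      // input_batch (3, 16) gives col_matrix (54, 16); with stride 1,
      // col2im scatters it into an output_batch of shape (6, 6, 6).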

      if (filter_shape_vec.size() == 2) {
        // col2im: col_matrix -> output_batch
        // from (c * k_h * k_w, h * w) to (c, o_h, o_w)
        col2im(context.device_context(), col,
               std::vector<int>{dilations[0], dilations[1]}, strides,
               std::vector<int>{paddings[0], paddings[1], paddings[0],
                                paddings[1]},
               &output_batch);
      } else if (filter_shape_vec.size() == 3) {
        // col2vol: col_matrix -> output_batch
        // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w)
        col2vol(context.device_context(), col, dilations, strides,
                std::vector<int>{0, 0, 0}, &output_batch);
      }
    }
  }
};

template <typename Place, typename T>
class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    // For the filter, we do not use a const pointer because we will
    // reshape it, but we should avoid modifying its value.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));

    if ((!input_grad) && (!filter_grad)) return;

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    // Currently, paddings and groups are not supported in conv transpose.
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // input_shape_vec: {h, w} or {d, h, w}
    std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
    input_shape_vec.erase(input_shape_vec.begin(), input_shape_vec.begin() + 2);

    // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());
    filter_shape_vec.erase(filter_shape_vec.begin(),
                           filter_shape_vec.begin() + 2);

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
    std::vector<int64_t> col_shape_vec;
    col_shape_vec.push_back(output_grad->dims()[1]);
    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(),
                         filter_shape_vec.end());
    col_shape_vec.insert(col_shape_vec.end(), input_shape_vec.begin(),
                         input_shape_vec.end());
    DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1);

    // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
    DDim output_shape = framework::slice_ddim(output_grad->dims(), 1,
                                              output_grad->dims().size());

    // input matrix size: (m, h * w) or (m, d * h * w)
    DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

    // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w)
    DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
    filter.Resize(filter_matrix_shape);

    // convolution transpose grad on input:
    // im2col + gemm (similar to conv-forward)
    // only compute the gradients that are actually required
    if (input_grad || filter_grad) {
      Tensor col;
      col.mutable_data<T>(col_shape, context.GetPlace());
      // col_matrix shares the same piece of data with col,
      // but will be reshaped into a two-dimensional matrix shape
      // to call the matrix multiplication interface.
      Tensor col_matrix;
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);

      Tensor filter_grad_;
      math::SetConstant<Place, T> set_zero;

      math::Im2ColFunctor<math::ColFormat::kCFO, Place, T> im2col;
      math::Vol2ColFunctor<Place, T> vol2col;
      std::vector<int> dilations({1, 1, 1});

      if (input_grad) {
        input_grad->mutable_data<T>(context.GetPlace());
        set_zero(context.device_context(), input_grad, static_cast<T>(0));
      }
      if (filter_grad) {  // filter size (m, c, k_h, k_w)
        filter_grad->mutable_data<T>(context.GetPlace());
        set_zero(context.device_context(), filter_grad, static_cast<T>(0));
        filter_grad_ = *filter_grad;
        filter_grad_.Resize(filter_matrix_shape);
      }

      for (int i = 0; i < batch_size; i++) {
        // batch with size (c, o_h, o_w) or (c, o_d, o_h, o_w)
        Tensor output_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_shape);

        if (filter_shape_vec.size() == 2) {
          // im2col: dy -> col_matrix
          // from (c, o_h, o_w) to (c * k_h * k_w, h * w)
          im2col(context.device_context(), output_grad_batch,
                 std::vector<int>{dilations[0], dilations[1]}, strides,
                 std::vector<int>{paddings[0], paddings[1], paddings[0],
                                  paddings[1]},
                 &col);
        } else if (filter_shape_vec.size() == 3) {
          // vol2col: dy -> col_matrix
          // from (c, o_d, o_h, o_w) to (c * k_d * k_h * k_w, d * h * w)
          vol2col(context.device_context(), output_grad_batch, dilations,
                  strides, paddings, &col);
        }
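        // col/col_matrix now hold the unfolded output-gradient patches;
        // both the input and filter gradients below reuse this result.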

        if (input_grad) {
          // batch with size (m, h * w) or (m, d * h * w)
          Tensor input_grad_batch =
              input_grad->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: dx = filter * dy
          // (m, c * k_h * k_w) * (c * k_h * k_w, h * w) -> (m, h * w)
          // or
          // (m, c * k_d * k_h * k_w) * (c * k_d * k_h * k_w, d * h * w)
          // -> (m, d * h * w)
          math::matmul<Place, T>(context.device_context(), filter, false,
                                 col_matrix, false, static_cast<T>(1.0),
                                 &input_grad_batch, static_cast<T>(0.0));
        }
        if (filter_grad) {
          // input batch
          Tensor in_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: d_filter = x * dy^T
          // (m, h * w) * (h * w, c * k_h * k_w) -> (m, c * k_h * k_w)
          // or
          // (m, d * h * w) * (d * h * w, c * k_d * k_h * k_w)
          // -> (m, c * k_d * k_h * k_w)
          math::matmul<Place, T>(context.device_context(), in_batch, false,
                                 col_matrix, true, static_cast<T>(1.0),
                                 &filter_grad_, static_cast<T>(1.0));
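          // Note: beta = 1.0 in this matmul accumulates d_filter across the
          // batch loop; the input-gradient gemm above uses beta = 0.0 since
          // each input_grad_batch slice is written exactly once.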
        }
      }
    }
  }
};
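
// A sketch of how these kernels might be registered in the accompanying
// conv_transpose_op.cc; the operator names and the float data type are
// assumptions for illustration, not part of this header:
//
//   namespace ops = paddle::operators;
//   REGISTER_OP_CPU_KERNEL(
//       conv2d_transpose,
//       ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>);
//   REGISTER_OP_CPU_KERNEL(
//       conv2d_transpose_grad,
//       ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, float>);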
}  // namespace operators
}  // namespace paddle