/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "Function.h"
#include "Im2Col.h"

namespace paddle {

/*
 * \brief Converts image data of four dimensions (NCHW) into sequence data
 *        of three dimensions (NST) in the forward calculation, and reverses
 *        the conversion in the backward calculation.
 *        Here N is the batch size, S is the length of the sequence after
 *        each image is expanded, and T is the size of each time step in the
 *        sequence.
 *
 * Arguments in the forward function:
 * \param inputs[0]  Image data in NCHW format.
 * \param outputs[0] Sequence data in NST format.
 *
 * Arguments in the backward function:
 * \param inputs[0]  Sequence data in NST format.
 * \param outputs[0] Image data in NCHW format.
 */
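
// A worked example of the shape mapping (the concrete values below are
// chosen only for illustration, not taken from the original source): with
// blocks = {3, 3}, strides = {1, 1}, paddings = {0, 0} and an input image
// of shape [N, C, H, W] = [2, 16, 32, 32],
//   outputHeight = 1 + (32 + 2 * 0 - 3 + 1 - 1) / 1 = 30
//   outputWidth  = 1 + (32 + 2 * 0 - 3 + 1 - 1) / 1 = 30
// so the forward output sequence has shape
//   [N, S, T] = [2, 30 * 30, 16 * 3 * 3] = [2, 900, 144].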
class BlockExpandFunction : public FunctionBase {
 public:
  void init(const FuncConfig& config) override {
    // function arguments
    strides_ = config.get<std::vector<size_t>>("strides");
    paddings_ = config.get<std::vector<size_t>>("paddings");
    blocks_ = config.get<std::vector<size_t>>("blocks");

    // number of inputs and outputs
    numInputs_ = 1;
    numOutputs_ = 1;
  }
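
  // A minimal configuration sketch (hedged illustration only; the key names
  // match the get<>() calls above, while the chained set<>() calls assume
  // the FuncConfig interface declared in Function.h):
  //
  //   FuncConfig config;
  //   config.set<std::vector<size_t>>("strides", {2, 2})
  //       .set<std::vector<size_t>>("paddings", {0, 0})
  //       .set<std::vector<size_t>>("blocks", {3, 3});
  //   init(config);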

  void checkShape(const TensorShape& image, const TensorShape& sequence) const {
    // image shape should be 4-dimensional.
    CHECK_EQ(image.ndims(), (size_t)4);
    // sequence shape should be 3-dimensional.
    CHECK_EQ(sequence.ndims(), (size_t)3);
    // The batchSize of the image needs to be equal to
    // the batchSize of the sequence.
    CHECK_EQ(image[0], sequence[0]);
  }

  // Calculate the shape of colData based on the shape of the image
  // and the shape of the sequence.
  TensorShape getColShape(const TensorShape& image,
                          const TensorShape& sequence) const {
    size_t inputChannels = image[1];
    size_t inputHeight = image[2];
    size_t inputWidth = image[3];
    size_t seqLength = sequence[1];
    size_t stepSize = sequence[2];
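    // The "+ strideH() - 1" / "+ strideW() - 1" terms below make the integer
    // division round up (ceiling division).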
    size_t outputHeight =
        1 +
        (inputHeight + 2 * paddingH() - blockH() + strideH() - 1) / strideH();
    size_t outputWidth =
        1 +
        (inputWidth + 2 * paddingW() - blockW() + strideW() - 1) / strideW();
    CHECK_EQ(seqLength, outputHeight * outputWidth);
    CHECK_EQ(stepSize, inputChannels * blockH() * blockW());

    // [outputHeight, outputWidth, inputChannels, filterHeight, filterWidth]
    return TensorShape({outputHeight,
                        outputWidth,
                        inputChannels,
                        (size_t)blockH(),
                        (size_t)blockW()});
  }

 protected:
  std::vector<size_t> strides_;
  std::vector<size_t> paddings_;
  std::vector<size_t> blocks_;

  inline int strideH() const { return strides_[0]; }

  inline int strideW() const { return strides_[1]; }

  inline int paddingH() const { return paddings_[0]; }

  inline int paddingW() const { return paddings_[1]; }

  inline int blockH() const { return blocks_[0]; }

  inline int blockW() const { return blocks_[1]; }
};

template <DeviceType Device>
class BlockExpandForward : public BlockExpandFunction {
 public:
  void init(const FuncConfig& config) override {
    BlockExpandFunction::init(config);
  }

  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
    const TensorShape& image = inputs[0].shape();
    const TensorShape& sequence = outputs[0].shape();
    checkShape(image, sequence);
  }

  void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
    CHECK_EQ(numInputs_, inputs.size());
    CHECK_EQ(numOutputs_, outputs.size());
    check(inputs, outputs);
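    // im2col writes every element of the output sequence, so the output
    // buffer is fully overwritten; only the ASSIGN_TO argument type is
    // supported here.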
    CHECK_EQ(outputs[0].getArgType(), ASSIGN_TO);
    const TensorShape& image = inputs[0].shape();
    const TensorShape& sequence = outputs[0].shape();

    TensorShape imShape = TensorShape({image[1], image[2], image[3]});
    TensorShape colShape = getColShape(image, sequence);
    size_t batchSize = image[0];

    real* imageData = inputs[0].data<real>();
    real* seqData = outputs[0].data<real>();
    Im2ColFunctor<kOCF, Device, real> im2col;
    for (size_t i = 0; i < batchSize; i++) {
      // The result of im2col is [outputHeight, outputWidth,
      // inputChannels, filterHeight, filterWidth], and it is easy to
      // reshape it into [seqLength, stepSize], where seqLength is equal
      // to outputHeight * outputWidth and stepSize is equal to
      // inputChannels * filterHeight * filterWidth.
      im2col(imageData,
             imShape,
             seqData,
             colShape,
             strideH(),
             strideW(),
             paddingH(),
             paddingW());
      imageData += imShape.getElements();
      seqData += colShape.getElements();
    }
  }
};

template <DeviceType Device>
class BlockExpandBackward : public BlockExpandFunction {
 public:
  void init(const FuncConfig& config) override {
    BlockExpandFunction::init(config);
  }

  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
    const TensorShape& image = outputs[0].shape();
    const TensorShape& sequence = inputs[0].shape();
    checkShape(image, sequence);
  }

  void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
    CHECK_EQ(numInputs_, inputs.size());
    CHECK_EQ(numOutputs_, outputs.size());
    check(inputs, outputs);
    // Since Col2ImFunctor accumulates into the image buffer (ADD_TO
    // semantics), this function only supports the ADD_TO argument type.
    CHECK_EQ(outputs[0].getArgType(), ADD_TO);
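    // With ADD_TO, the caller owns the initial contents of the image-gradient
    // buffer; col2im accumulates the block gradients on top of it.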
    const TensorShape& image = outputs[0].shape();
    const TensorShape& sequence = inputs[0].shape();

    TensorShape imShape = TensorShape({image[1], image[2], image[3]});
    TensorShape colShape = getColShape(image, sequence);
    size_t batchSize = image[0];

    real* imageData = outputs[0].data<real>();
    real* seqData = inputs[0].data<real>();
    Col2ImFunctor<kOCF, Device, real> col2im;
    for (size_t i = 0; i < batchSize; i++) {
      col2im(imageData,
             imShape,
             seqData,
             colShape,
             strideH(),
             strideW(),
             paddingH(),
             paddingW());
      imageData += imShape.getElements();
      seqData += colShape.getElements();
    }
  }
};

REGISTER_TYPED_FUNC(BlockExpand, CPU, BlockExpandForward);
REGISTER_TYPED_FUNC(BlockExpandGrad, CPU, BlockExpandBackward);
#ifdef PADDLE_WITH_CUDA
REGISTER_TYPED_FUNC(BlockExpand, GPU, BlockExpandForward);
REGISTER_TYPED_FUNC(BlockExpandGrad, GPU, BlockExpandBackward);
#endif
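
// A hedged usage sketch (the "TypeName-DeviceName" lookup key and the
// registrar interface are assumptions about how REGISTER_TYPED_FUNC and
// FunctionBase::funcRegistrar_ fit together, not taken from this file):
//
//   auto* func = FunctionBase::funcRegistrar_.createByType("BlockExpand-CPU");
//   func->init(config);           // config built as sketched in init() above
//   func->calc(inputs, outputs);  // inputs/outputs are BufferArgs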

}  // namespace paddle