/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "Function.h"
#include "Im2Col.h"

namespace paddle {

/*
 * imShape = [inputChannels, inputHeight, inputWidth]
 * colShape =
 *   [outputHeight, outputWidth, inputChannels, filterHeight, filterWidth]
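 *
 * Each output position (outputH, outputW) owns one contiguous slice of
 * colData that holds the inputChannels * filterHeight * filterWidth values
 * of its receptive field; positions that fall into the padding are zero.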
 */
template <class T>
class Im2ColFunctor<kOCF, DEVICE_TYPE_CPU, T> {
public:
  void operator()(const T* imData,
                  const TensorShape& imShape,
                  T* colData,
                  const TensorShape& colShape,
                  int strideHeight,
                  int strideWidth,
                  int paddingHeight,
                  int paddingWidth) {
    int inputChannels = imShape[0];
    int inputHeight = imShape[1];
    int inputWidth = imShape[2];
    int filterHeight = colShape[3];
    int filterWidth = colShape[4];
    int outputHeight = colShape[0];
    int outputWidth = colShape[1];
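    // For every output position, copy the corresponding filter window of the
    // input image into colData; pixels that fall outside the image (i.e. in
    // the padding area) are written as zeros.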
    for (int outputH = 0; outputH < outputHeight; ++outputH) {
      for (int outputW = 0; outputW < outputWidth; ++outputW) {
        for (int channel = 0; channel < inputChannels; ++channel) {
          for (int filterH = 0; filterH < filterHeight; ++filterH) {
            for (int filterW = 0; filterW < filterWidth; ++filterW) {
              int imRowOffset =
                  outputH * strideHeight + filterH - paddingHeight;
              int imColOffset = outputW * strideWidth + filterW - paddingWidth;
              int colDataOffset =
                  (((outputH * outputWidth + outputW) * inputChannels +
                    channel) *
                       filterHeight +
                   filterH) *
                      filterWidth +
                  filterW;
              if (imRowOffset < 0 || imRowOffset >= inputHeight ||
                  imColOffset < 0 || imColOffset >= inputWidth) {
                colData[colDataOffset] = T(0);
              } else {
                int imDataOffset =
                    (channel * inputHeight + imRowOffset) * inputWidth +
                    imColOffset;
                colData[colDataOffset] = imData[imDataOffset];
              }
            }
          }
        }
      }
    }
  }
};

/*
 * imShape = [inputChannels, inputHeight, inputWidth]
 * colShape =
 *   [outputHeight, outputWidth, inputChannels, filterHeight, filterWidth]
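 *
 * This is the reverse of Im2ColFunctor: the values in colData are
 * accumulated (added) back into imData, and padding positions are skipped.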
 */
template <class T>
class Col2ImFunctor<kOCF, DEVICE_TYPE_CPU, T> {
public:
  void operator()(T* imData,
                  const TensorShape& imShape,
                  const T* colData,
                  const TensorShape& colShape,
                  int strideHeight,
                  int strideWidth,
                  int paddingHeight,
                  int paddingWidth) {
    int inputChannels = imShape[0];
    int inputHeight = imShape[1];
    int inputWidth = imShape[2];
    int filterHeight = colShape[3];
    int filterWidth = colShape[4];
    int outputHeight = colShape[0];
    int outputWidth = colShape[1];
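    // Mirror of the im2col loop above: each colData element is added back to
    // the image position it was taken from; padding positions contribute
    // nothing and are simply skipped.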
    for (int outputH = 0; outputH < outputHeight; ++outputH) {
      for (int outputW = 0; outputW < outputWidth; ++outputW) {
        for (int channel = 0; channel < inputChannels; ++channel) {
          for (int filterH = 0; filterH < filterHeight; ++filterH) {
            for (int filterW = 0; filterW < filterWidth; ++filterW) {
              int imRowOffset =
                  outputH * strideHeight + filterH - paddingHeight;
              int imColOffset = outputW * strideWidth + filterW - paddingWidth;
              int colDataOffset =
                  (((outputH * outputWidth + outputW) * inputChannels +
                    channel) *
                       filterHeight +
                   filterH) *
                      filterWidth +
                  filterW;
              if (imRowOffset >= 0 && imRowOffset < inputHeight &&
                  imColOffset >= 0 && imColOffset < inputWidth) {
                int imDataOffset =
                    (channel * inputHeight + imRowOffset) * inputWidth +
                    imColOffset;
                imData[imDataOffset] += colData[colDataOffset];
              }
            }
          }
        }
      }
    }
  }
};

/*
 * \brief Converts the image data of four dimensions (NCHW) into
 *        sequence data of three dimensions (NST) in the forward calculation,
 *        which is reversed in the backward calculation.
 *        Here N is the batch size, S is the length of the sequence after each
 *        image is expanded, and T is the size of each sequence time step.
 *
 * Arguments in forward function:
 * \param inputs[0]  Image data of NCHW format.
 * \param outputs[0] Sequence data of NST format.
 *
 * Arguments in backward function:
 * \param inputs[0]  Sequence data of NST format.
 * \param outputs[0] Image data of NCHW format.
 */
class ImageExpandFunction : public FunctionBase {
public:
  void init(const FuncConfig& config) override {
    // function arguments
    strides_ = config.get<std::vector<size_t>>("strides");
    paddings_ = config.get<std::vector<size_t>>("paddings");
    blocks_ = config.get<std::vector<size_t>>("blocks");
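    // blocks_ holds the height/width of the block expanded at each output
    // position; it plays the role of the filter size in im2col/col2im.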

    // number of inputs and outputs
    numInputs_ = 1;
    numOutputs_ = 1;
  }

  virtual void calc(const BufferArgs& inputs, const BufferArgs& outputs) {}

  void check(const TensorShape& image, const TensorShape& sequence) const {
    // image shape should be 4-dimensional.
    CHECK_EQ(image.ndims(), (size_t)4);
    // sequence shape should be 3-dimensional.
    CHECK_EQ(sequence.ndims(), (size_t)3);
    // The batchSize of the image needs to be equal to
    // the batchSize of the sequence.
    CHECK_EQ(image[0], sequence[0]);
  }

  // Calculate the shape of colData based on the shape of the image
  // and the shape of the sequence.
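  // For example, a 3 x 5 x 5 image (CHW) with 3 x 3 blocks, stride 1 and
  // padding 0 gives outputHeight = outputWidth = 3, so the matching sequence
  // shape is [batchSize, 9, 27] (seqLength = 9, stepSize = 3 * 3 * 3 = 27).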
  TensorShape getColShape(const TensorShape& image,
                          const TensorShape& sequence) const {
    size_t inputChannels = image[1];
    size_t inputHeight = image[2];
    size_t inputWidth = image[3];
    size_t seqLength = sequence[1];
    size_t stepSize = sequence[2];
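    // The "+ strideH() - 1" / "+ strideW() - 1" terms make the integer
    // division round up: outputHeight = 1 + ceil((inputHeight +
    // 2 * paddingH() - blockH()) / strideH()), and likewise for the width.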
    size_t outputHeight =
        1 +
        (inputHeight + 2 * paddingH() - blockH() + strideH() - 1) / strideH();
    size_t outputWidth =
        1 +
        (inputWidth + 2 * paddingW() - blockW() + strideW() - 1) / strideW();
    CHECK_EQ(seqLength, outputHeight * outputWidth);
    CHECK_EQ(stepSize, inputChannels * blockH() * blockW());

    // [outputHeight, outputWidth, inputChannels, filterHeight, filterWidth]
    return TensorShape({outputHeight,
                        outputWidth,
                        inputChannels,
                        (size_t)blockH(),
                        (size_t)blockW()});
  }

protected:
  std::vector<size_t> strides_;
  std::vector<size_t> paddings_;
  std::vector<size_t> blocks_;

  inline int strideH() const { return strides_[0]; }

  inline int strideW() const { return strides_[1]; }

  inline int paddingH() const { return paddings_[0]; }

  inline int paddingW() const { return paddings_[1]; }

  inline int blockH() const { return blocks_[0]; }

  inline int blockW() const { return blocks_[1]; }
};

template <DeviceType Device>
class ImageExpandForward : public ImageExpandFunction {
public:
  void init(const FuncConfig& config) override {
    ImageExpandFunction::init(config);
  }

  void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
    CHECK_EQ(numInputs_, inputs.size());
    CHECK_EQ(numOutputs_, outputs.size());
    const TensorShape& image = inputs[0].shape();
    const TensorShape& sequence = outputs[0].shape();
    check(image, sequence);

    TensorShape imShape = TensorShape({image[1], image[2], image[3]});
    TensorShape colShape = getColShape(image, sequence);
    size_t batchSize = image[0];

    real* imageData = inputs[0].data<real>();
    real* seqData = outputs[0].data<real>();
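    // Expand each sample of the batch independently; the per-sample results
    // are written one after another into the output sequence buffer.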
    Im2ColFunctor<kOCF, Device, real> im2col;
    for (size_t i = 0; i < batchSize; i++) {
      // The result of im2col is [outputHeight, outputWidth,
      // inputChannels, filterHeight, filterWidth], and it is easy to
      // reshape into [seqLength, stepSize], where seqLength equals
      // outputHeight * outputWidth and stepSize equals
      // inputChannels * filterHeight * filterWidth.
      im2col(imageData,
             imShape,
             seqData,
             colShape,
             strideH(),
             strideW(),
             paddingH(),
             paddingW());
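      // Advance the raw pointers to the next sample in the batch.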
      imageData += imShape.getElements();
      seqData += colShape.getElements();
    }
  }
};

template <DeviceType Device>
class ImageExpandBackward : public ImageExpandFunction {
public:
  void init(const FuncConfig& config) override {
    ImageExpandFunction::init(config);
  }

  void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
    CHECK_EQ(numInputs_, inputs.size());
    CHECK_EQ(numOutputs_, outputs.size());
    // Since Col2ImFunctor accumulates into its output (ADD_TO semantics),
    // this function only supports the ADD_TO argument type.
    CHECK_EQ(outputs[0].getArgType(), ADD_TO);
    const TensorShape& image = outputs[0].shape();
    const TensorShape& sequence = inputs[0].shape();
    check(image, sequence);

    TensorShape imShape = TensorShape({image[1], image[2], image[3]});
    TensorShape colShape = getColShape(image, sequence);
    size_t batchSize = image[0];

    real* imageData = outputs[0].data<real>();
    real* seqData = inputs[0].data<real>();
    Col2ImFunctor<kOCF, Device, real> col2im;
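    // Scatter-add each sample's sequence gradient back into its image
    // gradient (ADD_TO semantics, checked above).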
    for (size_t i = 0; i < batchSize; i++) {
      col2im(imageData,
             imShape,
             seqData,
             colShape,
             strideH(),
             strideW(),
             paddingH(),
             paddingW());
      imageData += imShape.getElements();
      seqData += colShape.getElements();
    }
  }
};

REGISTER_TYPED_FUNC(ImageExpand, CPU, ImageExpandForward);
REGISTER_TYPED_FUNC(ImageExpandGrad, CPU, ImageExpandBackward);
#ifndef PADDLE_ONLY_CPU
REGISTER_TYPED_FUNC(ImageExpand, GPU, ImageExpandForward);
#endif
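
// A rough usage sketch, not part of this file: it assumes the legacy
// Function framework's registry (FunctionBase::funcRegistrar_) and
// FuncConfig::set() behave as they are used elsewhere in this code base,
// and that REGISTER_TYPED_FUNC exposes the function as "<Name>-<Device>".
//
//   auto expand = FunctionBase::funcRegistrar_.createByType("ImageExpand-CPU");
//   expand->init(FuncConfig()
//                    .set("strides", std::vector<size_t>{1, 1})
//                    .set("paddings", std::vector<size_t>{0, 0})
//                    .set("blocks", std::vector<size_t>{3, 3}));
//   // expand->calc(inputs, outputs) then turns an NCHW image batch into
//   // the corresponding NST sequence.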

}  // namespace paddle