/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "ExpandConvLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"

DEFINE_bool(use_nnpack,
            false,
            "Whether to use nnpack for convolution calculation.");

namespace paddle {

/*
 * The exconvt (convolution transpose, i.e. deconv) operation is computed by
 * swapping the forward and backward passes of the exconv computation.
 */
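/*
 * Concretely, as set up in init() below: the forward pass of exconvt runs
 * the convolution's GradInput function, and its backward pass with respect
 * to the input runs the convolution's forward function.
 */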
REGISTER_LAYER(exconv, ExpandConvLayer);
REGISTER_LAYER(exconvt, ExpandConvLayer);

bool ExpandConvLayer::init(const LayerMap &layerMap,
                           const ParameterMap &parameterMap) {
  /* Initialize the base convolution class. */
  ExpandConvBaseLayer::init(layerMap, parameterMap);

  size_t numInputs = config_.inputs_size();
  inputShape_.resize(numInputs);
  filterShape_.resize(numInputs);
  outputShape_.resize(numInputs);
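  // One shape per input branch; the actual dimensions are filled in on every
  // forward() call, since the batch size may change between passes.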

  string convType;
  string convGradInputType;
  string convGradFilterType;

  for (int i = 0; i < config_.inputs_size(); i++) {
    std::vector<size_t> paddings = {(size_t)paddingY_[i], (size_t)padding_[i]};
    std::vector<size_t> strides = {(size_t)strideY_[i], (size_t)stride_[i]};

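    // On the GPU, a grouped convolution with groups == channels is a
    // depthwise convolution, so the dedicated depthwise functions are used;
    // all other cases fall back to the GEMM-based functions.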
    if (useGpu_ && (size_t)groups_[i] == (size_t)channels_[i] && !isDeconv_) {
      convType = "DepthwiseConv";
      convGradInputType = "DepthwiseConvGradInput";
      convGradFilterType = "DepthwiseConvGradFilter";
    } else {
      convType = "GemmConv";
      convGradInputType = "GemmConvGradInput";
      convGradFilterType = "GemmConvGradFilter";
    }

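    // Note that the NNPACK path registers only a forward function and does
    // not support deconv, hence the check below.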
    if (FLAGS_use_nnpack) {
      CHECK_EQ(isDeconv_, false);
      createFunction(forward_,
                     "NNPACKConv",
                     FuncConfig()
                         .set("paddings", paddings)
                         .set("strides", strides)
                         .set("groups", (size_t)groups_[i])
                         .set("algo", std::string("auto")));
    } else {
      createFunction(forward_,
                     !isDeconv_ ? convType : convGradInputType,
                     FuncConfig()
                         .set("paddings", paddings)
                         .set("strides", strides)
                         .set("groups", (size_t)groups_[i]));

      createFunction(backward_,
                     !isDeconv_ ? convGradInputType : convType,
                     FuncConfig()
                         .set("paddings", paddings)
                         .set("strides", strides)
                         .set("groups", (size_t)groups_[i]));

      createFunction(backward_,
                     convGradFilterType,
                     FuncConfig()
                         .set("paddings", paddings)
                         .set("strides", strides)
                         .set("groups", (size_t)groups_[i]));
    }
  }
  return true;
}

// i is the index of the input layer.
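// init() registers the two backward functions for each input in order, so
// backward_[2 * i] is the input-gradient function and backward_[2 * i + 1]
// is the filter-gradient function.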
#define BACKWARD_INPUT(i, inputs, outputs) \
  backward_[2 * i]->calc(inputs, outputs)
#define BACKWARD_FILTER(i, inputs, outputs) \
  backward_[2 * i + 1]->calc(inputs, outputs)

void ExpandConvLayer::forward(PassType passType) {
  Layer::forward(passType);

  size_t batchSize = inputLayers_[0]->getOutputValue()->getHeight();
  resetOutput(batchSize, getOutputSize());

  // Calculate the shape of the input, output, and filter.
  for (size_t i = 0; i < inputLayers_.size(); ++i) {
    inputShape_[i] = TensorShape({(size_t)batchSize,
                                  (size_t)channels_[i],
                                  (size_t)imgSizeH_[i],
                                  (size_t)imgSizeW_[i]});
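    // Filter layout: (groups, output channels / groups, input channels /
    // groups, filter height, filter width); for deconv the two channel
    // dimensions trade places.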
    filterShape_[i] =
        TensorShape({(size_t)groups_[i],
                     !isDeconv_ ? (size_t)numFilters_ / groups_[i]
                                : (size_t)channels_[i] / groups_[i],
                     !isDeconv_ ? (size_t)channels_[i] / groups_[i]
                                : (size_t)numFilters_ / groups_[i],
                     (size_t)filterSizeY_[i],
                     (size_t)filterSize_[i]});
    outputShape_[i] = TensorShape({(size_t)batchSize,
                                   (size_t)numFilters_,
                                   (size_t)outputH_[i],
                                   (size_t)outputW_[i]});
  }

  // Calculate the output value.
  for (size_t i = 0; i < inputLayers_.size(); ++i) {
    BufferArgs inputs;
    BufferArgs outputs;
    inputs.addArg(*getInputValue(i), inputShape_[i]);
    inputs.addArg(*weights_[i]->getW(), filterShape_[i]);
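    // The first exconv input assigns the output buffer; every later input is
    // accumulated onto it, and deconv accumulates even for i == 0.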
    outputs.addArg(*getOutputValue(),
                   outputShape_[i],
                   !isDeconv_ && i == 0 ? ASSIGN_TO : ADD_TO);

    forward_[i]->calc(inputs, outputs);
  }

  /* Add the bias vector. */
  if (biases_.get()) {
    if (sharedBiases_) {
      addSharedBias();
    } else {
      addUnsharedBias();
    }
  }

  /* activation */
  forwardActivation();
}

void ExpandConvLayer::backward(const UpdateCallback &callback) {
  backwardActivation();

  MatrixPtr outGrad = getOutputGrad();
  if (biases_ && biases_->getWGrad()) {
    bpropBiases(outGrad);
    /* Increment the gradient update counter. */
    biases_->getParameterPtr()->incUpdate(callback);
  }

  // Calculate the input grad and filter grad.
  for (size_t i = 0; i < inputLayers_.size(); ++i) {
    if (getInputGrad(i)) {
      BufferArgs inputs;
      BufferArgs outputs;
      inputs.addArg(*getOutputGrad(), outputShape_[i]);
      inputs.addArg(*weights_[i]->getW(), filterShape_[i]);
      outputs.addArg(*getInputGrad(i), inputShape_[i], ADD_TO);
      BACKWARD_INPUT(i, inputs, outputs);
    }

    if (weights_[i]->getWGrad()) {
      BufferArgs inputs;
      BufferArgs outputs;
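      // The filter-gradient function takes (output grad, input value) for
      // exconv; for exconvt the two arguments are swapped, consistent with
      // the forward/backward swap noted at the top of this file.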
      if (!isDeconv_) {
        inputs.addArg(*getOutputGrad(), outputShape_[i]);
        inputs.addArg(*getInputValue(i), inputShape_[i]);
      } else {
        inputs.addArg(*getInputValue(i), inputShape_[i]);
        inputs.addArg(*getOutputGrad(), outputShape_[i]);
      }
      outputs.addArg(*weights_[i]->getWGrad(), filterShape_[i], ADD_TO);
      BACKWARD_FILTER(i, inputs, outputs);

      /* Increment the gradient update counter. */
      weights_[i]->getParameterPtr()->incUpdate(callback);
    }
  }
}

}  // namespace paddle