/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "MKLDNNConvLayer.h"
#include "paddle/legacy/math/MathUtils.h"
#include "paddle/legacy/utils/Logging.h"

using namespace mkldnn;  // NOLINT
typedef memory::format format;

namespace paddle {

REGISTER_LAYER(mkldnn_conv, MKLDNNConvLayer);

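// Read the convolution settings from the layer config, check the cases this
// layer supports (single input, shared bias, caffe mode, dilation 1), and
// create the weight and the optional bias parameters.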
bool MKLDNNConvLayer::init(const LayerMap& layerMap,
                           const ParameterMap& parameterMap) {
  if (!MKLDNNLayer::init(layerMap, parameterMap)) {
    return false;
  }
  CHECK_EQ(inputLayers_.size(), 1UL) << "Only support one input layer yet";
  CHECK_EQ(inputLayers_.size(), parameters_.size());
  CHECK(config_.shared_biases()) << "Only support shared biases yet";

  oc_ = config_.num_filters();
  const ConvConfig& conf = config_.inputs(0).conv_conf();
  ic_ = conf.channels();
  fw_ = conf.filter_size();
  fh_ = conf.filter_size_y();
  pw_ = conf.padding();
  ph_ = conf.padding_y();
  dw_ = conf.dilation();
  dh_ = conf.dilation_y();
  sw_ = conf.stride();
  sh_ = conf.stride_y();
  gp_ = conf.groups();
  oh_ = conf.output_y();
  ow_ = conf.output_x();
  ih_ = conf.img_size_y();
  iw_ = conf.img_size();
  caffeMode_ = conf.caffe_mode();
  CHECK(caffeMode_) << "Only support caffe mode yet";
  CHECK(dh_ == 1 && dw_ == 1) << "Only support dilation 1 yet";
  // check group setting
  CHECK_EQ((oc_ / gp_) * gp_, oc_) << "oc must be divisible by group count";
  CHECK_EQ((ic_ / gp_) * gp_, ic_) << "ic must be divisible by group count";

  // create weight
  size_t height = oc_ / gp_;
  size_t width = ic_ * fh_ * fw_;
  CHECK_EQ(parameters_[0]->getSize(), height * width);
  weight_ =
      std::unique_ptr<Weight>(new Weight(height, width, parameters_[0], 0));

  // create biases
  if (biasParameter_.get() != NULL) {
    biases_ = std::unique_ptr<Weight>(new Weight(1, oc_, biasParameter_, 0));
  }
  return true;
}

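// Reorder the weight data from PaddlePaddle's oihw/goihw layout into the
// MKL-DNN internal layout; guarded by hasInitedWgt_ so it only runs once.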
void MKLDNNConvLayer::convertWeightsFromPaddle() {
  if (hasInitedWgt_) {
    return;
  }

  CHECK(wgtVal_) << "should have been initialized";
  // the paddle weight format is oihw or goihw
  auto targetDim = wgtVal_->getDims();
  auto srcFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw;
  wgtVal_->reorderDataFrom(wgtVal_, srcFmt, targetDim);
  hasInitedWgt_ = true;
}

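// Reorder the MKL-DNN weight data back into PaddlePaddle's oihw/goihw layout.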
void MKLDNNConvLayer::convertWeightsToPaddle() {
  CHECK(wgtVal_) << "should have been initialized";
  auto targetDim = wgtVal_->getDims();
  auto dstFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw;
  wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
}

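// Refresh the input shape and recompute the output height/width from the
// (dilated) filter size, padding and stride, then resize the output matrix.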
void MKLDNNConvLayer::reshape(
    int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) {
  reshapeInput(bs, ih, iw);

  // calculate output sizes
  // oc cannot be changed
  int fh = (fh_ - 1) * dh_ + 1;
  int fw = (fw_ - 1) * dw_ + 1;
  oh = outputSize(ih, fh, ph_, sh_, caffeMode_);
  ow = outputSize(iw, fw, pw_, sw_, caffeMode_);

  reshapeOutput(oh, ow);
  resizeOutput(bs, oc * oh * ow);
}

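// Build the forward primitive descriptor, the value buffers and the forward
// pipeline of this convolution.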
void MKLDNNConvLayer::resetFwd(std::vector<primitive>& pipeline,
                               std::vector<MKLDNNMatrixPtr>& inputs,
                               MKLDNNMatrixPtr& out) {
  resetFwdPD(fwdPD_);

  resetFwdBuffers(fwdPD_, inputs[0], wgtVal_, biasVal_, out);

  resetFwdPipeline(pipeline, fwdPD_, inputs[0], wgtVal_, biasVal_, out);
}

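// Build the backward-weight and backward-data primitive descriptors, the
// gradient buffers and the backward pipeline.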
void MKLDNNConvLayer::resetBwd(std::vector<primitive>& pipeline,
                               std::vector<MKLDNNMatrixPtr>& inputs,
                               MKLDNNMatrixPtr& out) {
  std::shared_ptr<conv_bwdWgt::primitive_desc> bwdWgtPD;
  std::shared_ptr<conv_bwdData::primitive_desc> bwdDataPD;

  resetBwdWgtPD(bwdWgtPD);

  resetBwdDataPD(bwdDataPD);

  resetBwdBuffers(bwdWgtPD, bwdDataPD, inputs[0], wgtGrad_, biasGrad_, out);

  resetBwdPipeline(
      pipeline, bwdWgtPD, bwdDataPD, inputs[0], wgtGrad_, biasGrad_, out);
}

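// Trigger the parameter update for the weight, and for the bias when it has
// a gradient.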
void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) {
  weight_->getParameterPtr()->incUpdate(callback);
  if (biases_ && biases_->getWGrad()) {
    biases_->getParameterPtr()->incUpdate(callback);
  }
}

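// Fill the MKL-DNN dims of weight, bias, stride, dilation and padding.
// The weight dims are 5-D (goihw) when groups > 1, otherwise 4-D (oihw).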
void MKLDNNConvLayer::loadConvSettings(memory::dims& wgt,
                                       memory::dims& bias,
                                       memory::dims& stride,
                                       memory::dims& dilation,
                                       memory::dims& padL,
                                       memory::dims& padR) {
  wgt = (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_}
                   : memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_};
  bias = memory::dims{oc_};
  stride = memory::dims{sh_, sw_};
  padL = memory::dims{ph_, pw_};
  padR = getPaddingR();
  // note: mkldnn dilation starts from 0
  dilation = memory::dims{dh_ - 1, dw_ - 1};
}

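// Create the convolution forward primitive descriptor, with or without bias
// depending on whether this layer has a bias parameter.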
void MKLDNNConvLayer::resetFwdPD(
    std::shared_ptr<conv_fwd::primitive_desc>& pd) {
  // dims for conv
  memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
  memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
  memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
  loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);

  prop_kind pk = passType_ == PASS_TEST ? prop_kind::forward_scoring
                                        : prop_kind::forward_training;
  algorithm algo = algorithm::convolution_direct;
  padding_kind padKind = padding_kind::zero;
  conv_fwd::desc fwdDesc =
      biases_ && biases_->getW()
          ? conv_fwd::desc(pk,
                           algo,
                           MKLDNNMatrix::createMemoryDesc(inDims),
                           MKLDNNMatrix::createMemoryDesc(wgtDims),
                           MKLDNNMatrix::createMemoryDesc(biasDims),
                           MKLDNNMatrix::createMemoryDesc(outDims),
                           strides,
                           dilations,
                           padL,
                           padR,
                           padKind)
          : conv_fwd::desc(pk,
                           algo,
                           MKLDNNMatrix::createMemoryDesc(inDims),
                           MKLDNNMatrix::createMemoryDesc(wgtDims),
                           MKLDNNMatrix::createMemoryDesc(outDims),
                           strides,
                           dilations,
                           padL,
                           padR,
                           padKind);
  pd.reset(new conv_fwd::primitive_desc(fwdDesc, engine_));
}

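// Create the input, output, weight and bias value buffers matching the
// memory formats chosen by the forward primitive descriptor.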
void MKLDNNConvLayer::resetFwdBuffers(
    std::shared_ptr<conv_fwd::primitive_desc>& pd,
    MKLDNNMatrixPtr& in,
    MKLDNNMatrixPtr& wgt,
    MKLDNNMatrixPtr& bias,
    MKLDNNMatrixPtr& out) {
  CHECK(pd);
  resetInValue(
      in, std::make_shared<memory::primitive_desc>(pd->src_primitive_desc()));

  resetOutValue(out, pd->dst_primitive_desc());

  resetWithMatrix(wgt, weight_->getW(), pd->weights_primitive_desc());

  if (biases_ && biases_->getW()) {
    resetWithMatrix(bias, biases_->getW(), pd->bias_primitive_desc());
  } else {
    bias = nullptr;
  }
}

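// Create the forward convolution primitive and append it to the pipeline.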
void MKLDNNConvLayer::resetFwdPipeline(
    std::vector<primitive>& pipeline,
    std::shared_ptr<conv_fwd::primitive_desc>& pd,
    MKLDNNMatrixPtr& in,
    MKLDNNMatrixPtr& wgt,
    MKLDNNMatrixPtr& bias,
    MKLDNNMatrixPtr& out) {
  if (bias) {
    fwd_.reset(new conv_fwd(*pd, *in, *wgt, *bias, *out));
  } else {
    fwd_.reset(new conv_fwd(*pd, *in, *wgt, *out));
  }
  pipeline.push_back(*fwd_);
}

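// Create the backward-weight primitive descriptor and check that its src,
// diff_dst and diff_weights descriptors match the forward value buffers.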
void MKLDNNConvLayer::resetBwdWgtPD(
    std::shared_ptr<conv_bwdWgt::primitive_desc>& pd) {
  memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
  loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);

  // create the backward-weight desc using the input, output
  // and weight value memory descs
  CHECK(inVals_[0]) << "Should have internal input value";
  CHECK(outVal_) << "Should have internal output value";
  CHECK(wgtVal_) << "Should have weight value";
  algorithm algo = algorithm::convolution_direct;
  padding_kind padKind = padding_kind::zero;
  auto bwdWgtDesc = biasVal_ != nullptr
                        ? conv_bwdWgt::desc(algo,
                                            inVals_[0]->getMemoryDesc(),
                                            wgtVal_->getMemoryDesc(),
                                            biasVal_->getMemoryDesc(),
                                            outVal_->getMemoryDesc(),
                                            strides,
                                            padL,
                                            padR,
                                            padKind)
                        : conv_bwdWgt::desc(algo,
                                            inVals_[0]->getMemoryDesc(),
                                            wgtVal_->getMemoryDesc(),
                                            outVal_->getMemoryDesc(),
                                            strides,
                                            padL,
                                            padR,
                                            padKind);
  pd.reset(new conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
  CHECK_PRIMITIVE_DESC_EQ(inVals_[0], pd->src_primitive_desc());
  CHECK_PRIMITIVE_DESC_EQ(
      outVal_,
      pd->diff_dst_primitive_desc(),
      "primitive desc of out value and grad should be equal");
  CHECK_PRIMITIVE_DESC_EQ(
      wgtVal_,
      pd->diff_weights_primitive_desc(),
      "primitive desc of weight value and grad should be equal");
}

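// Create the backward-data primitive descriptor; pd stays nullptr when the
// input layer does not need an input gradient.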
void MKLDNNConvLayer::resetBwdDataPD(
    std::shared_ptr<conv_bwdData::primitive_desc>& pd) {
  pd = nullptr;
  if (inputLayers_[0]->getOutput().grad == nullptr) {
    return;
  }

  memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
  loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
  CHECK(inVals_[0]) << "Should have internal input value";
  CHECK(outVal_) << "Should have internal output value";
  // create the backward-data desc using the input and output value memory
  // descs, but with an any-format weight memory desc
  auto bwdDataDesc = conv_bwdData::desc(algorithm::convolution_direct,
                                        inVals_[0]->getMemoryDesc(),
                                        MKLDNNMatrix::createMemoryDesc(wgtDims),
                                        outVal_->getMemoryDesc(),
                                        strides,
                                        padL,
                                        padR,
                                        padding_kind::zero);
  pd.reset(new conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
  CHECK_PRIMITIVE_DESC_EQ(
      inVals_[0],
      pd->diff_src_primitive_desc(),
      "primitive desc of in value and grad should be equal");
  CHECK_PRIMITIVE_DESC_EQ(
      outVal_,
      pd->diff_dst_primitive_desc(),
      "primitive desc of out value and grad should be equal");
}

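// Create the gradient buffers of output, weight, bias and input, plus the
// weight value used by the backward-data primitive.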
void MKLDNNConvLayer::resetBwdBuffers(
    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
    MKLDNNMatrixPtr& in,
    MKLDNNMatrixPtr& wgt,
    MKLDNNMatrixPtr& bias,
    MKLDNNMatrixPtr& out) {
  CHECK(wgtPD);
  resetOutGrad(out, wgtPD->diff_dst_primitive_desc());

  resetWithMatrix(
      wgt, weight_->getWGrad(), wgtPD->diff_weights_primitive_desc());
  CHECK_PRIMITIVE_DESC_EQ(
      wgtVal_,
      wgt->getPrimitiveDesc(),
      "primitive desc of weight grad and value should be equal");

  bias = nullptr;
  if (biases_ && biases_->getWGrad()) {
    resetWithMatrix(
        bias, biases_->getWGrad(), wgtPD->diff_bias_primitive_desc());
    CHECK(bias);
    CHECK_PRIMITIVE_DESC_EQ(
        biasVal_,
        bias->getPrimitiveDesc(),
        "primitive desc of bias grad and value should be equal");
  }

  if (dataPD == nullptr) {
    return;
  }
  resetInGrad(in, dataPD->diff_src_primitive_desc());
  resetWgtValBwdData(dataPD, wgtValBwdData_);
}

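// Create the backward-weight primitive and, when the input gradient is
// needed, the weight reorder and the backward-data primitive, then append
// them to the pipeline.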
void MKLDNNConvLayer::resetBwdPipeline(
    std::vector<primitive>& pipeline,
    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
    MKLDNNMatrixPtr& in,
    MKLDNNMatrixPtr& wgt,
    MKLDNNMatrixPtr& bias,
    MKLDNNMatrixPtr& out) {
  CHECK(inVals_[0]);
  // add bwdWgt handle
  if (bias) {
    bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVals_[0], *out, *wgt, *bias));
  } else {
    bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVals_[0], *out, *wgt));
  }
  pipeline.push_back(*bwdWgt_);

  if (dataPD == nullptr) {
    return;
  }
  if (cvtWgtVal_) {
    pipeline.push_back(*cvtWgtVal_);
  }
  // add bwdData handle
  CHECK(wgtValBwdData_) << "Should have weight memory";
  bwdData_.reset(new conv_bwdData(*dataPD, *out, *wgtValBwdData_, *in));
  pipeline.push_back(*bwdData_);
}

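// Prepare the weight value in the layout expected by the backward-data
// primitive, creating a reorder from wgtVal_ when the layouts differ.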
void MKLDNNConvLayer::resetWgtValBwdData(
    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
    MKLDNNMatrixPtr& wgt) {
  if (dataPD == nullptr) {
    return;
  }

  // create new weight value for backward data, and create reorder if necessary
  // since its primitive_desc may differ from that of wgtVal_
  CHECK(wgtVal_) << "should have weight value";
  if (dataPD->weights_primitive_desc() != wgtVal_->getPrimitiveDesc()) {
    wgtValBwdData_ = MKLDNNMatrix::create(dataPD->weights_primitive_desc());
    cvtWgtVal_ = MKLDNNMatrix::createReorder(wgtVal_, wgtValBwdData_);
    CHECK(cvtWgtVal_);
  } else {
    wgtValBwdData_ = wgtVal_;
  }
  VLOG(MKLDNN_FMTS) << "weight value format for backward data: "
                    << wgtValBwdData_->getFormat();
}

}  // namespace paddle