/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>
#include "Layer.h"
#include "MKLDNNBase.h"
#include "mkldnn.hpp"
#include "paddle/math/MKLDNNMatrix.h"
#include "paddle/utils/Stat.h"

DECLARE_bool(use_mkldnn);

namespace paddle {

class MKLDNNLayer;
typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;

/**
 * @brief Base class of MKLDNN layers.
 *
 * It owns the MKL-DNN engine, stream and primitives of a layer, and manages
 * both internal (MKL-DNN format) and external (Paddle nchw/nc format)
 * buffers, creating reorder primitives between them when necessary.
 */
class MKLDNNLayer : public Layer {
protected:
  // batch size
  int bs_;
  // the sizes below are always taken from the first input layer
  // input image channel, height and width
  int ic_, ih_, iw_;
  // output image channel, height and width
  int oc_, oh_, ow_;

  // the condition under which the forward pass needs to be reset
  size_t condition_;
  // backward also needs to be reset after the forward handles are reset
  bool needResetBwd_;

  // whether the output is used only by MKLDNN layers
  bool outputOnlyMKLDNN_;

  // mkldnn engine, stream and primitives
  mkldnn::engine engine_;
  std::shared_ptr<MKLDNNStream> stream_;
  std::shared_ptr<mkldnn::primitive> fwd_;
  std::shared_ptr<mkldnn::primitive> bwdWgt_;
  std::shared_ptr<mkldnn::primitive> bwdData_;
  std::vector<mkldnn::primitive> pipelineFwd_;
  std::vector<mkldnn::primitive> pipelineBwd_;

  /* Values and grads are separated into internal and external buffers.
   * Each MKLDNNLayer must at least init or reset its internal buffers;
   * the external buffer format is always nchw, or nc when h == w == 1,
   * which is the same layout Paddle uses.
   * output_.value and output_.grad always hold the external data
   * when mixed with a CPU device.
   * When all layers are MKLDNN layers, they may hold internal data instead.
   */
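  // Illustrative forward-pass data flow between these buffers:
  //   extInVals_[i] (nchw/nc) --cvtInVals_[i]--> inVals_[i]
  //     --fwd_--> outVal_ --cvtOutVal_--> extOutVal_ (nchw/nc)
  // The backward pass uses the corresponding grad buffers in the reverse
  // direction.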
  // the MKLDNNMatrix buffers below are all internal buffers
  std::vector<MKLDNNMatrixPtr> inVals_;
  std::vector<MKLDNNMatrixPtr> inGrads_;
  MKLDNNMatrixPtr outVal_;
  MKLDNNMatrixPtr outGrad_;
  // below are external value and grad
  std::vector<MKLDNNMatrixPtr> extInVals_;
  std::vector<MKLDNNMatrixPtr> extInGrads_;
  MKLDNNMatrixPtr extOutVal_;
  MKLDNNMatrixPtr extOutGrad_;
  // convert handle between external and internal buffers
  std::vector<std::shared_ptr<mkldnn::reorder>> cvtInVals_;
  std::vector<std::shared_ptr<mkldnn::reorder>> cvtInGrads_;
  std::shared_ptr<mkldnn::reorder> cvtOutVal_;
  std::shared_ptr<mkldnn::reorder> cvtOutGrad_;

  // weight and bias are always internal buffers
  MKLDNNMatrixPtr wgtVal_;
  MKLDNNMatrixPtr wgtGrad_;
  MKLDNNMatrixPtr biasVal_;
  MKLDNNMatrixPtr biasGrad_;

  // merge grad primitive
  std::shared_ptr<mkldnn::primitive> mergeGrad_;
  std::vector<mkldnn::primitive> pipelineMergeGrad_;
  // temporary input argument to save input grad, only used to merge grads
  Argument tmpInArg_;

public:
  explicit MKLDNNLayer(const LayerConfig& config)
      : Layer(config),
        condition_(0),
        needResetBwd_(true),
        outputOnlyMKLDNN_(false),
        engine_(mkldnn::engine::cpu, 0),
        stream_(nullptr),
        fwd_(nullptr),
        bwdWgt_(nullptr),
        bwdData_(nullptr) {}

  ~MKLDNNLayer() {}

  virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
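  /**
   * Roughly: forward() re-runs reshape() and resetFwd() whenever
   * keepCondition() changes and then submits pipelineFwd_ to the stream;
   * backward() rebuilds pipelineBwd_ via resetBwd() when needResetBwd_ is
   * set and then submits it.
   */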
  virtual void forward(PassType passType);
  virtual void backward(const UpdateCallback& callback);

  /**
   * reshape the input and output channels and image sizes,
   * and reset the output buffer size
   */
  virtual void reshape(
      int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) = 0;
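  // A minimal sketch of a derived layer's reshape() (the layer name and the
  // fully-connected shape math below are hypothetical, not from this file):
  //
  //   void MyMKLDNNLayer::reshape(
  //       int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) {
  //     reshapeInput(bs, ih, iw);        // take bs/ih/iw from the first input
  //     ic = inputLayers_[0]->getSize() / (ih * iw);
  //     oc = getSize();                  // e.g. a fully-connected output
  //     oh = 1;
  //     ow = 1;
  //     reshapeOutput(oh, ow);           // set the output image sizes
  //     resizeOutput(bs, oc * oh * ow);  // reset the output buffer size
  //   }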

  /**
   * reset the mkldnn forward primitive and memories;
   * only called when the input size changes.
   * weight and bias buffers should be handled by the child class itself
   */
  virtual void resetFwd(std::vector<mkldnn::primitive>& pipeline,
                        std::vector<MKLDNNMatrixPtr>& inputs,
                        MKLDNNMatrixPtr& out) = 0;
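  // A minimal sketch of a derived layer's resetFwd(); "somePD" and
  // "some_forward_primitive" are placeholders, not real MKL-DNN names:
  //
  //   void MyMKLDNNLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
  //                                std::vector<MKLDNNMatrixPtr>& inputs,
  //                                MKLDNNMatrixPtr& out) {
  //     resetInValue(inputs[0]);      // internal/external input buffers
  //     resetOutValue(out, somePD);   // internal/external output buffers
  //     fwd_.reset(new some_forward_primitive(somePD, *inputs[0], *out));
  //     pipeline.push_back(*fwd_);    // executed by the MKLDNN stream
  //   }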

  /**
   * reset the mkldnn backward primitive and memories;
   * only called when needed.
   * weight and bias buffers should be handled by the child class itself
   */
  virtual void resetBwd(std::vector<mkldnn::primitive>& pipeline,
                        std::vector<MKLDNNMatrixPtr>& inputs,
                        MKLDNNMatrixPtr& out) = 0;

  /**
   * Update weights and biases if necessary.
   */
  virtual void updateWeights(const UpdateCallback& callback) {}

  /**
   * convert the weight from paddle format to mkldnn format;
   * weight_ will be overwritten
   */
  virtual void convertWeightsFromPaddle() {}

  /**
   * convert the mkldnn weight to paddle format;
   * weight_ will be overwritten
   */
  virtual void convertWeightsToPaddle() {}

  /**
   * expose this interface as public for unit tests
   */
  void addOutputArgument(int deviceId) { Layer::addOutputArgument(deviceId); }

protected:
  /**
   * Some layers may use a different condition to decide when to reset the
   * forward pass. This function returns that condition value; forward does
   * not need to be reset as long as the returned value is unchanged.
   */
  inline virtual size_t keepCondition() {
    // reset when the element count of the first input changes,
    // not only when the batch size changes
    return inputLayers_[0]->getOutputValue()->getElementCnt();
  }
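  // A hypothetical override for a layer that only needs to reset forward
  // when the batch size of the first input changes:
  //
  //   size_t keepCondition() override {
  //     // Matrix::getHeight() is the number of rows, i.e. the batch size
  //     return inputLayers_[0]->getOutputValue()->getHeight();
  //   }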

  /**
   * reshape the input image sizes and the input batch size
   */
  void reshapeInput(int& batchsize, int& height, int& width, size_t idx = 0);

  /**
   * reshape output image sizes
   */
  void reshapeOutput(size_t height, size_t width);

  /**
   * reset the MKLDNNMatrix from a Matrix and an internal primitive desc;
   * reset to nullptr if the matrix or primitive desc is empty
   */
  void resetWithMatrix(MKLDNNMatrixPtr& dnn,
                       const MatrixPtr& mat,
                       mkldnn::memory::primitive_desc pd);

  /**
   * reset the input value from the input MKLDNNMatrix and an internal
   * primitive desc.
   * reset both internal and external buffers, and create a reorder if
   * necessary.
   * the input channel may differ in concat.
   */
  void resetInValue(
      MKLDNNMatrixPtr& in,
      const std::shared_ptr<mkldnn::memory::primitive_desc>& intPD = nullptr,
      size_t idx = 0,
      int inputChannel = 0);

  /**
   * reset the output value from an internal primitive desc.
   * reset both internal and external buffers, and create a reorder if
   * necessary.
   */
  void resetOutValue(MKLDNNMatrixPtr& out,
                     mkldnn::memory::primitive_desc intPD);

  /**
   * reset the input grad from an internal primitive desc.
   * reset both internal and external buffers, and create a reorder if
   * necessary.
   */
  void resetInGrad(MKLDNNMatrixPtr& in,
                   mkldnn::memory::primitive_desc intPD,
                   size_t idx = 0);

  /**
   * reset the output grad from an internal primitive desc.
   * merge the grads if necessary.
   * reset both internal and external buffers, and create a reorder if
   * necessary.
   * note on merging grads: when this layer has several outputs, they cannot
   *       be mixed with a CPU device, since the memory desc cannot be
   *       obtained from a CPU device.
   */
  void resetOutGrad(MKLDNNMatrixPtr& out, mkldnn::memory::primitive_desc intPD);

  /**
   * reset the merge-grad primitive if necessary.
   * note: grads mixed with a CPU device are not supported, since the memory
   *       desc cannot be obtained from a CPU device.
   */
  void resetMergeGrad(MKLDNNMatrixPtr& out);

protected:
  /**
   * Set deviceId of this layer.
   */
  void setDevice(int id) { deviceId_ = id; }

  /**
   * check whether the format is nchw or nc,
   * which are Paddle's default memory layouts
   */
  bool isPaddleFormat(mkldnn::memory::format fmt) {
    if (fmt == mkldnn::memory::format::nchw ||
        fmt == mkldnn::memory::format::nc) {
      return true;
    } else {
      return false;
    }
  }

  /**
   * whether the input comes only from an MKLDNN device.
   * otherwise, only a previous layer on a CPU device is supported.
   */
  bool inputIsOnlyMKLDNN(int index = 0) {
    int prevDevice = getPrev(index)->getDeviceId();
    if (prevDevice == MKLDNN_DEVICE) {
      return true;
    } else {
      CHECK_EQ(prevDevice, CPU_DEVICE) << "Only support CPU yet";
      return false;
    }
  }

  /**
   * whether the output goes only to MKLDNN devices.
   * otherwise, any other device should be a CPU device.
   */
  bool outputIsOnlyMKLDNN() {
    for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
      CHECK_EQ(outputOtherDevice_[i].deviceId, CPU_DEVICE)
          << "Only support other device is CPU yet";
    }
    outputOnlyMKLDNN_ = outputOtherDevice_.size() == 0;
    return outputOnlyMKLDNN_;
  }

  /**
   * print info about sizes
   */
  virtual void printSizeInfo() {
    VLOG(MKLDNN_SIZES) << getName() << ": bs: " << bs_ << ", ic: " << ic_
                       << ", ih: " << ih_ << ", iw: " << iw_ << ", oc: " << oc_
                       << ", oh: " << oh_ << ", ow: " << ow_;
  }

  /**
   * print the mkldnn memory format of the values
   */
  virtual void printValueFormat() {
    for (size_t i = 0; i < inVals_.size(); ++i) {
      if (!inVals_[i]) {
        continue;
      }
      VLOG(MKLDNN_FMTS) << "Input " << i << ", " << inputLayers_[i]->getName()
                        << ": " << (extInVals_[i] ? extInVals_[i]->getFormat()
                                                  : inVals_[i]->getFormat())
                        << " >>> " << inVals_[i]->getFormat() << " >>>";
    }
    if (outVal_) {
      VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> "
                        << (extOutVal_ ? extOutVal_->getFormat()
                                       : outVal_->getFormat());
    }
    if (wgtVal_) {
      VLOG(MKLDNN_FMTS) << "Weight value format: " << wgtVal_->getFormat();
    }
    if (biasVal_) {
      VLOG(MKLDNN_FMTS) << "Bias value format: " << biasVal_->getFormat();
312
    }
  }

  /**
   * print the mkldnn memory format of the grads
   */
  virtual void printGradFormat() {
    if (outGrad_) {
      VLOG(MKLDNN_FMTS) << outGrad_->getFormat() << " <<< "
                        << (extOutGrad_ ? extOutGrad_->getFormat()
                                        : outGrad_->getFormat());
    }
    for (size_t i = 0; i < inGrads_.size(); ++i) {
      if (!inGrads_[i]) {
        continue;
      }
      VLOG(MKLDNN_FMTS) << "Input " << i << ", " << inputLayers_[i]->getName()
                        << ": " << (extInGrads_[i] ? extInGrads_[i]->getFormat()
                                                   : inGrads_[i]->getFormat())
                        << " <<< " << inGrads_[i]->getFormat() << " <<<";
    }
    if (wgtGrad_) {
      VLOG(MKLDNN_FMTS) << "Weight grad format: " << wgtGrad_->getFormat();
    }
    if (biasGrad_) {
      VLOG(MKLDNN_FMTS) << "Bias grad format: " << biasGrad_->getFormat();
338
    }
  }

private:
  /**
   * clear all the grads
   */
  void clearGrads() {
    if (output_.grad) {
      output_.grad->zeroMem();
    }
    for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
      if (outputOtherDevice_[i].grad) {
        outputOtherDevice_[i].grad->zeroMem();
      }
    }
  }

  /**
   * Set deviceId of the params used in this layer.
   */
  void setParamsDevice(int id, const ParameterMap& parameterMap) {
    for (auto& inputConfig : config_.inputs()) {
      if (inputConfig.has_input_parameter_name()) {
        ParameterPtr parameter;
        std::string name = inputConfig.input_parameter_name();
        CHECK(mapGet(name, parameterMap, &parameter))
            << "Cannot find input parameter " << name << " for layer "
            << getName();
        parameter->setDevice(id);
      }
    }
    if (config_.has_bias_parameter_name()) {
      ParameterPtr parameter;
      std::string name = config_.bias_parameter_name();
      CHECK(mapGet(name, parameterMap, &parameter))
          << "Cannot find bias parameter " << name << " for layer "
          << getName();
      parameter->setDevice(id);
    }
  }

  /**
   * Set output map of prev layers.
   */
  void setOutputMap() {
    outputMap_.clear();
    for (size_t i = 0; i < inputLayers_.size(); ++i) {
      inputLayers_[i]->setOutput(getName(), &tmpInArg_);
    }
  }

  /**
   * if there is a CPU device, share the value and grad data with output_
   */
  void shareCPUDevice() {
    if (outputIsOnlyMKLDNN()) {
      return;
    }
    for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
      outputOtherDevice_[i].value = output_.value;
      outputOtherDevice_[i].grad = output_.grad;
    }
  }

  /**
   * Check the number of CPU devices in outputOtherDevice_;
   * there should be at most one.
   */
  void checkCPUOutputsNumber(int max = 1) {
    int cnt = 0;
    for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
      if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
        ++cnt;
      }
    }
    CHECK_LE(cnt, max) << "too many CPU devices";
  }

  /**
   * copy SeqInfo from the input layer to this output and the other output
   * devices.
   * @note: do not use getInput(0) since it uses this layer's deviceId_;
   *        use "inputLayers_[0]->getOutput()" instead.
   */
  void copySeqInfoToOutputs() {
    if (inputLayers_.empty() || !needSequenceInfo_) {
      return;
    }
    const Argument& input = inputLayers_[0]->getOutput();
    output_.sequenceStartPositions = input.sequenceStartPositions;
    output_.subSequenceStartPositions = input.subSequenceStartPositions;
    output_.cpuSequenceDims = input.cpuSequenceDims;
    for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
      outputOtherDevice_[i].sequenceStartPositions =
          output_.sequenceStartPositions;
      outputOtherDevice_[i].subSequenceStartPositions =
          output_.subSequenceStartPositions;
      outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
    }
  }

  void prepareValueConversions(std::vector<mkldnn::primitive>& pipeline) {
    // The MKLDNNLayer output value should be an MKLDNNMatrix,
    // so an external output value is necessary.
    // An external input value is not necessary,
    // since the input may be an mkldnn internal buffer.
    CHECK(extOutVal_) << "external output value is necessary";
    output_.value = std::dynamic_pointer_cast<Matrix>(extOutVal_);
    CHECK(inVals_[0] && outVal_) << "internal memories are necessary";
    for (size_t i = 0; i < cvtInVals_.size(); ++i) {
      if (cvtInVals_[i]) {
        pipeline.insert(pipeline.begin(), *cvtInVals_[i]);
      }
    }
    if (cvtOutVal_) {
      pipeline.push_back(*cvtOutVal_);
    }
  }
  void prepareGradConversions(std::vector<mkldnn::primitive>& pipeline) {
    // An external output grad is not necessary, since the output may be an
    // mkldnn internal buffer or the grads may be merged directly.
    CHECK(outGrad_) << "internal output grad is necessary";
    if (extOutGrad_) {
      CHECK_EQ(extOutGrad_->getData(), output_.grad->getData())
          << "the external buffer should share the same data with output_.grad";
    }
    if (cvtOutGrad_) {
      pipeline.insert(pipeline.begin(), *cvtOutGrad_);
    }
    for (size_t i = 0; i < cvtInGrads_.size(); ++i) {
      if (cvtInGrads_[i]) {
        pipeline.push_back(*cvtInGrads_[i]);
      }
    }
  }
};

}  // namespace paddle