/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "MKLDNNLayer.h"
#include "mkldnn.hpp"

namespace paddle {
typedef mkldnn::pooling_forward pool_fwd;
typedef mkldnn::pooling_backward pool_bwd;

/**
 * @brief A subclass of MKLDNNLayer that implements the pooling layer.
 *
 * The config file API is mkldnn_pool.
 */
class MKLDNNPoolLayer : public MKLDNNLayer {
protected:
  // padding height and width
  int ph_, pw_;
  // stride height and width
  int sh_, sw_;
  // filter (kernel) height and width
  int fh_, fw_;

  // pooling_avg or pooling_max
  mkldnn::algorithm poolAlgo_;

  // MKLDNNMatrixPtrs which should be created from the CPU device
  MKLDNNMatrixPtr cpuOutVal_;
  MKLDNNMatrixPtr cpuOutGrad_;
  // reorder handles that convert between the CPU device and the MKLDNN device
  std::shared_ptr<mkldnn::reorder> cvtOutVal_;
  std::shared_ptr<mkldnn::reorder> cvtOutGrad_;
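  // A minimal sketch of how such a reorder is created and queued, assuming
  // MKLDNNMatrix is usable as an mkldnn::memory (as elsewhere in this
  // codebase); the authoritative call sites are in MKLDNNPoolLayer.cpp:
  //   cvtOutVal_.reset(new mkldnn::reorder(*out, *cpuOutVal_));
  //   pipeline.push_back(*cvtOutVal_);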

  // save the forward primitive_desc, which can be reused in the backward pass
  std::shared_ptr<pool_fwd::primitive_desc> fwdPD_;
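  // In the MKL-DNN v0.x API the backward primitive_desc takes the forward
  // one as a hint; a minimal sketch (engine_ is inherited from MKLDNNLayer,
  // and bwdDesc stands for a pool_bwd::desc built from the same shapes):
  //   pd.reset(new pool_bwd::primitive_desc(bwdDesc, engine_, *fwdPD_));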
  // according to https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
  // test_pooling_forward.cpp, pooling needs a workspace for backward;
  // for pooling_max it records the positions of the maxima
  std::shared_ptr<mkldnn::memory> workspace_;
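  // A sketch of how the workspace is typically allocated once fwdPD_ exists
  // (workspace_primitive_desc() is the v0.x query; a hedged sketch, not
  // verbatim from this layer's .cpp):
  //   workspace_.reset(
  //       new mkldnn::memory(fwdPD_->workspace_primitive_desc()));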

public:
  explicit MKLDNNPoolLayer(const LayerConfig& config) : MKLDNNLayer(config) {}

  ~MKLDNNPoolLayer() {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void reshape(
      int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
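  // For reference, the usual pooling output-size arithmetic reshape() is
  // expected to apply per spatial dimension (ceil mode shown; floor vs.
  // ceil depends on the layer config, so treat this as a sketch):
  //   oh = (ih + 2 * ph_ - fh_ + sh_ - 1) / sh_ + 1
  //   ow = (iw + 2 * pw_ - fw_ + sw_ - 1) / sw_ + 1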

  void resetFwd(std::vector<mkldnn::primitive>& pipeline,
                MKLDNNMatrixPtr& in,
                MKLDNNMatrixPtr& wgt,
                MKLDNNMatrixPtr& bias,
                MKLDNNMatrixPtr& out) override;

  void resetBwd(std::vector<mkldnn::primitive>& pipeline,
                MKLDNNMatrixPtr& in,
                MKLDNNMatrixPtr& wgt,
                MKLDNNMatrixPtr& bias,
                MKLDNNMatrixPtr& out) override;

  void updateInputData() override;

  void printSizeInfo() override {
    MKLDNNLayer::printSizeInfo();
    VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_
                       << ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_
                       << ", sw: " << sw_;
  }

protected:
  /**
   * Forward functions: reset buffers (input, output),
   *                    reset primitive descriptor,
   *                    reset pipeline.
   */
  void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
  void resetInValue(MKLDNNMatrixPtr& in);
  void resetOutValue(MKLDNNMatrixPtr& out);
  void resetFwdPD(std::shared_ptr<pool_fwd::primitive_desc>& pd,
                  MKLDNNMatrixPtr in,
                  MKLDNNMatrixPtr out);
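  // A minimal sketch of what resetFwdPD builds with the MKL-DNN v0.x API
  // (getMemoryDesc() on MKLDNNMatrix and the inherited engine_ are
  // assumptions here; see MKLDNNPoolLayer.cpp for the real version):
  //   auto fwdDesc = pool_fwd::desc(mkldnn::prop_kind::forward_training,
  //                                 poolAlgo_, in->getMemoryDesc(),
  //                                 out->getMemoryDesc(), {sh_, sw_},
  //                                 {fh_, fw_}, {ph_, pw_}, getPaddingR(),
  //                                 mkldnn::padding_kind::zero);
  //   pd.reset(new pool_fwd::primitive_desc(fwdDesc, engine_));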
  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
                        std::shared_ptr<pool_fwd::primitive_desc>& pd,
                        MKLDNNMatrixPtr& in,
                        MKLDNNMatrixPtr& out);
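  // The pipeline step itself is then a single primitive, sketched as:
  //   pipeline.push_back(pool_fwd(*pd, *in, *out, *workspace_));
  // (the workspace argument is passed only for pooling_max in training).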

  /**
   * Backward functions: reset buffers (input, output),
   *                     reset primitive descriptor,
   *                     reset pipeline.
   */
  void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
  void resetOutGrad(MKLDNNMatrixPtr& out);
  void resetInGrad(MKLDNNMatrixPtr& in);
  void resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
                  MKLDNNMatrixPtr& in,
                  MKLDNNMatrixPtr& out);
  void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
                        std::shared_ptr<pool_bwd::primitive_desc>& pd,
                        MKLDNNMatrixPtr& in,
                        MKLDNNMatrixPtr& out);
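  // Sketch of the backward primitive that ends this pipeline (v0.x API;
  // out carries the output diff, in receives the input diff):
  //   pipeline.push_back(pool_bwd(*pd, *out, *workspace_, *in));
  // For pooling_avg the workspace argument is omitted.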

  /**
   * Compute padding_r (the right/bottom padding) according to
   * https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
   * test_pooling_forward.cpp, enlarging it until MKL-DNN's floor-based
   * output size formula reproduces oh_ and ow_.
   */
  mkldnn::memory::dims getPaddingR() const {
    mkldnn::memory::dims padR = {ph_, pw_};
    for (int i = 0; i < 2; ++i) {
      if ((ih_ + ph_ + padR[0] - fh_) / sh_ + 1 < oh_) {
        ++padR[0];
      }
      if ((iw_ + pw_ + padR[1] - fw_) / sw_ + 1 < ow_) {
        ++padR[1];
      }
    }
    return padR;
  }
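  // Worked example of the adjustment above (numbers are illustrative, not
  // from the source): with ih_ = 5, fh_ = 2, sh_ = 2, ph_ = 0 and ceil-mode
  // output oh_ = (5 - 2 + 2 - 1) / 2 + 1 = 3, the first check gives
  // (5 + 0 + 0 - 2) / 2 + 1 = 2 < 3, so padR[0] grows to 1; the second pass
  // gives (5 + 0 + 1 - 2) / 2 + 1 = 3, which matches oh_, so it stops.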
};

}  // namespace paddle