/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>
#include "Matrix.h"
#include "mkldnn.hpp"
#include "paddle/parameter/Parameter.h"

namespace paddle {

class MKLDNNMatrix;
typedef std::shared_ptr<MKLDNNMatrix> MKLDNNMatrixPtr;

#define CHECK_PRIMITIVE_DESC_EQ(MAT, PD, ...)                        \
  CHECK(MAT) << " can not be empty.";                                \
  CHECK(MAT->getPrimitiveDesc() == PD)                               \
      << #MAT "->getPrimitiveDesc() and " #PD " should be equal.\n " \
      << "" __VA_ARGS__;

/**
 * @brief MKLDNN Matrix.
 * A CpuMatrix that also acts as an mkldnn::memory, so the same CPU buffer
 * can be used by both PaddlePaddle and MKL-DNN primitives.
 */
class MKLDNNMatrix : public CpuMatrix, public mkldnn::memory {
public:
  MKLDNNMatrix(CpuMatrixPtr m, mkldnn::memory::primitive_desc pd)
      : CpuMatrix(m->getData(), m->getHeight(), m->getWidth(), false),
        mkldnn::memory(pd, m->getData()),
        m_(m) {}

  ~MKLDNNMatrix() {}

  /**
   * Create MKLDNNMatrix from a MatrixPtr and memory primitive_desc
   */
  static MKLDNNMatrixPtr create(mkldnn::memory::primitive_desc pd,
                                MatrixPtr m = nullptr);

  /**
   * Create MKLDNNMatrix from a MatrixPtr and memory details
   * (dims, format, engine and data type)
   */
  static MKLDNNMatrixPtr create(
      mkldnn::memory::dims dims,
      mkldnn::memory::format fmt,
      mkldnn::engine& eg,
      MatrixPtr m = nullptr,
      mkldnn::memory::data_type dtype = mkldnn::memory::data_type::f32);
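
  // Example (a sketch; the sizes and the engine are illustrative): wrap an
  // existing CpuMatrix of shape N x (C*H*W) as an nchw MKL-DNN memory.
  //
  //   mkldnn::engine eng(mkldnn::engine::cpu, 0);
  //   MatrixPtr mat = Matrix::create(bs, ic * ih * iw,
  //                                  /*trans=*/false, /*useGpu=*/false);
  //   MKLDNNMatrixPtr in = MKLDNNMatrix::create(
  //       {bs, ic, ih, iw}, mkldnn::memory::format::nchw, eng, mat);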

  /**
   * Create a memory primitive descriptor.
   * Defaults to the f32 data type.
   */
  static mkldnn::memory::primitive_desc createPrimitiveDesc(
      const mkldnn::memory::dims dims,
      const mkldnn::memory::format& fmt,
      const mkldnn::engine& eg,
      const mkldnn::memory::data_type& dtype = mkldnn::memory::data_type::f32) {
    return mkldnn::memory::primitive_desc(memory::desc(dims, dtype, fmt), eg);
  }
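
  // Example (a sketch with illustrative names/sizes): build a primitive
  // descriptor for an N x C output in nc format, then create a matrix for it.
  //
  //   auto pd = MKLDNNMatrix::createPrimitiveDesc(
  //       {bs, oc}, mkldnn::memory::format::nc, eng);
  //   MKLDNNMatrixPtr out = MKLDNNMatrix::create(pd);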

  /**
   * Create a memory descriptor.
   * Defaults to the "any" format and the f32 data type.
   */
  static mkldnn::memory::desc createMemoryDesc(
      const mkldnn::memory::dims dims,
      const mkldnn::memory::format& fmt = mkldnn::memory::format::any,
      const mkldnn::memory::data_type& dtype = mkldnn::memory::data_type::f32) {
    return mkldnn::memory::desc(dims, dtype, fmt);
  }
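
  // Note: a memory::desc created with format::any is typically passed to an
  // operation descriptor so MKL-DNN can choose its preferred layout, e.g.
  // (illustrative sizes):
  //
  //   auto md = MKLDNNMatrix::createMemoryDesc({bs, ic, ih, iw});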

  /**
   * Create a reorder primitive.
   * Creates an mkldnn::reorder handle that converts the src MKLDNNMatrix to dst.
   * checkData: whether to check the data handles of src and dst.
   *            If true, the data handles must not be equal;
   *            otherwise they are not checked, so the created reorder
   *            may operate on an in-place buffer.
   *            Do not set it to false unless you can guarantee that an
   *            in-place reorder is correct for your case.
   */
  static std::shared_ptr<mkldnn::reorder> createReorder(
      const MKLDNNMatrixPtr& src,
      const MKLDNNMatrixPtr& dst,
      bool checkData = true);
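
  // Example (a sketch; `src`, `dst` and the eager-stream submission follow the
  // common MKL-DNN v0.x pattern and are illustrative):
  //
  //   auto cvt = MKLDNNMatrix::createReorder(src, dst);
  //   if (cvt) {
  //     mkldnn::stream(mkldnn::stream::kind::eager).submit({*cvt}).wait();
  //   }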

  void copyFrom(const Matrix& src) {
    // TODO(TJ): reorder data if this format is not nchw or x
    m_->copyFrom(src);
  }

  void copyTo(Matrix& dst) {
    // TODO(TJ): reorder data if this format is not nchw or x
    dst.copyFrom(*m_);
  }

public:
  /**
   * Reorder this MKLDNNMatrix from another format.
   * Supports in-place reorder.
   * @note: this function only reorders the data layout;
   *        it will NOT change this matrix's original dim or format info.
   */
  void reorderDataFrom(const MKLDNNMatrixPtr& m,
                       memory::format srcFmt,
                       memory::dims targetDim);

  /**
   * Reorder this MKLDNNMatrix to another format.
   * Supports in-place reorder.
   * @note: this function only reorders the data layout;
   *        it will NOT change the dst dim or format info.
   */
  void reorderDataTo(const MKLDNNMatrixPtr& m,
                     memory::format dstFmt,
                     memory::dims targetDim);
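
  // Example (a sketch with illustrative dims): load external nchw data into
  // this matrix's internal layout, or export it back, without touching the
  // matrices' own dim/format info.
  //
  //   internal->reorderDataFrom(
  //       external, mkldnn::memory::format::nchw, {bs, ic, ih, iw});
  //   internal->reorderDataTo(
  //       external, mkldnn::memory::format::nchw, {bs, ic, ih, iw});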

  /**
   * Dimensionality reduction.
   * Change the format "nchw --> nc" or "oihw --> oi" when h and w are both 1.
   */
  void downSpatial();
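
  // Example: a weight described as oihw with dims {oc, ic, 1, 1} becomes a
  // 2-D oi memory with dims {oc, ic}; since h == w == 1, the underlying data
  // layout is unchanged.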

  /**
   * Set the memory data handle.
   * Caution: this will not check the buffer size of the data;
   *          the caller must ensure it is large enough.
   */
  void setData(real* data) {
    set_data_handle(data);
    CpuMatrix::setData(data);
    m_.reset();
  }

  /**
   * override Matrix::getData
   * check data before return
   */
  real* getData() override {
    CHECK_EQ((void*)data_, get_data_handle());
    return data_;
  }

  const real* getData() const override {
    CHECK_EQ((void*)data_, get_data_handle());
    return data_;
  }

  /**
   * Get primitive descriptor.
   */
  mkldnn::memory::primitive_desc getPrimitiveDesc() {
    return this->get_primitive_desc();
  }

  /**
   * Get memory descriptor.
   */
  mkldnn::memory::desc getMemoryDesc() { return getPrimitiveDesc().desc(); }

  /**
   * Get dimensions.
   */
  mkldnn::memory::dims getDims() {
    mkldnn::memory::desc md = getMemoryDesc();
    const int* src = md.data.dims;
    int ndims = md.data.ndims;
    mkldnn::memory::dims dst;
    dst.resize(ndims);
    for (int i = 0; i < ndims; ++i) {
      dst[i] = src[i];
    }
    return dst;
  }

  /**
   * Get format.
   */
  mkldnn::memory::format getFormat() {
    return (mkldnn::memory::format)(getMemoryDesc().data.format);
  }

  /**
   * Get memory data type.
   */
  mkldnn::memory::data_type getDtype() {
    return (mkldnn::memory::data_type)(getMemoryDesc().data.data_type);
  }

  /**
   * Get engine.
   */
  mkldnn::engine getEngine() { return getPrimitiveDesc().get_engine(); }

protected:
  /**
   * Do the reorder once.
   * Supports in-place reorder.
   */
  void reorderOnce(void* srcData,
                   void* dstData,
                   memory::format srcFmt,
                   memory::format dstFmt,
                   memory::dims dm);

private:
  // save the CpuMatrixPtr in case the buffer is released outside
  CpuMatrixPtr m_;
};

}  // namespace paddle