/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <functional>
#include <memory>  // for shared_ptr
#include <string>
#include <unordered_map>
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/macros.h"

// Note: Only CPU is supported for now.
namespace paddle {
namespace operators {
namespace math {
namespace jitkernel {

// TODO(TJ): move these to some proper place
// Input clamping bounds used by the sigmoid / exp kernels.
#define SIGMOID_THRESHOLD_MIN -40.0
#define SIGMOID_THRESHOLD_MAX 13.0
#define EXP_MAX_INPUT 40.0
// Number of floats held by one XMM / YMM / ZMM register.
#define XMM_FLOAT_BLOCK 4
#define YMM_FLOAT_BLOCK 8
#define ZMM_FLOAT_BLOCK 16

// Problem-size category, in floats, relative to the block sizes above:
// kLT8 (d < 8), kEQ8 (d == 8), kGT8LT16 (8 < d < 16), kEQ16 (d == 16),
// kGT16 (d > 16).
typedef enum { kLT8, kEQ8, kGT8LT16, kEQ16, kGT16 } jit_block;

class Kernel {
 public:
  Kernel() = default;
  virtual ~Kernel() = default;
  // TODO(TJ): below members should be deprecated.
  int num_{0};
  int end_{0};
  int rest_{0};
  DISABLE_COPY_AND_ASSIGN(Kernel);
};

class KernelPool {
 public:
  static KernelPool &Instance();

  template <typename Ker, typename... ARGS>
  std::shared_ptr<const Ker> Get(ARGS... args);

  std::shared_ptr<const Kernel> Get(const std::string &key) const;

 private:
  KernelPool() = default;
  std::unordered_map<std::string, std::shared_ptr<const Kernel>> kers_;

  DISABLE_COPY_AND_ASSIGN(KernelPool);
};
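
// A minimal usage sketch (illustrative only: the exact Get() arguments depend
// on how each kernel specialization is registered; a problem size d is
// assumed here):
//
//   namespace jit = paddle::operators::math::jitkernel;
//   const auto ker = jit::KernelPool::Instance().Get<jit::VMulKernel<float>>(d);
//   ker->Compute(x, y, z, d);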

template <typename T>
class VMulKernel : public Kernel {
 public:
  // z = x .* y
  void (*Compute)(const T *, const T *, T *, int);
};

template <typename T>
class VAddKernel : public Kernel {
 public:
  // z = x + y
  void (*Compute)(const T *, const T *, T *, int);
};

template <typename T>
class VAddReluKernel : public Kernel {
 public:
  // z = relu(x + y)
  void (*Compute)(const T *, const T *, T *, int);
};

template <typename T>
class VScalKernel : public Kernel {
 public:
  // y = a.*x
  void (*Compute)(const T *, const T *, T *, int);
};

template <typename T>
class VAddBiasKernel : public Kernel {
 public:
  // y = a.+x
  void (*Compute)(const T *, const T *, T *, int);
};

#ifdef PADDLE_WITH_MKLDNN
template <typename T>
class EltwiseMulnChw16cNCKernel : public Kernel {
 public:
  // nChw16c = nChw16c .* NC
  void (*Compute)(const float *, const float *, float *, int, int);
};
#endif

template <typename T>
class VActKernel : public Kernel {
 public:
  // y = f(x), element-wise activation
  void (*Compute)(const T *, T *, int);
};

template <typename T>
class VReluKernel : public VActKernel<T> {};

template <typename T>
class VIdentityKernel : public VActKernel<T> {};

template <typename T>
class VExpKernel : public VActKernel<T> {};

template <typename T>
class VSigmoidKernel : public VActKernel<T> {};

template <typename T>
class VTanhKernel : public VActKernel<T> {};

template <typename T>
class LSTMKernel : public Kernel {
 public:
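  // One LSTM step (as suggested by the parameter names): compute the new cell
  // state ct and hidden state ht from the gate pre-activations in `gates` and
  // the previous cell state ct_1.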
  virtual void ComputeCtHt(T *gates, const T *ct_1, T *ct, T *ht,
                           /* below only used in peephole*/
                           const T *wp_data = nullptr,
                           T *checked = nullptr) const = 0;

  // compute c1 and h1 without c0 or h0
  virtual void ComputeC1H1(T *gates, T *ct, T *ht,
                           /* below only used in peephole*/
                           const T *wp_data = nullptr) const = 0;
};

template <typename T>
class GRUKernel : public Kernel {
 public:
  // compute h1 without h0
  virtual void ComputeH1(T *gates, T *ht) const = 0;
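  // ComputeHtPart1/Part2 split one GRU step in two (presumably so the caller
  // can run the candidate-state GEMM in between); both read the previous
  // hidden state ht_1.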
  virtual void ComputeHtPart1(T *gates, const T *ht_1, T *ht) const = 0;
  virtual void ComputeHtPart2(T *gates, const T *ht_1, T *ht) const = 0;
};

template <typename T>
class CRFDecodeKernel : public Kernel {
 public:
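  // Presumably a Viterbi-style decode: x holds the per-tag scores, w the
  // transition weights, alpha the running best scores, and track the
  // back-pointers used to recover the best path.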
  virtual void Compute(const int seq_len, const T *x, const T *w, T *alpha,
                       int *track) const = 0;
};

template <typename T>
class LayerNormKernel : public Kernel {
 public:
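  // Presumably the standard layer normalization over each of the `height`
  // rows: out = (x - mean) / sqrt(var + epsilon) * scale + bias.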
  virtual void Compute(T *x, T *out, T *mean, T *var, const T *scale,
                       const T *bias, int height,
                       const float epsilon) const = 0;
};

}  // namespace jitkernel
}  // namespace math
}  // namespace operators
}  // namespace paddle