/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include "paddle/fluid/operators/jit/more/mkl/mkl.h"
#include "paddle/fluid/operators/jit/refer/refer.h"
#include "paddle/fluid/operators/jit/registry.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/dynload/mklml.h"

namespace paddle {
namespace operators {
namespace jit {
namespace more {
namespace mkl {
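
// MKL-backed implementations of the jit kernels. Each function below forwards
// to the dynamically loaded MKL (CBLAS/VML) routine for its element type; the
// UseMe() specializations further down decide when this backend should be
// preferred over the reference and JIT-generated implementations.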

template <>
void MatMul<float>(const float* a, const float* b, float* c,
                   const matmul_attr_t* attr) {
  platform::dynload::cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                                 attr->m, attr->n, attr->k, 1.f, a, attr->k, b,
                                 attr->n, 0.f, c, attr->n);
}

template <>
void MatMul<double>(const double* a, const double* b, double* c,
                    const matmul_attr_t* attr) {
  platform::dynload::cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                                 attr->m, attr->n, attr->k, 1.0, a, attr->k, b,
                                 attr->n, 0.0, c, attr->n);
}
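// Both specializations compute the row-major product C = A * B with A of
// shape m x k, B of shape k x n and C of shape m x n. alpha is 1 and beta is
// 0, so any previous contents of C are overwritten.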

template <>
void VMul<float>(const float* x, const float* y, float* z, int n) {
  platform::dynload::vsMul(n, x, y, z);
}

template <>
void VMul<double>(const double* x, const double* y, double* z, int n) {
  platform::dynload::vdMul(n, x, y, z);
}

template <>
void VAdd<float>(const float* x, const float* y, float* z, int n) {
  platform::dynload::vsAdd(n, x, y, z);
}

template <>
void VAdd<double>(const double* x, const double* y, double* z, int n) {
  platform::dynload::vdAdd(n, x, y, z);
}

template <>
void VScal<float>(const float* a, const float* x, float* y, int n) {
  if (x == y) {
    platform::dynload::cblas_sscal(n, *a, y, 1);
  } else {
    refer::VScal<float>(a, x, y, n);
  }
}

template <>
void VScal<double>(const double* a, const double* x, double* y, int n) {
  if (x == y) {
    platform::dynload::cblas_dscal(n, *a, y, 1);
  } else {
    refer::VScal<double>(a, x, y, n);
  }
}
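// cblas_?scal only scales a vector in place, so the MKL path is used only when
// x and y alias the same buffer; otherwise fall back to the reference kernel,
// which writes y[i] = a[0] * x[i].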

template <>
void VExp<float>(const float* x, float* y, int n) {
  platform::dynload::vsExp(n, x, y);
}

template <>
void VExp<double>(const double* x, double* y, int n) {
  platform::dynload::vdExp(n, x, y);
}

template <>
void VSquare<float>(const float* x, float* y, int n) {
  platform::dynload::vsSqr(n, x, y);
}

template <>
void VSquare<double>(const double* x, double* y, int n) {
  platform::dynload::vdSqr(n, x, y);
}

template <>
void VCopy<float>(const float* x, float* y, int n) {
  platform::dynload::cblas_scopy(n, x, 1, y, 1);
}

template <>
void VCopy<double>(const double* x, double* y, int n) {
  platform::dynload::cblas_dcopy(n, x, 1, y, 1);
}

template <>
void VAXPY<float>(float a, const float* x, float* y, int n) {
  platform::dynload::cblas_saxpy(n, a, x, 1, y, 1);
}

template <>
void VAXPY<double>(double a, const double* x, double* y, int n) {
  platform::dynload::cblas_daxpy(n, a, x, 1, y, 1);
}
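// VCopy and VAXPY map directly onto BLAS level-1 routines: ?copy copies x into
// y and ?axpy computes y = a * x + y, both with unit stride.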

template <>
void ASum<float>(const float* x, float* res, int n) {
  res[0] = platform::dynload::cblas_sasum(n, x, 1);
}

template <>
void ASum<double>(const double* x, double* res, int n) {
  res[0] = platform::dynload::cblas_dasum(n, x, 1);
}
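// cblas_?asum returns the sum of absolute values of x; store it in res[0].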

// TODO(TJ): tune these thresholds carefully on AVX, AVX2 and AVX512
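// Each UseMe() below reports whether the MKL version is expected to outperform
// the other registered implementations for the given problem size; the
// thresholds (e.g. d > 512 for VMul on AVX-512) are empirical.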
template <>
bool VMulKernel<float>::UseMe(const int& d) const {
  return platform::MayIUse(platform::avx512f) && d > 512;
}

template <>
bool VAddKernel<float>::UseMe(const int& d) const {
  return platform::MayIUse(platform::avx) && d > 512;
}

template <>
bool VScalKernel<float>::UseMe(const int& d) const {
  return platform::MayIUse(platform::avx512f) && d > 512;
}

template <>
bool VExpKernel<float>::UseMe(const int& d) const {
  return d > 7;
}

template <>
bool VSquareKernel<float>::UseMe(const int& d) const {
  return d > 7;
}

template <>
bool VSigmoidKernel<float>::UseMe(const int& d) const {
  return d > 7;
}

template <>
bool VTanhKernel<float>::UseMe(const int& d) const {
  return d > 7;
}

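// The sequence-pooling, embedding-pooling and SGD kernels below always prefer
// the MKL path, regardless of the attribute values.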
template <>
bool SeqPoolKernel<float>::UseMe(const seq_pool_attr_t& attr) const {
  return true;
}

template <>
bool SeqPoolKernel<double>::UseMe(const seq_pool_attr_t& attr) const {
  return true;
}

template <>
bool EmbSeqPoolKernel<float>::UseMe(const emb_seq_pool_attr_t& attr) const {
  return true;
}

template <>
bool EmbSeqPoolKernel<double>::UseMe(const emb_seq_pool_attr_t& attr) const {
  return true;
}

template <>
bool SgdKernel<float>::UseMe(const sgd_attr_t& attr) const {
  return true;
}

template <>
bool SgdKernel<double>::UseMe(const sgd_attr_t& attr) const {
  return true;
}

template <>
bool MatMulKernel<float>::UseMe(const matmul_attr_t& attr) const {
  return platform::MayIUse(platform::avx);
}

template <>
bool MatMulKernel<double>::UseMe(const matmul_attr_t& attr) const {
  return true;
}

template <>
bool SoftmaxKernel<float>::UseMe(const int& d) const {
  // tuned on avx2
  return platform::MayIUse(platform::avx) && d < 60;
}

#define ALWAYS_USE_ME_WITH_DOUBLE(func)                  \
  template <>                                            \
  bool func##Kernel<double>::UseMe(const int& d) const { \
    return true;                                         \
  }

ALWAYS_USE_ME_WITH_DOUBLE(VMul);
ALWAYS_USE_ME_WITH_DOUBLE(VAdd);
ALWAYS_USE_ME_WITH_DOUBLE(VScal);
ALWAYS_USE_ME_WITH_DOUBLE(VExp);
ALWAYS_USE_ME_WITH_DOUBLE(VSigmoid);
ALWAYS_USE_ME_WITH_DOUBLE(VTanh);
ALWAYS_USE_ME_WITH_DOUBLE(VSquare);
ALWAYS_USE_ME_WITH_DOUBLE(Softmax);

#undef ALWAYS_USE_ME_WITH_DOUBLE
}  // namespace mkl
}  // namespace more
}  // namespace jit
}  // namespace operators
}  // namespace paddle

namespace mkl = paddle::operators::jit::more::mkl;

#define REGISTER_MKL_KERNEL(key, func)                        \
  REGISTER_JITKERNEL_MORE(key, mkl, mkl::func##Kernel<float>, \
                          mkl::func##Kernel<double>)
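
// Each REGISTER_MKL_KERNEL line registers the float and double MKL kernels
// under the given kernel key, alongside the reference and JIT-generated
// implementations for that key. A registered kernel is later obtained through
// the jit kernel lookup API, roughly (hypothetical sketch; the exact helper
// and tuple names may differ between versions):
//   auto vmul = jit::Get<jit::kVMul, jit::XYZNTuples<float>,
//                        platform::CPUPlace>(n);
//   vmul(x, y, z, n);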

REGISTER_MKL_KERNEL(kMatMul, MatMul);
REGISTER_MKL_KERNEL(kVMul, VMul);
REGISTER_MKL_KERNEL(kVAdd, VAdd);
REGISTER_MKL_KERNEL(kVScal, VScal);
REGISTER_MKL_KERNEL(kVExp, VExp);
REGISTER_MKL_KERNEL(kVSquare, VSquare);
REGISTER_MKL_KERNEL(kVSigmoid, VSigmoid);
REGISTER_MKL_KERNEL(kVTanh, VTanh);
REGISTER_MKL_KERNEL(kSeqPool, SeqPool);
REGISTER_MKL_KERNEL(kEmbSeqPool, EmbSeqPool);
REGISTER_MKL_KERNEL(kSoftmax, Softmax);
REGISTER_MKL_KERNEL(kSgd, Sgd);

#undef REGISTER_MKL_KERNEL