/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/jit_kernel.h"
#include <sys/time.h>
#include <cmath>    // for exp
#include <cstring>  // for memcpy
#include <random>
#include <string>
#include <vector>
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "gtest/gtest.h"

#ifdef PADDLE_WITH_MKLML
#include "paddle/fluid/platform/dynload/mklml.h"
#endif

#ifdef __AVX__
#include <immintrin.h>
#endif

constexpr int repeat = 20000;

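// Returns the current wall-clock time in microseconds; used for the simple benchmarks below.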
inline double GetCurrentUS() {
  struct timeval time;
  gettimeofday(&time, NULL);
  return 1e+6 * time.tv_sec + time.tv_usec;
}

template <typename T>
void RandomVec(const int n, T* a, const T lower = static_cast<T>(-20.f),
               const T upper = static_cast<T>(20.f)) {
  static unsigned int seed = 100;
  std::mt19937 rng(seed++);
  std::uniform_real_distribution<double> uniform_dist(0, 1);
  for (int i = 0; i < n; ++i) {
    a[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
  }
}

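// Each test below follows the same pattern: run a scalar reference
// implementation, optionally an AVX intrinsic and/or MKL variant, and the JIT
// kernel obtained from the KernelPool, log the per-iteration timings, and
// check the kernel output against the reference with EXPECT_NEAR.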
void vrelu_ref(const int n, const float* x, float* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = x[i] > 0.f ? x[i] : 0.f;
  }
}

#if defined __AVX__ || defined __AVX2__
void vrelu_intri8(const int n, const float* x, float* y) {
  __m256 tmp = _mm256_loadu_ps(x);
  tmp = _mm256_max_ps(tmp, _mm256_setzero_ps());
  _mm256_storeu_ps(y, tmp);
}
#endif

TEST(JitKernel, vrelu) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 256, 512}) {
    std::vector<float> x(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data(), -10.f, 1.f);
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VReluKernel<float>>(d);
    const float* x_data = x.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vrelu_ref(d, x_data, zref_data);
    }
    auto trefe = GetCurrentUS();
#if defined __AVX__ || defined __AVX2__
    if (d == 8) {
      auto si0 = GetCurrentUS();
      for (int i = 0; i < repeat; ++i) {
        vrelu_intri8(d, x_data, zref_data);
      }
      auto si1 = GetCurrentUS();
      VLOG(3) << "Vec size 8 intr takes: " << (si1 - si0) / repeat;
    }
#endif
    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(x_data, ztgt_data);
    }
    auto ttgte = GetCurrentUS();
    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
            << " us, tgt takes: " << (ttgte - ttgts) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}

void vaddbias_ref(const int n, const float a, const float* x, float* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = x[i] + a;
  }
}

TEST(JitKernel, vaddbias) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 64, 100, 128, 256}) {
    std::vector<float> x(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data(), -2.f, 2.f);
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VAddBiasKernel<float>>(d);
    const float a = 2.f;
    const float* x_data = x.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vaddbias_ref(d, a, x_data, zref_data);
    }
    auto trefe = GetCurrentUS();
    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(a, x_data, ztgt_data);
    }
    auto ttgte = GetCurrentUS();

    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
            << " us, tgt takes: " << (ttgte - ttgts) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}

void vexp_ref(const int n, const float* x, float* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = std::exp(x[i]);
  }
}

#ifdef PADDLE_WITH_MKLML
void vexp_mkl(const int n, const float* x, float* y) {
  paddle::platform::dynload::vsExp(n, x, y);
}
#endif

TEST(JitKernel, vexp) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 128, 256}) {
    std::vector<float> x(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data(), -2.f, 2.f);
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VExpKernel<float>>(d);
    const float* x_data = x.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vexp_ref(d, x_data, zref_data);
    }
    auto trefe = GetCurrentUS();

#ifdef PADDLE_WITH_MKLML
    auto tmkls = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vexp_mkl(d, x_data, zref_data);
    }
    auto tmkle = GetCurrentUS();
#endif

    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(x_data, ztgt_data);
    }
    auto ttgte = GetCurrentUS();

    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
#ifdef PADDLE_WITH_MKLML
            << " us, mkl takes: " << (tmkle - tmkls) / repeat << " us, "
#else
            << " us, "
#endif
            << "tgt takes: " << (ttgte - ttgts) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}

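// Sigmoid with the input clipped to [SIGMOID_THRESHOLD_MIN, SIGMOID_THRESHOLD_MAX].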
inline float _sigmoid(float x) {
  const float min = SIGMOID_THRESHOLD_MIN;
  const float max = SIGMOID_THRESHOLD_MAX;
  float tmp = (x < min) ? min : ((x > max) ? max : x);
  return 1.f / (1.f + std::exp(-tmp));
}

void vsigmoid_ref(const int n, const float* x, float* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = _sigmoid(x[i]);
  }
}

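// Sigmoid composed from the JIT VExp kernel: clip x, negate, exponentiate,
// then take 1 / (1 + e^-x).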
void vsigmoid_better(
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VExpKernel<float>>& vexp,
    const int n, const float* x, float* y) {
  const float min = SIGMOID_THRESHOLD_MIN;
  const float max = SIGMOID_THRESHOLD_MAX;
  for (int i = 0; i < n; ++i) {
    y[i] = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]);
    y[i] = 0.f - y[i];
  }
  vexp->Compute(y, y);
  for (int i = 0; i < n; ++i) {
    y[i] = 1.f / (1.f + y[i]);
  }
}

TEST(JitKernel, vsigmoid) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 32, 64, 100, 128, 256}) {
    std::vector<float> x(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data(), -2.f, 2.f);
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VSigmoidKernel<float>>(d);
    const auto& vexp =
        jit::KernelPool::Instance().template Get<jit::VExpKernel<float>>(d);
    const float* x_data = x.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto tmkls = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vsigmoid_better(vexp, d, x_data, zref_data);
    }
    auto tmkle = GetCurrentUS();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vsigmoid_ref(d, x_data, zref_data);
    }
    auto trefe = GetCurrentUS();
    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(x_data, ztgt_data);
    }
    auto ttgte = GetCurrentUS();

    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
            << " us, better(jit exp) takes: " << (tmkle - tmkls) / repeat
            << " us, tgt takes: " << (ttgte - ttgts) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}

inline float _tanh(float x) { return 2.f * _sigmoid(2.f * x) - 1.f; }

void vtanh_ref(const int n, const float* x, float* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = _tanh(x[i]);
  }
}

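// tanh composed from existing kernels via the identity tanh(x) = 2 * sigmoid(2x) - 1.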
void vtanh_better(
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VScalKernel<float>>& vscal,
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VSigmoidKernel<float>>&
        vsigmoid,
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VAddBiasKernel<float>>&
        vaddbias,
    const int n, const float* x, float* y) {
  const float tmp1 = 2.f;
  vscal->Compute(&tmp1, x, y, n);
  vsigmoid->Compute(y, y);
  vscal->Compute(&tmp1, y, y, n);
  vaddbias->Compute(-1.f, y, y);
}

TEST(JitKernel, vtanh) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 32, 64, 100, 128, 256}) {
    std::vector<float> x(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data(), -2.f, 2.f);
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VTanhKernel<float>>(d);
    const auto& vscal =
        jit::KernelPool::Instance().template Get<jit::VScalKernel<float>>(d);
    const auto& vsigmoid =
        jit::KernelPool::Instance().template Get<jit::VSigmoidKernel<float>>(d);
    const auto& vaddbias =
        jit::KernelPool::Instance().template Get<jit::VAddBiasKernel<float>>(d);
    const float* x_data = x.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto tmkls = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vtanh_better(vscal, vsigmoid, vaddbias, d, x_data, zref_data);
    }
    auto tmkle = GetCurrentUS();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vtanh_ref(d, x_data, zref_data);
    }
    auto trefe = GetCurrentUS();
    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(x_data, ztgt_data);
    }
    auto ttgte = GetCurrentUS();

    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
            << " us, better(jit exp) takes: " << (tmkle - tmkls) / repeat
            << " us, tgt takes: " << (ttgte - ttgts) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}

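// Reference LSTM cell/hidden update. gates holds [cand, i, f, o], each of
// width d; the candidate part gets tanh, the other three gates get sigmoid.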
void lstm_ctht_ref(
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VSigmoidKernel<float>>&
        vsigmoid_3d,
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VTanhKernel<float>>& vtanh_d,
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VExpKernel<float>>& vexp_1,
    const int d, float* gates, const float* ct_1, float* ct, float* ht) {
  vsigmoid_3d->Compute(gates + d, gates + d);
  vtanh_d->Compute(gates, gates);
  const float *i = gates + d, *f = gates + d * 2, *o = gates + d * 3;
  const float min = SIGMOID_THRESHOLD_MIN;
  const float max = SIGMOID_THRESHOLD_MAX;
  for (int k = 0; k < d; ++k) {
    // C_t = C_t-1 * fgated + cand_gated * igated
    ct[k] = ct_1[k] * f[k] + gates[k] * i[k];
    // H_t = act_cell(C_t) * ogated
    float tmp = ct[k] * 2;
    tmp = 0.f - ((tmp < min) ? min : ((tmp > max) ? max : tmp));
    vexp_1->Compute(&tmp, &tmp);
    tmp = 2.f / (1.f + tmp) - 1.f;
    ht[k] = tmp * o[k];
  }
}

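// Same update as lstm_ctht_ref, but composed entirely from vmul/vadd/vtanh
// kernels; gates is reused as scratch space.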
void lstm_ctht_better(
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VSigmoidKernel<float>>&
        vsigmoid_3d,
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VTanhKernel<float>>& vtanh_d,
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VMulKernel<float>>& vmul_d,
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VAddKernel<float>>& vadd_d,
    const int d, float* gates, const float* ct_1, float* ct, float* ht) {
  int d2 = d * 2;
  vsigmoid_3d->Compute(gates + d, gates + d);
  vtanh_d->Compute(gates, gates);
  vmul_d->Compute(gates, gates + d, gates + d, d);
  vmul_d->Compute(ct_1, gates + d2, gates + d2, d);
  vadd_d->Compute(gates + d, gates + d2, ct, d);
  /* H_t = act_cell(C_t) * ogated */
  vtanh_d->Compute(ct, gates + d2);
  vmul_d->Compute(gates + d2, gates + d * 3, ht, d);
}

TEST(JitKernel, lstm) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 32, 64, 100}) {
    int d4 = d * 4;
    int d3 = d * 3;
    std::vector<float> x(d4), xref(d4);
    std::vector<float> ct_1(d), ct_tgt(d), ht_tgt(d);
    std::vector<float> ct_ref(d), ht_ref(d);
    RandomVec<float>(d4, x.data(), -2.f, 2.f);
    RandomVec<float>(d, ct_1.data(), -2.f, 2.f);
    memcpy(xref.data(), x.data(), sizeof(float) * d4);
    std::string act_gate = "sigmoid", act_cand = "tanh", act_cell = "tanh";
    const auto& ker =
        jit::KernelPool::Instance()
            .template Get<jit::LSTMKernel<float>, const std::string&,
                          const std::string&, const std::string&>(
                act_gate, act_cand, act_cell, d, false);
    // below kernels are used to compute refer
    const auto& vsigmoid_3d =
        jit::KernelPool::Instance().template Get<jit::VSigmoidKernel<float>>(
            d3);
    const auto& vtanh_d =
        jit::KernelPool::Instance().template Get<jit::VTanhKernel<float>>(d);
    const auto& vexp_1 =
        jit::KernelPool::Instance().template Get<jit::VExpKernel<float>>(1);
    const auto& vmul_d =
        jit::KernelPool::Instance().template Get<jit::VMulKernel<float>>(d);
    const auto& vadd_d =
        jit::KernelPool::Instance().template Get<jit::VAddKernel<float>>(d);

    float* x_data = x.data();
    float* xref_data = xref.data();
    const float* ct_1_data = ct_1.data();
    float* ct_tgt_data = ct_tgt.data();
    float* ht_tgt_data = ht_tgt.data();
    float* ct_ref_data = ct_ref.data();
    float* ht_ref_data = ht_ref.data();
    // compute once to check correctness
    lstm_ctht_ref(vsigmoid_3d, vtanh_d, vexp_1, d, xref_data, ct_1_data,
                  ct_ref_data, ht_ref_data);
    ker->ComputeCtHt(x_data, ct_1_data, ct_tgt_data, ht_tgt_data);
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ct_tgt_data[i], ct_ref_data[i], 1e-3);
      EXPECT_NEAR(ht_tgt_data[i], ht_ref_data[i], 1e-3);
    }

    auto tmkls = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      lstm_ctht_better(vsigmoid_3d, vtanh_d, vmul_d, vadd_d, d, xref_data,
                       ct_1_data, ct_ref_data, ht_ref_data);
    }
    auto tmkle = GetCurrentUS();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      lstm_ctht_ref(vsigmoid_3d, vtanh_d, vexp_1, d, xref_data, ct_1_data,
                    ct_ref_data, ht_ref_data);
    }
    auto trefe = GetCurrentUS();
    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->ComputeCtHt(x_data, ct_1_data, ct_tgt_data, ht_tgt_data);
    }
    auto ttgte = GetCurrentUS();
    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
            << " us, better(jit) takes: " << (tmkle - tmkls) / repeat
            << " us, tgt takes: " << (ttgte - ttgts) / repeat;
  }
}

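// Reference scale: y = a * x; the _inp variants below scale x in place.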
void vscal_ref(const int n, const float a, const float* x, float* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = a * x[i];
  }
}
void vscal_inp_ref(const int n, const float a, float* x) {
  for (int i = 0; i < n; ++i) {
    x[i] = a * x[i];
  }
}
#if defined __AVX__ || defined __AVX2__
void vscal_intri8(const int n, const float a, const float* x, float* y) {
  __m256 tmp;
  __m256 scalar = _mm256_set1_ps(a);
  tmp = _mm256_loadu_ps(x);
  tmp = _mm256_mul_ps(tmp, scalar);
  _mm256_storeu_ps(y, tmp);
}
void vscal_inp_intri8(const int n, const float a, float* x) {
  __m256 tmp;
  __m256 scalar = _mm256_set1_ps(a);
  tmp = _mm256_loadu_ps(x);
  tmp = _mm256_mul_ps(tmp, scalar);
  _mm256_storeu_ps(x, tmp);
}
#endif

#ifdef PADDLE_WITH_MKLML
void vscal_inp_mkl(const int n, const float a, float* x) {
  paddle::platform::dynload::cblas_sscal(n, a, x, 1);
}
#endif

TEST(JitKernel, vscal) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 256, 512}) {
    std::vector<float> x(d), y(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data());
    std::memcpy(y.data(), x.data(), sizeof(float) * d);
    float a = 2.f;
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VScalKernel<float>>(d);
    const float* x_data = x.data();
    float* y_data = y.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vscal_ref(d, a, x_data, zref_data);
    }
    auto trefe = GetCurrentUS();
    auto trefs1 = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vscal_inp_ref(d, a, y_data);
    }
    auto trefe1 = GetCurrentUS();

#ifdef PADDLE_WITH_MKLML
    auto tmkls = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vscal_inp_mkl(d, a, y_data);
    }
    auto tmkle = GetCurrentUS();
#endif

#if defined __AVX__ || defined __AVX2__
    if (d == 8) {
      auto si0 = GetCurrentUS();
      for (int i = 0; i < repeat; ++i) {
        vscal_intri8(d, a, x_data, zref_data);
      }
      auto si1 = GetCurrentUS();
      auto si2 = GetCurrentUS();
      for (int i = 0; i < repeat; ++i) {
        vscal_inp_intri8(d, a, y_data);
      }
      auto si3 = GetCurrentUS();
      VLOG(3) << "Vec size 8 intr takes: " << (si1 - si0) / repeat
              << " us, inplace: " << (si3 - si2) / repeat;
    }
#endif

    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(&a, x_data, ztgt_data, d);
    }
    auto ttgte = GetCurrentUS();
    auto ttgts1 = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(&a, y_data, y_data, d);
    }
    auto ttgte1 = GetCurrentUS();
    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
            << " us, inplace takes: " << (trefe1 - trefs1) / repeat
#ifdef PADDLE_WITH_MKLML
            << " us, mkl inplace takes: " << (tmkle - tmkls) / repeat << " us, "
#else
            << " us, "
#endif
            << "tgt takes: " << (ttgte - ttgts) / repeat
            << " us, tgt inplace takes: " << (ttgte1 - ttgts1) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}
void vmul_ref(const int n, const float* x, const float* y, float* z) {
  for (int i = 0; i < n; ++i) {
    z[i] = x[i] * y[i];
  }
}

#if defined __AVX__ || defined __AVX2__
void vmul_intri8(const int n, const float* x, const float* y, float* z) {
  __m256 tmpx, tmpy;
  tmpx = _mm256_loadu_ps(x);
  tmpy = _mm256_loadu_ps(y);
  tmpx = _mm256_mul_ps(tmpx, tmpy);
  _mm256_storeu_ps(z, tmpx);
}
#endif

#ifdef PADDLE_WITH_MKLML
void vmul_mkl(const int n, const float* x, const float* y, float* z) {
  paddle::platform::dynload::vsMul(n, x, y, z);
}
#endif

TEST(JitKernel, vmul) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 20, 30, 256, 512, 1000, 1024}) {
    std::vector<float> x(d), y(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data());
    RandomVec<float>(d, y.data());
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VMulKernel<float>>(d);
    const float* x_data = x.data();
    const float* y_data = y.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vmul_ref(d, x_data, y_data, zref_data);
    }
    auto trefe = GetCurrentUS();

#ifdef PADDLE_WITH_MKLML
    auto tmkls = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vmul_mkl(d, x_data, y_data, zref_data);
    }
    auto tmkle = GetCurrentUS();
#endif

#if defined __AVX__ || defined __AVX2__
    if (d == 8) {
      auto si0 = GetCurrentUS();
      for (int i = 0; i < repeat; ++i) {
        vmul_intri8(d, x_data, y_data, zref_data);
      }
      auto si1 = GetCurrentUS();
      VLOG(3) << "Vec size 8 intr takes: " << (si1 - si0) / repeat;
    }
#endif

    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(x_data, y_data, ztgt_data, d);
    }
    auto ttgte = GetCurrentUS();

    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
#ifdef PADDLE_WITH_MKLML
            << " us, mkl takes: " << (tmkle - tmkls) / repeat << " us, "
#else
            << " us, "
#endif
            << "tgt takes: " << (ttgte - ttgts) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}

void vadd_ref(const int n, const float* x, const float* y, float* z) {
  for (int i = 0; i < n; ++i) {
    z[i] = x[i] + y[i];
  }
}

#if defined __AVX__ || defined __AVX2__
void vadd_intri8(const int n, const float* x, const float* y, float* z) {
  __m256 tmpx, tmpy;
  tmpx = _mm256_loadu_ps(x);
  tmpy = _mm256_loadu_ps(y);
  tmpx = _mm256_add_ps(tmpx, tmpy);
  _mm256_storeu_ps(z, tmpx);
}
#endif

#ifdef PADDLE_WITH_MKLML
void vadd_mkl(const int n, const float* x, const float* y, float* z) {
  paddle::platform::dynload::vsAdd(n, x, y, z);
}
#endif

TEST(JitKernel, vadd) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 256, 512}) {
    std::vector<float> x(d), y(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data());
    RandomVec<float>(d, y.data());
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VAddKernel<float>>(d);
    const float* x_data = x.data();
    const float* y_data = y.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vadd_ref(d, x_data, y_data, zref_data);
    }
    auto trefe = GetCurrentUS();

#ifdef PADDLE_WITH_MKLML
    auto tmkls = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vadd_mkl(d, x_data, y_data, zref_data);
    }
    auto tmkle = GetCurrentUS();
#endif

#if defined __AVX__ || defined __AVX2__
    if (d == 8) {
      auto si0 = GetCurrentUS();
      for (int i = 0; i < repeat; ++i) {
        vadd_intri8(d, x_data, y_data, zref_data);
      }
      auto si1 = GetCurrentUS();
      VLOG(3) << "Vec size 8 intr takes: " << (si1 - si0) / repeat;
    }
#endif

    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(x_data, y_data, ztgt_data, d);
    }
    auto ttgte = GetCurrentUS();

    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
#ifdef PADDLE_WITH_MKLML
            << " us, mkl takes: " << (tmkle - tmkls) / repeat << " us, "
#else
            << " us, "
#endif
            << "tgt takes: " << (ttgte - ttgts) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}

void vaddrelu_ref(const int n, const float* x, const float* y, float* z) {
  for (int i = 0; i < n; ++i) {
    z[i] = x[i] + y[i];
    z[i] = z[i] > 0 ? z[i] : 0;
  }
}
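// Fused add + ReLU composed from the separate VAdd and VRelu kernels.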
void vaddrelu_better(
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VAddKernel<float>>& vadd,
    const std::shared_ptr<
        const paddle::operators::math::jitkernel::VReluKernel<float>>& vrelu,
    const float* x, const float* y, float* z, int d) {
  vadd->Compute(x, y, z, d);
  vrelu->Compute(z, z);
}

TEST(JitKernel, vaddrelu) {
  namespace jit = paddle::operators::math::jitkernel;
  for (int d : {7, 8, 15, 16, 30, 256, 512}) {
    std::vector<float> x(d), y(d);
    std::vector<float> zref(d), ztgt(d);
    RandomVec<float>(d, x.data());
    RandomVec<float>(d, y.data());
    const auto& ker =
        jit::KernelPool::Instance().template Get<jit::VAddReluKernel<float>>(d);
    const auto& vadd =
        jit::KernelPool::Instance().template Get<jit::VAddKernel<float>>(d);
    const auto& vrelu =
        jit::KernelPool::Instance().template Get<jit::VReluKernel<float>>(d);
    const float* x_data = x.data();
    const float* y_data = y.data();
    float* ztgt_data = ztgt.data();
    float* zref_data = zref.data();
    auto trefs = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vaddrelu_ref(d, x_data, y_data, zref_data);
    }
    auto trefe = GetCurrentUS();
    auto tmkls = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      vaddrelu_better(vadd, vrelu, x_data, y_data, zref_data, d);
    }
    auto tmkle = GetCurrentUS();
    auto ttgts = GetCurrentUS();
    for (int i = 0; i < repeat; ++i) {
      ker->Compute(x_data, y_data, ztgt_data, d);
    }
    auto ttgte = GetCurrentUS();
    VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat
            << " us, better takes: " << (tmkle - tmkls) / repeat << " us, "
            << "tgt takes: " << (ttgte - ttgts) / repeat;
    for (int i = 0; i < d; ++i) {
      EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3);
    }
  }
}

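// Checks KernelPool caching and lookup: kernels built with different
// parameters or element types are distinct instances, and lookups by key
// return the cached kernel, or nullptr for unknown keys and platforms
// without JIT support.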
TEST(JitKernel, pool) {
  namespace jit = paddle::operators::math::jitkernel;
  const int frame_size = 4;
  std::string act_gate = "sigmoid", act_cand = "tanh", act_cell = "tanh";
  const auto& plstm1 =
      jit::KernelPool::Instance()
          .template Get<jit::LSTMKernel<float>, const std::string&,
                        const std::string&, const std::string&>(
              act_gate, act_cand, act_cell, frame_size, false);
  const auto& plstm2 =
      jit::KernelPool::Instance()
          .template Get<jit::LSTMKernel<float>, const std::string&,
                        const std::string&, const std::string&>(
              act_gate, act_cand, act_cell, frame_size, false);
  const auto& peephole =
      jit::KernelPool::Instance()
          .template Get<jit::LSTMKernel<float>, const std::string&,
                        const std::string&, const std::string&>(
              act_gate, act_cand, act_cell, frame_size, true);
  EXPECT_TRUE(plstm1 != peephole);

  const auto& pvmul_f =
      jit::KernelPool::Instance().template Get<jit::VMulKernel<float>>(4);
  EXPECT_TRUE(std::dynamic_pointer_cast<const jit::Kernel>(plstm2) !=
              std::dynamic_pointer_cast<const jit::Kernel>(pvmul_f));

  const auto& pvmul_d =
      jit::KernelPool::Instance().template Get<jit::VMulKernel<double>>(4);
  EXPECT_TRUE(std::dynamic_pointer_cast<const jit::Kernel>(pvmul_f) !=
              std::dynamic_pointer_cast<const jit::Kernel>(pvmul_d));

  const auto& pvmul_from_key = jit::KernelPool::Instance().Get("vmulfjit4");
#if defined(__APPLE__) || defined(__OSX__) || defined(_WIN32)
  EXPECT_EQ(pvmul_from_key, nullptr);
#else
  EXPECT_EQ(pvmul_from_key, pvmul_f);
#endif
  const auto& pvmul_from_key2 = jit::KernelPool::Instance().Get("vmulfjit");
  EXPECT_TRUE(pvmul_from_key2 == nullptr);
}