/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>
#include "paddle/fluid/operators/math/jit_gen.h"
#include "paddle/fluid/operators/math/jit_kernel_impl.h"
#include "paddle/fluid/platform/cpu_info.h"

namespace paddle {
namespace operators {
namespace math {
namespace jitkernel {
namespace gen {

using reg64_t = const Xbyak::Reg64;
using reg32_t = const Xbyak::Reg32;
using xmm_t = const Xbyak::Xmm;
using ymm_t = const Xbyak::Ymm;
using zmm_t = const Xbyak::Zmm;
using Label = Xbyak::Label;

typedef enum {
  mul = 0,
  add,
  sub,
  relu,
  exp,
  sigmoid,
  tanh,
  identity
} operand_type;
extern const float exp_float_consts[];
extern const int exp_int_0x7f[];
extern int g_tmp_mem[];

#define ALIGN32 __attribute__((aligned(32)))
#define EXP_HIG 88.3762626647949f
#define EXP_LOW -88.3762626647949f
#define CEPHES_LOG2EF 1.44269504088896341
#define CEPHES_EXP_C1 0.693359375
#define CEPHES_EXP_C2 -2.12194440e-4
#define CEPHES_EXP_P0 1.9875691500E-4
#define CEPHES_EXP_P1 1.3981999507E-3
#define CEPHES_EXP_P2 8.3334519073E-3
#define CEPHES_EXP_P3 4.1665795894E-2
#define CEPHES_EXP_P4 1.6666665459E-1
#define CEPHES_EXP_P5 5.0000001201E-1

#define REPEAT_8TIMES(val) val, val, val, val, val, val, val, val

#define OFFSET_EXP_ONE 0 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_TWO 1 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_0P5 2 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_HIG 3 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_LOW 4 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_LOG2EF 5 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_C1 6 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_C2 7 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_P0 8 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_P1 9 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_P2 10 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_P3 11 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_P4 12 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_P5 13 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_EXP_MAX_INPUT 14 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_SIGMOID_MAX 15 * YMM_FLOAT_BLOCK * sizeof(float)
#define OFFSET_SIGMOID_MIN 16 * YMM_FLOAT_BLOCK * sizeof(float)
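
// The OFFSET_EXP_* values index 8-float (one-YMM-wide) rows of
// exp_float_consts. A hedged sketch of the expected layout in the
// corresponding .cc file (for illustration only; see the real definition):
//   const float exp_float_consts[] ALIGN32 = {
//       REPEAT_8TIMES(1.f),            // OFFSET_EXP_ONE
//       REPEAT_8TIMES(2.f),            // OFFSET_EXP_TWO
//       REPEAT_8TIMES(0.5f),           // OFFSET_EXP_0P5
//       REPEAT_8TIMES(EXP_HIG),        // OFFSET_EXP_HIG
//       REPEAT_8TIMES(EXP_LOW),        // OFFSET_EXP_LOW
//       ...                            // LOG2EF, C1, C2, P0~P5,
//       ...                            // max input, sigmoid max/min
//   };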

// function: vec = Operand(vec (or scalar), vec (or scalar)), optionally
// fused with relu
class VXXJitCode : public JitCode {
 public:
  const char* name() const override {
    std::string base = "VXXJitCode";
    if (scalar_index_ == 1) {
      base += "_Scalar";
    } else {
      base += "_Vec";
    }
    if (type_ == operand_type::mul) {
      base += "_Mul";
    } else if (type_ == operand_type::add) {
      base += "_Add";
    }
    if (scalar_index_ == 2) {
      base += "_Scalar";
    } else {
      base += "_Vec";
    }
    base += (with_relu_ ? "_Relu" : "");
    // NOTE: base is a local, so the returned c_str() dangles after name()
    // returns; callers must consume the result immediately
    return base.c_str();
  }
  explicit VXXJitCode(int d, operand_type type, int scalar_index,
                      bool with_relu, size_t code_size = 256 * 1024,
                      void* code_ptr = nullptr)
      : JitCode(code_size, code_ptr),
        num_(d),
        type_(type),
        scalar_index_(scalar_index),
        with_relu_(with_relu) {}
  static bool init(int d, int scalar_index = 0);
  void generate() override;

 private:
  int num_;
  operand_type type_;
  int scalar_index_;
  bool with_relu_;
  reg64_t param1{abi_param1};
  reg64_t param2{abi_param2};
  reg64_t param3{abi_param3};

  xmm_t xmm_src1 = xmm_t(0);
  xmm_t xmm_src2 = xmm_t(1);
  xmm_t xmm_dst = xmm_t(2);
  xmm_t xmm_zero = xmm_t(3);

  ymm_t ymm_src1 = ymm_t(0);
  ymm_t ymm_src2 = ymm_t(1);
  ymm_t ymm_dst = ymm_t(2);
  ymm_t ymm_zero = ymm_t(3);
};
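
// A minimal usage sketch (hedged assumptions: the three-pointer kernel
// signature is inferred from param1~param3 above, and getCode<>() is
// Xbyak's templated accessor):
//   VXXJitCode code(n, operand_type::mul, /*scalar_index=*/0,
//                   /*with_relu=*/false);
//   auto ker = code.getCode<void (*)(const float*, const float*, float*)>();
//   ker(x, y, z);  // z[i] = x[i] * y[i] for i in [0, n)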

class VActJitCode : public JitCode {
 public:
  const char* name() const override {
    std::string base = "VActJitCode";
    switch (type_) {
      case operand_type::relu:
        base += "_Relu";
        break;
      case operand_type::exp:
        base += "_Exp";
        break;
      case operand_type::sigmoid:
        base += "_Sigmoid";
        break;
      case operand_type::tanh:
        base += "_Tanh";
        break;
      case operand_type::identity:
        base += "_Identity";
        break;
      default:
        break;
    }
    return base.c_str();
  }
  explicit VActJitCode(int d, operand_type type, size_t code_size = 256 * 1024,
                       void* code_ptr = nullptr)
      : JitCode(code_size, code_ptr), num_(d), type_(type) {}
  static bool init(int d, operand_type type);
  void generate() override;

 protected:
  // relu: dst = max(src, 0), on ymm or xmm
  template <typename JMM>
  void relu_jmm(JMM& dst, JMM& src, int zero_idx = 15) {  // NOLINT
    JMM zero = JMM(zero_idx);
    vxorps(zero, zero, zero);
    vmaxps(dst, src, zero);
  }

  // exp: Cephes-style range reduction, on ymm or xmm.
  // Split x = g + n*log(2) with n = floor(x*log2(e) + 0.5); then
  // exp(x) = 2^n * exp(g), where exp(g) is approximated by the degree-5
  // polynomial P0~P5 evaluated with Horner's scheme.
  template <typename JMM>
  void exp_jmm(JMM& dst, JMM& src, int src_idx = 11, int fx_idx = 12,  // NOLINT
               int fy_idx = 13, int mask_idx = 14, int tmp_idx = 15) {
    using namespace platform;  // NOLINT
    // note: src_idx, fx_idx, fy_idx, mask_idx and tmp_idx must all be
    // distinct register indexes
    JMM jmm_src = JMM(src_idx);
    JMM jmm_fx = JMM(fx_idx);
    JMM jmm_fy = JMM(fy_idx);
    JMM jmm_mask = JMM(mask_idx);
    JMM jmm_tmp = JMM(tmp_idx);
    reg64_t reg_ptr_global = rax;
    push(reg_ptr_global);
    vmovaps(jmm_src, src);
    mov(reg_ptr_global, reinterpret_cast<size_t>(exp_float_consts));
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_HIG]);
    vminps(jmm_src, jmm_src, jmm_tmp);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_LOW]);
    vmaxps(jmm_src, jmm_src, jmm_tmp);
    // express exp(x) as exp(g + n*log(2))
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_LOG2EF]);
    vmulps(jmm_fx, jmm_src, jmm_tmp);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_0P5]);
    vaddps(jmm_fx, jmm_fx, jmm_tmp);
    vroundps(jmm_fy, jmm_fx, 0x01);
    // if fy > fx, subtract 1 (guards the floor rounding above)
    vcmpgtps(jmm_mask, jmm_fy, jmm_fx);
    vmovaps(jmm_tmp, ptr[reg_ptr_global]);
    vandps(jmm_mask, jmm_mask, jmm_tmp);
    vsubps(jmm_fx, jmm_fy, jmm_mask);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_C1]);
    vmulps(jmm_fy, jmm_fx, jmm_tmp);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_C2]);
    JMM ymm_z = JMM(jmm_mask.getIdx());
    vmulps(ymm_z, jmm_fx, jmm_tmp);
    vsubps(jmm_src, jmm_src, jmm_fy);
    vsubps(jmm_src, jmm_src, ymm_z);
    vmulps(ymm_z, jmm_src, jmm_src);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_P0]);
    vmulps(dst, jmm_src, jmm_tmp);
    for (size_t i = OFFSET_EXP_P1; i < OFFSET_EXP_P5;
         i += (YMM_FLOAT_BLOCK * sizeof(float))) {
      vmovaps(jmm_tmp, ptr[reg_ptr_global + i]);  // P1~P4
      vaddps(dst, dst, jmm_tmp);
      vmulps(dst, dst, jmm_src);
    }
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_P5]);
    vaddps(dst, dst, jmm_tmp);
    vmulps(dst, dst, ymm_z);
    vaddps(dst, dst, jmm_src);
    vmovaps(jmm_tmp, ptr[reg_ptr_global]);
    vaddps(dst, dst, jmm_tmp);
    // build 2^n
    JMM ymm_int = jmm_fx;
    vcvttps2dq(ymm_int, jmm_fx);
    mov(reg_ptr_global, reinterpret_cast<size_t>(exp_int_0x7f));
    vmovdqa(jmm_tmp, ptr[reg_ptr_global]);
    if (MayIUse(avx2) || std::is_same<JMM, xmm_t>::value) {
      vpaddd(ymm_int, ymm_int, jmm_tmp);
      vpslld(ymm_int, ymm_int, 23);
    } else if (MayIUse(avx)) {
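      // AVX1 has no 256-bit integer vpaddd/vpslld, so spill both ymm values
      // to g_tmp_mem and do the add/shift in two 128-bit (xmm) halves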
      xmm_t xtmp1 = xmm_t(ymm_int.getIdx());
      xmm_t xtmp2 = xmm_t(jmm_tmp.getIdx());
      reg64_t reg_ptr_tmp = reg_ptr_global;
      mov(reg_ptr_tmp, reinterpret_cast<size_t>(g_tmp_mem));
      vmovdqa(ptr[reg_ptr_tmp], ymm_int);
      vmovdqa(ptr[reg_ptr_tmp + YMM_FLOAT_BLOCK * sizeof(float)], jmm_tmp);
      vpaddd(xtmp1, xtmp1, xtmp2);
      vpslld(xtmp1, xtmp1, 23);
      vmovdqa(ptr[reg_ptr_tmp], xtmp1);
      // next 128bits
      vmovdqa(xtmp1, ptr[reg_ptr_tmp + XMM_FLOAT_BLOCK * sizeof(float)]);
      vmovdqa(xtmp2, ptr[reg_ptr_tmp +
                         (YMM_FLOAT_BLOCK + XMM_FLOAT_BLOCK) * sizeof(float)]);
      vpaddd(xtmp1, xtmp1, xtmp2);
      vpslld(xtmp1, xtmp1, 23);
      vmovdqa(ptr[reg_ptr_tmp + XMM_FLOAT_BLOCK * sizeof(float)], xtmp1);
      // load out
      vmovdqa(ymm_int, ptr[reg_ptr_tmp]);
    }
    vmulps(dst, dst, ymm_int);
    pop(reg_ptr_global);
  }
  // compute sigmoid with ymm, xmm
  template <typename JMM>
  void sigmoid_jmm(JMM& dst, JMM& src, int src_idx = 11,  // NOLINT
                   int fx_idx = 12, int fy_idx = 13, int mask_idx = 14,
                   int tmp_idx = 15) {
    // y = 1 / (1 + e^-x)
    JMM jmm_tmp = JMM(tmp_idx);
    JMM jmm_src = JMM(src_idx);
    reg64_t reg_ptr_global = rax;
    push(reg_ptr_global);
    vmovaps(jmm_src, src);
    mov(reg_ptr_global, reinterpret_cast<size_t>(exp_float_consts));
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_SIGMOID_MAX]);
    vminps(jmm_src, jmm_src, jmm_tmp);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_SIGMOID_MIN]);
    vmaxps(jmm_src, jmm_src, jmm_tmp);
    vxorps(jmm_tmp, jmm_tmp, jmm_tmp);
    vsubps(jmm_src, jmm_tmp, jmm_src);
    exp_jmm<JMM>(dst, jmm_src, src_idx, fx_idx, fy_idx, mask_idx, tmp_idx);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_ONE]);
    vaddps(dst, dst, jmm_tmp);
    vdivps(dst, jmm_tmp, dst);
    pop(reg_ptr_global);
  }
  // compute tanh with ymm, xmm
  template <typename JMM>
  void tanh_jmm(JMM& dst, JMM& src, int src_idx = 11,  // NOLINT
                int fx_idx = 12, int fy_idx = 13, int mask_idx = 14,
                int tmp_idx = 15) {
    // y = 2 / (1 + e^(-2x)) - 1
    JMM jmm_src = JMM(src_idx);
    JMM jmm_tmp = JMM(tmp_idx);
    JMM jmm_zero = JMM(mask_idx);
    reg64_t reg_ptr_global = rax;
    push(reg_ptr_global);
    vmovaps(jmm_src, src);
    mov(reg_ptr_global, reinterpret_cast<size_t>(exp_float_consts));
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_TWO]);
    vxorps(jmm_zero, jmm_zero, jmm_zero);
    vsubps(jmm_tmp, jmm_zero, jmm_tmp);
    vmulps(jmm_src, jmm_src, jmm_tmp);
    exp_jmm<JMM>(dst, jmm_src, src_idx, fx_idx, fy_idx, mask_idx, tmp_idx);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_ONE]);
    vaddps(dst, dst, jmm_tmp);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_TWO]);
    vdivps(dst, jmm_tmp, dst);
    vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_ONE]);
    vsubps(dst, dst, jmm_tmp);
    pop(reg_ptr_global);
  }
  template <typename JMM>
  void act(JMM& dst, JMM& src, operand_type type) {  // NOLINT
    // uses JMM registers 11~15 as scratch
    switch (type) {
      case operand_type::relu:
        relu_jmm<JMM>(dst, src, 15);
        break;
      case operand_type::exp:
        exp_jmm<JMM>(dst, src, 11, 12, 13, 14, 15);
        break;
      case operand_type::sigmoid:
        sigmoid_jmm<JMM>(dst, src, 11, 12, 13, 14, 15);
        break;
      case operand_type::tanh:
        tanh_jmm<JMM>(dst, src, 11, 12, 13, 14, 15);
        break;
      case operand_type::identity:
        break;
      default:
        // throw error
        break;
    }
  }

 protected:
  int num_;
  operand_type type_;
  reg64_t param1{abi_param1};
  reg64_t param2{abi_param2};

  xmm_t xmm_src = xmm_t(0);
  ymm_t ymm_src = ymm_t(0);

  xmm_t xmm_dst = xmm_t(1);
  ymm_t ymm_dst = ymm_t(1);
};
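
// A minimal usage sketch (hedged: the unary in/out signature is inferred
// from param1/param2 above):
//   VActJitCode code(n, operand_type::exp);
//   auto ker = code.getCode<void (*)(const float*, float*)>();
//   ker(x, y);  // y[i] = exp(x[i]) for i in [0, n)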

class LSTMJitCode : public VActJitCode {
 public:
  const char* name() const override {
    std::string base = "LSTMJitCode";
    if (use_peephole_) {
      base += "_Peephole";
    }
    if (compute_c1h1_) {
      base += "_C1H1";
    }
    auto AddTypeStr = [&](operand_type type) {
      switch (type) {
        case operand_type::relu:
          base += "_Relu";
          break;
        case operand_type::exp:
          base += "_Exp";
          break;
        case operand_type::sigmoid:
          base += "_Sigmoid";
          break;
        case operand_type::tanh:
          base += "_Tanh";
          break;
        case operand_type::identity:
          base += "_Identity";
          break;
        default:
          break;
      }
    };
    AddTypeStr(act_gate_);
    AddTypeStr(act_cand_);
    AddTypeStr(act_cell_);
    return base.c_str();
  }

  explicit LSTMJitCode(bool compute_c1h1, const lstm_attr_t& attr,
                       size_t code_size = 256 * 1024, void* code_ptr = nullptr)
      : VActJitCode(attr.d, operand_type::sigmoid /* this is buggy */, code_size,
                    code_ptr),
        compute_c1h1_(compute_c1h1) {
    auto typeExchange = [](const std::string& type) -> gen::operand_type {
      if (type == "sigmoid") {
        return operand_type::sigmoid;
      } else if (type == "relu") {
        return operand_type::relu;
      } else if (type == "tanh") {
        return operand_type::tanh;
      } else if (type == "identity" || type == "") {
        return operand_type::identity;
      }  // else throw error
      return operand_type::identity;
    };
    num_ = attr.d;
    use_peephole_ = attr.use_peephole;
    act_gate_ = typeExchange(attr.act_gate);
    act_cand_ = typeExchange(attr.act_cand);
    act_cell_ = typeExchange(attr.act_cell);
  }
  static bool init(int d);
  void generate() override;

 protected:
  int num_;
  bool compute_c1h1_;
  bool use_peephole_;
  operand_type act_gate_;
  operand_type act_cand_;
  operand_type act_cell_;
  reg64_t param1{abi_param1};
};
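
// Hedged note: LSTMJitCode declares only param1, so the generated kernel is
// presumably called with a single packed-argument pointer (e.g. an lstm_t*
// from jit_kernel_impl.h); generate() in the .cc file defines the actual ABI.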
class GRUJitCode : public VActJitCode {
 public:
  const char* name() const override {
    std::string base = "GRUJitCode";
    if (id_ == 0) {
      base += "_H1";
    } else if (id_ == 1) {
      base += "_HtPart1";
    } else if (id_ == 2) {
      base += "_HtPart2";
    }
    auto AddTypeStr = [&](operand_type type) {
      switch (type) {
        case operand_type::relu:
          base += "_Relu";
          break;
        case operand_type::exp:
          base += "_Exp";
          break;
        case operand_type::sigmoid:
          base += "_Sigmoid";
          break;
        case operand_type::tanh:
          base += "_Tanh";
          break;
        case operand_type::identity:
          base += "_Identity";
          break;
        default:
          break;
      }
    };
    AddTypeStr(act_gate_);
    AddTypeStr(act_cand_);
    return base.c_str();
  }

  explicit GRUJitCode(int id, const gru_attr_t& attr,
                      size_t code_size = 256 * 1024, void* code_ptr = nullptr)
      : VActJitCode(attr.d, operand_type::sigmoid /* this is buggy */, code_size,
                    code_ptr),
        id_(id) {
    auto typeExchange = [](const std::string& type) -> gen::operand_type {
      if (type == "sigmoid") {
        return operand_type::sigmoid;
      } else if (type == "relu") {
        return operand_type::relu;
      } else if (type == "tanh") {
        return operand_type::tanh;
      } else if (type == "identity" || type == "") {
        return operand_type::identity;
      }  // else throw error
      return operand_type::identity;
    };
    num_ = attr.d;
    act_gate_ = typeExchange(attr.act_gate);
    act_cand_ = typeExchange(attr.act_cand);
  }
  static bool init(int d);
  void generate() override;

 protected:
  int id_;
  int num_;
  operand_type act_gate_;
  operand_type act_cand_;
  reg64_t param1{abi_param1};
};

#ifdef PADDLE_WITH_MKLDNN
struct EltwiseMulnChw16cNC : public Xbyak::CodeGenerator {
  explicit EltwiseMulnChw16cNC(size_t code_size = 256 * 1024)
      : Xbyak::CodeGenerator(code_size) {
    // RDI is ptr x_input
    // RSI is ptr y_input
    // RDX is ptr output
    // RCX is height
    // r8 is width
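    //
    // Loop structure (a reading of the code below): one 16-float block of y
    // is loaded once into zmm3 (AVX-512) and multiplied against every
    // 16-float block of x over height * width, 64 bytes per iteration:
    //   out[h][w][0:16] = x[h][w][0:16] * y[0:16]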

    push(rbx);

    xor_(rax, rax);
    xor_(r10, r10);
    vmovups(zmm3, ptr[rsi]);

    L("h_loop");
    xor_(rbx, rbx);
    L("w_loop");
    vmovups(zmm2, ptr[rdi + rax]);
    vmulps(zmm1, zmm2, zmm3);
    vmovups(ptr[rdx + rax], zmm1);
    add(rax, 64);
    inc(rbx);
    cmp(r8, rbx);
    jnz("w_loop");
    inc(r10);
    cmp(r10, rcx);
    jnz("h_loop");

    pop(rbx);
    ret();
  }
};
#endif

}  // namespace gen
}  // namespace jitkernel
}  // namespace math
}  // namespace operators
}  // namespace paddle