Commit 0043c42b authored by T tensor-tang

add vrelu jitcode

test=develop
Parent 9a6e2392
@@ -118,6 +118,39 @@ void VXXJitCode::generate() {
   ret();
 }
+
+bool ReluJitCode::init(int d) { return MayIUse(avx); }
+
+void ReluJitCode::generate() {
+  int offset = 0;
+  vxorps(ymm_zero, ymm_zero, ymm_zero);
+  for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) {
+    vmovups(ymm_src, ptr[param1 + offset]);
+    vmaxps(ymm_dst, ymm_zero, ymm_src);
+    vmovups(ptr[param2 + offset], ymm_dst);
+    offset += sizeof(float) * AVX_FLOAT_BLOCK;
+  }
+  int rest = num_ % AVX_FLOAT_BLOCK;
+  if (rest >= 4) {
+    vmovups(xmm_src, ptr[param1 + offset]);
+    vmaxps(xmm_dst, xmm_zero, xmm_src);
+    vmovups(ptr[param2 + offset], xmm_dst);
+    offset += sizeof(float) * 4;
+    rest -= 4;
+  }
+  if (rest >= 2) {
+    vmovups(xmm_src, ptr[param1 + offset]);
+    vmaxps(xmm_dst, xmm_zero, xmm_src);
+    vmovq(ptr[param2 + offset], xmm_dst);
+    offset += sizeof(float) * 2;
+    rest -= 2;
+  }
+  if (rest > 0) {
+    vmovups(xmm_src, ptr[param1 + offset]);
+    vmaxps(xmm_dst, xmm_zero, xmm_src);
+    vmovss(ptr[param2 + offset], xmm_dst);
+  }
+  ret();
+}
 } // namespace gen
 } // namespace jitkernel
 } // namespace math
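Note: the generated kernel processes full 8-float ymm blocks first, then falls back to 4-, 2- and 1-element tails. A scalar C++ sketch of the same blocking (illustrative only, not part of the commit; relu_blocked is a hypothetical name):

// Scalar equivalent of the emitted AVX code: 8-wide blocks via ymm
// (vmaxps against a zeroed register), then the remainder.
static void relu_blocked(const float* x, float* y, int n) {
  int i = 0;
  for (; i + 8 <= n; i += 8) {  // main loop: one vmaxps per full ymm block
    for (int j = 0; j < 8; ++j) y[i + j] = x[i + j] > 0.f ? x[i + j] : 0.f;
  }
  for (; i < n; ++i) {  // tail: the JIT version splits this into 4/2/1 steps
    y[i] = x[i] > 0.f ? x[i] : 0.f;
  }
}

One observation on the tails: the 2- and 1-element branches still issue a full 16-byte vmovups load, so the generated code appears to read a few floats past the logical end of the input; the stores (vmovq/vmovss) stay in bounds.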
......
@@ -85,6 +85,29 @@ class VXXJitCode : public JitCode {
   ymm_t ymm_zero = ymm_t(3);
 };
+
+class ReluJitCode : public JitCode {
+ public:
+  DECLARE_JIT_CODE(ReluJitCode);
+  explicit ReluJitCode(int d, size_t code_size = 256 * 1024,
+                       void* code_ptr = nullptr)
+      : JitCode(code_size, code_ptr), num_(d) {}
+  static bool init(int d);
+  void generate() override;
+
+ private:
+  int num_;
+  reg64_t param1{abi_param1};
+  reg64_t param2{abi_param2};
+
+  xmm_t xmm_zero = xmm_t(0);
+  xmm_t xmm_src = xmm_t(1);
+  xmm_t xmm_dst = xmm_t(1);
+
+  ymm_t ymm_zero = ymm_t(0);
+  ymm_t ymm_src = ymm_t(1);
+  ymm_t ymm_dst = ymm_t(1);
+};
 } // namespace gen
 } // namespace jitkernel
 } // namespace math
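A hedged usage sketch, mirroring the two-step pattern in the VReluKernelImpl constructor further down in this diff (x, y, d are placeholders supplied by the caller):

// Build the code object, then fetch the emitted buffer as a plain C
// function pointer and call it.
gen::ReluJitCode jitcode(d, /*code_size=*/4096);
auto relu = jitcode.getCode<void (*)(const float*, float*, int)>();
relu(x, y, d);  // the int argument is ignored by the JIT body (d is baked
                // in at generate() time) but matches VReluRefer's signature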
......
@@ -97,37 +97,38 @@ class VAddBiasKernel : public Kernel {
 template <typename T>
 class VActKernel : public Kernel {
  public:
-  virtual void Compute(const T *x, T *y) const = 0;
+  virtual void ComputeDeprecated(const T *x, T *y) const = 0;
 };

 template <typename T>
 class VReluKernel : public VActKernel<T> {
  public:
-  virtual void Compute(const T *x, T *y) const = 0;
+  virtual void ComputeDeprecated(const T *x, T *y) const = 0;
+  void (*Compute)(const T *, T *, int);
 };

 template <typename T>
 class VIdentityKernel : public VActKernel<T> {
  public:
-  virtual void Compute(const T *x, T *y) const = 0;
+  virtual void ComputeDeprecated(const T *x, T *y) const = 0;
 };

 template <typename T>
 class VExpKernel : public VActKernel<T> {
  public:
-  virtual void Compute(const T *x, T *y) const = 0;
+  virtual void ComputeDeprecated(const T *x, T *y) const = 0;
 };

 template <typename T>
 class VSigmoidKernel : public VActKernel<T> {
  public:
-  virtual void Compute(const T *x, T *y) const = 0;
+  virtual void ComputeDeprecated(const T *x, T *y) const = 0;
 };

 template <typename T>
 class VTanhKernel : public VActKernel<T> {
  public:
-  virtual void Compute(const T *x, T *y) const = 0;
+  virtual void ComputeDeprecated(const T *x, T *y) const = 0;
 };

 template <typename T>
......
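This header change is the core of the commit: VReluKernel keeps the virtual ComputeDeprecated during migration, but gains a plain function-pointer member Compute(x, y, n) that is bound once at construction (JIT buffer when available, VReluRefer otherwise), so hot-loop calls pay one indirect call instead of virtual dispatch through per-ISA/per-block template specializations. A minimal caller-side sketch, following the KernelPool pattern used elsewhere in this diff:

auto ker = KernelPool::Instance().Get<VReluKernel<float>>(d);
ker->Compute(x_data, y_data, d);         // new: bound function pointer
ker->ComputeDeprecated(x_data, y_data);  // old path, kept for unmigrated callers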
@@ -71,6 +71,13 @@ void VAddBiasRefer(const T* a, const T* x, T* y, int n) {
   }
 }

+template <typename T>
+void VReluRefer(const T* x, T* y, int n) {
+  for (int i = 0; i < n; ++i) {
+    y[i] = x[i] > 0 ? x[i] : 0;
+  }
+}
+
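VReluRefer is the portable fallback bound to Compute when JIT is unavailable. For example:

float x[4] = {-1.f, 0.f, 2.5f, -3.f};
float y[4];
VReluRefer<float>(x, y, 4);  // y == {0.f, 0.f, 2.5f, 0.f}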
 #ifdef PADDLE_WITH_MKLML
 template <typename T>
 void VMulMKL(const T* x, const T* y, T* z, int n);
@@ -344,124 +351,60 @@ bool VAddBiasKernelImpl<float>::useJIT(int d) {
 }
 #endif
-#undef DECLARE_STATIC_FUNC
-
-REGISTER_JITKERNEL(vmul, VMulKernel);
-REGISTER_JITKERNEL(vadd, VAddKernel);
-REGISTER_JITKERNEL(vaddrelu, VAddReluKernel);
-REGISTER_JITKERNEL(vscal, VScalKernel);
-REGISTER_JITKERNEL(vaddbias, VAddBiasKernel);
-
 /* VRelu JitKernel */
-template <typename T, platform::jit::cpu_isa_t isa, jit_block>
+template <typename T>
 class VReluKernelImpl : public VReluKernel<T> {
  public:
-  explicit VReluKernelImpl(int d) : VReluKernel<T>() { this->num_ = d; }
-  void Compute(const T* x, T* y) const override {
-    for (int i = 0; i < this->num_; ++i) {
-      y[i] = x[i] > 0 ? x[i] : 0;
-    }
-  }
-};
-
-#define INTRI8_FLOAT(isa)                                                   \
-  template <>                                                               \
-  void VReluKernelImpl<float, isa, kEQ8>::Compute(const float* x, float* y) \
-      const {                                                               \
-    __m256 tmp = _mm256_loadu_ps(x);                                        \
-    tmp = _mm256_max_ps(tmp, _mm256_setzero_ps());                          \
-    _mm256_storeu_ps(y, tmp);                                               \
-  }
-
-#define INTRI16_FLOAT(isa)                                                   \
-  template <>                                                                \
-  void VReluKernelImpl<float, isa, kEQ16>::Compute(const float* x, float* y) \
-      const {                                                                \
-    __m256 zeros = _mm256_setzero_ps();                                      \
-    __m256 tmp0 = _mm256_loadu_ps(x);                                        \
-    __m256 tmp1 = _mm256_loadu_ps(x + 8);                                    \
-    tmp0 = _mm256_max_ps(tmp0, zeros);                                       \
-    tmp1 = _mm256_max_ps(tmp1, zeros);                                       \
-    _mm256_storeu_ps(y, tmp0);                                               \
-    _mm256_storeu_ps(y + 8, tmp1);                                           \
-  }
-
-#define INTRI_GT8LT16_FLOAT(isa)                                        \
-  template <>                                                           \
-  VReluKernelImpl<float, isa, kGT8LT16>::VReluKernelImpl(int d)         \
-      : VReluKernel<float>() {                                          \
-    this->num_ = d;                                                     \
-    this->end_ = AVX_FLOAT_BLOCK;                                       \
-    this->rest_ = d - AVX_FLOAT_BLOCK;                                  \
-  }                                                                     \
-  template <>                                                           \
-  void VReluKernelImpl<float, isa, kGT8LT16>::Compute(const float* x,   \
-                                                      float* y) const { \
-    __m256 zeros = _mm256_setzero_ps();                                 \
-    __m256 tmp0 = _mm256_loadu_ps(x);                                   \
-    __m256 tmp1 = _mm256_loadu_ps(x + this->rest_);                     \
-    tmp0 = _mm256_max_ps(tmp0, zeros);                                  \
-    tmp1 = _mm256_max_ps(tmp1, zeros);                                  \
-    _mm256_storeu_ps(y, tmp0);                                          \
-    _mm256_storeu_ps(y + this->rest_, tmp1);                            \
-  }
-
-#define INTRI_GT16_FLOAT(isa)                                                \
-  template <>                                                                \
-  VReluKernelImpl<float, isa, kGT16>::VReluKernelImpl(int d)                 \
-      : VReluKernel<float>() {                                               \
-    this->num_ = d;                                                          \
-    this->end_ = d - d % AVX_FLOAT_BLOCK;                                    \
-    this->rest_ = d - AVX_FLOAT_BLOCK;                                       \
-  }                                                                          \
-  template <>                                                                \
-  void VReluKernelImpl<float, isa, kGT16>::Compute(const float* x, float* y) \
-      const {                                                                \
-    __m256 zeros = _mm256_setzero_ps();                                      \
-    for (int i = 0; i < this->end_; i += AVX_FLOAT_BLOCK) {                  \
-      __m256 tmp = _mm256_loadu_ps(x + i);                                   \
-      tmp = _mm256_max_ps(tmp, zeros);                                       \
-      _mm256_storeu_ps(y + i, tmp);                                          \
-    }                                                                        \
-    __m256 tmp = _mm256_loadu_ps(x + this->rest_);                           \
-    tmp = _mm256_max_ps(tmp, zeros);                                         \
-    _mm256_storeu_ps(y + this->rest_, tmp);                                  \
-  }
-
-#ifdef __AVX__
-INTRI8_FLOAT(jit::avx);
-INTRI16_FLOAT(jit::avx);
-INTRI_GT8LT16_FLOAT(jit::avx);
-INTRI_GT16_FLOAT(jit::avx);
-#endif
-#ifdef __AVX2__
-INTRI8_FLOAT(jit::avx2);
-INTRI16_FLOAT(jit::avx2);
-INTRI_GT8LT16_FLOAT(jit::avx2);
-INTRI_GT16_FLOAT(jit::avx2);
-#endif
-#ifdef __AVX512F__
-// TODO(TJ): refine avx512
-INTRI8_FLOAT(jit::avx512f);
-INTRI16_FLOAT(jit::avx512f);
-INTRI_GT8LT16_FLOAT(jit::avx512f);
-INTRI_GT16_FLOAT(jit::avx512f);
-#endif
-
-#undef INTRI8_FLOAT
-#undef INTRI16_FLOAT
-#undef INTRI_GT8LT16_FLOAT
-#undef INTRI_GT16_FLOAT
+  DECLARE_STATIC_FUNC;
+  explicit VReluKernelImpl(int d) : VReluKernel<T>() {
+    this->num_ = d;  // TODO(TJ): remove me when ComputeDeprecated done
+#ifdef PADDLE_WITH_XBYAK
+    if (useJIT(d)) {
+      size_t sz = 96 /* init */ +
+                  d / AVX_FLOAT_BLOCK * 4 /* instructions */ *
+                      8 /* average bytes per instruction */;
+      jitcode_.reset(new gen::ReluJitCode(d, sz > 4096 ? sz : 4096));
+      this->Compute = jitcode_->getCode<void (*)(const T*, T*, int)>();
+      return;
+    }
+#endif
+    this->Compute = VReluRefer<T>;
+  }
+  void ComputeDeprecated(const T* x, T* y) const override {
+    VReluRefer(x, y, this->num_);
+  }
+#ifdef PADDLE_WITH_XBYAK
+
+ private:
+  std::unique_ptr<gen::ReluJitCode> jitcode_{nullptr};
+#endif
+};
+
+#ifdef PADDLE_WITH_XBYAK
+template <>
+bool VReluKernelImpl<float>::useJIT(int d) {
+  return gen::ReluJitCode::init(d);
+}
+#endif
+
+#undef DECLARE_STATIC_FUNC
+
+REGISTER_JITKERNEL(vmul, VMulKernel);
+REGISTER_JITKERNEL(vadd, VAddKernel);
+REGISTER_JITKERNEL(vaddrelu, VAddReluKernel);
+REGISTER_JITKERNEL(vscal, VScalKernel);
+REGISTER_JITKERNEL(vaddbias, VAddBiasKernel);
+REGISTER_JITKERNEL(vrelu, VReluKernel);
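On the buffer-size estimate in the constructor above: for d = 128 it gives 96 + (128 / 8) * 4 * 8 = 608 bytes, so the max(sz, 4096) floor dominates for small and mid-sized d; the floor also leaves room for the 4/2/1-element tail instructions the per-block estimate ignores.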
 /* An empty JitKernel */
 template <typename T, platform::jit::cpu_isa_t isa, jit_block>
 class VIdentityKernelImpl : public VIdentityKernel<T> {
  public:
   explicit VIdentityKernelImpl(int d) : VIdentityKernel<T>() { this->num_ = d; }
-  void Compute(const T* x, T* y) const override {}
+  void ComputeDeprecated(const T* x, T* y) const override {}
 };

-REGISTER_JITKERNEL_DEPRECATED(vrelu, VReluKernel);
 REGISTER_JITKERNEL_DEPRECATED(videntity, VIdentityKernel);

 } // namespace jitkernel
......
@@ -35,7 +35,7 @@ template <typename T, jit::cpu_isa_t isa, jit_block>
 class VExpKernelImpl : public VExpKernel<T> {
  public:
   explicit VExpKernelImpl(int d) : VExpKernel<T>() { this->num_ = d; }
-  void Compute(const T* x, T* y) const override {
+  void ComputeDeprecated(const T* x, T* y) const override {
     for (int i = 0; i < this->num_; ++i) {
       y[i] = std::exp(x[i]);
     }
@@ -43,18 +43,18 @@ class VExpKernelImpl : public VExpKernel<T> {
 };

 #ifdef PADDLE_WITH_MKLML
 #define MKL_FLOAT(isa, block)                                           \
   template <>                                                           \
-  void VExpKernelImpl<float, isa, block>::Compute(const float* x, float* y) \
-      const {                                                           \
+  void VExpKernelImpl<float, isa, block>::ComputeDeprecated(const float* x, \
+                                                            float* y) const { \
     platform::dynload::vsExp(this->num_, x, y);                         \
   }

 #define MKL_DOUBLE(isa, block)                                          \
   template <>                                                           \
-  void VExpKernelImpl<double, isa, block>::Compute(const double* x, double* y) \
-      const {                                                           \
+  void VExpKernelImpl<double, isa, block>::ComputeDeprecated(           \
+      const double* x, double* y) const {                               \
     platform::dynload::vdExp(this->num_, x, y);                         \
   }

 FOR_EACH_ISA(MKL_FLOAT, kLT8);
 FOR_EACH_ISA(MKL_FLOAT, kGT8LT16);
@@ -211,24 +211,24 @@ __m256 ExpAVX2(__m256 x) {
 } // namespace detail

 #define INTRI8_FLOAT(isa, expisa)                                       \
   template <>                                                           \
-  void VExpKernelImpl<float, isa, kEQ8>::Compute(const float* x, float* y) \
-      const {                                                           \
+  void VExpKernelImpl<float, isa, kEQ8>::ComputeDeprecated(const float* x, \
+                                                           float* y) const { \
     __m256 tmp = _mm256_loadu_ps(x);                                    \
     _mm256_storeu_ps(y, expisa(tmp));                                   \
   }

 #define INTRI16_FLOAT(isa, expisa)                                      \
   template <>                                                           \
-  void VExpKernelImpl<float, isa, kEQ16>::Compute(const float* x, float* y) \
-      const {                                                           \
+  void VExpKernelImpl<float, isa, kEQ16>::ComputeDeprecated(const float* x, \
+                                                            float* y) const { \
     __m256 tmp0 = _mm256_loadu_ps(x);                                   \
     __m256 tmp1 = _mm256_loadu_ps(x + 8);                               \
     tmp0 = expisa(tmp0);                                                \
     tmp1 = expisa(tmp1);                                                \
     _mm256_storeu_ps(y, tmp0);                                          \
     _mm256_storeu_ps(y + 8, tmp1);                                      \
   }

 #ifdef __AVX__
@@ -260,14 +260,14 @@ class VSigmoidKernelImpl : public VSigmoidKernel<T> {
     this->num_ = d;
     vexp_ = KernelPool::Instance().template Get<VExpKernel<T>>(d);
   }
-  void Compute(const T* x, T* y) const override {
+  void ComputeDeprecated(const T* x, T* y) const override {
     const T min = SIGMOID_THRESHOLD_MIN;
     const T max = SIGMOID_THRESHOLD_MAX;
     for (int i = 0; i < this->num_; ++i) {
       y[i] = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]);
       y[i] = static_cast<T>(0) - y[i];
     }
-    vexp_->Compute(y, y);
+    vexp_->ComputeDeprecated(y, y);
     for (int i = 0; i < this->num_; ++i) {
       y[i] = static_cast<T>(1) / (static_cast<T>(1) + y[i]);
     }
@@ -285,30 +285,30 @@ class VSigmoidKernelImpl : public VSigmoidKernel<T> {
   tmp = _mm256_add_ps(_mm256_set1_ps(1.0f), tmp);        \
   tmp = _mm256_div_ps(_mm256_set1_ps(1.0f), tmp)

 #define INTRI8_FLOAT(isa, expisa)                        \
   template <>                                            \
-  void VSigmoidKernelImpl<float, isa, kEQ8>::Compute(const float* x, float* y) \
-      const {                                            \
+  void VSigmoidKernelImpl<float, isa, kEQ8>::ComputeDeprecated( \
+      const float* x, float* y) const {                  \
     /* TODO(TJ): try to use static const*/               \
     __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX);  \
     __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN);  \
     __m256 tmp = _mm256_loadu_ps(x);                     \
     INTRI_SIGMOID(tmp, min, max, expisa);                \
     _mm256_storeu_ps(y, tmp);                            \
   }

 #define INTRI16_FLOAT(isa, expisa)                       \
   template <>                                            \
-  void VSigmoidKernelImpl<float, isa, kEQ16>::Compute(const float* x, \
-                                                      float* y) const { \
+  void VSigmoidKernelImpl<float, isa, kEQ16>::ComputeDeprecated( \
+      const float* x, float* y) const {                  \
     __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX);  \
     __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN);  \
     __m256 tmp0 = _mm256_loadu_ps(x);                    \
     __m256 tmp1 = _mm256_loadu_ps(x + 8);                \
     INTRI_SIGMOID(tmp0, min, max, expisa);               \
     INTRI_SIGMOID(tmp1, min, max, expisa);               \
     _mm256_storeu_ps(y, tmp0);                           \
     _mm256_storeu_ps(y + 8, tmp1);                       \
   }

 #define INTRI_GT8LT16_FLOAT(isa, expisa) \
@@ -322,8 +322,8 @@ class VSigmoidKernelImpl : public VSigmoidKernel<T> {
         KernelPool::Instance().template Get<VExpKernel<float>>(this->rest_); \
   }                                                      \
   template <>                                            \
-  void VSigmoidKernelImpl<float, isa, kGT8LT16>::Compute(const float* x, \
-                                                         float* y) const { \
+  void VSigmoidKernelImpl<float, isa, kGT8LT16>::ComputeDeprecated( \
+      const float* x, float* y) const {                  \
     __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX);  \
     __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN);  \
     __m256 tmp = _mm256_loadu_ps(x);                     \
@@ -335,7 +335,7 @@ class VSigmoidKernelImpl : public VSigmoidKernel<T> {
     y[i] = (x[i] < min_) ? min_ : ((x[i] > max_) ? max_ : x[i]); \
     y[i] = 0.f - y[i];                                   \
   }                                                      \
-  vexp_->Compute(y + this->end_, y + this->end_);        \
+  vexp_->ComputeDeprecated(y + this->end_, y + this->end_); \
   for (int i = this->end_; i < this->num_; ++i) {        \
     y[i] = 1.f / (1.f + y[i]);                           \
   }                                                      \
@@ -352,8 +352,8 @@ class VSigmoidKernelImpl : public VSigmoidKernel<T> {
         KernelPool::Instance().template Get<VExpKernel<float>>(this->rest_); \
   }                                                      \
   template <>                                            \
-  void VSigmoidKernelImpl<float, isa, kGT16>::Compute(const float* x, \
-                                                      float* y) const { \
+  void VSigmoidKernelImpl<float, isa, kGT16>::ComputeDeprecated( \
+      const float* x, float* y) const {                  \
     __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX);  \
     __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN);  \
     for (int i = 0; i < this->end_; i += AVX_FLOAT_BLOCK) { \
@@ -367,7 +367,7 @@ class VSigmoidKernelImpl : public VSigmoidKernel<T> {
     y[i] = (x[i] < min_) ? min_ : ((x[i] > max_) ? max_ : x[i]); \
     y[i] = 0.f - y[i];                                   \
   }                                                      \
-  vexp_->Compute(y + this->end_, y + this->end_);        \
+  vexp_->ComputeDeprecated(y + this->end_, y + this->end_); \
   for (int i = this->end_; i < this->num_; ++i) {        \
     y[i] = 1.f / (1.f + y[i]);                           \
   }                                                      \
@@ -408,10 +408,10 @@ class VTanhKernelImpl : public VTanhKernel<T> {
     vsigmoid_ = KernelPool::Instance().template Get<VSigmoidKernel<T>>(d);
     vaddbias_ = KernelPool::Instance().template Get<VAddBiasKernel<T>>(d);
   }
-  void Compute(const T* x, T* y) const override {
+  void ComputeDeprecated(const T* x, T* y) const override {
     const T a = static_cast<T>(2), b = static_cast<T>(-1);
     vscal_->Compute(&a, x, y, this->num_);
-    vsigmoid_->Compute(y, y);
+    vsigmoid_->ComputeDeprecated(y, y);
     vscal_->Compute(&a, y, y, this->num_);
     vaddbias_->Compute(&b, y, y, this->num_);
   }
@@ -430,25 +430,25 @@ class VTanhKernelImpl : public VTanhKernel<T> {
   tmp = _mm256_div_ps(_mm256_set1_ps(2.0f), tmp);        \
   tmp = _mm256_sub_ps(tmp, _mm256_set1_ps(1.0f))

 #define INTRI8_FLOAT(isa, expisa)                        \
   template <>                                            \
-  void VTanhKernelImpl<float, isa, kEQ8>::Compute(const float* x, float* y) \
-      const {                                            \
+  void VTanhKernelImpl<float, isa, kEQ8>::ComputeDeprecated(const float* x, \
+                                                            float* y) const { \
     __m256 tmp = _mm256_loadu_ps(x);                     \
     INTRI_VTANH(tmp, expisa);                            \
     _mm256_storeu_ps(y, tmp);                            \
   }

 #define INTRI16_FLOAT(isa, expisa)                       \
   template <>                                            \
-  void VTanhKernelImpl<float, isa, kEQ16>::Compute(const float* x, float* y) \
-      const {                                            \
+  void VTanhKernelImpl<float, isa, kEQ16>::ComputeDeprecated(const float* x, \
+                                                             float* y) const { \
     __m256 tmp0 = _mm256_loadu_ps(x);                    \
     __m256 tmp1 = _mm256_loadu_ps(x + 8);                \
     INTRI_VTANH(tmp0, expisa);                           \
     INTRI_VTANH(tmp1, expisa);                           \
     _mm256_storeu_ps(y, tmp0);                           \
     _mm256_storeu_ps(y + 8, tmp1);                       \
   }

 #define INTRI_GT8LT16_FLOAT(isa, expisa) \
@@ -466,8 +466,8 @@ class VTanhKernelImpl : public VTanhKernel<T> {
                                                        this->rest_); \
   }                                                      \
   template <>                                            \
-  void VTanhKernelImpl<float, isa, kGT8LT16>::Compute(const float* x, \
-                                                      float* y) const { \
+  void VTanhKernelImpl<float, isa, kGT8LT16>::ComputeDeprecated( \
+      const float* x, float* y) const {                  \
     __m256 tmp = _mm256_loadu_ps(x);                     \
     INTRI_VTANH(tmp, expisa);                            \
     _mm256_storeu_ps(y, tmp);                            \
@@ -475,40 +475,40 @@ class VTanhKernelImpl : public VTanhKernel<T> {
     y += AVX_FLOAT_BLOCK;                                \
     const float a = 2.f, b = -1.f;                       \
     vscal_->Compute(&a, x, y, this->num_);               \
-    vsigmoid_->Compute(y, y);                            \
+    vsigmoid_->ComputeDeprecated(y, y);                  \
     vscal_->Compute(&a, y, y, this->num_);               \
     vaddbias_->Compute(&b, y, y, this->num_);            \
   }
 #define INTRI_GT16_FLOAT(isa, expisa)                    \
   template <>                                            \
   VTanhKernelImpl<float, isa, kGT16>::VTanhKernelImpl(int d) \
       : VTanhKernel<float>() {                           \
     this->num_ = d;                                      \
     this->rest_ = d % AVX_FLOAT_BLOCK;                   \
     this->end_ = d - this->rest_;                        \
     vscal_ =                                             \
         KernelPool::Instance().template Get<VScalKernel<float>>(this->rest_); \
     vsigmoid_ = KernelPool::Instance().template Get<VSigmoidKernel<float>>( \
         this->rest_);                                    \
     vaddbias_ = KernelPool::Instance().template Get<VAddBiasKernel<float>>( \
         this->rest_);                                    \
   }                                                      \
   template <>                                            \
-  void VTanhKernelImpl<float, isa, kGT16>::Compute(const float* x, float* y) \
-      const {                                            \
+  void VTanhKernelImpl<float, isa, kGT16>::ComputeDeprecated(const float* x, \
+                                                             float* y) const { \
     for (int i = 0; i < this->end_; i += AVX_FLOAT_BLOCK) { \
       __m256 tmp = _mm256_loadu_ps(x + i);               \
       INTRI_VTANH(tmp, expisa);                          \
       _mm256_storeu_ps(y + i, tmp);                      \
     }                                                    \
     x += this->end_;                                     \
     y += this->end_;                                     \
     const float a = 2.f, b = -1.f;                       \
     vscal_->Compute(&a, x, y, this->num_);               \
-    vsigmoid_->Compute(y, y);                            \
+    vsigmoid_->ComputeDeprecated(y, y);                  \
     vscal_->Compute(&a, y, y, this->num_);               \
     vaddbias_->Compute(&b, y, y, this->num_);            \
   }

 #ifdef __AVX__
......
@@ -175,26 +175,26 @@ class LSTMKernelImpl : public LSTMKernel<T> {
   void ComputeCtHt(T* gates, const T* ct_1, T* ct, T* ht, const T* wp_data,
                    T* checked) const override {
     // gates: W_ch, W_ih, W_fh, W_oh
-    act_gate_d3_->Compute(gates + d_, gates + d_);
+    act_gate_d3_->ComputeDeprecated(gates + d_, gates + d_);

     /* C_t = C_t-1 * fgated + cand_gated * igated */
-    act_cand_d_->Compute(gates, gates);
+    act_cand_d_->ComputeDeprecated(gates, gates);
     vmul_d_->Compute(gates, gates + d_, gates + d_, d_);
     vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_);
     vadd_d_->Compute(gates + d_, gates + d2_, ct, d_);

     /* H_t = act_cell(C_t) * ogated */
-    act_cell_d_->Compute(ct, gates + d2_);
+    act_cell_d_->ComputeDeprecated(ct, gates + d2_);
     vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_);
   }
   void ComputeC1H1(T* gates, T* ct, T* ht, const T* wp_data) const override {
     /* C_t = igated * cgated*/
-    act_gate_d_->Compute(gates + d_, gates + d_);
-    act_cand_d_->Compute(gates, gates);
+    act_gate_d_->ComputeDeprecated(gates + d_, gates + d_);
+    act_cand_d_->ComputeDeprecated(gates, gates);
     vmul_d_->Compute(gates, gates + d_, ct, d_);

     /* H_t = act_cell(C_t) * ogated */
-    act_gate_d_->Compute(gates + d3_, gates + d3_);
-    act_cell_d_->Compute(ct, gates + d2_);
+    act_gate_d_->ComputeDeprecated(gates + d3_, gates + d3_);
+    act_cell_d_->ComputeDeprecated(ct, gates + d2_);
     vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_);
   }
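With the gates buffer laid out as {candidate, i, f, o} at offsets 0, d_, d2_, d3_, the sequence above computes C_t = cand * i + C_{t-1} * f (elementwise) and H_t = act_cell(C_t) * o, each step as one vectorized kernel call over d_ elements.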
@@ -292,32 +292,32 @@ class PeepholeKernelImpl : public LSTMKernel<T> {
     vmul_d_->Compute(wp_data, ct_1, checked, d_);
     vmul_d_->Compute(wp_data + d_, ct_1, checked + d_, d_);
     vadd_d2_->Compute(checked, gates + d_, gates + d_, d2_);
-    act_gate_d2_->Compute(gates + d_, gates + d_);
+    act_gate_d2_->ComputeDeprecated(gates + d_, gates + d_);

     /* C_t = C_t-1 * fgated + cand_gated * igated*/
-    act_cand_d_->Compute(gates, gates);
+    act_cand_d_->ComputeDeprecated(gates, gates);
     vmul_d_->Compute(gates, gates + d_, gates + d_, d_);
     vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_);
     vadd_d_->Compute(gates + d_, gates + d2_, ct, d_);

     /* get ogated*/
     vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_);
     vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_, d_);
-    act_gate_d_->Compute(gates + d3_, gates + d3_);
+    act_gate_d_->ComputeDeprecated(gates + d3_, gates + d3_);

     /* H_t = act_cell(C_t) * ogated */
-    act_cell_d_->Compute(ct, gates + d2_);
+    act_cell_d_->ComputeDeprecated(ct, gates + d2_);
     vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_);
   }
   void ComputeC1H1(T* gates, T* ct, T* ht, const T* wp_data) const override {
     /* C_t = igated * cgated*/
-    act_gate_d_->Compute(gates + d_, gates + d_);
-    act_cand_d_->Compute(gates, gates);
+    act_gate_d_->ComputeDeprecated(gates + d_, gates + d_);
+    act_cand_d_->ComputeDeprecated(gates, gates);
     vmul_d_->Compute(gates, gates + d_, ct, d_);

     /* get outgated, put W_oc * C_t on igated */
     vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_);
     vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_, d_);

     /* H_t = act_cell(C_t) * ogated */
-    act_gate_d_->Compute(gates + d3_, gates + d3_);
-    act_cell_d_->Compute(ct, gates + d2_);
+    act_gate_d_->ComputeDeprecated(gates + d3_, gates + d3_);
+    act_cell_d_->ComputeDeprecated(ct, gates + d2_);
     vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_);
   }
@@ -376,20 +376,20 @@ class GRUKernelImpl : public GRUKernel<T> {
   }
   void ComputeH1(T* gates, T* ht) const override {
-    act_gate_d_->Compute(gates, gates);
-    act_state_d_->Compute(gates + d2_, gates + d2_);
+    act_gate_d_->ComputeDeprecated(gates, gates);
+    act_state_d_->ComputeDeprecated(gates + d2_, gates + d2_);
     vmul_d_->Compute(gates, gates + d2_, ht, d_);
   }
   void ComputeHtPart1(T* gates, const T* ht_1, T* ht) const override {
     // W: {W_update, W_reset; W_state}
-    act_gate_d2_->Compute(gates, gates);
+    act_gate_d2_->ComputeDeprecated(gates, gates);
     vmul_d_->Compute(ht_1, gates + d_, ht, d_);
   }
   void ComputeHtPart2(T* gates, const T* ht_1, T* ht) const override {
     T* y = gates + d2_;
-    act_state_d_->Compute(y, y);
+    act_state_d_->ComputeDeprecated(y, y);
     // out = zt*ht~ + (1-zt)*ht_1
     for (int i = 0; i < d_; ++i) {
       ht[i] = gates[i] * y[i] + (static_cast<T>(1) - gates[i]) * ht_1[i];
......
@@ -92,7 +92,7 @@ TEST(JitKernel, vrelu) {
 #endif
   auto ttgts = GetCurrentUS();
   for (int i = 0; i < repeat; ++i) {
-    ker->Compute(x_data, ztgt_data);
+    ker->Compute(x_data, ztgt_data, d);
   }
   auto ttgte = GetCurrentUS();
   VLOG(30) << "Vec size " << d
@@ -181,7 +181,7 @@ TEST(JitKernel, vexp) {
   auto ttgts = GetCurrentUS();
   for (int i = 0; i < repeat; ++i) {
-    ker->Compute(x_data, ztgt_data);
+    ker->ComputeDeprecated(x_data, ztgt_data);
   }
   auto ttgte = GetCurrentUS();
@@ -222,7 +222,7 @@ void vsigmoid_better(
     y[i] = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]);
     y[i] = 0.f - y[i];
   }
-  vexp->Compute(y, y);
+  vexp->ComputeDeprecated(y, y);
   for (int i = 0; i < n; ++i) {
     y[i] = 1.f / (1.f + y[i]);
   }
@@ -253,7 +253,7 @@ TEST(JitKernel, vsigmoid) {
   auto trefe = GetCurrentUS();
   auto ttgts = GetCurrentUS();
   for (int i = 0; i < repeat; ++i) {
-    ker->Compute(x_data, ztgt_data);
+    ker->ComputeDeprecated(x_data, ztgt_data);
   }
   auto ttgte = GetCurrentUS();
@@ -287,7 +287,7 @@ void vtanh_better(
     const int n, const float* x, float* y) {
   const float a = 2.f, b = -1.f;
   vscal->Compute(&a, x, y, n);
-  vsigmoid->Compute(y, y);
+  vsigmoid->ComputeDeprecated(y, y);
   vscal->Compute(&a, y, y, n);
   vaddbias->Compute(&b, y, y, n);
 }
@@ -321,7 +321,7 @@ TEST(JitKernel, vtanh) {
   auto trefe = GetCurrentUS();
   auto ttgts = GetCurrentUS();
   for (int i = 0; i < repeat; ++i) {
-    ker->Compute(x_data, ztgt_data);
+    ker->ComputeDeprecated(x_data, ztgt_data);
   }
   auto ttgte = GetCurrentUS();
@@ -344,8 +344,8 @@ void lstm_ctht_ref(
     const std::shared_ptr<
         const paddle::operators::math::jitkernel::VExpKernel<float>>& vexp_1,
     const int d, float* gates, const float* ct_1, float* ct, float* ht) {
-  vsigmoid_3d->Compute(gates + d, gates + d);
-  vtanh_d->Compute(gates, gates);
+  vsigmoid_3d->ComputeDeprecated(gates + d, gates + d);
+  vtanh_d->ComputeDeprecated(gates, gates);
   const float *i = gates + d, *f = gates + d * 2, *o = gates + d * 3;
   const float min = SIGMOID_THRESHOLD_MIN;
   const float max = SIGMOID_THRESHOLD_MAX;
@@ -355,7 +355,7 @@ void lstm_ctht_ref(
     // H_t = act_cell(C_t) * ogated
     float tmp = ct[k] * 2;
     tmp = 0.f - ((tmp < min) ? min : ((tmp > max) ? max : tmp));
-    vexp_1->Compute(&tmp, &tmp);
+    vexp_1->ComputeDeprecated(&tmp, &tmp);
     tmp = 2.f / (1.f + tmp) - 1.f;
     ht[k] = tmp * o[k];
   }
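The scalar tail of lstm_ctht_ref evaluates act_cell inline through the same sigmoid identity as the tanh kernel: for c = ct[k], tmp = 2 / (1 + e^(-clip(2c))) - 1, i.e. tanh(c) with the argument clipped to the sigmoid thresholds.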
@@ -373,13 +373,13 @@ void lstm_ctht_better(
         const paddle::operators::math::jitkernel::VAddKernel<float>>& vadd_d,
     const int d, float* gates, const float* ct_1, float* ct, float* ht) {
   int d2 = d * 2;
-  vsigmoid_3d->Compute(gates + d, gates + d);
-  vtanh_d->Compute(gates, gates);
+  vsigmoid_3d->ComputeDeprecated(gates + d, gates + d);
+  vtanh_d->ComputeDeprecated(gates, gates);
   vmul_d->Compute(gates, gates + d, gates + d, d);
   vmul_d->Compute(ct_1, gates + d2, gates + d2, d);
   vadd_d->Compute(gates + d, gates + d2, ct, d);
   /* H_t = act_cell(C_t) * ogated */
-  vtanh_d->Compute(ct, gates + d2);
+  vtanh_d->ComputeDeprecated(ct, gates + d2);
   vmul_d->Compute(gates + d2, gates + d * 3, ht, d);
 }
@@ -736,7 +736,7 @@ void vaddrelu_better(
         const paddle::operators::math::jitkernel::VReluKernel<float>>& vrelu,
     const float* x, const float* y, float* z, int d) {
   vadd->Compute(x, y, z, d);
-  vrelu->Compute(z, z);
+  vrelu->ComputeDeprecated(z, z);
 }

 TEST(JitKernel, vaddrelu) {
......