Unverified commit d55481cf, authored by tensor-tang, committed by GitHub

Merge pull request #14241 from tensor-tang/refine/jit/vmulcode

Refine/jit/vmulcode
@@ -25,23 +25,43 @@ namespace gen {
 using namespace platform::jit;  // NOLINT
 bool VMulJitCode::init(int d) {
-  // TODO(TJ): maybe one AVX is enough, AVX above would slow down freq
-  // try more with avx2 or avx512
-  if (MayIUse(avx) || MayIUse(avx2)) {
-    return d % AVX_FLOAT_BLOCK == 0;
-  } else {
-    return false;
-  }
+  // It's not necessary to use avx512 since it would slow down the frequency
+  // and this kernel is not compute bound.
+  return MayIUse(avx);
 }
 void VMulJitCode::generate() {
   // do not need push stack, and do not need save avx512reg if do not use avx512
-  int stride = sizeof(float) * AVX_FLOAT_BLOCK;
+  int offset = 0;
   for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) {
-    vmovups(ymm_src1, ptr[param1 + i * stride]);
-    vmovups(ymm_src2, ptr[param2 + i * stride]);
+    vmovups(ymm_src1, ptr[param1 + offset]);
+    vmovups(ymm_src2, ptr[param2 + offset]);
     vmulps(ymm_dst, ymm_src1, ymm_src2);
-    vmovups(ptr[param3 + stride * i], ymm_dst);
+    vmovups(ptr[param3 + offset], ymm_dst);
+    offset += sizeof(float) * AVX_FLOAT_BLOCK;
   }
+  int rest = num_ % AVX_FLOAT_BLOCK;
+  if (rest >= 4) {
+    vmovups(xmm_src1, ptr[param1 + offset]);
+    vmovups(xmm_src2, ptr[param2 + offset]);
+    vmulps(xmm_dst, xmm_src1, xmm_src2);
+    vmovups(ptr[param3 + offset], xmm_dst);
+    offset += sizeof(float) * 4;
+    rest -= 4;
+  }
+  if (rest >= 2) {
+    vmovq(xmm_src1, ptr[param1 + offset]);
+    vmovq(xmm_src2, ptr[param2 + offset]);
+    vmulps(xmm_dst, xmm_src1, xmm_src2);
+    vmovq(ptr[param3 + offset], xmm_dst);
+    offset += sizeof(float) * 2;
+    rest -= 2;
+  }
+  if (rest > 0) {
+    vmovss(xmm_src1, ptr[param1 + offset]);
+    vmovss(xmm_src2, ptr[param2 + offset]);
+    vmulss(xmm_dst, xmm_src1, xmm_src2);
+    vmovss(ptr[param3 + offset], xmm_dst);
+  }
   ret();
 }
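
With init() relaxed to accept any size on an AVX machine, generate() now has
to cover arbitrary tails itself. The following plain C++ model (illustrative
only, not code from this PR) spells out the computation the emitted
instruction stream performs: full 8-float blocks in the main loop, then at
most one 4-wide, one 2-wide, and one scalar step, mirroring the
vmovups/vmovq/vmovss sequence above (assuming AVX_FLOAT_BLOCK == 8).

    // Each inner loop below corresponds to a single SIMD instruction
    // in the generated code.
    void vmul_model(const float* x, const float* y, float* z, int n) {
      int offset = 0;
      for (int i = 0; i < n / 8; ++i) {  // 256-bit ymm block: vmulps
        for (int j = 0; j < 8; ++j) z[offset + j] = x[offset + j] * y[offset + j];
        offset += 8;
      }
      int rest = n % 8;
      if (rest >= 4) {  // 128-bit xmm block: vmulps
        for (int j = 0; j < 4; ++j) z[offset + j] = x[offset + j] * y[offset + j];
        offset += 4;
        rest -= 4;
      }
      if (rest >= 2) {  // 64-bit pair: vmovq loads + vmulps
        for (int j = 0; j < 2; ++j) z[offset + j] = x[offset + j] * y[offset + j];
        offset += 2;
        rest -= 2;
      }
      if (rest > 0) {  // single float: vmulss
        z[offset] = x[offset] * y[offset];
      }
    }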
@@ -45,15 +45,12 @@ class VMulJitCode : public JitCode {
   reg64_t param3{abi_param3};
   xmm_t xmm_src1 = xmm_t(0);
-  ymm_t ymm_src1 = ymm_t(0);
-  zmm_t zmm_src1 = zmm_t(0);
   xmm_t xmm_src2 = xmm_t(1);
-  ymm_t ymm_src2 = ymm_t(1);
-  zmm_t zmm_src2 = zmm_t(1);
   xmm_t xmm_dst = xmm_t(2);
+  ymm_t ymm_src1 = ymm_t(0);
+  ymm_t ymm_src2 = ymm_t(1);
   ymm_t ymm_dst = ymm_t(2);
-  zmm_t zmm_dst = zmm_t(2);
 };
 } // namespace gen
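
Background on why the zmm declarations can simply be dropped: in Xbyak,
register objects built with the same index name the same physical register at
different widths (xmm0 is the low 128 bits of ymm0). Assuming xmm_t and ymm_t
are the usual Xbyak typedefs, the 4/2/1-wide tail in generate() therefore
reuses the main loop's registers at no extra cost:

    #include <xbyak/xbyak.h>
    // Two views of physical register 0; the Xmm view names the low
    // 128 bits of what the Ymm view names.
    Xbyak::Xmm xmm0_view(0);
    Xbyak::Ymm ymm0_view(0);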
@@ -65,8 +65,9 @@ class VMulKernelImpl : public VMulKernel<T> {
   explicit VMulKernelImpl(int d) : VMulKernel<T>() {
     if (useJIT(d)) {
-      constexpr size_t sz = 256 * 1024;  // TODO(TJ): should be related with d
-      jitcode_.reset(new gen::VMulJitCode(d, sz));
+      // roughly estimate the size of code
+      size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8;
+      jitcode_.reset(new gen::VMulJitCode(d, sz > 4096 ? sz : 4096));
       this->Compute =
           jitcode_->getCode<void (*)(const T*, const T*, T*, int)>();
       return;
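
The new bound can be sanity-checked by hand. One plausible reading of the
expression (an assumption, not spelled out in the PR): each full 8-float block
emits 4 instructions (three moves plus one multiply) of at most ~8 encoded
bytes each, and the 96-byte constant covers the prologue, tail, and ret:

    #include <cstddef>
    // Mirrors the estimate above for AVX_FLOAT_BLOCK == 8.
    // d = 1024: 96 + 1024 / 8 * 4 * 8 = 96 + 4096 = 4192 -> allocate 4192
    // d =   20: 96 +   20 / 8 * 4 * 8 = 96 +   64 =  160 -> clamped to 4096
    size_t estimate_vmul_code_size(int d) {
      size_t sz = 96 + d / 8 * 4 * 8;
      return sz > 4096 ? sz : 4096;  // keep a 4 KB floor, as in the diff
    }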
@@ -578,7 +578,7 @@ void vmul_mkl(const int n, const float* x, const float* y, float* z) {
 TEST(JitKernel, vmul) {
   namespace jit = paddle::operators::math::jitkernel;
-  for (int d : {7, 8, 15, 16, 30, 256, 512, 1000, 1024}) {
+  for (int d : {7, 8, 15, 16, 20, 30, 256, 512, 1000, 1024}) {
     std::vector<float> x(d), y(d);
     std::vector<float> zref(d), ztgt(d);
     RandomVec<float>(d, x.data());
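
The newly added size d = 20 is the interesting one: it decomposes as two
8-float blocks plus a 4-wide tail, exercising the first remainder branch in
generate(). A hedged sketch of a standalone driver for one size, following the
Get/Compute usage of the surrounding test file (header path assumed):

    #include <vector>
    #include "paddle/fluid/operators/math/jit_kernel.h"  // assumed path
    void run_vmul_once(int d) {
      namespace jit = paddle::operators::math::jitkernel;
      std::vector<float> x(d, 1.f), y(d, 2.f), z(d);
      const auto& ker =
          jit::KernelPool::Instance().Get<jit::VMulKernel<float>>(d);
      ker->Compute(x.data(), y.data(), z.data(), d);  // expect z[i] == 2.f
    }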
@@ -800,7 +800,7 @@ TEST(JitKernel, pool) {
   EXPECT_TRUE(std::dynamic_pointer_cast<const jit::Kernel>(pvmul_f) !=
               std::dynamic_pointer_cast<const jit::Kernel>(pvmul_d));
-  const auto& pvmul_from_key = jit::KernelPool::Instance().Get("vmulfany");
+  const auto& pvmul_from_key = jit::KernelPool::Instance().Get("vmulfjit4");
   EXPECT_EQ(pvmul_f, pvmul_from_key);
   const auto& pvmul_from_key2 = jit::KernelPool::Instance().Get("vmulfjit");
   EXPECT_TRUE(pvmul_from_key2 == nullptr);