/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/jit_code.h"
#include "paddle/fluid/operators/math/jit_kernel.h"
#include "paddle/fluid/platform/cpu_info.h"

namespace paddle {
namespace operators {
namespace math {
namespace jitkernel {
namespace gen {

using namespace platform::jit;  // NOLINT

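// scalar_index semantics: 0 means both inputs are vectors; 1 or 2 means the
// corresponding input is a single float that is broadcast before the loop.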
bool VXXJitCode::init(int d, int scalar_index) {
  // It's not necessary to use AVX512 here: it would lower the core frequency,
  // and this kernel is not compute bound.
  return MayIUse(avx) && scalar_index >= 0 && scalar_index <= 2;
}

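// Emits code computing dst = src1 (mul|add) src2 over num_ floats, with an
// optional fused ReLU. param1 and param2 address the inputs (one may be a
// scalar, per scalar_index_); param3 addresses the output.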
void VXXJitCode::generate() {
  // No need to push the stack, and no need to save AVX512 registers when
  // AVX512 is not used.
  int offset = 0;
  if (with_relu_) {
    vxorps(ymm_zero, ymm_zero, ymm_zero);
  }
  if (scalar_index_ == 1) {
    vbroadcastss(ymm_src1, ptr[param1]);
  } else if (scalar_index_ == 2) {
    vbroadcastss(ymm_src2, ptr[param2]);
  }
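  // Main loop: process AVX_FLOAT_BLOCK (8) floats per iteration in 256-bit
  // YMM registers.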
  for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) {
    if (scalar_index_ != 1) {
      vmovups(ymm_src1, ptr[param1 + offset]);
    }
    if (scalar_index_ != 2) {
      vmovups(ymm_src2, ptr[param2 + offset]);
    }
    if (type_ == operand_type::mul) {
      vmulps(ymm_dst, ymm_src1, ymm_src2);
    } else if (type_ == operand_type::add) {
      vaddps(ymm_dst, ymm_src1, ymm_src2);
    }
    if (with_relu_) {
      vmaxps(ymm_dst, ymm_zero, ymm_dst);
    }
    vmovups(ptr[param3 + offset], ymm_dst);
    offset += sizeof(float) * AVX_FLOAT_BLOCK;
  }
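  // Tail handling: narrow from YMM to XMM and process 4, then 2, then 1
  // remaining float(s).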
  int rest = num_ % AVX_FLOAT_BLOCK;
  if (rest >= 4) {
    if (scalar_index_ != 1) {
      vmovups(xmm_src1, ptr[param1 + offset]);
    }
    if (scalar_index_ != 2) {
      vmovups(xmm_src2, ptr[param2 + offset]);
    }
    if (type_ == operand_type::mul) {
      vmulps(xmm_dst, xmm_src1, xmm_src2);
    } else if (type_ == operand_type::add) {
      vaddps(xmm_dst, xmm_src1, xmm_src2);
    }
    if (with_relu_) {
      vmaxps(xmm_dst, xmm_zero, xmm_dst);
    }
    vmovups(ptr[param3 + offset], xmm_dst);
    offset += sizeof(float) * 4;
    rest -= 4;
  }
  if (rest >= 2) {
    if (scalar_index_ != 1) {
      vmovups(xmm_src1, ptr[param1 + offset]);
    }
    if (scalar_index_ != 2) {
      vmovups(xmm_src2, ptr[param2 + offset]);
    }
    if (type_ == operand_type::mul) {
      vmulps(xmm_dst, xmm_src1, xmm_src2);
    } else if (type_ == operand_type::add) {
      vaddps(xmm_dst, xmm_src1, xmm_src2);
    }
    if (with_relu_) {
      vmaxps(xmm_dst, xmm_zero, xmm_dst);
    }
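    // Store only the low 64 bits (two floats) of xmm_dst.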
    vmovq(ptr[param3 + offset], xmm_dst);
    offset += sizeof(float) * 2;
    rest -= 2;
  }
  if (rest > 0) {
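    // Exactly one float remains here; vmovups still loads a full 16 bytes,
    // but only the low lane is computed (vmulss/vaddss) and stored (vmovss).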
    if (scalar_index_ != 1) {
      vmovups(xmm_src1, ptr[param1 + offset]);
    }
    if (scalar_index_ != 2) {
      vmovups(xmm_src2, ptr[param2 + offset]);
    }
    if (type_ == operand_type::mul) {
      vmulss(xmm_dst, xmm_src1, xmm_src2);
    } else if (type_ == operand_type::add) {
      vaddss(xmm_dst, xmm_src1, xmm_src2);
    }
    if (with_relu_) {
      vmaxps(xmm_dst, xmm_zero, xmm_dst);
    }
    vmovss(ptr[param3 + offset], xmm_dst);
  }
  ret();
}

bool ReluJitCode::init(int d) { return MayIUse(avx); }

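// Emits dst[i] = max(0.f, src[i]) over num_ floats, using the same 8/4/2/1
// blocking as VXXJitCode::generate.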
void ReluJitCode::generate() {
  int offset = 0;
  vxorps(ymm_zero, ymm_zero, ymm_zero);
  for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) {
    vmovups(ymm_src, ptr[param1 + offset]);
    vmaxps(ymm_dst, ymm_zero, ymm_src);
    vmovups(ptr[param2 + offset], ymm_dst);
    offset += sizeof(float) * AVX_FLOAT_BLOCK;
  }
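  // Tail: the same narrowing pattern as above (4, 2, then 1 float).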
  int rest = num_ % AVX_FLOAT_BLOCK;
  if (rest >= 4) {
    vmovups(xmm_src, ptr[param1 + offset]);
    vmaxps(xmm_dst, xmm_zero, xmm_src);
    vmovups(ptr[param2 + offset], xmm_dst);
    offset += sizeof(float) * 4;
    rest -= 4;
  }
  if (rest >= 2) {
    vmovups(xmm_src, ptr[param1 + offset]);
    vmaxps(xmm_dst, xmm_zero, xmm_src);
    vmovq(ptr[param2 + offset], xmm_dst);
    offset += sizeof(float) * 2;
    rest -= 2;
  }
  if (rest > 0) {
    vmovups(xmm_src, ptr[param1 + offset]);
    vmaxps(xmm_dst, xmm_zero, xmm_src);
    vmovss(ptr[param2 + offset], xmm_dst);
  }
  ret();
}
}  // namespace gen
}  // namespace jitkernel
}  // namespace math
}  // namespace operators
}  // namespace paddle