Commit bc48092f authored by Richard Henderson

target/arm: Use vector infrastructure for aa64 add/sub/logic

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Parent 8b3495ea
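
This patch converts the AArch64 AdvSIMD logic ops (AND, BIC, ORR, ORN, EOR, BSL, BIT, BIF) and the integer ADD/SUB of the three-same group to the TCG generic vector (gvec) expanders, which operate on CPUARMState offsets and sizes instead of one 64-bit TCG temporary per pass. As a rough illustration (not part of the patch itself), the Q-register AND case now boils down to a single gvec call:

    /* Sketch only: what gen_gvec_fn3() amounts to for AND Vd.16B, Vn.16B, Vm.16B. */
    tcg_gen_gvec_and(0, vec_full_reg_offset(s, rd),
                     vec_full_reg_offset(s, rn),
                     vec_full_reg_offset(s, rm),
                     16, vec_full_reg_size(s));

The gvec layer can then emit host vector instructions, fall back to 64-bit integer operations, or call an out-of-line helper, whichever suits the host.
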
@@ -21,6 +21,7 @@
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "tcg-op.h"
+#include "tcg-op-gvec.h"
 #include "qemu/log.h"
 #include "arm_ldst.h"
 #include "translate.h"
@@ -84,6 +85,10 @@ typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
 typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 
+/* Note that the gvec expanders operate on offsets + sizes. */
+typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
+                        uint32_t, uint32_t, uint32_t);
+
 /* initialize TCG globals. */
 void a64_translate_init(void)
 {
@@ -548,6 +553,14 @@ static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
     return ret;
 }
 
+/* Return the byte size of the "whole" vector register, VL / 8. */
+static inline int vec_full_reg_size(DisasContext *s)
+{
+    /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
+       In the meantime this is just the AdvSIMD length of 128. */
+    return 128 / 8;
+}
+
 /* Return the offset into CPUARMState of a slice (from
  * the least significant end) of FP register Qn (ie
  * Dn, Sn, Hn or Bn).
@@ -618,6 +631,23 @@ static TCGv_ptr get_fpstatus_ptr(void)
     return statusptr;
 }
 
+/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
+static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
+                         GVecGen3Fn *gvec_fn, int vece)
+{
+    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
+/* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */
+static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
+                         int rn, int rm, const GVecGen3 *gvec_op)
+{
+    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
+                   vec_full_reg_size(s), gvec_op);
+}
+
 /* Set ZF and NF based on a 64 bit result. This is alas fiddlier
  * than the 32 bit equivalent.
  */
@@ -9072,85 +9102,111 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
     }
 }
 
+static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(rn, rn, rm);
+    tcg_gen_and_i64(rn, rn, rd);
+    tcg_gen_xor_i64(rd, rm, rn);
+}
+
+static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(rn, rn, rd);
+    tcg_gen_and_i64(rn, rn, rm);
+    tcg_gen_xor_i64(rd, rd, rn);
+}
+
+static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(rn, rn, rd);
+    tcg_gen_andc_i64(rn, rn, rm);
+    tcg_gen_xor_i64(rd, rd, rn);
+}
+
+static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(vece, rn, rn, rm);
+    tcg_gen_and_vec(vece, rn, rn, rd);
+    tcg_gen_xor_vec(vece, rd, rm, rn);
+}
+
+static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(vece, rn, rn, rd);
+    tcg_gen_and_vec(vece, rn, rn, rm);
+    tcg_gen_xor_vec(vece, rd, rd, rn);
+}
+
+static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(vece, rn, rn, rd);
+    tcg_gen_andc_vec(vece, rn, rn, rm);
+    tcg_gen_xor_vec(vece, rd, rd, rn);
+}
+
 /* Logic op (opcode == 3) subgroup of C3.6.16. */
 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
 {
+    static const GVecGen3 bsl_op = {
+        .fni8 = gen_bsl_i64,
+        .fniv = gen_bsl_vec,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+        .load_dest = true
+    };
+    static const GVecGen3 bit_op = {
+        .fni8 = gen_bit_i64,
+        .fniv = gen_bit_vec,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+        .load_dest = true
+    };
+    static const GVecGen3 bif_op = {
+        .fni8 = gen_bif_i64,
+        .fniv = gen_bif_vec,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+        .load_dest = true
+    };
     int rd = extract32(insn, 0, 5);
     int rn = extract32(insn, 5, 5);
     int rm = extract32(insn, 16, 5);
     int size = extract32(insn, 22, 2);
     bool is_u = extract32(insn, 29, 1);
     bool is_q = extract32(insn, 30, 1);
-    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
-    int pass;
 
     if (!fp_access_check(s)) {
         return;
     }
 
-    tcg_op1 = tcg_temp_new_i64();
-    tcg_op2 = tcg_temp_new_i64();
-    tcg_res[0] = tcg_temp_new_i64();
-    tcg_res[1] = tcg_temp_new_i64();
-
-    for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
-        read_vec_element(s, tcg_op1, rn, pass, MO_64);
-        read_vec_element(s, tcg_op2, rm, pass, MO_64);
-
-        if (!is_u) {
-            switch (size) {
-            case 0: /* AND */
-                tcg_gen_and_i64(tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            case 1: /* BIC */
-                tcg_gen_andc_i64(tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            case 2: /* ORR */
-                tcg_gen_or_i64(tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            case 3: /* ORN */
-                tcg_gen_orc_i64(tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            }
-        } else {
-            if (size != 0) {
-                /* B* ops need res loaded to operate on */
-                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
-            }
+    switch (size + 4 * is_u) {
+    case 0: /* AND */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
+        return;
+    case 1: /* BIC */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
+        return;
+    case 2: /* ORR */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
+        return;
+    case 3: /* ORN */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
+        return;
+    case 4: /* EOR */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
+        return;
 
-            switch (size) {
-            case 0: /* EOR */
-                tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            case 1: /* BSL bitwise select */
-                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_op2);
-                tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_res[pass]);
-                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op1);
-                break;
-            case 2: /* BIT, bitwise insert if true */
-                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
-                tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_op2);
-                tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
-                break;
-            case 3: /* BIF, bitwise insert if false */
-                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
-                tcg_gen_andc_i64(tcg_op1, tcg_op1, tcg_op2);
-                tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
-                break;
-            }
-        }
-    }
+    case 5: /* BSL bitwise select */
+        gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
+        return;
+    case 6: /* BIT, bitwise insert if true */
+        gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
+        return;
+    case 7: /* BIF, bitwise insert if false */
+        gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
+        return;
 
-    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
-    if (!is_q) {
-        tcg_gen_movi_i64(tcg_res[1], 0);
+    default:
+        g_assert_not_reached();
     }
-    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
-
-    tcg_temp_free_i64(tcg_op1);
-    tcg_temp_free_i64(tcg_op2);
-    tcg_temp_free_i64(tcg_res[0]);
-    tcg_temp_free_i64(tcg_res[1]);
 }
 
 /* Helper functions for 32 bit comparisons */
@@ -9450,6 +9506,16 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
         return;
     }
 
+    switch (opcode) {
+    case 0x10: /* ADD, SUB */
+        if (u) {
+            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
+        } else {
+            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
+        }
+        return;
+    }
+
     if (size == 3) {
         assert(is_q);
         for (pass = 0; pass < 2; pass++) {
@@ -9622,16 +9688,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
             genfn = fns[size][u];
             break;
         }
-        case 0x10: /* ADD, SUB */
-        {
-            static NeonGenTwoOpFn * const fns[3][2] = {
-                { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
-                { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
-                { tcg_gen_add_i32, tcg_gen_sub_i32 },
-            };
-            genfn = fns[size][u];
-            break;
-        }
         case 0x11: /* CMTST, CMEQ */
         {
             static NeonGenTwoOpFn * const fns[3][2] = {
......
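
The BSL/BIT/BIF expanders added above use the xor/and/xor formulation, which needs no scratch register: three operations on the operands themselves. A minimal standalone check (hypothetical harness, not part of this commit) that brute-forces the identity behind gen_bsl_i64 against the architectural definition of BSL, result = (sel & rn) | (~sel & rm), with the old destination acting as the select mask:

#include <assert.h>
#include <stdint.h>

/* Same xor/and/xor sequence as gen_bsl_i64, narrowed to 8 bits so that
 * all operand combinations can be checked exhaustively. */
static uint8_t bsl_xor_form(uint8_t rd, uint8_t rn, uint8_t rm)
{
    rn ^= rm;           /* rn = rn ^ rm */
    rn &= rd;           /* rn = (rn ^ rm) & rd */
    return rm ^ rn;     /* rd = rm ^ ((rn ^ rm) & rd) */
}

int main(void)
{
    for (int d = 0; d < 256; d++) {
        for (int n = 0; n < 256; n++) {
            for (int m = 0; m < 256; m++) {
                /* BSL keeps bits of rn where the destination bit is set
                 * and bits of rm where it is clear. */
                uint8_t ref = (uint8_t)((d & n) | (~d & m));
                assert(bsl_xor_form((uint8_t)d, (uint8_t)n, (uint8_t)m) == ref);
            }
        }
    }
    return 0;
}

All three ops read the previous destination value, hence load_dest = true in the GVecGen3 descriptors; gen_bit_i64 and gen_bif_i64 apply the same trick with rm, respectively its complement, as the select mask.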