提交 5047c204 编写于 作者: R Richard Henderson 提交者: Peter Maydell

target/arm: Implement SVE store vector/predicate register

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180627043328.11531-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
上级 68459864
......@@ -793,6 +793,12 @@ LD1RQ_zpri 1010010 .. 00 0.... 001 ... ..... ..... \
### SVE Memory Store Group
# SVE store predicate register
STR_pri 1110010 11 0. ..... 000 ... ..... 0 .... @pd_rn_i9
# SVE store vector register
STR_zri 1110010 11 0. ..... 010 ... ..... ..... @rd_rn_i9
# SVE contiguous store (scalar plus immediate)
# ST1B, ST1H, ST1W, ST1D; require msz <= esz
ST_zpri 1110010 .. esz:2 0.... 111 ... ..... ..... \
......
......@@ -3762,6 +3762,89 @@ static void do_ldr(DisasContext *s, uint32_t vofs, uint32_t len,
tcg_temp_free_i64(t0);
}
/* Similarly for stores: spill LEN bytes of CPU state, starting at byte
 * offset VOFS within cpu_env, to guest memory at reg[RN] + IMM.
 *
 * LEN is a full vector register size (multiple of 8) or a predicate
 * register size (any multiple of 2), so the tail handling below only
 * needs to cope with remainders of 2, 4 and 6 bytes.
 */
static void do_str(DisasContext *s, uint32_t vofs, uint32_t len,
                   int rn, int imm)
{
    /* Portion of LEN coverable by whole 8-byte stores, and the leftover. */
    uint32_t len_align = QEMU_ALIGN_DOWN(len, 8);
    uint32_t len_remain = len % 8;
    /* Number of store operations: one per aligned doubleword, plus one
     * per set bit of the remainder (a 6-byte tail takes two stores: 4+2).
     */
    uint32_t nparts = len / 8 + ctpop8(len_remain);
    int midx = get_mem_index(s);
    TCGv_i64 addr, t0;

    addr = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();

    /* Note that unpredicated load/store of vector/predicate registers
     * are defined as a stream of bytes, which equates to little-endian
     * operations on larger quantities.  There is no nice way to force
     * a little-endian store for aarch64_be-linux-user out of line.
     *
     * Attempt to keep code expansion to a minimum by limiting the
     * amount of unrolling done.
     */
    if (nparts <= 4) {
        /* Few enough pieces: emit a fully unrolled sequence of stores. */
        int i;

        for (i = 0; i < len_align; i += 8) {
            tcg_gen_ld_i64(t0, cpu_env, vofs + i);
            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
            tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEQ);
        }
    } else {
        /* Otherwise emit a TCG-level loop over the aligned portion.
         * The counter I is a local (stack-backed) temp so that it
         * survives the branch back to LOOP.
         */
        TCGLabel *loop = gen_new_label();
        TCGv_ptr t2, i = tcg_const_local_ptr(0);

        gen_set_label(loop);

        /* Load the next doubleword of CPU state: *(cpu_env + i + vofs). */
        t2 = tcg_temp_new_ptr();
        tcg_gen_add_ptr(t2, cpu_env, i);
        tcg_gen_ld_i64(t0, t2, vofs);

        /* Minimize the number of local temps that must be re-read from
         * the stack each iteration.  Instead, re-compute values other
         * than the loop counter.
         */
        /* Guest address: reg[rn] + imm + i, built from the counter alone. */
        tcg_gen_addi_ptr(t2, i, imm);
        tcg_gen_extu_ptr_i64(addr, t2);
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));
        tcg_temp_free_ptr(t2);

        tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEQ);

        tcg_gen_addi_ptr(i, i, 8);

        tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
        tcg_temp_free_ptr(i);
    }

    /* Predicate register stores can be any multiple of 2.  */
    if (len_remain) {
        tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
        tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + len_align);
        switch (len_remain) {
        case 2:
        case 4:
        case 8:
            /* Power-of-two tail: a single store of that width.
             * ctz32 maps 2/4/8 to MO_16/MO_32/MO_64.
             */
            tcg_gen_qemu_st_i64(t0, addr, midx, MO_LE | ctz32(len_remain));
            break;

        case 6:
            /* 6-byte tail: low 32 bits, then the next 16 bits. */
            tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEUL);
            tcg_gen_addi_i64(addr, addr, 4);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEUW);
            break;

        default:
            g_assert_not_reached();
        }
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i64(t0);
}
static bool trans_LDR_zri(DisasContext *s, arg_rri *a, uint32_t insn)
{
if (sve_access_check(s)) {
......@@ -3782,6 +3865,26 @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a, uint32_t insn)
return true;
}
/* STR (vector): store the full SVE vector register Zd to
 * address reg[rn] + imm * VL.
 */
static bool trans_STR_zri(DisasContext *s, arg_rri *a, uint32_t insn)
{
    if (!sve_access_check(s)) {
        return true;
    }

    int vl = vec_full_reg_size(s);
    do_str(s, vec_full_reg_offset(s, a->rd), vl, a->rn, a->imm * vl);
    return true;
}
/* STR (predicate): store the full SVE predicate register Pd to
 * address reg[rn] + imm * (VL / 8).
 */
static bool trans_STR_pri(DisasContext *s, arg_rri *a, uint32_t insn)
{
    if (!sve_access_check(s)) {
        return true;
    }

    int pl = pred_full_reg_size(s);
    do_str(s, pred_full_reg_offset(s, a->rd), pl, a->rn, a->imm * pl);
    return true;
}
/*
*** SVE Memory - Contiguous Load Group
*/
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册