Commit eba01619 authored by Stefan Hajnoczi

Merge remote-tracking branch 'rth/tags/pull-s390-20170512' into staging

Queued target/s390 patches

# gpg: Signature made Sat 13 May 2017 12:33:08 AM BST
# gpg:                using RSA key 0xAD1270CC4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"
# Primary key fingerprint: 9CB1 8DDA F8E8 49AD 2AFC  16A4 AD12 70CC 4DD0 279B

* rth/tags/pull-s390-20170512:
  target/s390x: implement serialization in BRANCH CONDITION
  target/s390x: fix SIGNAL PROCESSOR return value
  target/s390x: mask the SIGP order_code using SIGP_ORDER_MASK
  target/s390x: Use atomic operations for LOAD AND OP
  target/s390x: Use atomic operations for COMPARE SWAP
  target/s390x: Implement LOAD PAIR DISJOINT
  target/s390x: Diagnose specification exception for atomics
  target/s390x: Implement LOAD PROGRAM PARAMETER
  target/s390x: Implement STORE FACILITIES LIST EXTENDED
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
@@ -430,6 +430,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->write_elf64_note = s390_cpu_write_elf64_note;
cc->cpu_exec_interrupt = s390_cpu_exec_interrupt;
cc->debug_excp_handler = s390x_cpu_debug_excp_handler;
cc->do_unaligned_access = s390x_cpu_do_unaligned_access;
#endif
cc->disas_set_info = s390_cpu_disas_set_info;
@@ -480,6 +480,9 @@ int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#ifndef CONFIG_USER_ONLY
void do_restart_interrupt(CPUS390XState *env);
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
uint8_t *ar)
@@ -1075,6 +1078,9 @@ struct sysib_322 {
#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1
#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2
/* SIGP order code mask corresponding to bit positions 56-63 */
#define SIGP_ORDER_MASK 0x000000ff
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, bool exc);
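In the MSB-0 bit numbering used by the architecture, bit positions 56-63 of a 64-bit value are simply its low-order byte, hence the 0xff mask above. A minimal stand-alone check of that equivalence (illustrative sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define SIGP_ORDER_MASK 0x000000ff

int main(void)
{
    /* Bits 56-63 (MSB-0) of a 64-bit value are its low 8 bits. */
    uint64_t order_code = 0x1122334455667705ULL;
    assert((order_code & SIGP_ORDER_MASK) == 0x05);
    return 0;
}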
@@ -718,4 +718,20 @@ void s390x_cpu_debug_excp_handler(CPUState *cs)
cpu_loop_exit_noexc(cs);
}
}
/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER);
}
#endif /* CONFIG_USER_ONLY */
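An access is unaligned when its address is not a multiple of the access size; with MO_ALIGN set, the TCG memory subsystem routes such accesses to the handler above. A minimal sketch of the alignment predicate itself (illustrative helper, not a QEMU API):

#include <stdbool.h>
#include <stdint.h>

/* An access of the given power-of-two size is aligned when the address
   is a multiple of that size; atomics such as CS/CSG require this and
   otherwise raise a specification exception, as in the handler above. */
static bool is_aligned(uint64_t addr, unsigned size)
{
    return (addr & ((uint64_t)size - 1)) == 0;
}

int main(void)
{
    return is_aligned(0x1008, 8) && !is_aligned(0x100c, 8) ? 0 : 1;
}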
@@ -25,6 +25,7 @@ DEF_HELPER_3(cxgb, i64, env, s64, i32)
DEF_HELPER_3(celgb, i64, env, i64, i32)
DEF_HELPER_3(cdlgb, i64, env, i64, i32)
DEF_HELPER_3(cxlgb, i64, env, i64, i32)
DEF_HELPER_4(cdsg, void, env, i64, i32, i32)
DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
@@ -83,6 +84,8 @@ DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64)
DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_2(stfle, i32, env, i64)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
@@ -239,12 +239,12 @@
D(0xec7d, CLGIJ, RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1)
/* COMPARE AND SWAP */
D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, 0)
D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, 0)
D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, 1)
D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL)
D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL)
D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ)
/* COMPARE DOUBLE AND SWAP */
D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, 1)
D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, 1)
D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ)
D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ)
C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0)
/* COMPARE AND TRAP */
@@ -390,20 +390,20 @@
/* LOAD ADDRESS RELATIVE LONG */
C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0)
/* LOAD AND ADD */
C(0xebf8, LAA, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, adds32)
C(0xebe8, LAAG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, adds64)
D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL)
D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ)
/* LOAD AND ADD LOGICAL */
C(0xebfa, LAAL, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, addu32)
C(0xebea, LAALG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, addu64)
D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL)
D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ)
/* LOAD AND AND */
C(0xebf4, LAN, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, and, nz32)
C(0xebe4, LANG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, and, nz64)
D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL)
D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ)
/* LOAD AND EXCLUSIVE OR */
C(0xebf7, LAX, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, xor, nz32)
C(0xebe7, LAXG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, xor, nz64)
D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL)
D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ)
/* LOAD AND OR */
C(0xebf6, LAO, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, or, nz32)
C(0xebe6, LAOG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, or, nz64)
D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL)
D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ)
/* LOAD AND TEST */
C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32)
C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64)
@@ -504,7 +504,9 @@
C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0)
C(0xebf2, LOC, RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0)
C(0xebe2, LOCG, RSY_b, LOC, r1, m2_64, r1, 0, loc, 0)
/* LOAD PAIR DISJOINT TODO */
/* LOAD PAIR DISJOINT */
D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL)
D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ)
/* LOAD POSITIVE */
C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32)
C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64)
@@ -747,6 +749,8 @@
C(0xe33e, STRV, RXY_a, Z, la2, r1_32u, new, m1_32, rev32, 0)
C(0xe32f, STRVG, RXY_a, Z, la2, r1_o, new, m1_64, rev64, 0)
/* STORE FACILITY LIST EXTENDED */
C(0xb2b0, STFLE, S, SFLE, 0, a2, 0, 0, stfle, 0)
/* STORE FPC */
C(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0)
@@ -843,6 +847,8 @@
/* LOAD CONTROL */
C(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0)
C(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0)
/* LOAD PROGRAM PARAMETER */
C(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0)
/* LOAD PSW */
C(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0)
/* LOAD PSW EXTENDED */
@@ -1764,8 +1764,6 @@ static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
return SIGP_CC_ORDER_CODE_ACCEPTED;
}
#define SIGP_ORDER_MASK 0x000000ff
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
CPUS390XState *env = &cpu->env;
@@ -23,6 +23,7 @@
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
@@ -844,6 +845,45 @@ uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
return cc;
}
void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    bool fail;

    if (parallel_cpus) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
        fail = !int128_eq(oldv, cmpv);
#endif
    } else {
        uint64_t oldh, oldl;

        oldh = cpu_ldq_data_ra(env, addr + 0, ra);
        oldl = cpu_ldq_data_ra(env, addr + 8, ra);

        oldv = int128_make128(oldl, oldh);
        fail = !int128_eq(oldv, cmpv);
        if (fail) {
            newv = oldv;
        }

        cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
        cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
    }

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}
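CDSG is a 128-bit compare-and-swap on an even/odd register pair: CC 0 if the 16-byte field matched and was replaced, CC 1 otherwise, with R1:R1+1 always receiving the old memory contents. A stand-alone C sketch of the serial-path semantics above, assuming host unsigned __int128 support (the names are illustrative, not QEMU API):

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the serial path of HELPER(cdsg): compare the
   16-byte field at *mem against *cmp; if equal, replace it with new_val
   and return CC 0, otherwise leave memory unchanged and return CC 1.
   The old memory value is always written back to *cmp (R1:R1+1). */
static int cdsg_model(unsigned __int128 *mem,
                      unsigned __int128 *cmp,
                      unsigned __int128 new_val)
{
    unsigned __int128 old = *mem;
    int fail = (old != *cmp);

    if (!fail) {
        *mem = new_val;
    }
    *cmp = old;          /* R1:R1+1 receives the old memory contents */
    return fail;         /* becomes env->cc_op */
}

int main(void)
{
    unsigned __int128 mem = 42, cmp = 42;
    int cc = cdsg_model(&mem, &cmp, 99);
    printf("cc=%d mem_low=%llu\n", cc, (unsigned long long)mem);
    return 0;
}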
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
@@ -517,8 +517,7 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
/* Remember: Use "R1 or R1 + 1, whichever is the odd-numbered register"
as parameter (input). Status (output) is always R1. */
/* sigp contains the order code in bit positions 56-63, mask it here. */
switch (order_code & 0xff) {
switch (order_code & SIGP_ORDER_MASK) {
case SIGP_SET_ARCH:
/* switch arch */
break;
@@ -678,3 +677,62 @@ void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
}
}
#endif
/* The maximum bit defined at the moment is 129.  */
#define MAX_STFL_WORDS  3

/* Canonicalize the current cpu's features into the 64-bit words required
   by STFLE.  Return the index of the highest non-zero word, i.e. one less
   than the number of valid doublewords.  */
static unsigned do_stfle(CPUS390XState *env, uint64_t words[MAX_STFL_WORDS])
{
    S390CPU *cpu = s390_env_get_cpu(env);
    const unsigned long *features = cpu->model->features;
    unsigned max_bit = 0;
    S390Feat feat;

    memset(words, 0, sizeof(uint64_t) * MAX_STFL_WORDS);

    if (test_bit(S390_FEAT_ZARCH, features)) {
        /* z/Architecture is always active if around */
        words[0] = 1ull << (63 - 2);
    }

    for (feat = find_first_bit(features, S390_FEAT_MAX);
         feat < S390_FEAT_MAX;
         feat = find_next_bit(features, S390_FEAT_MAX, feat + 1)) {
        const S390FeatDef *def = s390_feat_def(feat);
        if (def->type == S390_FEAT_TYPE_STFL) {
            unsigned bit = def->bit;
            if (bit > max_bit) {
                max_bit = bit;
            }
            assert(bit / 64 < MAX_STFL_WORDS);
            words[bit / 64] |= 1ULL << (63 - bit % 64);
        }
    }

    return max_bit / 64;
}

void HELPER(stfl)(CPUS390XState *env)
{
    uint64_t words[MAX_STFL_WORDS];

    do_stfle(env, words);
    cpu_stl_data(env, 200, words[0] >> 32);
}

uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    uint64_t words[MAX_STFL_WORDS];
    unsigned count_m1 = env->regs[0] & 0xff;
    unsigned max_m1 = do_stfle(env, words);
    unsigned i;

    /* Store the requested number of doublewords, zero-filling any
       doublewords beyond those we track.  */
    for (i = 0; i <= count_m1; ++i) {
        cpu_stq_data(env, addr + 8 * i, i < MAX_STFL_WORDS ? words[i] : 0);
    }
    env->regs[0] = deposit64(env->regs[0], 0, 8, max_m1);
    return (count_m1 >= max_m1 ? 0 : 3);
}
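STFLE numbers facility bits MSB-first: bit 0 is the most significant bit of the first doubleword, which is why do_stfle() sets bit 63 - bit % 64 of word bit / 64. A stand-alone sketch of that mapping with an illustrative check:

#include <assert.h>
#include <stdint.h>

/* Set STFLE facility bit 'bit' (IBM MSB-0 numbering) in a word array,
   mirroring the words[bit / 64] |= 1ULL << (63 - bit % 64) logic above. */
static void set_facility_bit(uint64_t *words, unsigned bit)
{
    words[bit / 64] |= 1ULL << (63 - bit % 64);
}

int main(void)
{
    uint64_t words[3] = { 0, 0, 0 };

    set_facility_bit(words, 2);    /* z/Architecture active */
    set_facility_bit(words, 129);  /* the highest bit tracked above */

    assert(words[0] == 1ULL << 61);
    assert(words[2] == 1ULL << 62);
    return 0;
}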
@@ -1194,6 +1194,7 @@ typedef enum DisasFacility {
FAC_SCF, /* store clock fast */
FAC_SFLE, /* store facility list extended */
FAC_ILA, /* interlocked access facility 1 */
FAC_LPP, /* load-program-parameter */
} DisasFacility;
struct DisasInsn {
@@ -1517,6 +1518,21 @@ static ExitStatus op_bc(DisasContext *s, DisasOps *o)
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
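BCR 14,0 and BCR 15,0 perform no branch; they act as serializing instructions, which the translation above models as a full memory barrier via tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC). A rough host-level analogue of that barrier in portable C11 (illustrative only; this is not how TCG emits it):

#include <stdatomic.h>

/* Approximate C11 analogue of the barrier emitted for BCR 14,0 and
   BCR 15,0 above: a sequentially consistent fence that orders all
   prior memory accesses before all subsequent ones. */
static inline void serialize(void)
{
    atomic_thread_fence(memory_order_seq_cst);
}

int main(void)
{
    serialize();
    return 0;
}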
@@ -1942,102 +1958,47 @@ static ExitStatus op_cps(DisasContext *s, DisasOps *o)
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
/* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
int d2 = get_field(s->fields, d2);
int b2 = get_field(s->fields, b2);
int is_64 = s->insn->data;
TCGv_i64 addr, mem, cc, z;
TCGv_i64 addr, cc;
/* Note that in1 = R3 (new value) and
in2 = (zero-extended) R1 (expected value). */
/* Load the memory into the (temporary) output. While the PoO only talks
about moving the memory to R1 on inequality, if we include equality it
means that R1 is equal to the memory in all conditions. */
addr = get_address(s, 0, b2, d2);
if (is_64) {
tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
} else {
tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
}
tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
get_mem_index(s), s->insn->data | MO_ALIGN);
tcg_temp_free_i64(addr);
/* Are the memory and expected values (un)equal? Note that this setcond
produces the output CC value, thus the NE sense of the test. */
cc = tcg_temp_new_i64();
tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
/* If the memory and expected values are equal (CC==0), copy R3 to MEM.
Recall that we are allowed to unconditionally issue the store (and
thus any possible write trap), so (re-)store the original contents
of MEM in case of inequality. */
z = tcg_const_i64(0);
mem = tcg_temp_new_i64();
tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
if (is_64) {
tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
} else {
tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
}
tcg_temp_free_i64(z);
tcg_temp_free_i64(mem);
tcg_temp_free_i64(addr);
/* Store CC back to cc_op. Wait until after the store so that any
exception gets the old cc_op value. */
tcg_gen_extrl_i64_i32(cc_op, cc);
tcg_temp_free_i64(cc);
set_cc_static(s);
return NO_EXIT;
}
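The rewritten op_cs funnels COMPARE AND SWAP through a single atomic cmpxchg: R1 always ends up holding the old memory value, and CC is 0 on equality, 1 otherwise. A stand-alone C11 sketch of those semantics for the 64-bit (CSG) case, with illustrative names rather than the TCG API:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative CSG model: atomically compare *mem with *r1; if equal,
   store r3 and return CC 0, else load the memory value into *r1 and
   return CC 1.  Mirrors tcg_gen_atomic_cmpxchg_i64 + setcond above. */
static int csg_model(_Atomic uint64_t *mem, uint64_t *r1, uint64_t r3)
{
    uint64_t expected = *r1;
    int swapped = atomic_compare_exchange_strong(mem, &expected, r3);

    *r1 = expected;        /* old memory value in all cases */
    return !swapped;       /* CC: 0 if swapped, 1 if not */
}

int main(void)
{
    _Atomic uint64_t mem = 5;
    uint64_t r1 = 5;
    return csg_model(&mem, &r1, 7);  /* returns CC 0, mem is now 7 */
}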
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
/* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
int r1 = get_field(s->fields, r1);
int r3 = get_field(s->fields, r3);
int d2 = get_field(s->fields, d2);
int b2 = get_field(s->fields, b2);
TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
TCGv_i64 addr;
TCGv_i32 t_r1, t_r3;
/* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
addr = get_address(s, 0, b2, d2);
t_r1 = tcg_const_i32(r1);
t_r3 = tcg_const_i32(r3);
gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
tcg_temp_free_i64(addr);
tcg_temp_free_i32(t_r1);
tcg_temp_free_i32(t_r3);
addrh = get_address(s, 0, b2, d2);
addrl = get_address(s, 0, b2, d2 + 8);
outh = tcg_temp_new_i64();
outl = tcg_temp_new_i64();
tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
/* Fold the double-word compare with arithmetic. */
cc = tcg_temp_new_i64();
z = tcg_temp_new_i64();
tcg_gen_xor_i64(cc, outh, regs[r1]);
tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
tcg_gen_or_i64(cc, cc, z);
tcg_gen_movi_i64(z, 0);
tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
memh = tcg_temp_new_i64();
meml = tcg_temp_new_i64();
tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
tcg_temp_free_i64(z);
tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
tcg_temp_free_i64(memh);
tcg_temp_free_i64(meml);
tcg_temp_free_i64(addrh);
tcg_temp_free_i64(addrl);
/* Save back state now that we've passed all exceptions. */
tcg_gen_mov_i64(regs[r1], outh);
tcg_gen_mov_i64(regs[r1 + 1], outl);
tcg_gen_extrl_i64_i32(cc_op, cc);
tcg_temp_free_i64(outh);
tcg_temp_free_i64(outl);
tcg_temp_free_i64(cc);
set_cc_static(s);
return NO_EXIT;
}
@@ -2363,6 +2324,50 @@ static ExitStatus op_iske(DisasContext *s, DisasOps *o)
}
#endif
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
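Each LOAD AND <OP> instruction atomically applies the operation to storage, returns the original storage value in R1, and sets CC from the result of the operation; hence the fetch-op above followed by a recomputation used only for CC. A C11 sketch of LAAG's behavior (illustrative model, not the QEMU helper API; overflow, CC 3, is omitted for brevity):

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative LAAG model: *r1 receives the old value of *mem, *mem
   becomes old + r3, and CC is derived from the signed sum, mirroring
   tcg_gen_atomic_fetch_add_i64 + tcg_gen_add_i64 above. */
static int laag_model(_Atomic int64_t *mem, int64_t *r1, int64_t r3)
{
    int64_t old = atomic_fetch_add(mem, r3);
    int64_t sum = old + r3;        /* recomputed only to derive CC */

    *r1 = old;
    return sum < 0 ? 1 : (sum > 0 ? 2 : 0);   /* CC for adds64, sans overflow */
}

int main(void)
{
    _Atomic int64_t mem = 10;
    int64_t r1 = 0;
    return laag_model(&mem, &r1, -10);  /* CC 0, r1 = 10, mem = 0 */
}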
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
gen_helper_ldeb(o->out, cpu_env, o->in2);
@@ -2558,6 +2563,7 @@ static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
tcg_temp_free_i32(r3);
return NO_EXIT;
}
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
check_privileged(s);
@@ -2567,6 +2573,14 @@ static ExitStatus op_lra(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
TCGv_i64 t1, t2;
@@ -2750,6 +2764,31 @@ static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (parallel_cpus) {
        potential_page_fault(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
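LOAD PAIR DISJOINT fetches two doublewords from unrelated addresses that must appear to be read interlocked; in a serial (single-threaded) context two ordinary aligned loads suffice and CC 0 reports the interlocked fetch, while parallel mode falls back to the stop-the-world EXCP_ATOMIC path above. A sketch of the serial-context semantics (illustrative only):

#include <stdint.h>

/* Illustrative serial-context LPDG model: load both doublewords and
   report CC 0 ("fetched interlocked"), as op_lpd does when
   parallel_cpus is false.  Alignment is assumed checked elsewhere;
   MO_ALIGN raises a specification exception otherwise. */
static int lpdg_model(const uint64_t *a1, const uint64_t *a2,
                      uint64_t *r3_even, uint64_t *r3_odd)
{
    *r3_even = *a1;
    *r3_odd = *a2;
    return 0;  /* CC 0 */
}

int main(void)
{
    uint64_t x = 1, y = 2, e, o;
    return lpdg_model(&x, &y, &e, &o) == 0 && e == 1 && o == 2 ? 0 : 1;
}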
#ifndef CONFIG_USER_ONLY
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
@@ -3382,6 +3421,7 @@ static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
check_privileged(s);
potential_page_fault(s);
gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
set_cc_static(s);
tcg_temp_free_i32(r1);
return NO_EXIT;
}
@@ -3628,15 +3668,8 @@ static ExitStatus op_spt(DisasContext *s, DisasOps *o)
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
TCGv_i64 f, a;
/* We really ought to have more complete indication of facilities
that we implement. Address this when STFLE is implemented. */
check_privileged(s);
f = tcg_const_i64(0xc0000000);
a = tcg_const_i64(200);
tcg_gen_qemu_st32(f, a, get_mem_index(s));
tcg_temp_free_i64(f);
tcg_temp_free_i64(a);
gen_helper_stfl(cpu_env);
return NO_EXIT;
}
@@ -3802,6 +3835,14 @@ static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
}
#endif
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
@@ -4420,6 +4461,22 @@ static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
}
#define SPEC_wout_r1_D32 SPEC_r1_even
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
store_freg32_i64(get_field(f, r1), o->out);
@@ -4486,21 +4543,17 @@ static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
}
#define SPEC_wout_m2_32 0
static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
/* XXX release reservation */
tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
store_reg32_i64(get_field(f, r1), o->in2);
store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_32_r1_atomic 0
#define SPEC_wout_in2_r1 0
static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
/* XXX release reservation */
tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
store_reg(get_field(f, r1), o->in2);
store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_64_r1_atomic 0
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators. These load the first operand to an insn. */
@@ -4944,24 +4997,6 @@ static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
}
#define SPEC_in2_mri2_64 0
static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
/* XXX should reserve the address */
in1_la2(s, f, o);
o->in2 = tcg_temp_new_i64();
tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_32s_atomic 0
static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
/* XXX should reserve the address */
in1_la2(s, f, o);
o->in2 = tcg_temp_new_i64();
tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_64_atomic 0
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
o->in2 = tcg_const_i64(get_field(f, i2));