提交 8d04fb55 编写于 作者: J Jan Kiszka 提交者: Alex Bennée

tcg: drop global lock during TCG code execution

This finally allows TCG to benefit from the iothread introduction: Drop
the global mutex while running pure TCG CPU code. Reacquire the lock
when entering MMIO or PIO emulation, or when leaving the TCG loop.

We have to revert a few optimizations for the current TCG threading
model, namely kicking the TCG thread in qemu_mutex_lock_iothread and not
kicking it in qemu_cpu_kick. We also need to disable RAM block
reordering until we have a more efficient locking mechanism at hand.

Still, a Linux x86 UP guest and my Musicpal ARM model boot fine here.
These numbers demonstrate where we gain something:

20338 jan       20   0  331m  75m 6904 R   99  0.9   0:50.95 qemu-system-arm
20337 jan       20   0  331m  75m 6904 S   20  0.9   0:26.50 qemu-system-arm

The guest CPU was fully loaded, but the iothread could still run mostly
independently on a second core. Without the patch we don't get beyond

32206 jan       20   0  330m  73m 7036 R   82  0.9   1:06.00 qemu-system-arm
32204 jan       20   0  330m  73m 7036 S   21  0.9   0:17.03 qemu-system-arm

We don't benefit significantly, though, when the guest is not fully
loading a host CPU.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Message-Id: <1439220437-23957-10-git-send-email-fred.konrad@greensocs.com>
[FK: Rebase, fix qemu_devices_reset deadlock, rm address_space_* mutex]
Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
[EGC: fixed iothread lock for cpu-exec IRQ handling]
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: -smp single-threaded fix, clean commit msg, BQL fixes]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
[PM: target-arm changes]
Acked-by: Peter Maydell <peter.maydell@linaro.org>
上级 791158d9
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include "qemu/rcu.h" #include "qemu/rcu.h"
#include "exec/tb-hash.h" #include "exec/tb-hash.h"
#include "exec/log.h" #include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h" #include "hw/i386/apic.h"
#endif #endif
...@@ -388,8 +389,10 @@ static inline bool cpu_handle_halt(CPUState *cpu) ...@@ -388,8 +389,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
if ((cpu->interrupt_request & CPU_INTERRUPT_POLL) if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
&& replay_interrupt()) { && replay_interrupt()) {
X86CPU *x86_cpu = X86_CPU(cpu); X86CPU *x86_cpu = X86_CPU(cpu);
qemu_mutex_lock_iothread();
apic_poll_irq(x86_cpu->apic_state); apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
qemu_mutex_unlock_iothread();
} }
#endif #endif
if (!cpu_has_work(cpu)) { if (!cpu_has_work(cpu)) {
...@@ -443,7 +446,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) ...@@ -443,7 +446,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
#else #else
if (replay_exception()) { if (replay_exception()) {
CPUClass *cc = CPU_GET_CLASS(cpu); CPUClass *cc = CPU_GET_CLASS(cpu);
qemu_mutex_lock_iothread();
cc->do_interrupt(cpu); cc->do_interrupt(cpu);
qemu_mutex_unlock_iothread();
cpu->exception_index = -1; cpu->exception_index = -1;
} else if (!replay_has_interrupt()) { } else if (!replay_has_interrupt()) {
/* give a chance to iothread in replay mode */ /* give a chance to iothread in replay mode */
...@@ -469,9 +474,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, ...@@ -469,9 +474,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb) TranslationBlock **last_tb)
{ {
CPUClass *cc = CPU_GET_CLASS(cpu); CPUClass *cc = CPU_GET_CLASS(cpu);
int interrupt_request = cpu->interrupt_request;
if (unlikely(interrupt_request)) { if (unlikely(atomic_read(&cpu->interrupt_request))) {
int interrupt_request;
qemu_mutex_lock_iothread();
interrupt_request = cpu->interrupt_request;
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */ /* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
...@@ -479,6 +486,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, ...@@ -479,6 +486,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (interrupt_request & CPU_INTERRUPT_DEBUG) { if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG; cpu->exception_index = EXCP_DEBUG;
qemu_mutex_unlock_iothread();
return true; return true;
} }
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) { if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
...@@ -488,6 +496,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, ...@@ -488,6 +496,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1; cpu->halted = 1;
cpu->exception_index = EXCP_HLT; cpu->exception_index = EXCP_HLT;
qemu_mutex_unlock_iothread();
return true; return true;
} }
#if defined(TARGET_I386) #if defined(TARGET_I386)
...@@ -498,12 +507,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, ...@@ -498,12 +507,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
do_cpu_init(x86_cpu); do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED; cpu->exception_index = EXCP_HALTED;
qemu_mutex_unlock_iothread();
return true; return true;
} }
#else #else
else if (interrupt_request & CPU_INTERRUPT_RESET) { else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt(); replay_interrupt();
cpu_reset(cpu); cpu_reset(cpu);
qemu_mutex_unlock_iothread();
return true; return true;
} }
#endif #endif
...@@ -526,7 +537,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, ...@@ -526,7 +537,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
the program flow was changed */ the program flow was changed */
*last_tb = NULL; *last_tb = NULL;
} }
/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
qemu_mutex_unlock_iothread();
} }
if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) { if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
atomic_set(&cpu->exit_request, 0); atomic_set(&cpu->exit_request, 0);
cpu->exception_index = EXCP_INTERRUPT; cpu->exception_index = EXCP_INTERRUPT;
...@@ -643,6 +659,9 @@ int cpu_exec(CPUState *cpu) ...@@ -643,6 +659,9 @@ int cpu_exec(CPUState *cpu)
#endif /* buggy compiler */ #endif /* buggy compiler */
cpu->can_do_io = 1; cpu->can_do_io = 1;
tb_lock_reset(); tb_lock_reset();
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
} }
/* if an exception is pending, we execute it here */ /* if an exception is pending, we execute it here */
......
...@@ -1027,8 +1027,6 @@ static void qemu_kvm_init_cpu_signals(CPUState *cpu) ...@@ -1027,8 +1027,6 @@ static void qemu_kvm_init_cpu_signals(CPUState *cpu)
#endif /* _WIN32 */ #endif /* _WIN32 */
static QemuMutex qemu_global_mutex; static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static unsigned iothread_requesting_mutex;
static QemuThread io_thread; static QemuThread io_thread;
...@@ -1042,7 +1040,6 @@ void qemu_init_cpu_loop(void) ...@@ -1042,7 +1040,6 @@ void qemu_init_cpu_loop(void)
qemu_init_sigbus(); qemu_init_sigbus();
qemu_cond_init(&qemu_cpu_cond); qemu_cond_init(&qemu_cpu_cond);
qemu_cond_init(&qemu_pause_cond); qemu_cond_init(&qemu_pause_cond);
qemu_cond_init(&qemu_io_proceeded_cond);
qemu_mutex_init(&qemu_global_mutex); qemu_mutex_init(&qemu_global_mutex);
qemu_thread_get_self(&io_thread); qemu_thread_get_self(&io_thread);
...@@ -1085,10 +1082,6 @@ static void qemu_tcg_wait_io_event(CPUState *cpu) ...@@ -1085,10 +1082,6 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
start_tcg_kick_timer(); start_tcg_kick_timer();
while (iothread_requesting_mutex) {
qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
}
CPU_FOREACH(cpu) { CPU_FOREACH(cpu) {
qemu_wait_io_event_common(cpu); qemu_wait_io_event_common(cpu);
} }
...@@ -1249,9 +1242,11 @@ static int tcg_cpu_exec(CPUState *cpu) ...@@ -1249,9 +1242,11 @@ static int tcg_cpu_exec(CPUState *cpu)
cpu->icount_decr.u16.low = decr; cpu->icount_decr.u16.low = decr;
cpu->icount_extra = count; cpu->icount_extra = count;
} }
qemu_mutex_unlock_iothread();
cpu_exec_start(cpu); cpu_exec_start(cpu);
ret = cpu_exec(cpu); ret = cpu_exec(cpu);
cpu_exec_end(cpu); cpu_exec_end(cpu);
qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER #ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti; tcg_time += profile_getclock() - ti;
#endif #endif
...@@ -1479,27 +1474,14 @@ bool qemu_mutex_iothread_locked(void) ...@@ -1479,27 +1474,14 @@ bool qemu_mutex_iothread_locked(void)
void qemu_mutex_lock_iothread(void) void qemu_mutex_lock_iothread(void)
{ {
atomic_inc(&iothread_requesting_mutex); g_assert(!qemu_mutex_iothread_locked());
/* In the simple case there is no need to bump the VCPU thread out of qemu_mutex_lock(&qemu_global_mutex);
* TCG code execution.
*/
if (!tcg_enabled() || qemu_in_vcpu_thread() ||
!first_cpu || !first_cpu->created) {
qemu_mutex_lock(&qemu_global_mutex);
atomic_dec(&iothread_requesting_mutex);
} else {
if (qemu_mutex_trylock(&qemu_global_mutex)) {
qemu_cpu_kick_rr_cpu();
qemu_mutex_lock(&qemu_global_mutex);
}
atomic_dec(&iothread_requesting_mutex);
qemu_cond_broadcast(&qemu_io_proceeded_cond);
}
iothread_locked = true; iothread_locked = true;
} }
void qemu_mutex_unlock_iothread(void) void qemu_mutex_unlock_iothread(void)
{ {
g_assert(qemu_mutex_iothread_locked());
iothread_locked = false; iothread_locked = false;
qemu_mutex_unlock(&qemu_global_mutex); qemu_mutex_unlock(&qemu_global_mutex);
} }
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h" #include "cpu.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/memory.h" #include "exec/memory.h"
...@@ -495,6 +496,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, ...@@ -495,6 +496,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
hwaddr physaddr = iotlbentry->addr; hwaddr physaddr = iotlbentry->addr;
MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs); MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
uint64_t val; uint64_t val;
bool locked = false;
physaddr = (physaddr & TARGET_PAGE_MASK) + addr; physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr; cpu->mem_io_pc = retaddr;
...@@ -503,7 +505,16 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, ...@@ -503,7 +505,16 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
} }
cpu->mem_io_vaddr = addr; cpu->mem_io_vaddr = addr;
if (mr->global_locking) {
qemu_mutex_lock_iothread();
locked = true;
}
memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs); memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
if (locked) {
qemu_mutex_unlock_iothread();
}
return val; return val;
} }
...@@ -514,15 +525,23 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, ...@@ -514,15 +525,23 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
CPUState *cpu = ENV_GET_CPU(env); CPUState *cpu = ENV_GET_CPU(env);
hwaddr physaddr = iotlbentry->addr; hwaddr physaddr = iotlbentry->addr;
MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs); MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
bool locked = false;
physaddr = (physaddr & TARGET_PAGE_MASK) + addr; physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr); cpu_io_recompile(cpu, retaddr);
} }
cpu->mem_io_vaddr = addr; cpu->mem_io_vaddr = addr;
cpu->mem_io_pc = retaddr; cpu->mem_io_pc = retaddr;
if (mr->global_locking) {
qemu_mutex_lock_iothread();
locked = true;
}
memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs); memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
if (locked) {
qemu_mutex_unlock_iothread();
}
} }
/* Return true if ADDR is present in the victim tlb, and has been copied /* Return true if ADDR is present in the victim tlb, and has been copied
......
...@@ -2134,9 +2134,9 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) ...@@ -2134,9 +2134,9 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
} }
cpu->watchpoint_hit = wp; cpu->watchpoint_hit = wp;
/* The tb_lock will be reset when cpu_loop_exit or /* Both tb_lock and iothread_mutex will be reset when
* cpu_loop_exit_noexc longjmp back into the cpu_exec * cpu_loop_exit or cpu_loop_exit_noexc longjmp
* main loop. * back into the cpu_exec main loop.
*/ */
tb_lock(); tb_lock();
tb_check_watchpoint(cpu); tb_check_watchpoint(cpu);
...@@ -2371,8 +2371,14 @@ static void io_mem_init(void) ...@@ -2371,8 +2371,14 @@ static void io_mem_init(void)
memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX); memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
NULL, UINT64_MAX); NULL, UINT64_MAX);
/* io_mem_notdirty calls tb_invalidate_phys_page_fast,
* which can be called without the iothread mutex.
*/
memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL, memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
NULL, UINT64_MAX); NULL, UINT64_MAX);
memory_region_clear_global_locking(&io_mem_notdirty);
memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
NULL, UINT64_MAX); NULL, UINT64_MAX);
} }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
* THE SOFTWARE. * THE SOFTWARE.
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu-common.h" #include "qemu-common.h"
#include "hw/irq.h" #include "hw/irq.h"
#include "qom/object.h" #include "qom/object.h"
......
...@@ -457,8 +457,8 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip) ...@@ -457,8 +457,8 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
resume_all_vcpus(); resume_all_vcpus();
if (!kvm_enabled()) { if (!kvm_enabled()) {
/* tb_lock will be reset when cpu_loop_exit_noexc longjmps /* Both tb_lock and iothread_mutex will be reset when
* back into the cpu_exec loop. */ * longjmps back into the cpu_exec loop. */
tb_lock(); tb_lock();
tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1); tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
cpu_loop_exit_noexc(cs); cpu_loop_exit_noexc(cs);
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/bitops.h" #include "qemu/bitops.h"
#include "qemu/main-loop.h"
#include "trace.h" #include "trace.h"
#include "gicv3_internal.h" #include "gicv3_internal.h"
#include "cpu.h" #include "cpu.h"
...@@ -733,6 +734,8 @@ void gicv3_cpuif_update(GICv3CPUState *cs) ...@@ -733,6 +734,8 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs->cpu); ARMCPU *cpu = ARM_CPU(cs->cpu);
CPUARMState *env = &cpu->env; CPUARMState *env = &cpu->env;
g_assert(qemu_mutex_iothread_locked());
trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq, trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
cs->hppi.grp, cs->hppi.prio); cs->hppi.grp, cs->hppi.prio);
......
...@@ -62,7 +62,16 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) ...@@ -62,7 +62,16 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
{ {
CPUState *cs = CPU(cpu); CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env; CPUPPCState *env = &cpu->env;
unsigned int old_pending = env->pending_interrupts; unsigned int old_pending;
bool locked = false;
/* We may already have the BQL if coming from the reset path */
if (!qemu_mutex_iothread_locked()) {
locked = true;
qemu_mutex_lock_iothread();
}
old_pending = env->pending_interrupts;
if (level) { if (level) {
env->pending_interrupts |= 1 << n_IRQ; env->pending_interrupts |= 1 << n_IRQ;
...@@ -80,9 +89,14 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) ...@@ -80,9 +89,14 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
#endif #endif
} }
LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32 LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
"req %08x\n", __func__, env, n_IRQ, level, "req %08x\n", __func__, env, n_IRQ, level,
env->pending_interrupts, CPU(cpu)->interrupt_request); env->pending_interrupts, CPU(cpu)->interrupt_request);
if (locked) {
qemu_mutex_unlock_iothread();
}
} }
/* PowerPC 6xx / 7xx internal IRQ controller */ /* PowerPC 6xx / 7xx internal IRQ controller */
......
...@@ -1010,6 +1010,9 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, ...@@ -1010,6 +1010,9 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
{ {
CPUPPCState *env = &cpu->env; CPUPPCState *env = &cpu->env;
/* The TCG path should also be holding the BQL at this point */
g_assert(qemu_mutex_iothread_locked());
if (msr_pr) { if (msr_pr) {
hcall_dprintf("Hypercall made with MSR[PR]=1\n"); hcall_dprintf("Hypercall made with MSR[PR]=1\n");
env->gpr[3] = H_PRIVILEGE; env->gpr[3] = H_PRIVILEGE;
......
...@@ -329,6 +329,7 @@ struct CPUState { ...@@ -329,6 +329,7 @@ struct CPUState {
bool unplug; bool unplug;
bool crash_occurred; bool crash_occurred;
bool exit_request; bool exit_request;
/* updates protected by BQL */
uint32_t interrupt_request; uint32_t interrupt_request;
int singlestep_enabled; int singlestep_enabled;
int64_t icount_extra; int64_t icount_extra;
......
...@@ -917,6 +917,8 @@ void memory_region_transaction_commit(void) ...@@ -917,6 +917,8 @@ void memory_region_transaction_commit(void)
AddressSpace *as; AddressSpace *as;
assert(memory_region_transaction_depth); assert(memory_region_transaction_depth);
assert(qemu_mutex_iothread_locked());
--memory_region_transaction_depth; --memory_region_transaction_depth;
if (!memory_region_transaction_depth) { if (!memory_region_transaction_depth) {
if (memory_region_update_pending) { if (memory_region_update_pending) {
......
...@@ -113,9 +113,19 @@ static void cpu_common_get_memory_mapping(CPUState *cpu, ...@@ -113,9 +113,19 @@ static void cpu_common_get_memory_mapping(CPUState *cpu,
error_setg(errp, "Obtaining memory mappings is unsupported on this CPU."); error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
} }
/* Resetting the IRQ comes from across the code base so we take the
* BQL here if we need to. cpu_interrupt assumes it is held.*/
void cpu_reset_interrupt(CPUState *cpu, int mask) void cpu_reset_interrupt(CPUState *cpu, int mask)
{ {
bool need_lock = !qemu_mutex_iothread_locked();
if (need_lock) {
qemu_mutex_lock_iothread();
}
cpu->interrupt_request &= ~mask; cpu->interrupt_request &= ~mask;
if (need_lock) {
qemu_mutex_unlock_iothread();
}
} }
void cpu_exit(CPUState *cpu) void cpu_exit(CPUState *cpu)
......
...@@ -6769,6 +6769,12 @@ void arm_cpu_do_interrupt(CPUState *cs) ...@@ -6769,6 +6769,12 @@ void arm_cpu_do_interrupt(CPUState *cs)
arm_cpu_do_interrupt_aarch32(cs); arm_cpu_do_interrupt_aarch32(cs);
} }
/* Hooks may change global state so BQL should be held, also the
* BQL needs to be held for any modification of
* cs->interrupt_request.
*/
g_assert(qemu_mutex_iothread_locked());
arm_call_el_change_hook(cpu); arm_call_el_change_hook(cpu);
if (!kvm_enabled()) { if (!kvm_enabled()) {
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/log.h" #include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h" #include "cpu.h"
#include "exec/helper-proto.h" #include "exec/helper-proto.h"
#include "internals.h" #include "internals.h"
...@@ -487,7 +488,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) ...@@ -487,7 +488,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
*/ */
env->regs[15] &= (env->thumb ? ~1 : ~3); env->regs[15] &= (env->thumb ? ~1 : ~3);
qemu_mutex_lock_iothread();
arm_call_el_change_hook(arm_env_get_cpu(env)); arm_call_el_change_hook(arm_env_get_cpu(env));
qemu_mutex_unlock_iothread();
} }
/* Access to user mode registers from privileged modes. */ /* Access to user mode registers from privileged modes. */
...@@ -735,28 +738,58 @@ void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value) ...@@ -735,28 +738,58 @@ void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{ {
const ARMCPRegInfo *ri = rip; const ARMCPRegInfo *ri = rip;
ri->writefn(env, ri, value); if (ri->type & ARM_CP_IO) {
qemu_mutex_lock_iothread();
ri->writefn(env, ri, value);
qemu_mutex_unlock_iothread();
} else {
ri->writefn(env, ri, value);
}
} }
uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip) uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{ {
const ARMCPRegInfo *ri = rip; const ARMCPRegInfo *ri = rip;
uint32_t res;
return ri->readfn(env, ri); if (ri->type & ARM_CP_IO) {
qemu_mutex_lock_iothread();
res = ri->readfn(env, ri);
qemu_mutex_unlock_iothread();
} else {
res = ri->readfn(env, ri);
}
return res;
} }
void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value) void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{ {
const ARMCPRegInfo *ri = rip; const ARMCPRegInfo *ri = rip;
ri->writefn(env, ri, value); if (ri->type & ARM_CP_IO) {
qemu_mutex_lock_iothread();
ri->writefn(env, ri, value);
qemu_mutex_unlock_iothread();
} else {
ri->writefn(env, ri, value);
}
} }
uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{ {
const ARMCPRegInfo *ri = rip; const ARMCPRegInfo *ri = rip;
uint64_t res;
if (ri->type & ARM_CP_IO) {
qemu_mutex_lock_iothread();
res = ri->readfn(env, ri);
qemu_mutex_unlock_iothread();
} else {
res = ri->readfn(env, ri);
}
return ri->readfn(env, ri); return res;
} }
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm) void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
...@@ -989,7 +1022,9 @@ void HELPER(exception_return)(CPUARMState *env) ...@@ -989,7 +1022,9 @@ void HELPER(exception_return)(CPUARMState *env)
cur_el, new_el, env->pc); cur_el, new_el, env->pc);
} }
qemu_mutex_lock_iothread();
arm_call_el_change_hook(arm_env_get_cpu(env)); arm_call_el_change_hook(arm_env_get_cpu(env));
qemu_mutex_unlock_iothread();
return; return;
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h" #include "cpu.h"
#include "exec/helper-proto.h" #include "exec/helper-proto.h"
#include "exec/log.h" #include "exec/log.h"
...@@ -42,11 +43,14 @@ void helper_rsm(CPUX86State *env) ...@@ -42,11 +43,14 @@ void helper_rsm(CPUX86State *env)
#define SMM_REVISION_ID 0x00020000 #define SMM_REVISION_ID 0x00020000
#endif #endif
/* Called with iothread lock taken */
void cpu_smm_update(X86CPU *cpu) void cpu_smm_update(X86CPU *cpu)
{ {
CPUX86State *env = &cpu->env; CPUX86State *env = &cpu->env;
bool smm_enabled = (env->hflags & HF_SMM_MASK); bool smm_enabled = (env->hflags & HF_SMM_MASK);
g_assert(qemu_mutex_iothread_locked());
if (cpu->smram) { if (cpu->smram) {
memory_region_set_enabled(cpu->smram, smm_enabled); memory_region_set_enabled(cpu->smram, smm_enabled);
} }
...@@ -333,7 +337,10 @@ void helper_rsm(CPUX86State *env) ...@@ -333,7 +337,10 @@ void helper_rsm(CPUX86State *env)
} }
env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK; env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
env->hflags &= ~HF_SMM_MASK; env->hflags &= ~HF_SMM_MASK;
qemu_mutex_lock_iothread();
cpu_smm_update(cpu); cpu_smm_update(cpu);
qemu_mutex_unlock_iothread();
qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include "exec/helper-proto.h" #include "exec/helper-proto.h"
#include "sysemu/kvm.h" #include "sysemu/kvm.h"
#include "qemu/timer.h" #include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/address-spaces.h" #include "exec/address-spaces.h"
#ifdef CONFIG_KVM #ifdef CONFIG_KVM
#include <linux/kvm.h> #include <linux/kvm.h>
...@@ -109,11 +110,13 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen) ...@@ -109,11 +110,13 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
/* SCLP service call */ /* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2) uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{ {
qemu_mutex_lock_iothread();
int r = sclp_service_call(env, r1, r2); int r = sclp_service_call(env, r1, r2);
if (r < 0) { if (r < 0) {
program_interrupt(env, -r, 4); program_interrupt(env, -r, 4);
return 0; r = 0;
} }
qemu_mutex_unlock_iothread();
return r; return r;
} }
......
...@@ -55,6 +55,7 @@ ...@@ -55,6 +55,7 @@
#include "translate-all.h" #include "translate-all.h"
#include "qemu/bitmap.h" #include "qemu/bitmap.h"
#include "qemu/timer.h" #include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h" #include "exec/log.h"
/* #define DEBUG_TB_INVALIDATE */ /* #define DEBUG_TB_INVALIDATE */
...@@ -1523,7 +1524,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, ...@@ -1523,7 +1524,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len. /* len must be <= 8 and start must be a multiple of len.
* Called via softmmu_template.h when code areas are written to with * Called via softmmu_template.h when code areas are written to with
* tb_lock held. * iothread mutex not held.
*/ */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{ {
...@@ -1725,7 +1726,10 @@ void tb_check_watchpoint(CPUState *cpu) ...@@ -1725,7 +1726,10 @@ void tb_check_watchpoint(CPUState *cpu)
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os /* in deterministic execution mode, instructions doing device I/Os
must be at the end of the TB */ * must be at the end of the TB.
*
* Called by softmmu_template.h, with iothread mutex not held.
*/
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{ {
#if defined(TARGET_MIPS) || defined(TARGET_SH4) #if defined(TARGET_MIPS) || defined(TARGET_SH4)
...@@ -1937,6 +1941,7 @@ void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf) ...@@ -1937,6 +1941,7 @@ void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
void cpu_interrupt(CPUState *cpu, int mask) void cpu_interrupt(CPUState *cpu, int mask)
{ {
g_assert(qemu_mutex_iothread_locked());
cpu->interrupt_request |= mask; cpu->interrupt_request |= mask;
cpu->tcg_exit_req = 1; cpu->tcg_exit_req = 1;
} }
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "qemu-common.h" #include "qemu-common.h"
#include "qom/cpu.h" #include "qom/cpu.h"
#include "sysemu/cpus.h" #include "sysemu/cpus.h"
#include "qemu/main-loop.h"
uintptr_t qemu_real_host_page_size; uintptr_t qemu_real_host_page_size;
intptr_t qemu_real_host_page_mask; intptr_t qemu_real_host_page_mask;
...@@ -30,6 +31,7 @@ intptr_t qemu_real_host_page_mask; ...@@ -30,6 +31,7 @@ intptr_t qemu_real_host_page_mask;
static void tcg_handle_interrupt(CPUState *cpu, int mask) static void tcg_handle_interrupt(CPUState *cpu, int mask)
{ {
int old_mask; int old_mask;
g_assert(qemu_mutex_iothread_locked());
old_mask = cpu->interrupt_request; old_mask = cpu->interrupt_request;
cpu->interrupt_request |= mask; cpu->interrupt_request |= mask;
...@@ -40,17 +42,16 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask) ...@@ -40,17 +42,16 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
*/ */
if (!qemu_cpu_is_self(cpu)) { if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu); qemu_cpu_kick(cpu);
return;
}
if (use_icount) {
cpu->icount_decr.u16.high = 0xffff;
if (!cpu->can_do_io
&& (mask & ~old_mask) != 0) {
cpu_abort(cpu, "Raised interrupt while not in I/O function");
}
} else { } else {
cpu->tcg_exit_req = 1; if (use_icount) {
cpu->icount_decr.u16.high = 0xffff;
if (!cpu->can_do_io
&& (mask & ~old_mask) != 0) {
cpu_abort(cpu, "Raised interrupt while not in I/O function");
}
} else {
cpu->tcg_exit_req = 1;
}
} }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册