Commit e5143e30 authored by Alex Bennée

tcg: remove global exit_request

There are now only two uses of the global exit_request left.

The first use ensures we exit the run loop when we first start (to
process any pending work) and in the kick handler. This is just as
easily done by setting the first_cpu->exit_request flag.
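
In code terms this is a one-line flag set before entering the
scheduling loop; a condensed sketch of the relevant cpus.c change (see
the qemu_tcg_cpu_thread_fn hunk below):

    cpu = first_cpu;

    /* process any pending work on the first iteration */
    cpu->exit_request = 1;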

The second use is in the round robin kick routine. The global
exit_request ensured every vCPU would set its local exit_request and
cause a full exit of the loop. Now that the iothread isn't held while
vCPUs are running, we can rely on the kick handler to push us out as
intended.
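
For reference, the kick routine this now relies on looks roughly as
follows (a sketch reconstructed from the surrounding cpus.c code; this
patch only removes the atomic_mb_set(&exit_request, 1) line, the rest
is unchanged context):

    static void qemu_cpu_kick_rr_cpu(void)
    {
        CPUState *cpu;
        do {
            cpu = atomic_mb_read(&tcg_current_rr_cpu);
            if (cpu) {
                cpu_exit(cpu);
            }
            /* retry if the scheduler already moved on to another vCPU */
        } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
    }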

We lightly refactor the main vCPU thread so that a cpu->exit_request
causes us to exit the main loop and process any I/O requests that
might come along. As a cpu->exit_request may legitimately get squashed
while processing the EXCP_INTERRUPT exception, we also check
cpu->queued_work_first to ensure queued work is expedited as soon as
possible.
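
Assembled from the hunks below, the resulting round robin loop is
shaped roughly like this (a condensed sketch; the elided body executes
each vCPU's TBs):

    while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
        /* ... run this vCPU, break out early on debug/stop ... */
        cpu = CPU_NEXT(cpu);
    }

    /* Does not need atomic_mb_set because a spurious wakeup is okay. */
    atomic_set(&tcg_current_rr_cpu, NULL);

    if (cpu && cpu->exit_request) {
        atomic_mb_set(&cpu->exit_request, 0);
    }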
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Parent 8d04fb55
@@ -23,8 +23,6 @@
 #include "exec/exec-all.h"
 #include "exec/memory-internal.h"
 
-bool exit_request;
-
 /* exit the current TB, but without causing any exception to be raised */
 void cpu_loop_exit_noexc(CPUState *cpu)
 {
@@ -568,15 +568,13 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     *tb_exit = ret & TB_EXIT_MASK;
     switch (*tb_exit) {
     case TB_EXIT_REQUESTED:
-        /* Something asked us to stop executing
-         * chained TBs; just continue round the main
-         * loop. Whatever requested the exit will also
-         * have set something else (eg exit_request or
-         * interrupt_request) which we will handle
-         * next time around the loop.  But we need to
-         * ensure the zeroing of tcg_exit_req (see cpu_tb_exec)
-         * comes before the next read of cpu->exit_request
-         * or cpu->interrupt_request.
+        /* Something asked us to stop executing chained TBs; just
+         * continue round the main loop. Whatever requested the exit
+         * will also have set something else (eg interrupt_request)
+         * which we will handle next time around the loop.  But we
+         * need to ensure the tcg_exit_req read in generated code
+         * comes before the next read of cpu->exit_request or
+         * cpu->interrupt_request.
          */
         smp_mb();
         *last_tb = NULL;
@@ -630,10 +628,6 @@ int cpu_exec(CPUState *cpu)
 
     rcu_read_lock();
 
-    if (unlikely(atomic_mb_read(&exit_request))) {
-        cpu->exit_request = 1;
-    }
-
     cc->cpu_exec_enter(cpu);
 
     /* Calculate difference between guest clock and host clock.
@@ -793,7 +793,6 @@ static inline int64_t qemu_tcg_next_kick(void)
 static void qemu_cpu_kick_rr_cpu(void)
 {
     CPUState *cpu;
-    atomic_mb_set(&exit_request, 1);
     do {
         cpu = atomic_mb_read(&tcg_current_rr_cpu);
         if (cpu) {
@@ -1316,11 +1315,11 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     start_tcg_kick_timer();
 
-    /* process any pending work */
-    atomic_mb_set(&exit_request, 1);
-
     cpu = first_cpu;
 
+    /* process any pending work */
+    cpu->exit_request = 1;
+
     while (1) {
         /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
         qemu_account_warp_timer();
@@ -1329,7 +1328,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
             cpu = first_cpu;
         }
 
-        for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
+
             atomic_mb_set(&tcg_current_rr_cpu, cpu);
             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -1349,12 +1349,15 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
                 break;
             }
 
-        } /* for cpu.. */
+            cpu = CPU_NEXT(cpu);
+        } /* while (cpu && !cpu->exit_request).. */
+
         /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
         atomic_set(&tcg_current_rr_cpu, NULL);
 
-        /* Pairs with smp_wmb in qemu_cpu_kick.  */
-        atomic_mb_set(&exit_request, 0);
+        if (cpu && cpu->exit_request) {
+            atomic_mb_set(&cpu->exit_request, 0);
+        }
 
         handle_icount_deadline();
@@ -404,7 +404,4 @@ bool memory_region_is_unassigned(MemoryRegion *mr);
 /* vl.c */
 extern int singlestep;
 
-/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
-extern bool exit_request;
-
 #endif