Commit c385e6e4 authored by Sergey Fedorov, committed by Richard Henderson

cpu-exec: Move interrupt handling out of cpu_exec()

Simplify cpu_exec() by extracting interrupt handling code outside of
cpu_exec() into a new static inline function cpu_handle_interrupt().
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Message-Id: <1462962111-32237-4-git-send-email-sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Parent ea284766
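For orientation before the full diff: the change replaces the interrupt-handling block in cpu_exec()'s inner translation loop with a call to the new helper, which takes a pointer to last_tb so it can break TB chaining whenever control flow changes. The following is a simplified sketch of the resulting structure (condensed from the diff below; it is not the complete QEMU code and omits the per-interrupt cases and the i386-specific handling):

    static inline void cpu_handle_interrupt(CPUState *cpu,
                                            TranslationBlock **last_tb)
    {
        int interrupt_request = cpu->interrupt_request;

        if (unlikely(interrupt_request)) {
            /* ... service the pending request (debug, halt, reset, ...) ... */
            /* A chained TB jump must not survive a change of control flow. */
            *last_tb = NULL;
        }
        if (unlikely(cpu->exit_request || replay_has_interrupt())) {
            cpu->exit_request = 0;
            cpu->exception_index = EXCP_INTERRUPT;
            cpu_loop_exit(cpu);    /* siglongjmp back into cpu_exec() */
        }
    }

    /* cpu_exec()'s inner loop after the change: */
    for (;;) {
        cpu_handle_interrupt(cpu, &last_tb);
        tb = tb_find_fast(cpu, &last_tb, tb_exit);
        /* ... execute tb ... */
    }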
@@ -437,55 +437,12 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     return false;
 }
 
-/* main execution loop */
-int cpu_exec(CPUState *cpu)
+static inline void cpu_handle_interrupt(CPUState *cpu,
+                                        TranslationBlock **last_tb)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
-#ifdef TARGET_I386
-    X86CPU *x86_cpu = X86_CPU(cpu);
-    CPUArchState *env = &x86_cpu->env;
-#endif
-    int ret, interrupt_request;
-    TranslationBlock *tb, *last_tb;
-    int tb_exit = 0;
-    SyncClocks sc;
-
-    /* replay_interrupt may need current_cpu */
-    current_cpu = cpu;
-
-    if (cpu_handle_halt(cpu)) {
-        return EXCP_HALTED;
-    }
-
-    atomic_mb_set(&tcg_current_cpu, cpu);
-    rcu_read_lock();
-
-    if (unlikely(atomic_mb_read(&exit_request))) {
-        cpu->exit_request = 1;
-    }
-
-    cc->cpu_exec_enter(cpu);
+    int interrupt_request = cpu->interrupt_request;
 
-    /* Calculate difference between guest clock and host clock.
-     * This delay includes the delay of the last cycle, so
-     * what we have to do is sleep until it is 0. As for the
-     * advance/delay we gain here, we try to fix it next time.
-     */
-    init_delay_params(&sc, cpu);
-
-    for(;;) {
-        /* prepare setjmp context for exception handling */
-        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
-            /* if an exception is pending, we execute it here */
-            if (cpu_handle_exception(cpu, &ret)) {
-                break;
-            }
-
-            last_tb = NULL; /* forget the last executed TB after exception */
-            cpu->tb_flushed = false; /* reset before first TB lookup */
-
-            for(;;) {
-                interrupt_request = cpu->interrupt_request;
     if (unlikely(interrupt_request)) {
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
@@ -496,8 +453,7 @@ int cpu_exec(CPUState *cpu)
             cpu->exception_index = EXCP_DEBUG;
             cpu_loop_exit(cpu);
         }
-                if (replay_mode == REPLAY_MODE_PLAY
-                    && !replay_has_interrupt()) {
+        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
             /* Do nothing */
         } else if (interrupt_request & CPU_INTERRUPT_HALT) {
             replay_interrupt();
@@ -508,6 +464,8 @@ int cpu_exec(CPUState *cpu)
         }
 #if defined(TARGET_I386)
         else if (interrupt_request & CPU_INTERRUPT_INIT) {
+            X86CPU *x86_cpu = X86_CPU(cpu);
+            CPUArchState *env = &x86_cpu->env;
             replay_interrupt();
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
             do_cpu_init(x86_cpu);
@@ -528,7 +486,7 @@ int cpu_exec(CPUState *cpu)
         else {
             replay_interrupt();
             if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
-                        last_tb = NULL;
+                *last_tb = NULL;
             }
         }
         /* Don't use the cached interrupt_request value,
@@ -537,15 +495,65 @@ int cpu_exec(CPUState *cpu)
             cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
             /* ensure that no TB jump will be modified as
                the program flow was changed */
-                    last_tb = NULL;
+            *last_tb = NULL;
         }
     }
-            if (unlikely(cpu->exit_request
-                         || replay_has_interrupt())) {
+    if (unlikely(cpu->exit_request || replay_has_interrupt())) {
         cpu->exit_request = 0;
         cpu->exception_index = EXCP_INTERRUPT;
         cpu_loop_exit(cpu);
     }
+}
+
+/* main execution loop */
+int cpu_exec(CPUState *cpu)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+#ifdef TARGET_I386
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    CPUArchState *env = &x86_cpu->env;
+#endif
+    int ret;
+    TranslationBlock *tb, *last_tb;
+    int tb_exit = 0;
+    SyncClocks sc;
+
+    /* replay_interrupt may need current_cpu */
+    current_cpu = cpu;
+
+    if (cpu_handle_halt(cpu)) {
+        return EXCP_HALTED;
+    }
+
+    atomic_mb_set(&tcg_current_cpu, cpu);
+    rcu_read_lock();
+
+    if (unlikely(atomic_mb_read(&exit_request))) {
+        cpu->exit_request = 1;
+    }
+
+    cc->cpu_exec_enter(cpu);
+
+    /* Calculate difference between guest clock and host clock.
+     * This delay includes the delay of the last cycle, so
+     * what we have to do is sleep until it is 0. As for the
+     * advance/delay we gain here, we try to fix it next time.
+     */
+    init_delay_params(&sc, cpu);
+
+    for(;;) {
+        /* prepare setjmp context for exception handling */
+        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
+            /* if an exception is pending, we execute it here */
+            if (cpu_handle_exception(cpu, &ret)) {
+                break;
+            }
+
+            last_tb = NULL; /* forget the last executed TB after exception */
+            cpu->tb_flushed = false; /* reset before first TB lookup */
+
+            for(;;) {
+                cpu_handle_interrupt(cpu, &last_tb);
                 tb = tb_find_fast(cpu, &last_tb, tb_exit);
                 if (likely(!cpu->exit_request)) {
                     uintptr_t ret;