/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;        /* guest-ahead-of-host clock difference, ns */
    int64_t last_cpu_icount; /* icount budget observed at last alignment */
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000

/* Account for the instructions executed since the last call and, if the
 * guest clock has run more than VM_CLOCK_ADVANCE ahead of the host,
 * sleep the host thread until the two are realigned.  No-op unless
 * -icount align was requested on the command line.
 */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    /* icount_extra + icount_decr is the remaining instruction budget;
     * the drop since last time is the number of instructions executed. */
    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            /* Interrupted: only credit the time actually slept
             * (requested minus remainder). */
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        /* Sleep() takes milliseconds and cannot report early wakeup. */
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

/* Record the initial guest/host clock skew and icount baseline so that
 * align_clocks() only compensates for drift accumulated from here on.
 */
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
}
#else
/* User-mode emulation: no virtual clock to align against, so these are
 * empty stubs. */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
/* Unwind out of the generated code back to the sigsetjmp() in
 * cpu_exec().  current_tb must be cleared first: once we longjmp,
 * no translation block is executing any more.
 */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
97
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* puc (the signal ucontext) is currently unused here.  Clear any
     * pending exception and re-enter the main loop via longjmp. */
    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
110
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    /* Run the generated host code.  The low TB_EXIT_MASK bits of the
     * return value encode why execution stopped; the remaining bits
     * point at the TB we stopped in. */
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            /* Target hook: recover full CPU state from the TB. */
            cc->synchronize_from_tb(cpu, tb);
        } else {
            /* Fallback: every target must at least be able to set PC. */
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

P
pbrook 已提交
157 158
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
159
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
B
Blue Swirl 已提交
160
                             TranslationBlock *orig_tb)
P
pbrook 已提交
161
{
162
    CPUState *cpu = ENV_GET_CPU(env);
P
pbrook 已提交
163 164 165 166 167 168 169
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

170
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
P
pbrook 已提交
171
                     max_cycles);
172
    cpu->current_tb = tb;
P
pbrook 已提交
173
    /* execute the generated code */
174
    cpu_tb_exec(cpu, tb->tc_ptr);
175
    cpu->current_tb = NULL;
P
pbrook 已提交
176 177 178 179
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

/* Look up the TranslationBlock matching (pc, cs_base, flags) in the
 * physical-PC hash table, generating a new translation if none exists.
 * On success the entry is moved to the head of its hash chain and the
 * virtual-PC jump cache slot for pc is refreshed.
 */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                /* TB spans two pages: the second page must match too. */
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    /* The guard is false when the walk above fell off the end of the
     * chain (*ptb1 == NULL), i.e. on the not_found path, so a freshly
     * generated TB is not re-linked here. */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

237
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
238
{
239
    CPUState *cpu = ENV_GET_CPU(env);
240 241
    TranslationBlock *tb;
    target_ulong cs_base, pc;
242
    int flags;
243 244 245 246

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
247
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
248
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
249 250
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
B
Blue Swirl 已提交
251
        tb = tb_find_slow(env, pc, cs_base, flags);
252 253 254 255
    }
    return tb;
}

/* Optional hook run by cpu_handle_debug_exception(); NULL if unset. */
static CPUDebugExcpHandler *debug_excp_handler;

/* Install the debug exception hook, replacing any previous one. */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

263
static void cpu_handle_debug_exception(CPUArchState *env)
264
{
265
    CPUState *cpu = ENV_GET_CPU(env);
266 267
    CPUWatchpoint *wp;

268 269
    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
270 271 272 273 274 275 276 277
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

B
bellard 已提交
278 279
/* main execution loop */

280 281
volatile sig_atomic_t exit_request;

282
int cpu_exec(CPUArchState *env)
B
bellard 已提交
283
{
284
    CPUState *cpu = ENV_GET_CPU(env);
285 286 287
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
288 289 290
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
291
#endif
292 293
    int ret, interrupt_request;
    TranslationBlock *tb;
B
bellard 已提交
294
    uint8_t *tc_ptr;
295
    uintptr_t next_tb;
296 297
    SyncClocks sc;

298 299
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;
300

301
    if (cpu->halted) {
302
        if (!cpu_has_work(cpu)) {
303 304 305
            return EXCP_HALTED;
        }

306
        cpu->halted = 0;
307
    }
B
bellard 已提交
308

309
    current_cpu = cpu;
B
bellard 已提交
310

311
    /* As long as current_cpu is null, up to the assignment just above,
312 313
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
314
     * evaluation of the global value is performed past the current_cpu
315 316 317 318
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

J
Jan Kiszka 已提交
319
    if (unlikely(exit_request)) {
320
        cpu->exit_request = 1;
321 322
    }

323
#if defined(TARGET_I386)
324 325
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
L
liguang 已提交
326
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
327 328
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
329
#elif defined(TARGET_SPARC)
P
pbrook 已提交
330 331 332 333
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
334 335
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
336
#elif defined(TARGET_UNICORE32)
337
#elif defined(TARGET_PPC)
338
    env->reserve_addr = -1;
M
Michael Walle 已提交
339
#elif defined(TARGET_LM32)
340
#elif defined(TARGET_MICROBLAZE)
B
bellard 已提交
341
#elif defined(TARGET_MIPS)
A
Anthony Green 已提交
342
#elif defined(TARGET_MOXIE)
343
#elif defined(TARGET_OPENRISC)
B
bellard 已提交
344
#elif defined(TARGET_SH4)
345
#elif defined(TARGET_CRIS)
A
Alexander Graf 已提交
346
#elif defined(TARGET_S390X)
M
Max Filippov 已提交
347
#elif defined(TARGET_XTENSA)
B
bellard 已提交
348
    /* XXXXX */
B
bellard 已提交
349 350 351
#else
#error unsupported target CPU
#endif
352
    cpu->exception_index = -1;
353

354 355 356 357 358 359 360
    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

B
bellard 已提交
361
    /* prepare setjmp context for exception handling */
362
    for(;;) {
363
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
364
            /* if an exception is pending, we execute it here */
365 366
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
367
                    /* exit request from the cpu execution loop */
368
                    ret = cpu->exception_index;
369 370 371
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
372
                    break;
A
aurel32 已提交
373 374
                } else {
#if defined(CONFIG_USER_ONLY)
375
                    /* if user mode only, we simulate a fake exception
T
ths 已提交
376
                       which will be handled outside the cpu execution
377
                       loop */
B
bellard 已提交
378
#if defined(TARGET_I386)
379
                    cc->do_interrupt(cpu);
B
bellard 已提交
380
#endif
381
                    ret = cpu->exception_index;
382
                    break;
A
aurel32 已提交
383
#else
384
                    cc->do_interrupt(cpu);
385
                    cpu->exception_index = -1;
B
bellard 已提交
386
#endif
387
                }
388
            }
B
bellard 已提交
389

390
            next_tb = 0; /* force lookup of first TB */
391
            for(;;) {
392
                interrupt_request = cpu->interrupt_request;
M
malc 已提交
393
                if (unlikely(interrupt_request)) {
394
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
M
malc 已提交
395
                        /* Mask out external interrupts for this step. */
396
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
M
malc 已提交
397
                    }
398
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
399
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
400
                        cpu->exception_index = EXCP_DEBUG;
401
                        cpu_loop_exit(cpu);
402
                    }
403
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
404
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
405
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
406
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
407 408
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
409
                        cpu->exception_index = EXCP_HLT;
410
                        cpu_loop_exit(cpu);
411 412
                    }
#endif
413 414 415 416 417 418 419 420 421 422 423 424
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
B
bellard 已提交
425
#if defined(TARGET_I386)
426 427
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
428
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
429
                        apic_poll_irq(x86_cpu->apic_state);
430 431
                    }
#endif
432
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
433
                            do_cpu_sipi(x86_cpu);
434
                    } else if (env->hflags2 & HF2_GIF_MASK) {
435 436
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
B
Blue Swirl 已提交
437 438
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
439
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
440
                            do_smm_enter(x86_cpu);
441 442 443
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
444
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
445
                            env->hflags2 |= HF2_NMI_MASK;
446
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
447
                            next_tb = 0;
448
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
449
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
450
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
451
                            next_tb = 0;
452 453 454 455 456 457 458
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) && 
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) && 
                                     (env->eflags & IF_MASK && 
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
B
Blue Swirl 已提交
459 460
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
461 462
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
463
                            intno = cpu_get_pic_interrupt(env);
464 465 466 467 468
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
T
ths 已提交
469
#if !defined(CONFIG_USER_ONLY)
470 471 472 473 474
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) && 
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
B
Blue Swirl 已提交
475 476
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
477 478 479 480
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
481
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
482
                            do_interrupt_x86_hardirq(env, intno, 1);
483
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
484
                            next_tb = 0;
B
bellard 已提交
485
#endif
486
                        }
B
bellard 已提交
487
                    }
488
#elif defined(TARGET_PPC)
489
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
490
                        ppc_hw_interrupt(env);
491 492 493
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
494
                        next_tb = 0;
495
                    }
M
Michael Walle 已提交
496 497 498
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
499
                        cpu->exception_index = EXCP_IRQ;
500
                        cc->do_interrupt(cpu);
M
Michael Walle 已提交
501 502
                        next_tb = 0;
                    }
503 504 505 506 507
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
508
                        cpu->exception_index = EXCP_IRQ;
509
                        cc->do_interrupt(cpu);
510 511
                        next_tb = 0;
                    }
B
bellard 已提交
512 513
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
514
                        cpu_mips_hw_interrupts_pending(env)) {
B
bellard 已提交
515
                        /* Raise it */
516
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
B
bellard 已提交
517
                        env->error_code = 0;
518
                        cc->do_interrupt(cpu);
519
                        next_tb = 0;
B
bellard 已提交
520
                    }
J
Jia Liu 已提交
521 522 523 524 525 526 527 528 529 530 531 532
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
533
                            cpu->exception_index = idx;
534
                            cc->do_interrupt(cpu);
J
Jia Liu 已提交
535 536 537
                            next_tb = 0;
                        }
                    }
538
#elif defined(TARGET_SPARC)
539 540 541 542 543 544 545 546 547
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
548
                                cpu->exception_index = env->interrupt_index;
549
                                cc->do_interrupt(cpu);
550 551 552
                                next_tb = 0;
                            }
                        }
553
                    }
B
bellard 已提交
554 555
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
556
                        && !(env->daif & PSTATE_F)) {
557
                        cpu->exception_index = EXCP_FIQ;
558
                        cc->do_interrupt(cpu);
559
                        next_tb = 0;
B
bellard 已提交
560
                    }
P
pbrook 已提交
561 562 563 564 565 566
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
567
                       the stack if an interrupt occurred at the wrong time.
P
pbrook 已提交
568 569
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
B
bellard 已提交
570
                    if (interrupt_request & CPU_INTERRUPT_HARD
P
pbrook 已提交
571
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
572
                            || !(env->daif & PSTATE_I))) {
573
                        cpu->exception_index = EXCP_IRQ;
574
                        cc->do_interrupt(cpu);
575
                        next_tb = 0;
B
bellard 已提交
576
                    }
577 578 579
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
580
                        cpu->exception_index = UC32_EXCP_INTR;
581
                        cc->do_interrupt(cpu);
582 583
                        next_tb = 0;
                    }
B
bellard 已提交
584
#elif defined(TARGET_SH4)
585
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
586
                        cc->do_interrupt(cpu);
587
                        next_tb = 0;
588
                    }
J
j_mayer 已提交
589
#elif defined(TARGET_ALPHA)
590 591 592
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
593
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
615
                            cpu->exception_index = idx;
616
                            env->error_code = 0;
617
                            cc->do_interrupt(cpu);
618 619
                            next_tb = 0;
                        }
J
j_mayer 已提交
620
                    }
621
#elif defined(TARGET_CRIS)
E
edgar_igl 已提交
622
                    if (interrupt_request & CPU_INTERRUPT_HARD
E
Edgar E. Iglesias 已提交
623 624
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
625
                        cpu->exception_index = EXCP_IRQ;
626
                        cc->do_interrupt(cpu);
E
edgar_igl 已提交
627 628
                        next_tb = 0;
                    }
629 630 631 632 633 634 635 636
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
637
                            cpu->exception_index = EXCP_NMI;
638
                            cc->do_interrupt(cpu);
639 640
                            next_tb = 0;
                        }
641
                    }
P
pbrook 已提交
642 643 644 645 646 647 648 649 650
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
651
                        cpu->exception_index = env->pending_vector;
652
                        do_interrupt_m68k_hardirq(env);
653
                        next_tb = 0;
P
pbrook 已提交
654
                    }
655 656 657
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
658
                        cc->do_interrupt(cpu);
659 660
                        next_tb = 0;
                    }
661 662
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
677 678
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
679
                    cpu->exception_index = EXCP_INTERRUPT;
680
                    cpu_loop_exit(cpu);
681
                }
682
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
683
                have_tb_lock = true;
B
Blue Swirl 已提交
684
                tb = tb_find_fast(env);
P
pbrook 已提交
685 686
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}