cpu-exec.c 29.7 KB
Newer Older
B
bellard 已提交
1
/*
2
 *  emulator main execution loop
3
 *
B
bellard 已提交
4
 *  Copyright (c) 2003-2005 Fabrice Bellard
B
bellard 已提交
5
 *
B
bellard 已提交
6 7 8 9
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
B
bellard 已提交
10
 *
B
bellard 已提交
11 12 13 14
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
B
bellard 已提交
15
 *
B
bellard 已提交
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
B
bellard 已提交
18
 */
B
bellard 已提交
19
#include "config.h"
B
Blue Swirl 已提交
20
#include "cpu.h"
21
#include "disas/disas.h"
22
#include "tcg.h"
23
#include "qemu/atomic.h"
24
#include "sysemu/qtest.h"
B
bellard 已提交
25

26
/* Unwind straight back to the sigsetjmp() in cpu_exec().  Called when
   guest execution must stop immediately (e.g. a pending exception or
   interrupt was raised while running a TB).  Does not return.  */
void cpu_loop_exit(CPUState *cpu)
{
    /* We are no longer executing any translation block.  */
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
31

32 33 34
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */
    /* NOTE(review): puc (host signal context) is unused in this
       implementation — presumably kept for interface symmetry with the
       user-mode variant; confirm against other definitions.  */

    /* Clear any pending exception and jump back to the main loop's
       sigsetjmp() in cpu_exec().  */
    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
44

45 46 47 48
/* Execute a TB, and fix up the CPU state afterwards if necessary.
 *
 * Returns the value the generated code left in the TCG return register:
 * the address of the last TB, with the low TB_EXIT_* bits encoding the
 * reason the generated code returned to the main loop (the code below
 * masks with TB_EXIT_MASK / ~TB_EXIT_MASK to separate the two).
 */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    /* Optionally dump CPU state before entering the TB.  */
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            /* Target provides its own way to derive CPU state from a TB.  */
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

P
pbrook 已提交
91 92
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
93
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
B
Blue Swirl 已提交
94
                             TranslationBlock *orig_tb)
P
pbrook 已提交
95
{
96
    CPUState *cpu = ENV_GET_CPU(env);
P
pbrook 已提交
97 98 99 100 101 102 103
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

104
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
P
pbrook 已提交
105
                     max_cycles);
106
    cpu->current_tb = tb;
P
pbrook 已提交
107
    /* execute the generated code */
108
    cpu_tb_exec(cpu, tb->tc_ptr);
109
    cpu->current_tb = NULL;
P
pbrook 已提交
110 111 112 113
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

114
/* Slow-path TB lookup: search the physical-address hash table for a TB
   matching (pc, cs_base, flags), translating a new one if none exists.
   On success the TB is moved to the head of its hash chain (MRU) and
   entered into the per-CPU virtual-pc jump cache.  */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    /* Walk the hash chain; ptb1 trails one link behind tb so the match
       can be unlinked for the MRU move below.  */
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                /* TB spans two pages: the second page's current physical
                   mapping must also match for the TB to be reusable.  */
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        /* (*ptb1 is NULL on the not_found path: a freshly generated TB
           is already linked at the head by tb_gen_code.)  */
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

171
/* Fast-path TB lookup: probe the per-CPU virtual-pc jump cache and fall
   back to the full hash-table search on a miss.  */
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *cached;
    target_ulong pc, cs_base;
    int flags;

    /* We key the lookup on a subset of the CPU state; that subset is
       always the same before a given translated block is executed.  */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cached = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (likely(cached && cached->pc == pc && cached->cs_base == cs_base &&
               cached->flags == flags)) {
        return cached;
    }

    /* Cache miss: do the full physical-hash lookup (which also refills
       the jump cache).  */
    return tb_find_slow(env, pc, cs_base, flags);
}

190 191
/* Optional hook invoked from cpu_handle_debug_exception(); NULL when no
   handler has been installed.  */
static CPUDebugExcpHandler *debug_excp_handler;

/* Install the debug-exception hook (replaces any previous handler).  */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

197
/* Called from cpu_exec() when the loop breaks out with EXCP_DEBUG:
   tidy up watchpoint hit marks and invoke the installed debug hook.  */
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* If no watchpoint actually fired, this debug exit came from
       elsewhere (e.g. single-step); clear any stale hit flags.  */
    if (!cpu->watchpoint_hit) {
        CPUWatchpoint *wp;

        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (debug_excp_handler != NULL) {
        debug_excp_handler(env);
    }
}

B
bellard 已提交
212 213
/* main execution loop */

/* Global exit flag: other threads request that the running vCPU leave
   the execution loop by setting this; it is mirrored into
   cpu->exit_request below.  sig_atomic_t / volatile because it may be
   written asynchronously.  */
volatile sig_atomic_t exit_request;

/* Main guest execution loop: repeatedly look up (or translate) the TB
 * for the current CPU state, chain and execute TBs, and service pending
 * exceptions and interrupts in between.  Returns the EXCP_* reason the
 * loop was left (e.g. EXCP_HALTED, EXCP_DEBUG, EXCP_INTERRUPT, or a
 * target exception code in user mode).
 */
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    /* A halted CPU with no pending work stays halted.  */
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    /* Per-target entry fixups: convert architectural state into the
       lazy/temporary form the translated code expects.  */
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            /* Inner loop: service interrupts, find the next TB, run it.  */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
                    /* Per-target hardware interrupt dispatch.  Each arm
                       raises the appropriate exception and clears next_tb
                       so no stale TB chaining survives the control-flow
                       change.  */
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        /* Interrupts only when the SVM global interrupt
                           flag allows them.  */
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            /* The low 16-bit decrementer can only count up
                               to 0xffff per refill.  */
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            /* Drop the TB lock if the longjmp happened while we held it.  */
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


    /* Per-target exit fixups: fold the lazy/temporary state back into
       the architectural registers (mirror of the entry fixups above).  */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}