/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
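
/* align_clocks() converts the instructions executed since the last call
 * into nanoseconds and, when the guest has run ahead of the host clock
 * by more than VM_CLOCK_ADVANCE, sleeps until the two clocks are level
 * again.
 */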

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

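/* print_delay() warns, at most once every MAX_DELAY_PRINT_RATE ns and at
 * most MAX_NB_PRINTS times in total, when the guest has fallen behind
 * real time.
 */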
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
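
/* init_delay_params() records the current host clock, guest clock and
 * instruction counter so that later align_clocks() calls can measure how
 * far the two clocks have drifted apart.
 */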

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;

    /* Print at most every 2s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
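
/* tcg_qemu_tb_exec() returns the address of the TB we exited from, with
 * the low bits (TB_EXIT_MASK) encoding the exit condition; cpu_tb_exec()
 * decodes that value and restores the guest PC when the TB was not
 * executed to completion.
 */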

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
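
/* Slow path of the TB lookup: hash the guest physical PC, walk the
 * physical hash chain and, if no matching block is found, translate one
 * now; the result is also stored in cpu->tb_jmp_cache for the fast path.
 */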

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB to the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
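
/* Fast path of the TB lookup: check the per-CPU tb_jmp_cache for the
 * current (pc, cs_base, flags) state and fall back to tb_find_slow()
 * on a miss.
 */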

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
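
/* Optional hook invoked from cpu_handle_debug_exception(); a target can
 * register its own handler with cpu_set_debug_excp_handler().
 */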

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);
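
    /* The sigsetjmp() below is the point that cpu_loop_exit() and
     * cpu_resume_from_signal() siglongjmp() back to; the inner loop
     * then looks up and executes one translated block at a time.
     */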

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
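                /* Check for any pending interrupt or exit request before
                   looking up the next TB. */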
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
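                    /* The low bits of next_tb (TB_EXIT_MASK) tell us why
                     * the generated code stopped executing.
                     */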
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}