cpu-exec.c 21.2 KB
Newer Older
B
bellard 已提交
1
/*
2
 *  emulator main execution loop
3
 *
B
bellard 已提交
4
 *  Copyright (c) 2003-2005 Fabrice Bellard
B
bellard 已提交
5
 *
B
bellard 已提交
6 7 8 9
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
B
bellard 已提交
10
 *
B
bellard 已提交
11 12 13 14
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
B
bellard 已提交
15
 *
B
bellard 已提交
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
B
bellard 已提交
18
 */
B
bellard 已提交
19
#include "config.h"
B
Blue Swirl 已提交
20
#include "cpu.h"
21
#include "trace.h"
22
#include "disas/disas.h"
23
#include "tcg.h"
24
#include "qemu/atomic.h"
25
#include "sysemu/qtest.h"
26
#include "qemu/timer.h"
P
Paolo Bonzini 已提交
27
#include "exec/address-spaces.h"
28
#include "qemu/rcu.h"
29
#include "exec/tb-hash.h"
30 31 32
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
33
#include "sysemu/replay.h"
34 35 36 37 38 39

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
40
    int64_t realtime_clock;
41 42 43 44 45 46 47 48
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
49 50 51
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
P
Paolo Bonzini 已提交
71
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
72 73 74 75 76 77 78 79 80 81
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

104 105 106 107 108 109
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
110 111
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
112
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
113 114 115 116 117 118
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }
119 120 121 122

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to NB_PRINT_MAX(currently 100) */
    print_delay(sc);
123 124 125 126 127 128 129 130 131 132
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */
B
bellard 已提交
133

134 135 136 137
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

156
    cpu->can_do_io = !use_icount;
157
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
158
    cpu->can_do_io = 1;
159 160 161
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

162 163 164 165 166
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
167
        CPUClass *cc = CPU_GET_CLASS(cpu);
168
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
169 170 171 172 173 174
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
175
    }
176 177 178 179 180 181
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
182 183 184
    return next_tb;
}

P
pbrook 已提交
185 186
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
187
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
188
                             TranslationBlock *orig_tb, bool ignore_icount)
P
pbrook 已提交
189 190 191 192 193 194 195 196
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

197
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
198 199
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
200
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
201
    cpu->current_tb = tb;
P
pbrook 已提交
202
    /* execute the generated code */
203
    trace_exec_tb_nocache(tb, tb->pc);
204
    cpu_tb_exec(cpu, tb->tc_ptr);
205
    cpu->current_tb = NULL;
P
pbrook 已提交
206 207 208 209
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

210 211 212 213
static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint64_t flags)
214
{
215
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
216 217
    TranslationBlock *tb, **ptb1;
    unsigned int h;
218
    tb_page_addr_t phys_pc, phys_page1;
P
Paul Brook 已提交
219
    target_ulong virt_page2;
220

221
    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
222

223
    /* find translated block using physical mappings */
P
Paul Brook 已提交
224
    phys_pc = get_page_addr_code(env, pc);
225 226
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
227
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
228 229
    for(;;) {
        tb = *ptb1;
230 231 232
        if (!tb) {
            return NULL;
        }
233
        if (tb->pc == pc &&
234
            tb->page_addr[0] == phys_page1 &&
235
            tb->cs_base == cs_base &&
236 237 238
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
239 240
                tb_page_addr_t phys_page2;

241
                virt_page2 = (pc & TARGET_PAGE_MASK) +
242
                    TARGET_PAGE_SIZE;
P
Paul Brook 已提交
243
                phys_page2 = get_page_addr_code(env, virt_page2);
244 245 246
                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
247
            } else {
248
                break;
249 250 251 252
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
253

254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285
    /* Move the TB to the head of the list */
    *ptb1 = tb->phys_hash_next;
    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    return tb;
}

static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock.  Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
286
    }
287 288 289 290 291 292 293 294 295 296
#endif

    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
297
    /* we add the TB in the virtual pc hash table */
298
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
299 300 301
    return tb;
}

302
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
303
{
304
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
305 306
    TranslationBlock *tb;
    target_ulong cs_base, pc;
307
    int flags;
308 309 310 311

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
312
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
313
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
314 315
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
316
        tb = tb_find_slow(cpu, pc, cs_base, flags);
317 318 319 320
    }
    return tb;
}

321
static void cpu_handle_debug_exception(CPUState *cpu)
322
{
323
    CPUClass *cc = CPU_GET_CLASS(cpu);
324 325
    CPUWatchpoint *wp;

326 327
    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
328 329 330
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
331 332

    cc->debug_excp_handler(cpu);
333 334
}

B
bellard 已提交
335 336
/* main execution loop */

337
int cpu_exec(CPUState *cpu)
B
bellard 已提交
338
{
339
    CPUClass *cc = CPU_GET_CLASS(cpu);
340 341
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
342
    CPUArchState *env = &x86_cpu->env;
343
#endif
344 345
    int ret, interrupt_request;
    TranslationBlock *tb;
B
bellard 已提交
346
    uint8_t *tc_ptr;
347
    uintptr_t next_tb;
348 349
    SyncClocks sc;

350 351 352
    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

353
    if (cpu->halted) {
354
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
355 356
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
357 358 359 360
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
361
        if (!cpu_has_work(cpu)) {
362
            current_cpu = NULL;
363 364 365
            return EXCP_HALTED;
        }

366
        cpu->halted = 0;
367
    }
B
bellard 已提交
368

P
Paolo Bonzini 已提交
369
    atomic_mb_set(&tcg_current_cpu, cpu);
370 371
    rcu_read_lock();

372
    if (unlikely(atomic_mb_read(&exit_request))) {
373
        cpu->exit_request = 1;
374 375
    }

376
    cc->cpu_exec_enter(cpu);
377

378 379 380 381 382 383 384
    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

B
bellard 已提交
385
    /* prepare setjmp context for exception handling */
386
    for(;;) {
387
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
388
            /* if an exception is pending, we execute it here */
389 390
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
391
                    /* exit request from the cpu execution loop */
392
                    ret = cpu->exception_index;
393
                    if (ret == EXCP_DEBUG) {
394
                        cpu_handle_debug_exception(cpu);
395
                    }
396
                    cpu->exception_index = -1;
397
                    break;
A
aurel32 已提交
398 399
                } else {
#if defined(CONFIG_USER_ONLY)
400
                    /* if user mode only, we simulate a fake exception
T
ths 已提交
401
                       which will be handled outside the cpu execution
402
                       loop */
B
bellard 已提交
403
#if defined(TARGET_I386)
404
                    cc->do_interrupt(cpu);
B
bellard 已提交
405
#endif
406
                    ret = cpu->exception_index;
407
                    cpu->exception_index = -1;
408
                    break;
A
aurel32 已提交
409
#else
410 411 412 413 414 415 416 417
                    if (replay_exception()) {
                        cc->do_interrupt(cpu);
                        cpu->exception_index = -1;
                    } else if (!replay_has_interrupt()) {
                        /* give a chance to iothread in replay mode */
                        ret = EXCP_INTERRUPT;
                        break;
                    }
B
bellard 已提交
418
#endif
419
                }
420 421 422 423 424 425
            } else if (replay_has_exception()
                       && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                /* try to cause an exception pending in the log */
                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
                ret = -1;
                break;
426
            }
B
bellard 已提交
427

428
            next_tb = 0; /* force lookup of first TB */
429
            for(;;) {
430
                interrupt_request = cpu->interrupt_request;
M
malc 已提交
431
                if (unlikely(interrupt_request)) {
432
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
M
malc 已提交
433
                        /* Mask out external interrupts for this step. */
434
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
M
malc 已提交
435
                    }
436
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
437
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
438
                        cpu->exception_index = EXCP_DEBUG;
439
                        cpu_loop_exit(cpu);
440
                    }
441 442 443 444 445
                    if (replay_mode == REPLAY_MODE_PLAY
                        && !replay_has_interrupt()) {
                        /* Do nothing */
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        replay_interrupt();
446 447
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
448
                        cpu->exception_index = EXCP_HLT;
449
                        cpu_loop_exit(cpu);
450
                    }
451
#if defined(TARGET_I386)
452 453
                    else if (interrupt_request & CPU_INTERRUPT_INIT) {
                        replay_interrupt();
454 455 456 457 458 459
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
460 461
                    else if (interrupt_request & CPU_INTERRUPT_RESET) {
                        replay_interrupt();
462
                        cpu_reset(cpu);
463
                        cpu_loop_exit(cpu);
464
                    }
B
bellard 已提交
465
#endif
466 467 468 469
                    /* The target hook has 3 exit conditions:
                       False when the interrupt isn't processed,
                       True when it is, and we should restart on a new TB,
                       and via longjmp via cpu_loop_exit.  */
470 471 472 473 474
                    else {
                        replay_interrupt();
                        if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                            next_tb = 0;
                        }
475 476 477
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
478 479
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
480 481
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
482
                        next_tb = 0;
483
                    }
484
                }
485 486
                if (unlikely(cpu->exit_request
                             || replay_has_interrupt())) {
487
                    cpu->exit_request = 0;
488
                    cpu->exception_index = EXCP_INTERRUPT;
489
                    cpu_loop_exit(cpu);
490
                }
K
KONRAD Frederic 已提交
491
                tb_lock();
492
                tb = tb_find_fast(cpu);
P
pbrook 已提交
493 494
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
495
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
P
pbrook 已提交
496 497 498 499
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
500
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
P
pbrook 已提交
501
                }
502 503 504 505
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
506 507 508
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
509 510
                if (next_tb != 0 && tb->page_addr[1] == -1
                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
511 512
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
513
                }
K
KONRAD Frederic 已提交
514
                tb_unlock();
515
                if (likely(!cpu->exit_request)) {
516
                    trace_exec_tb(tb, tb->pc);
P
pbrook 已提交
517
                    tc_ptr = tb->tc_ptr;
518
                    /* execute the generated code */
519
                    cpu->current_tb = tb;
520
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
521
                    cpu->current_tb = NULL;
522 523 524 525 526 527 528
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
529 530 531 532
                         * next time around the loop.  But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
533
                         */
534
                        smp_rmb();
535 536 537 538
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
T
ths 已提交
539
                        /* Instruction counter expired.  */
P
Paolo Bonzini 已提交
540
                        int insns_left = cpu->icount_decr.u32;
541
                        if (cpu->icount_extra && insns_left >= 0) {
P
pbrook 已提交
542
                            /* Refill decrementer and continue execution.  */
543
                            cpu->icount_extra += insns_left;
P
Paolo Bonzini 已提交
544
                            insns_left = MIN(0xffff, cpu->icount_extra);
545
                            cpu->icount_extra -= insns_left;
546
                            cpu->icount_decr.u16.low = insns_left;
P
pbrook 已提交
547 548 549
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
P
Paolo Bonzini 已提交
550
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
551
                                cpu_exec_nocache(cpu, insns_left, tb, false);
552
                                align_clocks(&sc, cpu);
P
pbrook 已提交
553
                            }
554
                            cpu->exception_index = EXCP_INTERRUPT;
P
pbrook 已提交
555
                            next_tb = 0;
556
                            cpu_loop_exit(cpu);
P
pbrook 已提交
557
                        }
558 559 560 561
                        break;
                    }
                    default:
                        break;
P
pbrook 已提交
562 563
                    }
                }
564 565 566
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
B
bellard 已提交
567 568
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
T
ths 已提交
569
            } /* for(;;) */
570
        } else {
571 572 573 574 575
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
576
            cpu = current_cpu;
577
            cc = CPU_GET_CLASS(cpu);
578 579
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
580
            env = &x86_cpu->env;
581
#endif
582 583 584 585 586 587 588 589 590 591
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
            g_assert(x86_cpu == X86_CPU(cpu));
            g_assert(env == &x86_cpu->env);
#endif
#endif /* buggy compiler */
            cpu->can_do_io = 1;
K
KONRAD Frederic 已提交
592
            tb_lock_reset();
B
bellard 已提交
593
        }
594 595
    } /* for(;;) */

596
    cc->cpu_exec_exit(cpu);
597
    rcu_read_unlock();
P
pbrook 已提交
598

599 600
    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
P
Paolo Bonzini 已提交
601 602 603

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
B
bellard 已提交
604 605
    return ret;
}