/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"

/* -icount align implementation. */

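/* Note: diff_clk tracks how far the guest (icount) clock has run ahead of
 * the host clock, in nanoseconds; align_clocks() below sleeps the host
 * thread while the guest is ahead, so the difference oscillates around 0. */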
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100) */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
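/* The low bits (TB_EXIT_MASK) of the value returned by tcg_qemu_tb_exec()
 * encode why execution stopped: TB_EXIT_IDX0/1 name the goto_tb jump slot
 * that was taken, while TB_EXIT_REQUESTED and TB_EXIT_ICOUNT_EXPIRED mean
 * the TB was interrupted; the upper bits point at the last TB executed. */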
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
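/* A throwaway TB is generated with CF_NOCACHE so that exactly max_cycles
   guest instructions run; it is invalidated and freed again as soon as it
   has executed (used by the icount and record/replay machinery). */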
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

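/* TB lookup is two-level: tb_find_fast() first checks the per-CPU
 * tb_jmp_cache, hashed by virtual PC; on a miss, tb_find_slow() falls back
 * to tb_find_physical(), which walks the physical-address hash chain and
 * only translates a new TB if nothing matches. */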
static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            return NULL;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
            } else {
                break;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }

    /* Move the TB to the head of the list */
    *ptb1 = tb->phys_hash_next;
    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    return tb;
}

static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock.  Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
    }
#endif

    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}

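/* Called when the loop below exits with EXCP_DEBUG: clear any stale
 * watchpoint-hit flags (unless a watchpoint hit is still being reported)
 * and let the per-target hook react to the debug exception. */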
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

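/* Structure of the loop below: the outer for(;;) re-enters through
 * sigsetjmp() after every cpu_loop_exit() longjmp; the inner for(;;)
 * services interrupt and exit requests, then finds and runs (possibly
 * chained) TBs until something forces a return to the exception path. */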
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    if (replay_exception()) {
                        cc->do_interrupt(cpu);
                        cpu->exception_index = -1;
                    } else if (!replay_has_interrupt()) {
                        /* give a chance to iothread in replay mode */
                        ret = EXCP_INTERRUPT;
                        break;
                    }
#endif
                }
            } else if (replay_has_exception()
                       && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                /* try to cause an exception pending in the log */
                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
                ret = -1;
                break;
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (replay_mode == REPLAY_MODE_PLAY
                        && !replay_has_interrupt()) {
                        /* Do nothing */
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        replay_interrupt();
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    else if (interrupt_request & CPU_INTERRUPT_INIT) {
                        replay_interrupt();
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    else if (interrupt_request & CPU_INTERRUPT_RESET) {
                        replay_interrupt();
                        cpu_reset(cpu);
                        cpu_loop_exit(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       False when the interrupt isn't processed,
                       True when it is, and we should restart on a new TB,
                       and a longjmp via cpu_loop_exit().  */
                    else {
                        replay_interrupt();
                        if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                            next_tb = 0;
                        }
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request
                             || replay_has_interrupt())) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                tb_lock();
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1
                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                tb_unlock();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    cpu->current_tb = NULL;
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.  But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left = cpu->icount_decr.u32;
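                        /* insns_left is negative here when the exit was
                         * requested by setting icount_decr.u16.high;
                         * otherwise the u16.low budget simply ran out and
                         * may be refilled from icount_extra below. */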
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb, false);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
            g_assert(x86_cpu == X86_CPU(cpu));
            g_assert(env == &x86_cpu->env);
#endif
#endif /* buggy compiler */
            cpu->can_do_io = 1;
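            /* cpu_loop_exit() may have longjmp'd out from between tb_lock()
             * and tb_unlock() above, so the lock can still be held here;
             * make sure it is released before the next iteration. */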
            tb_lock_reset();
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}