/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"

/* -icount align implementation. */
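/* With -icount align, guest virtual time (driven by the instruction
 * counter) is kept in step with host real time: SyncClocks tracks the
 * running difference between the two clocks so the execution loop
 * knows when to sleep.
 */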

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

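/* Convert the instructions executed since the last call to nanoseconds,
 * accumulate them into diff_clk, and sleep off any guest advance beyond
 * VM_CLOCK_ADVANCE so the guest cannot run ahead of real time.
 */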
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
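        /* Sleep() takes milliseconds, and diff_clk is simply zeroed
         * afterwards, so any sub-millisecond remainder is dropped. */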
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
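/* The return value is the pointer of the last TB that was executed, with
 * the exit reason encoded in its low bits (TB_EXIT_MASK).  A minimal
 * sketch of how a caller decodes it:
 *
 *     TranslationBlock *tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *     int exit_reason = ret & TB_EXIT_MASK;   // e.g. TB_EXIT_REQUESTED
 */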
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                  itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *)(next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        qemu_log_mask(CPU_LOG_EXEC,
                      "Stopped execution of TB chain before %p ["
                      TARGET_FMT_lx "] %s\n",
                      itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt); we've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

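/* Used when execution must stop at an exact instruction boundary,
 * e.g. when the remaining icount budget is smaller than an existing
 * TB: a throwaway (CF_NOCACHE) TB is generated, run once and freed.
 */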
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

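/* Look up a TB by (physical PC, cs_base, flags) in the global physical
 * hash table.  Returns NULL when no translation exists yet; on a hit
 * the TB is moved to the head of its hash chain to speed up the next
 * lookup.
 */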
static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            return NULL;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
            } else {
                break;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }

    /* Move the TB to the head of the list */
    *ptb1 = tb->phys_hash_next;
    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    return tb;
}

static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock.  Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
    }
#endif

    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

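/* Fast-path TB lookup: probe the per-CPU tb_jmp_cache, indexed by a
 * hash of the virtual PC, and fall back to tb_find_slow() (which may
 * translate a new TB) on a miss.
 */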
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}

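/* If no watchpoint is currently reported as hit, clear any stale
 * BP_WATCHPOINT_HIT flags before invoking the per-target debug
 * exception handler.
 */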
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

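/* cpu_exec() repeatedly looks up (or translates) the TB matching the
 * current CPU state, executes it, and services exceptions, interrupts
 * and icount expiry in between.  It returns the reason it stopped
 * (e.g. EXCP_INTERRUPT, EXCP_HLT) to the calling CPU loop.
 */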
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uintptr_t next_tb;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    if (replay_exception()) {
                        cc->do_interrupt(cpu);
                        cpu->exception_index = -1;
                    } else if (!replay_has_interrupt()) {
                        /* give a chance to iothread in replay mode */
                        ret = EXCP_INTERRUPT;
                        break;
                    }
#endif
                }
            } else if (replay_has_exception()
                       && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                /* try to cause an exception pending in the log */
                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
                ret = -1;
                break;
            }

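            /* Inner loop: execute (possibly chained) TBs until an
             * interrupt, an exit request, or icount expiry forces us
             * back out to the exception-handling loop above.
             */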
            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (replay_mode == REPLAY_MODE_PLAY
                        && !replay_has_interrupt()) {
                        /* Do nothing */
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        replay_interrupt();
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    else if (interrupt_request & CPU_INTERRUPT_INIT) {
                        replay_interrupt();
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    else if (interrupt_request & CPU_INTERRUPT_RESET) {
                        replay_interrupt();
                        cpu_reset(cpu);
                        cpu_loop_exit(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       False when the interrupt isn't processed,
                       True when it is, and we should restart on a new TB,
                       and via longjmp through cpu_loop_exit.  */
                    else {
                        replay_interrupt();
                        if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                            next_tb = 0;
                        }
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request
                             || replay_has_interrupt())) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                tb_lock();
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1
                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                tb_unlock();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    next_tb = cpu_tb_exec(cpu, tb);
                    cpu->current_tb = NULL;
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.  But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb, false);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
            g_assert(x86_cpu == X86_CPU(cpu));
            g_assert(env == &x86_cpu->env);
#endif
#endif /* buggy compiler */
            cpu->can_do_io = 1;
            tb_lock_reset();
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}