cpu-exec.c

/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

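/* align_clocks() slows the guest down once it has run ahead of real time:
 * diff_clk accumulates how far guest virtual time is ahead of the host
 * clock, and any surplus beyond VM_CLOCK_ADVANCE is slept off on the host
 * (nanosleep on POSIX, Sleep on Windows).
 */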
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
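
/* Usage note: the alignment code above only runs when QEMU is started
 * with something like
 *     qemu-system-x86_64 -icount shift=auto,align=on ...
 * which sets icount_align_option; a plain "-icount N" leaves it disabled.
 */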

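/* Note on the return value: tcg_qemu_tb_exec() returns the address of
 * the last executed TB with a TB_EXIT_* reason code packed into its low
 * bits, hence the ~TB_EXIT_MASK / TB_EXIT_MASK arithmetic below.
 */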
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [" TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

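/* TB lookup is two-level: tb_find_fast() first probes tb_jmp_cache, a
 * per-CPU table indexed by a hash of the virtual PC; on a miss,
 * tb_find_slow()/tb_find_physical() fall back to tb_phys_hash, which is
 * keyed on the physical address of the code, and translate a new TB if
 * nothing matches.
 */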
static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            return NULL;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
            } else {
                break;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }

    /* Move the TB to the head of the list */
    *ptb1 = tb->phys_hash_next;
    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    return tb;
}

static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock.  Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
    }
#endif

    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}

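/* Called from cpu_exec() when the pending exception is EXCP_DEBUG:
 * clear stale watchpoint-hit flags unless one is still being reported,
 * then hand control to the per-target debug exception hook.
 */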
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uintptr_t next_tb;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

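    /* Publish this CPU as the one running TCG code; other threads read
     * tcg_current_cpu (e.g. to kick us out of the execution loop), so a
     * barriered store is used here and the pointer is cleared again on
     * exit below.
     */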
    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

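    /* Control flow from here on is built around sigsetjmp/siglongjmp:
     * cpu_loop_exit() longjmps back to the sigsetjmp below, so the "else"
     * branch further down is the landing pad that restores local state
     * after an exception or interrupt unwinds the loop.
     */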
    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    if (replay_exception()) {
                        cc->do_interrupt(cpu);
                        cpu->exception_index = -1;
                    } else if (!replay_has_interrupt()) {
                        /* give a chance to iothread in replay mode */
                        ret = EXCP_INTERRUPT;
                        break;
                    }
#endif
                }
            } else if (replay_has_exception()
                       && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                /* try to cause an exception pending in the log */
                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
                ret = -1;
                break;
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (replay_mode == REPLAY_MODE_PLAY
                        && !replay_has_interrupt()) {
                        /* Do nothing */
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        replay_interrupt();
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    else if (interrupt_request & CPU_INTERRUPT_INIT) {
                        replay_interrupt();
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    else if (interrupt_request & CPU_INTERRUPT_RESET) {
                        replay_interrupt();
                        cpu_reset(cpu);
                        cpu_loop_exit(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       False when the interrupt isn't processed,
                       True when it is, and we should restart on a new TB,
                       or it longjmps out via cpu_loop_exit().  */
                    else {
                        replay_interrupt();
                        if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                            next_tb = 0;
                        }
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request
                             || replay_has_interrupt())) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                tb_lock();
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1
                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                tb_unlock();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    next_tb = cpu_tb_exec(cpu, tb);
                    cpu->current_tb = NULL;
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.  But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb, false);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
            g_assert(x86_cpu == X86_CPU(cpu));
            g_assert(env == &x86_cpu->env);
#endif
#endif /* buggy compiler */
            cpu->can_do_io = 1;
            tb_lock_reset();
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}