/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory-internal.h"
#include "qemu/rcu.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

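/* align_clocks keeps the guest from running ahead of real time:
 * diff_clk accumulates how far the virtual clock has advanced beyond
 * the host clock, and once the guest is more than VM_CLOCK_ADVANCE
 * (3 ms) ahead, the vCPU thread sleeps the difference away so the two
 * clocks oscillate around zero.
 */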
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}

void cpu_reload_memory_map(CPUState *cpu)
{
    AddressSpaceDispatch *d;

    if (qemu_in_vcpu_thread()) {
        /* Do not let the guest prolong the critical section as much as it
         * desires.
         *
         * Currently, this is prevented by the I/O thread's periodic kicking
         * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
         * but this will go away once TCG's execution moves out of the global
         * mutex.
         *
         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
         * only protects cpu->as->dispatch.  Since we reload it below, we can
         * split the critical section.
         */
        rcu_read_unlock();
        rcu_read_lock();
    }

    /* The CPU and TLB are protected by the iothread lock.  */
    d = atomic_rcu_read(&cpu->as->dispatch);
    cpu->memory_dispatch = d;
    tlb_flush(cpu, 1);
}
#endif

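/* The value returned by tcg_qemu_tb_exec is the address of the last
 * executed TB plus an exit reason in its low TB_EXIT_MASK bits:
 * TB_EXIT_IDX0/1 for a taken goto_tb slot, TB_EXIT_REQUESTED when an
 * exit was asked for, and TB_EXIT_ICOUNT_EXPIRED when the instruction
 * budget ran out (encoding as used by the callers below).
 */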
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = 0;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong pc = orig_tb->pc;
    target_ulong cs_base = orig_tb->cs_base;
    uint64_t flags = orig_tb->flags;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    /* tb_gen_code can flush our orig_tb, invalidate it now */
    tb_phys_invalidate(orig_tb, -1);
    tb = tb_gen_code(cpu, pc, cs_base, flags,
                     max_cycles | CF_NOCACHE);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

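/* Slow-path TB lookup: hash the physical PC into tb_phys_hash, walk
 * the collision chain comparing pc/cs_base/flags (and the second page
 * for TBs that span two pages), and translate a new TB on a miss.
 * The result is also cached in cpu->tb_jmp_cache for tb_find_fast.
 */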
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

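/* Fast-path TB lookup: a direct-mapped cache indexed by the virtual
 * PC; only on a miss (or a cs_base/flags mismatch) do we fall back to
 * tb_find_slow and its physical hash walk.
 */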
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

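/* Set by other threads (or from signal context, hence sig_atomic_t) to
 * ask the running vCPU to leave the execution loop; it is sampled once
 * on entry to cpu_exec and folded into cpu->exit_request.
 */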
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    rcu_read_lock();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

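    /* Structure of the main loop: sigsetjmp() establishes the
     * exception return point; cpu_loop_exit() siglongjmp()s back here
     * with cpu->exception_index set, and the handler either leaves the
     * loop (EXCP_INTERRUPT and above) or injects the exception and
     * resumes translation-block execution.
     */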
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

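            /* Inner loop: handle any pending interrupt work, look up
             * (or translate) the next TB, try to chain it to the
             * previous one, then run it.  Everything that needs to
             * stop this loop does so by setting a flag checked here or
             * by longjmp-ing out via cpu_loop_exit().
             */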
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       false when the interrupt isn't processed,
                       true when it is, and we should restart on a new TB,
                       and via longjmp through cpu_loop_exit.  */
                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                        next_tb = 0;
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
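                        /* cpu->icount_decr is a union: generated code
                         * decrements the u16.low half, while reading
                         * u32 as a whole also picks up the high half,
                         * which an exit request forces negative so
                         * that insns_left < 0.  The refill below moves
                         * at most 0xffff instructions at a time from
                         * icount_extra into the decrementer.
                         */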
                        /* Instruction counter expired.  */
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
            cc = CPU_GET_CLASS(cpu);
            cpu->can_do_io = 1;
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}