/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"

/* -icount align implementation. */
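/* SyncClocks tracks how far the guest's instruction-counter clock has run
 * ahead of the host's real-time clock: init_delay_params() samples both
 * clocks, and align_clocks() sleeps whenever the accumulated advance
 * exceeds VM_CLOCK_ADVANCE, so guest time and host time stay close when
 * -icount align is in use.
 */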

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}
#endif

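/* Translate and execute exactly one guest instruction as a throwaway
 * (CF_NOCACHE) TB.  Called from cpu_exec_step_atomic() below while this
 * vCPU holds exclusive access, so the generated code can assume no other
 * vCPU runs in parallel.
 */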
static void cpu_exec_step(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        mmap_lock();
        tb_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags,
                         1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
        tb->orig_tb = NULL;
        tb_unlock();
        mmap_unlock();

        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb_nocache(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);

        tb_lock();
        tb_phys_invalidate(tb, -1);
        tb_free(tb);
        tb_unlock();
    } else {
        /* We may have exited due to another problem here, so we need
         * to reset any tb_locks we may have taken but didn't release.
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        tb_lock_reset();
    }
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    start_exclusive();

    /* Since we got here, we know that parallel_cpus must be true.  */
    parallel_cpus = false;
    cpu_exec_step(cpu);
    parallel_cpus = true;

    end_exclusive();
}

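/* Key used when looking up a TB in the global physically-indexed hash table
 * (tcg_ctx.tb_ctx.htable); tb_cmp() is the comparison callback passed to
 * qht_lookup() and must match every field that identifies a translation.
 */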
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
};

static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        !atomic_read(&tb->invalid)) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

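/* Slow-path lookup: hash the physical PC, virtual PC and flags and search
 * the QHT for a matching, still-valid TB.  Returns NULL if the code has
 * not been translated yet.
 */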
static TranslationBlock *tb_htable_lookup(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags);
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}

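/* Find the TB for the current CPU state: first probe the per-CPU
 * tb_jmp_cache, then the global hash table, and finally translate a new
 * block if nothing matches.  If last_tb is non-NULL, also try to chain it
 * directly to the TB that is returned.
 */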
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool have_tb_lock = false;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
        if (!tb) {

            /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
             * taken outside tb_lock. As system emulation is currently
             * single threaded the locks are NOPs.
             */
            mmap_lock();
            tb_lock();
            have_tb_lock = true;

            /* There's a chance that our desired tb has been translated while
             * taking the locks so we check again inside the lock.
             */
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);
            if (!tb) {
                /* if no translated code available, then translate it now */
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }

            mmap_unlock();
        }

        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!have_tb_lock) {
            tb_lock();
            have_tb_lock = true;
        }
        if (!tb->invalid) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (have_tb_lock) {
        tb_unlock();
    }
    return tb;
}

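/* Returns true if the CPU is halted and has no pending work, in which case
 * cpu_exec() bails out with EXCP_HALTED; otherwise clears the halted state
 * and lets execution continue.
 */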
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

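/* Clear stale watchpoint-hit flags and hand the debug exception to the
 * target-specific handler.
 */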
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

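/* Process a pending cpu->exception_index.  Returns true (with *ret set)
 * when cpu_exec() should leave its main loop, false when execution can
 * continue with the next TB.
 */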
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* if user mode only, we simulate a fake exception
               which will be handled outside the cpu execution
               loop */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                qemu_mutex_lock_iothread();
                cc->do_interrupt(cpu);
                qemu_mutex_unlock_iothread();
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
        *ret = -1;
        return true;
#endif
    }

    return false;
}

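/* Check for pending interrupts and exit requests.  Returns true when the
 * TB execution loop must be interrupted (for example to take an exception
 * or to exit to the main loop); may also clear *last_tb so that no direct
 * TB chaining happens across the interrupt.
 */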
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has three exit conditions: it returns false when
           the interrupt isn't processed, true when it is and we should
           restart on a new TB, or it does not return at all because it
           longjmps out through cpu_loop_exit().  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        return true;
    }

    return false;
}

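/* Execute one TB and decide what to do next: remember it in *last_tb for
 * chaining, or, when the TB exited because the instruction counter ran out,
 * refill the icount decrementer or execute the remaining instructions with
 * a temporary TB.
 */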
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    atomic_set(&cpu->icount_decr.u16.high, 0);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which we will handle next time around
         * the loop.  But we need to ensure the zeroing of icount_decr
         * comes before the next read of cpu->exit_request
         * or cpu->interrupt_request.
         */
        smp_mb();
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    if (cpu->icount_extra) {
        /* Refill decrementer and continue execution.  */
        cpu->icount_extra += insns_left;
        insns_left = MIN(0xffff, cpu->icount_extra);
        cpu->icount_extra -= insns_left;
        cpu->icount_decr.u16.low = insns_left;
    } else {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        cpu->can_do_io = 1;
        tb_lock_reset();
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}