/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

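/* A vCPU thread counts as idle only when it has no stop or queued-work
 * request pending, and is either fully stopped or halted with no work and
 * with HLT emulated in userspace (in-kernel halt keeps the thread busy). */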
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static int64_t vm_clock_warp_start;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

/* Only written by TCG thread */
static int64_t qemu_icount;

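/* icount_rt_timer and icount_vm_timer periodically re-run icount_adjust()
 * to tune icount_time_shift; icount_warp_timer advances the virtual clock
 * while all vCPUs are idle. */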
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read outside the BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter.  */
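/* The result is qemu_icount_bias plus the executed-instruction count scaled
 * by icount_time_shift; instructions still pending in the current TB budget
 * (icount_decr.u16.low + icount_extra) are subtracted before scaling. */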
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

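/* Seqlock read side: redo the read if a concurrent writer touched the
 * timer state between seqlock_read_begin() and seqlock_read_retry(). */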
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, what the seqlock really protects is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, what the seqlock really protects is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

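/* Compare virtual and real time and nudge icount_time_shift by one step
 * whenever the drift grew by more than ICOUNT_WOBBLE since the last check:
 * a smaller shift slows the virtual clock down, a larger one speeds it up. */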
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

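/* Fold the real time that passed while the vCPUs were sleeping into the
 * icount bias, so QEMU_CLOCK_VIRTUAL catches up once they wake.  In
 * adaptive mode the warp is capped so virtual time never overtakes real
 * time. */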
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

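/* Used by qtest's clock-step commands: jump the virtual clock forward in
 * deadline-sized steps, running the timers that become due at each step. */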
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = MIN(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed. The QEMU_CLOCK_REALTIME timer will do this.
         * This keeps the warps from being visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                          icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                        icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

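/* Stop the whole machine: freeze the tick counters, pause every vCPU,
 * notify run-state listeners, then drain and flush all block devices so
 * no I/O remains in flight while the VM is stopped. */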
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

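/* Run func(data) on the given vCPU's thread and wait for completion.
 * The work item lives on the caller's stack; the caller sleeps on
 * qemu_work_cond (releasing the BQL) until the vCPU marks it done.
 * If the caller already is that vCPU, the function just runs inline. */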
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

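/* Force a vCPU thread out of its execution loop: SIG_IPI on POSIX hosts;
 * on Windows, which has no signals, suspend the thread, poke it via
 * cpu_signal() and resume it. */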
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

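/* Request every vCPU to stop and wait until all of them have acknowledged.
 * When called from a vCPU thread in a non-KVM configuration the CPUs are
 * marked stopped directly instead of waiting (with TCG all vCPUs run on the
 * calling thread). */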
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

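/* All TCG vCPUs share a single host thread; the first CPU to be initialized
 * creates it, later CPUs simply reuse the same thread and halt condition. */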
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
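    /* In icount mode, give the vCPU an instruction budget that expires no
     * later than the next QEMU_CLOCK_VIRTUAL deadline: the low 16 bits go
     * into icount_decr, the remainder into icount_extra. */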
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

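/* Round-robin over all vCPUs, giving each one a TCG execution slice, until
 * every CPU has had a turn or an exit was requested. */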
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}