/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

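/* A vCPU thread counts as idle when it has no queued work, is not being
 * asked to stop, and is either already stopped or halted with nothing
 * pending.  all_cpu_threads_idle() below is what gates icount warping. */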
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed.  */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter.  */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu_can_do_io(cpu)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

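/* Reader side of the seqlock: safe to call without the BQL; the read is
 * retried if a concurrent writer updated the timer state in the middle. */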
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* return the offset between the host clock and virtual CPU clock */
int64_t cpu_get_clock_offset(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = timers_state.cpu_clock_offset;
        if (!timers_state.cpu_ticks_enabled) {
            ti -= get_clock();
        }
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return -ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

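/* Timer callback: fold the real time that passed while the vCPUs were
 * sleeping into qemu_icount_bias so that QEMU_CLOCK_VIRTUAL catches up. */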
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

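/* qtest variant of clock warping: step QEMU_CLOCK_VIRTUAL straight to
 * 'dest', running any timers whose deadlines fall along the way. */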
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

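/* In icount mode, when every vCPU thread is idle, arm icount_warp_timer so
 * that QEMU_CLOCK_VIRTUAL can jump ahead to the next timer deadline instead
 * of waiting for instructions that will never be executed. */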
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids making the warps visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &icount_vmstate_timers,
            .needed = icount_state_needed,
        }, {
            /* empty */
        }
    }
};

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
}

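/* Parse the -icount option: a numeric "shift" fixes the instructions-per-tick
 * ratio, "shift=auto" selects adaptive mode (use_icount == 2) driven by the
 * two adjustment timers below, and "align" toggles icount_align_option. */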
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }
    icount_align_option = qemu_opt_get_bool(opts, "align", false);
    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                          icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                        icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_clean_all_dirty(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_clean_state(cpu);
    }
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

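/* Run func(data) on cpu's thread and wait for completion.  If the caller
 * already is that thread, func runs immediately; otherwise the work item
 * lives on the caller's stack and the BQL is dropped while waiting. */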
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

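/* Fire-and-forget variant of run_on_cpu(): the work item is heap-allocated
 * and freed by flush_queued_work() after func has run. */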
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

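/* One host thread per KVM vCPU: create the vCPU in the kernel, then loop
 * between kvm_cpu_exec() and waiting for I/O events. */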
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

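/* A single host thread drives all TCG vCPUs: it round-robins through them
 * in tcg_exec_all() and sleeps in qemu_tcg_wait_io_event() when idle. */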
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

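/* Acquire the BQL from the I/O thread.  Under TCG the vCPU thread can hold
 * the lock for a long time, so kick it and raise iothread_requesting_mutex
 * to make it give way. */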
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

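/* Ask every vCPU to stop and wait until all of them have done so.  May be
 * called from a vCPU thread, in which case the calling vCPU is stopped
 * directly first. */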
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

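/* Run guest code for one vCPU.  In icount mode, first compute an instruction
 * budget from the next QEMU_CLOCK_VIRTUAL deadline: the low 16 bits go into
 * icount_decr.u16.low and the remainder into icount_extra. */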
static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                        + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}

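/* Round-robin over the vCPUs, starting from next_cpu, until an exit is
 * requested or a vCPU asks to stop. */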
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
1453 1454 1455
#elif defined(TARGET_TRICORE)
        info->value->has_PC = true;
        info->value->PC = env->PC;
L

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
1475
    CPUState *cpu;
L

    if (!has_cpu) {
        cpu_index = 0;
    }

1482 1483
    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
L
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "specified", addr);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock  %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay     %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance   %"PRIi64" ms\n", max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay     NA\n");
        cpu_fprintf(f, "Max guest advance   NA\n");
    }
}