/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

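/* A vCPU counts as idle only if it has no stop request or queued work pending
 * and is either already stopped or halted with nothing to do (with in-kernel
 * halt, KVM never reports a running vCPU as idle here).
 */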
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed.  */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;

int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu_can_do_io(cpu)) {
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}

/* Return the virtual CPU time, based on the instruction counter.  */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non-increasing ticks may happen if the host uses
           software suspend */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the thing actually protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the thing actually protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

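/* Timer callback (also invoked from qemu_clock_warp): fold the real time that
 * has passed since vm_clock_warp_start into qemu_icount_bias so that
 * QEMU_CLOCK_VIRTUAL catches up after the vCPUs have been idle.
 */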
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = cpu_get_clock_locked();
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &icount_vmstate_timers,
            .needed = icount_state_needed,
        }, {
            /* empty */
        }
    }
};

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }
    icount_align_option = qemu_opt_get_bool(opts, "align", false);
    icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_clean_all_dirty(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_clean_state(cpu);
    }
}

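/* Stop the VM: freeze the tick counters, pause all vCPUs, notify listeners,
 * then drain and flush the block layer; returns the flush result.
 */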
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

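/* Drain pending SIG_IPI/SIGBUS signals on the vCPU thread, forwarding SIGBUS
 * (hardware memory errors) to KVM.
 */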
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

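/* Run func(data) on cpu's thread and wait here until it has completed;
 * if called from that vCPU thread itself, func runs immediately.
 */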
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

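/* Like run_on_cpu(), but does not wait: the work item is heap-allocated and
 * freed by flush_queued_work() once func has run.
 */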
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

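/* Called from the TCG thread: while every vCPU is idle, start warping
 * QEMU_CLOCK_VIRTUAL and sleep; then wait for the iothread to release the
 * global mutex and process any pending per-CPU work.
 */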
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

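/* Per-vCPU thread used with KVM: initialise the vCPU, then alternate between
 * kvm_cpu_exec() and waiting for I/O events.
 */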
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

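/* Single thread that runs all TCG vCPUs round-robin; see tcg_exec_all(). */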
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

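/* Force the vCPU thread out of guest execution: SIG_IPI on POSIX hosts; on
 * Windows, suspend the TCG thread, raise the exit request via cpu_signal(),
 * and resume it.
 */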
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

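/* Ask every vCPU to stop and block until all of them have actually paused. */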
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

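/* Run guest code for one vCPU.  In icount mode, first compute an instruction
 * budget from the next QEMU_CLOCK_VIRTUAL deadline (low 16 bits in
 * icount_decr, the remainder in icount_extra), and fold any unexecuted
 * instructions back afterwards.
 */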
static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                        + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
1418 1419 1420
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
1421
#endif
L
Luiz Capitulino 已提交
1422

1423
        cpu_synchronize_state(cpu);
L
Luiz Capitulino 已提交
1424 1425 1426

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
1427
        info->value->CPU = cpu->cpu_index;
1428
        info->value->current = (cpu == first_cpu);
1429
        info->value->halted = cpu->halted;
A
Andreas Färber 已提交
1430
        info->value->thread_id = cpu->thread_id;
L
Luiz Capitulino 已提交
1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
1445 1446 1447
#elif defined(TARGET_TRICORE)
        info->value->has_PC = true;
        info->value->PC = env->PC;
L
Luiz Capitulino 已提交
1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock  %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay     %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance   %"PRIi64" ms\n", max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay     NA\n");
        cpu_fprintf(f, "Max guest advance   NA\n");
    }
}