/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"
#include "bswap.h"
#include "memory.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
    int xsave, xcrs;
    int many_ioeventfds;
    int irqchip_inject_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int max_gsi;
#endif
};

KVMState *kvm_state;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}

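/* Program one slot's current parameters into the kernel.  The global
 * migration_log flag is ORed in here, so enabling migration logging does not
 * require rewriting each slot's cached flags. */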
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}

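/* Create a vcpu in the kernel, map its shared kvm_run area and hand the new
 * vcpu to the architecture-specific setup code. */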
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;
    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

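/* Toggle dirty logging for an address range.  The range must exactly match
 * an existing slot; anything else is reported as a caller bug. */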
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL)  {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, addr, addr1, c;
    unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using memory_region_set_dirty().
 * This means all bits are set to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is usually not detectable even
         * by valgrind).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * the hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);
        d.slot = mem->slot;
        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }
        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

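/* Make the kernel's slot table mirror a MemoryRegionSection: the section is
 * aligned to target page size, any overlapping slots are removed or split
 * into prefix/suffix slots, and (for add == true) a new slot is registered.
 * This is the workhorse behind the region_add/region_del listener callbacks. */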
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    size = TARGET_PAGE_ALIGN(size);
    start_addr = TARGET_PAGE_ALIGN(start_addr);
    if (!memory_region_is_ram(mr)) {
        return;
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static MemoryListener kvm_memory_listener = {
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
};

static void kvm_handle_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
    }
}

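/* Raise or lower an interrupt line of the in-kernel irqchip.  Returns 1 for
 * the plain KVM_IRQ_LINE ioctl, or the kernel-reported status when
 * KVM_CAP_IRQ_INJECT_STATUS is available. */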
int kvm_irqchip_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(s->irqchip_in_kernel);

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irqchip_line");
        abort();
    }

    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
static void set_gsi(KVMState *s, unsigned int gsi)
{
    assert(gsi < s->max_gsi);

    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = (gsi_count + 31) / 32;
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);
}

void kvm_irqchip_add_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

int kvm_irqchip_commit_routes(KVMState *s)
{
    s->irq_routes->flags = 0;
    return kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
}

#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)
{
}
#endif /* !KVM_CAP_IRQ_ROUTING */

static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", false) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
    s->irqchip_in_kernel = 1;

    kvm_init_irq_routing(s);

    return 0;
}

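/* Open /dev/kvm, create the VM, probe the capabilities this file depends on
 * and hook KVM into the memory core and the interrupt handling path. */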
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;

    s = g_malloc0(sizeof(KVMState));
#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }
    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }
    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener);
    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s) {
        if (s->vmfd >= 0) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    g_free(s);

    return ret;
}

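/* Replay a batch of programmed I/O accesses that the kernel recorded in the
 * kvm_run shared area, one element of 'size' bytes per iteration. */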
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

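/* Report a KVM_EXIT_INTERNAL_ERROR to the user, including any extra data the
 * kernel provided, and decide whether the vcpu loop may continue. */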
static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

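/* Drain the ring of MMIO writes coalesced by the kernel and replay them
 * against the memory core.  The in-progress flag makes the function a no-op
 * when called recursively, since replaying a write may itself trigger
 * another flush. */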
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}

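/* Per-vcpu execution loop: write back dirty register state, enter the guest
 * with KVM_RUN (with the iothread lock dropped) and dispatch each exit
 * reason until an exit requests a return to the main loop. */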
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");
    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    cpu_single_env = env;

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    env->exit_request = 0;
    cpu_single_env = NULL;
    return ret;
}

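/* Thin wrappers around ioctl(2) on the /dev/kvm, VM and vcpu file
 * descriptors; they return the ioctl result or -errno on failure. */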
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;
    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);
    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
}

int kvm_allows_irq0_override(void)
{
    return !kvm_enabled() || !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

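/* Apply the current guest-debug state (reinjected traps, single-stepping,
 * breakpoints) to a vcpu.  The ioctl is issued on the vcpu's own thread via
 * run_on_cpu(). */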
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;
    data.dbg.control = reinject_trap;
    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;
    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                          bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }
        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }
    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
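/* Bind (or unbind) an eventfd to a 4-byte MMIO write of a specific value so
 * the kernel can signal the fd without a userspace exit.  A hypothetical
 * caller wiring up a doorbell register might do something like:
 *
 *     int fd = eventfd(0, EFD_CLOEXEC);
 *     kvm_set_ioeventfd_mmio_long(fd, doorbell_addr, queue_index, true);
 *
 * (illustrative only; actual callers pick their own addresses and values). */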
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = 4;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}