/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"

#define MSIX_CAP_LENGTH 12

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

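/*
 * eventfd handler for INTx: VFIO signals the interrupt here.  Assert the
 * virtual IRQ, then disable BAR mmaps so we trap guest accesses until the
 * interrupt is serviced.
 */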
static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  vdev->intx.mmap_timeout);
    }
}

static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

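/*
 * Bypass QEMU for INTx by routing the interrupt eventfd directly into KVM,
 * with a resample eventfd that VFIO watches as the unmask notification, so
 * the level-triggered interrupt never has to exit to userspace.
 */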
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to clean up the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

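/*
 * The IRQ that the guest's INTx pin routes to may change (e.g. when the
 * guest reprograms a bridge); tear down the KVM bypass and re-establish
 * it on the new route.
 */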
static void vfio_intx_update(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route.irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

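/*
 * Program all MSI/MSI-X vectors with a single VFIO_DEVICE_SET_IRQS call.
 * Each vector is described by an eventfd (-1 for unused entries); which
 * fd we pass decides whether the vector fires through KVM or through QEMU.
 */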
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

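/*
 * Try to wire an MSI vector straight into the KVM irqchip: allocate a
 * routing entry and attach an irqfd to it.  On any failure the vector
 * quietly stays on the userspace (QEMU) signaling path.
 */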
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                       NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
}

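/*
 * Vector-use callback from the MSI-X emulation: set up the vector's
 * eventfds on first use, prefer KVM routing when available, and grow the
 * number of vectors enabled on the host as the guest uses more of them.
 */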
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        vfio_add_kvm_msi_virq(vdev, vector, nr, true);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shutdown and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}

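/* Switch the device into MSI-X mode and hook the MSI-X vector notifiers */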
static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

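/*
 * Enable MSI with as many vectors as the guest allocated; if the host
 * cannot provide them all, retry with the number it reports back.
 */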
static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev);
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, so check through the rest and release them ourselves if
     * necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

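/*
 * The guest rewrote the MSI address/data registers while MSI is enabled:
 * push the new message into any KVM routes so bypassed vectors keep
 * firing at the right place.
 */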
static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

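/*
 * Read the device's option ROM through the VFIO ROM region into a local
 * buffer, from which guest reads are then satisfied.
 */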
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                    "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                    "(check dmesg).\nSkip option ROM probe with rombar=0, "
                    "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
864 865 866 867
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

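/*
 * Size the ROM BAR the same way a guest would on bare metal: write all
 * ones to the physical ROM BAR register and read back the size mask,
 * restoring the original value afterwards.
 */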
static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified romfile\n",
                         vdev->vbasedev.name);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified non zero value for rombar\n",
                         vdev->vbasedev.name);
        } else {
            error_printf("Warning : Rom loading for device at %s has been disabled due to system instability issues. Specify rombar=1 or romfile to force\n",
                         vdev->vbasedev.name);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * PCI config space
 */
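/*
 * Config reads merge two sources: bytes marked emulated come from QEMU's
 * config space cache, everything else is read from the device via VFIO.
 */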
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

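/*
 * Config writes go to the device first (the host kernel filters what the
 * device may not accept); MSI/MSI-X enable transitions are then caught
 * here to switch the interrupt mode.
 */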
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
                != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

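/*
 * Mirror the physical MSI capability (vector count, 64-bit address and
 * per-vector mask support) into the emulated config space.
 */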
static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_prepend(&err, "vfio: msi_init failed: ");
        error_report_err(err);
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR. If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else {
            error_report("vfio: Hardware reports invalid configuration, "
                         "MSIX PBA outside of specified BAR");
            g_free(msix);
            return -EINVAL;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    return 0;
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos)
{
    int ret;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].region.mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->bars[vdev->msix->pba_bar].region.mem);
        g_free(vdev->msix->pending);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

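/*
 * Register a guest BAR matching the physical BAR's type (I/O vs memory,
 * 32 vs 64-bit) and back it with the mmap'd region where possible.
 */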
static void vfio_bar_setup(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    if (vfio_region_mmap(&bar->region)) {
        error_report("Failed to mmap %s BAR %d. Performance may be slow",
                     vdev->vbasedev.name, nr);
    }

    pci_register_bar(&vdev->pdev, nr, type, bar->region.mem);
}

static void vfio_bars_setup(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_setup(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
1461 1462
        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}

/*
 * General setup
 */
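/*
 * The maximum usable size of a standard capability is bounded by the
 * next-highest capability offset in the chain.
 */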
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}


static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
        tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        PCIBus *bus = vdev->pdev.bus;
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason being that some drivers
         * simply assume that it's there, for example tg3.  However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration.  Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
1588
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = bridge->bus;
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}

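/* Record whether the device advertises PCIe Function Level Reset */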
static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore, to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate the size above, as the cached
     * config space will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %s Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->vbasedev.name,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}

static int vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint32_t header;
    uint16_t cap_id, next, size;
    uint8_t cap_ver;
    uint8_t *config;

    /* Only add extended caps if we have them and the guest can see them */
    if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) ||
        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
        return 0;
    }

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so we
     * can drop anything other than the head of the chain simply by modifying
     * the previous next pointer.  For the head of the chain, we can modify the
     * capability ID to something that cannot match a valid capability.  ID
     * 0 is reserved for this since absence of capabilities is indicated by
     * 0 for the ID, version, AND next pointer.  However, pcie_add_capability()
     * uses ID 0 as reserved for list management and will incorrectly match and
     * assert if we attempt to pre-load the head of the chain with this ID.
     * Use ID 0xFFFF temporarily since it also seems to be reserved in
     * part for identifying absence of capabilities in a root complex register
     * block.  If the ID still exists after adding capabilities, switch back to
     * zero.  We'll mark this entire first dword as emulated for this purpose.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to their
         * actual size, use this as the default when it's something we don't
         * recognize. Since QEMU doesn't actually handle many of the config
         * accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }

    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
    return 0;
}

static int vfio_add_capabilities(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
    if (ret) {
        return ret;
    }

    return vfio_add_ext_cap(vdev);
}

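/*
 * Quiesce the device before reset: disable our interrupts, wake the
 * device to D0, and stop DMA by clearing the I/O, memory, and bus
 * master enables.
 */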
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in a known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}

static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    vfio_intx_enable(vdev);
}

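/* Match a host address against a device name of the form dddd:bb:dd.f */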
static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}

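/*
 * Perform a bus/slot hot reset: query the kernel for the set of affected
 * devices, verify that we own every group involved, then hand the group
 * fds to VFIO_DEVICE_PCI_HOT_RESET.  With 'single' set, bail out rather
 * than reset if any other in-use device would be affected.
 */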
static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    vfio_pci_pre_reset(vdev);
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    vfio_pci_post_reset(vdev);
    g_free(info);

    return ret;
}

/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use devices case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vfio_pci_hot_reset(vdev, false);
}

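/*
 * Flag the device as needing a hot reset at system reset time when a
 * device-level reset either isn't available or would only amount to a
 * PM reset.
 */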
static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}

static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_intx_eoi,
};

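/*
 * Validate the VGA region reported by the kernel and register the legacy
 * VGA ranges (memory at 0xa0000, I/O at 0x3b0 and 0x3c0) with QEMU.
 */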
int vfio_populate_vga(VFIOPCIDevice *vdev)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    int ret;

    ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
    if (ret) {
        return ret;
    }

    if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
        !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
        reg_info->size < 0xbffff + 1) {
        error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                     (unsigned long)reg_info->flags,
                     (unsigned long)reg_info->size);
        g_free(reg_info);
        return -EINVAL;
    }

    vdev->vga = g_new0(VFIOVGA, 1);

    vdev->vga->fd_offset = reg_info->offset;
    vdev->vga->fd = vdev->vbasedev.fd;

    g_free(reg_info);

    vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
    vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_MEM],
                          "vfio-vga-mmio@0xa0000",
                          QEMU_PCI_VGA_MEM_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
                          "vfio-vga-io@0x3b0",
                          QEMU_PCI_VGA_IO_LO_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                          "vfio-vga-io@0x3c0",
                          QEMU_PCI_VGA_IO_HI_SIZE);

    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);

    return 0;
}

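/*
 * Sanity check the device and pull its basic layout from the kernel:
 * the BAR regions, the config space region, optionally the VGA region,
 * and the error reporting interrupt.
 */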
static int vfio_populate_device(VFIOPCIDevice *vdev)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret = -1;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        goto error;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     vbasedev->num_regions);
        goto error;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);

        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                &vdev->bars[i].region, i, name);
        g_free(name);

        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        QLIST_INIT(&vdev->bars[i].quirks);
    }

    ret = vfio_get_region_info(vbasedev,
                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info->size,
                                      (unsigned long)reg_info->offset,
                                      (unsigned long)reg_info->flags);

    vdev->config_size = reg_info->size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info->offset;

    g_free(reg_info);

    if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
        ret = vfio_populate_vga(vdev);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure();
        ret = 0;
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        error_report("vfio: %s "
                     "Could not enable error recovery for the device",
                     vbasedev->name);
    }

error:
    return ret;
}

static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);
    g_free(vdev->msix);

    vfio_put_base_device(&vdev->vbasedev);
}

static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%s) Unrecoverable error detected.  Please collect any data"
                 " possible and then kill the guest", __func__,
                 vdev->vbasedev.name);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}

/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    int ret;
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to set up error notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
    g_free(irq_set);
}

static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    int ret;

    if (!vdev->pci_aer) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to de-assign error fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}

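/*
 * The kernel signals the request notifier when it wants the device back,
 * e.g. when the host-side driver is being unbound; we respond by hot
 * unplugging the device from the guest.
 */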
static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(&vdev->pdev.qdev, NULL);
}

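/*
 * If the kernel exposes a device-request IRQ, connect it to an eventfd
 * handled by QEMU so that host-initiated unplug requests reach the guest.
 */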
static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        error_report("vfio: Failed to set up device request notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }

    g_free(irq_set);
}

static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->req_enabled) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        error_report("vfio: Failed to de-assign device request fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}

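/*
 * Device realization: resolve the sysfs path and IOMMU group, open the
 * device through VFIO, mirror its config space, then build the emulated
 * config bits, BARs, capabilities, quirks, and interrupt plumbing.
 */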
static int vfio_initfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int i, ret;

    if (!vdev->vbasedev.sysfsdev) {
        vdev->vbasedev.sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }

    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
        error_report("vfio: error: no such host device: %s",
                     vdev->vbasedev.sysfsdev);
        return -errno;
    }

    vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
    vdev->vbasedev.ops = &vfio_pci_ops;
    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;

    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        error_report("vfio: error no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", group_path);
        return -errno;
    }

    trace_vfio_initfn(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_report("vfio: error: device %s is already attached",
                         vdev->vbasedev.name);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev);
    if (ret) {
        error_report("vfio: failed to get device %s", vdev->vbasedev.name);
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vdev);
    if (ret) {
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        return ret;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_report("vfio: Invalid PCI vendor ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_report("vfio: Invalid PCI device ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_report("vfio: Invalid PCI subsystem vendor ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_report("vfio: Invalid PCI subsystem device ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);


    vfio_pci_size_rom(vdev);

    ret = vfio_msix_early_setup(vdev);
    if (ret) {
        return ret;
    }

    vfio_bars_setup(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    if (vdev->vga) {
        vfio_vga_quirk_setup(vdev);
    }

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_setup(vdev, i);
    }

    if (!vdev->igd_opregion &&
        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
        struct vfio_region_info *opregion;

        if (vdev->pdev.qdev.hotplugged) {
            error_report("Cannot support IGD OpRegion feature on hotplugged "
                         "device %s", vdev->vbasedev.name);
            ret = -EINVAL;
            goto out_teardown;
        }

        ret = vfio_get_dev_region_info(&vdev->vbasedev,
                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
        if (ret) {
            error_report("Device %s does not support requested IGD OpRegion "
                         "feature", vdev->vbasedev.name);
            goto out_teardown;
        }

        ret = vfio_pci_igd_opregion_init(vdev, opregion);
        g_free(opregion);
        if (ret) {
            error_report("Device %s IGD OpRegion initialization failed",
                         vdev->vbasedev.name);
            goto out_teardown;
        }
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
        ret = vfio_intx_enable(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
    return ret;
}

static void vfio_instance_finalize(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_bars_finalize(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    /*
     * XXX Leaking igd_opregion is not an oversight, we can't remove the
     * fw_cfg entry therefore leaking this allocation seems like the safest
     * option.
     *
     * g_free(vdev->igd_opregion);
     */
    vfio_put_device(vdev);
    vfio_put_group(group);
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
}

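/*
 * Reset the device, escalating through the available mechanisms: a
 * device-specific quirk reset, the kernel's per-device reset (preferring
 * FLR), a single-device bus hot reset, and finally a PM reset.
 */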
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}

static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev, NULL);
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}

static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)