/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"

#define MSIX_CAP_LENGTH 12

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                       qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                       qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;
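
    /*
     * Illustrative note (not upstream commentary): VFIO_DEVICE_SET_IRQS
     * takes a variable-length struct vfio_irq_set from <linux/vfio.h>;
     * with VFIO_IRQ_SET_DATA_EVENTFD the data[] tail carries one int32_t
     * eventfd per vector:
     *
     *   | argsz | flags | index | start | count | fd[0] fd[1] ... |
     */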

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to setup INTx unmask fd");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;
    Error *err = NULL;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route.irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz, retval = 0;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    Error *err = NULL;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to setup INTx fd");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        retval = -errno;
        goto cleanup;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

cleanup:
    g_free(irq_set);

    return retval;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                       NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
        }
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shutdown and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

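        /*
         * Illustrative note: a positive return reports how many vectors the
         * host can actually provide, e.g. asking for eight on hardware that
         * supports only four comes back as four, and we retry below with
         * that count.
         */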
        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, so check through the rest and release them ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                    "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                    "(check dmesg).\nSkip option ROM probe with rombar=0, "
                    "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
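
            /*
             * Assumed background: byte 6 of the ROM image header is
             * reserved, so it can absorb the checksum fix-up.  Zero it,
             * sum the whole image, then store the negated sum there so
             * that all bytes sum to zero (mod 256), as the option ROM
             * checksum requires.
             */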
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified romfile\n",
                         vdev->vbasedev.name);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
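
    /*
     * Worked example of the sizing probe above: with all 1s written, the
     * bits the device hardwires to zero encode the size, so a masked
     * read-back of 0xfffe0000 gives ~0xfffe0000 + 1 = 0x20000 (128 KiB).
     */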

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified non zero value for rombar\n",
                         vdev->vbasedev.name);
        } else {
            error_printf("Warning : Rom loading for device at %s has been disabled due to system instability issues. Specify rombar=1 or romfile to force\n",
                         vdev->vbasedev.name);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to
 * page size if the BAR occupies an exclusive page on the host, so that
 * the BAR can be mmap'd into the guest.  The sub-page BAR may not occupy
 * an exclusive page in the guest, however, so the expanded region gets
 * priority zero in case it overlaps other BARs sharing the same guest
 * page.  The original size is also restored whenever the guest moves the
 * BAR to a base address that is no longer page aligned.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask)) {
        size = qemu_real_host_page_size;
    }

    memory_region_transaction_begin();

    memory_region_set_size(mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != region->size && memory_region_is_mapped(mr)) {
        memory_region_del_subregion(r->address_space, mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, mr, 0);
    }

    memory_region_transaction_commit();
}

/*
 * PCI config space
 */
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
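    /*
     * For example, with emu_bits 0x0000ffff the low 16 bits of val come
     * from QEMU's emulated config space and the high 16 bits from the
     * physical device.
     */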

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
                != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
        range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                pdev->io_regions[bar].size > 0 &&
                pdev->io_regions[bar].size < qemu_real_host_page_size) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
1226 1227 1228
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_prepend(&err, "msi_init failed: ");
        error_propagate(errp, err);
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
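
    /*
     * Illustrative decode: a table value of 0x2004 means BIR 4 with table
     * offset 0x2000, and a QSIZE field of 0x3f (N-1 encoded) yields 64
     * entries.
     */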

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR. If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].region.mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            error_report_err(err);
            return 0;
        }

        error_propagate(errp, err);
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->bars[vdev->msix->pba_bar].region.mem);
        g_free(vdev->msix->pending);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

static void vfio_bar_setup(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    if (vfio_region_mmap(&bar->region)) {
        error_report("Failed to mmap %s BAR %d. Performance may be slow",
                     vdev->vbasedev.name, nr);
    }

    pci_register_bar(&vdev->pdev, nr, type, bar->region.mem);
}

static void vfio_bars_setup(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_setup(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
1577 1578 1579
    }
}

/*
 * General setup
 */
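
/*
 * Illustrative walk: with capabilities chained 0x40 -> 0x60 -> 0x50, calling
 * this for pos 0x50 finds 0x60 as the nearest capability above it and
 * returns 0x10; for the highest-placed capability the bound is the end of
 * standard config space, since next starts at PCI_CONFIG_SPACE_SIZE.
 */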
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
        tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}
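
/*
 * Orientation sketch (a paraphrase, not normative): pdev.config holds what
 * the guest reads, pdev.wmask marks the bits the guest may write, and
 * emulated_config_bits selects the bits served from QEMU's copy rather than
 * read back from the device.  The helpers below set all three consistently
 * for a given value/mask pair.
 */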

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
                               Error **errp)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
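    /* the Device/Port Type field sits in bits 7:4 of the capability flags */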
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_setg(errp, "assignment of PCIe type 0x%x "
                   "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        PCIBus *bus = vdev->pdev.bus;
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason being that some drivers
         * simply assume that it's there, for example tg3.  However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration.  Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = bridge->bus;
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    /*
     * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
     * (Niantic errata #35) causing Windows to error with a Code 10 for the
     * device on Q35.  Fix up any such devices to report version 1.  If we
     * were to remove the capability entirely the guest would lose extended
     * config space.
     */
    if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
        vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                               1, PCI_EXP_FLAGS_VERS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
                             errp);
    if (pos < 0) {
        return pos;
    }

    vdev->pdev.exp.exp_cap = pos;

    return pos;
}

static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
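    /* e.g. a physical chain 0x40 -> 0x50 -> 0x60 is re-added 0x60 first */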
    if (next) {
        ret = vfio_add_std_cap(vdev, next, errp);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    }

    if (ret < 0) {
        error_prepend(errp,
                      "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
                      cap_id, size, pos);
        return ret;
    }

    return 0;
}

static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint32_t header;
    uint16_t cap_id, next, size;
    uint8_t cap_ver;
    uint8_t *config;

    /* Only add extended caps if we have them and the guest can see them */
    if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) ||
        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
        return;
    }

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so we
     * can drop anything other than the head of the chain simply by modifying
     * the previous next pointer.  Seed the head of the chain here such that
     * we can simply skip any capabilities we want to drop below, regardless
     * of their position in the chain.  If this stub capability still exists
     * after we add the capabilities we want to expose, update the capability
     * ID to zero.  Note that we cannot seed with the capability header being
     * zero as this conflicts with definition of an absent capability chain
     * and prevents capabilities beyond the head of the list from being added.
     * By replacing the dummy capability ID with zero after walking the device
     * chain, we also transparently mark extended capabilities as absent if
     * no capabilities were added.  Note that the PCIe spec defines an absence
     * of extended capabilities to be determined by a value of zero for the
     * capability ID, version, AND next pointer.  A non-zero next pointer
     * should be sufficient to indicate additional capabilities are present,
     * which will occur if we call pcie_add_capability() below.  The entire
     * first dword is emulated to support this.
     *
     * NB. The kernel side does similar masking, so be prepared that our
     * view of the device may also contain a capability ID zero in the head
     * of the chain.  Skip it for the same reason that we cannot seed the
     * chain with a zero capability.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to their
         * actual size, use this as the default when it's something we don't
         * recognize. Since QEMU doesn't actually handle many of the config
         * accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case 0: /* kernel masked capability */
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }

    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
    return;
}

static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
    if (ret) {
        return ret;
    }

    vfio_add_ext_cap(vdev);
    return 0;
}

static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}

static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int nr;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
    }
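
    /*
     * Best effort: zero the physical BAR registers (everything but the ROM
     * slot) so stale addresses don't linger after the reset; failures here
     * are only logged.
     */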

    for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
        off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
        uint32_t val = 0;
        uint32_t len = sizeof(val);

        if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
            error_report("%s(%s) reset bar %d failed: %m", __func__,
                         vdev->vbasedev.name, nr);
        }
    }
}

static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
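    /* "dddd:bb:dd.f" is 12 characters, so tmp needs exactly 13 bytes */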
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}

static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}

/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use devices case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    return vfio_pci_hot_reset(vdev, false);
}

static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}

static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_intx_eoi,
};

int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    int ret;

    ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "failed getting region info for VGA region index %d",
                         VFIO_PCI_VGA_REGION_INDEX);
        return ret;
    }
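
    /* expect a read/write region spanning legacy VGA space up to 0xbffff */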

    if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
        !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
        reg_info->size < 0xbffff + 1) {
        error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
                   (unsigned long)reg_info->flags,
                   (unsigned long)reg_info->size);
        g_free(reg_info);
        return -EINVAL;
    }

    vdev->vga = g_new0(VFIOVGA, 1);

    vdev->vga->fd_offset = reg_info->offset;
    vdev->vga->fd = vdev->vbasedev.fd;

    g_free(reg_info);

    vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
    vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_MEM],
                          "vfio-vga-mmio@0xa0000",
                          QEMU_PCI_VGA_MEM_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
                          "vfio-vga-io@0x3b0",
                          QEMU_PCI_VGA_IO_LO_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                          "vfio-vga-io@0x3c0",
                          QEMU_PCI_VGA_IO_HI_SIZE);

    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);

    return 0;
}

static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret = -1;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_setg(errp, "this isn't a PCI device");
        return;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "unexpected number of io regions %u",
                   vbasedev->num_regions);
        return;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
        return;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);

        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                &vdev->bars[i].region, i, name);
        g_free(name);

        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            return;
        }

        QLIST_INIT(&vdev->bars[i].quirks);
    }

    ret = vfio_get_region_info(vbasedev,
                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to get config info");
        return;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info->size,
                                      (unsigned long)reg_info->offset,
                                      (unsigned long)reg_info->flags);

    vdev->config_size = reg_info->size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info->offset;

    g_free(reg_info);

    if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
        ret = vfio_populate_vga(vdev, errp);
        if (ret) {
            error_append_hint(errp, "device does not support "
                              "requested feature x-vga\n");
            return;
        }
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure();
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        error_report(WARN_PREFIX
                     "Could not enable error recovery for the device",
                     vbasedev->name);
    }
}

static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);
    g_free(vdev->msix);

    vfio_put_base_device(&vdev->vbasedev);
}

static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}

/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
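/*
 * Sketch of the VFIO_DEVICE_SET_IRQS usage here and in the request notifier
 * below: a vfio_irq_set header with a single eventfd payload
 * (VFIO_IRQ_SET_DATA_EVENTFD) arms the trigger; the unregister paths pass an
 * fd of -1 to disarm it again.
 */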
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    int ret;
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to set up error notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
    g_free(irq_set);
}

static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    int ret;

    if (!vdev->pci_aer) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to de-assign error fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}

static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;
    Error *err = NULL;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(&vdev->pdev.qdev, &err);
    if (err) {
        error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        error_report("vfio: Failed to set up device request notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }

    g_free(irq_set);
}

static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->req_enabled) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        error_report("vfio: Failed to de-assign device request fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}

static void vfio_realize(PCIDevice *pdev, Error **errp)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char *tmp, group_path[PATH_MAX], *group_name;
    Error *err = NULL;
    ssize_t len;
    struct stat st;
    int groupid;
    int i, ret;

    if (!vdev->vbasedev.sysfsdev) {
        if (!(~vdev->host.domain || ~vdev->host.bus ||
              ~vdev->host.slot || ~vdev->host.function)) {
            error_setg(errp, "No provided host device");
            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
            return;
        }
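        /* e.g. host=0000:01:10.0 maps to /sys/bus/pci/devices/0000:01:10.0 */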
        vdev->vbasedev.sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }

    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno, "no such host device");
        error_prepend(errp, ERR_PREFIX, vdev->vbasedev.sysfsdev);
        return;
    }

    vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
    vdev->vbasedev.ops = &vfio_pci_ops;
    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    vdev->vbasedev.dev = &vdev->pdev.qdev;
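
    /*
     * The device's iommu_group sysfs entry is a symlink into
     * /sys/kernel/iommu_groups/<n>; its basename is the group number.
     */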

    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
                         "no iommu_group found");
        goto error;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        goto error;
    }

    trace_vfio_realize(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
    if (!group) {
        goto error;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            goto error;
        }
    }

    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        goto error;
    }

    vfio_populate_device(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_setg_errno(errp, -ret, "failed to read device config space");
        goto error;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_setg(errp, "invalid PCI vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_setg(errp, "invalid PCI device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    vfio_msix_early_setup(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    vfio_bars_setup(vdev);

    ret = vfio_add_capabilities(vdev, errp);
    if (ret) {
        goto out_teardown;
    }

    if (vdev->vga) {
        vfio_vga_quirk_setup(vdev);
    }

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_setup(vdev, i);
    }

    if (!vdev->igd_opregion &&
        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
        struct vfio_region_info *opregion;

        if (vdev->pdev.qdev.hotplugged) {
            error_setg(errp,
                       "cannot support IGD OpRegion feature on hotplugged "
                       "device");
            goto out_teardown;
        }

        ret = vfio_get_dev_region_info(&vdev->vbasedev,
                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "does not support requested IGD OpRegion feature");
            goto out_teardown;
        }

        ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
        g_free(opregion);
        if (ret) {
            goto out_teardown;
        }
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
        ret = vfio_intx_enable(vdev, errp);
        if (ret) {
            goto out_teardown;
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
error:
    error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name);
}

static void vfio_instance_finalize(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_bars_finalize(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    /*
     * XXX Leaking igd_opregion is not an oversight, we can't remove the
     * fw_cfg entry therefore leaking this allocation seems like the safest
     * option.
     *
     * g_free(vdev->igd_opregion);
     */
    vfio_put_device(vdev);
    vfio_put_group(group);
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
}
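
/*
 * Reset order, as implemented below: device-specific resetfn quirk first,
 * then VFIO_DEVICE_RESET where FLR looks usable, then a hot (bus) reset,
 * and finally a PM reset as the last resort.
 */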

static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}

static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev, NULL);
    vdev->host.domain = ~0U;
    vdev->host.bus = ~0U;
    vdev->host.slot = ~0U;
    vdev->host.function = ~0U;
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->realize = vfio_realize;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}

static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)