/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"

#define MSIX_CAP_LENGTH 12

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                       qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

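/*
 * INTx eventfd handler: latch the interrupt as pending, assert the
 * virtual IRQ to the guest, and disable BAR mmaps until the interrupt
 * is serviced (see the comment above vfio_intx_mmap_enable()).
 */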
static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                       qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

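/*
 * End-of-interrupt: de-assert the virtual IRQ and unmask the physical
 * INTx line so that the device may trigger again.
 */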
static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

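/*
 * INTx acceleration: hand the trigger eventfd to KVM as a resample
 * irqfd so that injection and unmask are handled in the kernel,
 * bypassing QEMU on the interrupt fast path.
 */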
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to clean up the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

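/*
 * Called when the guest's INTx routing may have changed (for example a
 * chipset remap of the PCI link): drop any KVM bypass for the old route
 * and re-establish it for the new one.
 */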
static void vfio_intx_update(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route.irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

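/*
 * Program every vector's signaling eventfd with a single
 * VFIO_DEVICE_SET_IRQS call; an fd of -1 leaves that vector disabled.
 */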
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when set up.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

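/*
 * Try to route a vector's interrupt through the KVM irqchip via an
 * irqfd.  On any failure the vector silently stays on the slower
 * userspace path signaled through vector->interrupt.
 */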
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  MSIMessage *msg, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi) || !msg) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, *msg, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                       NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
}

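/*
 * Vector-use notifier: called when the guest enables or unmasks an
 * MSI-X vector.  Sets up the vector's eventfd, prefers a KVM irqfd
 * route, and reprograms VFIO signaling for that vector.
 */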
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        vfio_add_kvm_msi_virq(vdev, vector, msg, true);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shutdown and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}

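/*
 * Switch the physical device into MSI-X mode.  Individual vectors are
 * then enabled lazily through the vector-use notifiers registered
 * below.
 */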
static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

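/*
 * Enable plain MSI.  If the host grants fewer vectors than requested,
 * tear everything down and retry with the granted count.
 */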
static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg = msi_get_message(&vdev->pdev, i);

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, &msg, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev);
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

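/*
 * Resynchronize KVM irqchip routes after the guest rewrites the MSI
 * address/data registers of vectors that are already in use.
 */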
static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
809
                    "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                    "(check dmesg).\nSkip option ROM probe with rombar=0, "
                    "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device; if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

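/*
 * Size the ROM BAR by writing all ones to the physical register and
 * reading back the size mask, then register a ROM MemoryRegion whose
 * contents are loaded lazily on first guest read.
 */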
static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char name[32];
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified romfile\n",
                         vdev->vbasedev.name);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified non zero value for rombar\n",
                         vdev->vbasedev.name);
        } else {
            error_printf("Warning : Rom loading for device at %s has been disabled due to system instability issues. Specify rombar=1 or romfile to force\n",
                         vdev->vbasedev.name);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    snprintf(name, sizeof(name), "vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * PCI config space
 */
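/*
 * Config reads merge two sources: bits flagged in emulated_config_bits
 * come from QEMU's emulated config space, everything else is read from
 * the physical device through the VFIO config region.
 */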
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

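/*
 * Config writes go to the device first (VFIO filters what the guest
 * may change), then to QEMU's emulation, so that MSI/MSI-X enable-bit
 * transitions can switch the interrupt mode accordingly.
 */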
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
                != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then clean up by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

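/*
 * Read the physical MSI capability and instantiate QEMU's MSI
 * emulation with a matching vector count and feature flags.
 */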
static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * We expect to find a single mmap covering the whole BAR; anything else
     * means it's either unsupported or already set up.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to set up MSI-X we need a
 * MemoryRegion for the BAR.  In order to set up the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR. If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else {
            error_report("vfio: Hardware reports invalid configuration, "
                         "MSIX PBA outside of specified BAR");
            g_free(msix);
            return -EINVAL;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    return 0;
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos)
{
    int ret;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].region.mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->bars[vdev->msix->pba_bar].region.mem);
        g_free(vdev->msix->pending);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

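/*
 * Register BAR @nr with QEMU: probe the physical BAR type (I/O vs.
 * memory, 64-bit), mmap the region when possible, and apply any
 * device-specific quirks.
 */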
static void vfio_bar_setup(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    if (vfio_region_mmap(&bar->region)) {
        error_report("Failed to mmap %s BAR %d. Performance may be slow",
                     vdev->vbasedev.name, nr);
    }

    vfio_bar_quirk_setup(vdev, nr);

    pci_register_bar(&vdev->pdev, nr, type, bar->region.mem);
}

static void vfio_bars_setup(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_setup(vdev, i);
    }

    if (vdev->vga) {
        memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga->region[QEMU_PCI_VGA_MEM],
                              "vfio-vga-mmio@0xa0000",
                              QEMU_PCI_VGA_MEM_SIZE);
        memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
                              "vfio-vga-io@0x3b0",
                              QEMU_PCI_VGA_IO_LO_SIZE);
        memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                              "vfio-vga-io@0x3c0",
                              QEMU_PCI_VGA_IO_HI_SIZE);

        pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                         &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                         &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
        vfio_vga_quirk_setup(vdev);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}

/*
 * General setup
 */
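/*
 * Return the size of the standard capability at @pos, computed as the
 * distance to the next capability in the list (or to the end of
 * standard config space).
 */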
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

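/*
 * Emulate the bits in @mask: set their value in the virtual config
 * space, make them read-only to the guest via wmask, and flag them in
 * emulated_config_bits so reads are serviced by QEMU.
 */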
static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        PCIBus *bus = vdev->pdev.bus;
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason being that some drivers
         * simply assume that it's there, for example tg3.  However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration.  Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
1596
         */
1597 1598 1599 1600 1601 1602 1603 1604 1605
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = bridge->bus;
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control goes away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control goes away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}

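/*
 * The three probes below record which reset mechanisms the physical
 * device advertises: FLR via the PCIe Device Capabilities register, a
 * device-resetting D3hot->D0 transition (PM capability with NoSoftRst
 * unset), and FLR via the Advanced Features capability on conventional
 * PCI.  vfio_pci_reset() later prefers FLR over a PM reset.
 */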
static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %s Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->vbasedev.name,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}

static int vfio_add_capabilities(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}

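/*
 * vfio_pci_pre_reset() and vfio_pci_post_reset() bracket every reset
 * path below: interrupts are torn down and the device is forced to D0
 * with bus mastering disabled before the reset, and INTx is re-enabled
 * afterwards.
 */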
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in a known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}

static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    vfio_intx_enable(vdev);
}

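/*
 * Compare a PCI host address against a vbasedev name of the form
 * "dddd:bb:dd.f", as derived from the sysfs device directory name.
 */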
static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}

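/*
 * A hot reset may affect every device on the same bus, so before issuing
 * VFIO_DEVICE_PCI_HOT_RESET we must prove to the kernel that we own every
 * affected group: query the dependent device list, quiesce any of our
 * devices on it, then pass one fd per affected group with the reset ioctl.
 */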
static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    vfio_pci_pre_reset(vdev);
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

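    /*
     * Probe with an undersized buffer first; the kernel is expected to
     * fail this call with ENOSPC while filling in info->count, the number
     * of dependent devices, so the array can be reallocated to fit.
     */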
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    vfio_pci_post_reset(vdev);
    g_free(info);

    return ret;
}

/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use device case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vfio_pci_hot_reset(vdev, false);
}

static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}

static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_intx_eoi,
};

static int vfio_populate_device(VFIOPCIDevice *vdev)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret = -1;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        goto error;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     vbasedev->num_regions);
        goto error;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);

        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                &vdev->bars[i].region, i, name);
        g_free(name);

        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        QLIST_INIT(&vdev->bars[i].quirks);
    }

    ret = vfio_get_region_info(vbasedev,
                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info->size,
                                      (unsigned long)reg_info->offset,
                                      (unsigned long)reg_info->flags);

    vdev->config_size = reg_info->size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info->offset;

    g_free(reg_info);

    if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
        vbasedev->num_regions > VFIO_PCI_VGA_REGION_INDEX) {
        ret = vfio_get_region_info(vbasedev,
                                   VFIO_PCI_VGA_REGION_INDEX, &reg_info);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }

        if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
            !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
            reg_info->size < 0xbffff + 1) {
            error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                         (unsigned long)reg_info->flags,
                         (unsigned long)reg_info->size);
            g_free(reg_info);
            ret = -1;
            goto error;
        }

        vdev->vga = g_new0(VFIOVGA, 1);

        vdev->vga->fd_offset = reg_info->offset;
        vdev->vga->fd = vdev->vbasedev.fd;

        g_free(reg_info);

        vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
        vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
        QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);

        vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
        vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
        QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);

        vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
        vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
        QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
    }

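    /*
     * Probe for the error IRQ to decide whether the kernel can signal
     * AER errors for this device; absence is tolerated since old kernels
     * and legacy PCI devices don't provide it.
     */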
    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure();
        ret = 0;
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        error_report("vfio: %s "
                     "Could not enable error recovery for the device",
                     vbasedev->name);
    }

error:
    return ret;
}

static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);
    g_free(vdev->msix);

    vfio_put_base_device(&vdev->vbasedev);
}

static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%s) Unrecoverable error detected. "
                 "Please collect any data possible and then kill the guest",
                 __func__, vdev->vbasedev.name);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}

/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    int ret;
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

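    /*
     * VFIO_DEVICE_SET_IRQS takes a variable-length struct: a header
     * describing the index/start/count of the target IRQ followed by the
     * payload, here a single eventfd that the kernel signals when the
     * host detects an error on the device.
     */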
    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to set up error notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
    g_free(irq_set);
}

static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    int ret;

    if (!vdev->pci_aer) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to de-assign error fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}

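/*
 * The request notifier works like the error notifier: the kernel signals
 * an eventfd (VFIO_PCI_REQ_IRQ_INDEX) when the host wants the device back,
 * and the handler responds by initiating a graceful guest unplug.
 */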
static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(&vdev->pdev.qdev, NULL);
}

static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        error_report("vfio: Failed to set up device request notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }

    g_free(irq_set);
}

static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->req_enabled) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        error_report("vfio: Failed to de-assign device request fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}

static int vfio_initfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

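    /*
     * If no sysfsdev path was given, derive one from the legacy host
     * address property; e.g. host=0000:06:0d.0 (an illustrative address)
     * becomes /sys/bus/pci/devices/0000:06:0d.0.
     */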
    if (!vdev->vbasedev.sysfsdev) {
        vdev->vbasedev.sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }

    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
        error_report("vfio: error: no such host device: %s",
                     vdev->vbasedev.sysfsdev);
        return -errno;
    }

    vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
    vdev->vbasedev.ops = &vfio_pci_ops;
    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;

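    /*
     * The IOMMU group is discovered by resolving the device's iommu_group
     * sysfs symlink, whose target ends in the numeric group id (for
     * example, a link ending in .../iommu_groups/26 yields group 26).
     */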
    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        error_report("vfio: error: no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", group_path);
        return -errno;
    }

    trace_vfio_initfn(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_report("vfio: error: device %s is already attached",
                         vdev->vbasedev.name);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev);
    if (ret) {
        error_report("vfio: failed to get device %s", vdev->vbasedev.name);
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vdev);
    if (ret) {
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        return ret;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_report("vfio: Invalid PCI vendor ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_report("vfio: Invalid PCI device ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_report("vfio: Invalid PCI subsystem vendor ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_report("vfio: Invalid PCI subsystem device ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    ret = vfio_msix_early_setup(vdev);
    if (ret) {
        return ret;
    }

    vfio_bars_setup(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
        ret = vfio_intx_enable(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
    return ret;
}

static void vfio_instance_finalize(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_bars_finalize(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    vfio_put_device(vdev);
    vfio_put_group(group);
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
}

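/*
 * Reset escalation order: a device-specific quirk resetfn first, then
 * VFIO_DEVICE_RESET when FLR is available (or no PM reset exists), then
 * a bus hot reset, and finally VFIO_DEVICE_RESET again for a PM reset
 * as the last resort.
 */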
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}

static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev, NULL);
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}

static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)