/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-blk.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-input.h"
#include "hw/pci/pci.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "sysemu/block-backend.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}
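
/*
 * Extra, virtio-1 ("modern") proxy state that the legacy save/load handlers
 * above do not cover: the feature select registers and the per-queue values
 * latched through the common config structure (queue size, enable flag and
 * the lo/hi halves of the desc/avail/used ring addresses).
 */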

static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq,
                                               QEMUFile *f)
{
    vq->num = qemu_get_be16(f);
    vq->enabled = qemu_get_be16(f);
    vq->desc[0] = qemu_get_be32(f);
    vq->desc[1] = qemu_get_be32(f);
    vq->avail[0] = qemu_get_be32(f);
    vq->avail[1] = qemu_get_be32(f);
    vq->used[0] = qemu_get_be32(f);
    vq->used[1] = qemu_get_be32(f);
}

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIOPCIProxy *proxy = pv;
    int i;

    proxy->dfselect = qemu_get_be32(f);
    proxy->gfselect = qemu_get_be32(f);
    proxy->guest_features[0] = qemu_get_be32(f);
    proxy->guest_features[1] = qemu_get_be32(f);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        virtio_pci_load_modern_queue_state(&proxy->vqs[i], f);
    }

    return 0;
}

static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
                                               QEMUFile *f)
{
    qemu_put_be16(f, vq->num);
    qemu_put_be16(f, vq->enabled);
    qemu_put_be32(f, vq->desc[0]);
    qemu_put_be32(f, vq->desc[1]);
    qemu_put_be32(f, vq->avail[0]);
    qemu_put_be32(f, vq->avail[1]);
    qemu_put_be32(f, vq->used[0]);
    qemu_put_be32(f, vq->used[1]);
}

static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIOPCIProxy *proxy = pv;
    int i;

    qemu_put_be32(f, proxy->dfselect);
    qemu_put_be32(f, proxy->gfselect);
    qemu_put_be32(f, proxy->guest_features[0]);
    qemu_put_be32(f, proxy->guest_features[1]);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
    }
}

static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
    .name = "virtqueue_state",
    .get = get_virtio_pci_modern_state,
    .put = put_virtio_pci_modern_state,
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "modern_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_virtio_pci_modern_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
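
/*
 * The modern state travels as a vmstate subsection: it is only emitted when
 * the proxy is operating in modern mode, so migration of legacy-only devices
 * to older QEMU versions keeps working.
 */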

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state,
        NULL
    }
};

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}
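
/*
 * ioeventfd support: when enabled, guest queue notifications are handled via
 * an event notifier bound to the notify address below, so they can be picked
 * up by the main loop or an iothread instead of trapping into the device
 * emulation on the vcpu thread.
 */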

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
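
/*
 * Legacy (virtio 0.9.5) I/O port interface: a fixed register block at the
 * start of the legacy BAR (features, queue PFN/size/select, notify, status,
 * ISR and, with MSI-X, the vector registers), followed by the device-specific
 * config space at VIRTIO_PCI_CONFIG_SIZE().
 */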

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = atomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(AddressSpace *as, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint32_t val;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    case 2:
        val = pci_get_word(buf);
        address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    case 4:
        val = pci_get_long(buf);
        address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void
virtio_address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    uint32_t val;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_byte(buf, val);
        break;
    case 2:
        val = address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_word(buf, val);
        break;
    case 4:
        val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}
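
/*
 * PCI config space accessors.  Besides the standard header handling, a modern
 * proxy exposes a VIRTIO_PCI_CAP_PCI_CFG vendor capability: accesses to its
 * pci_cfg_data window are forwarded into the modern BAR via the
 * virtio_address_space_*() helpers above, giving guests a config-space-only
 * path to the device registers as described by the virtio 1.0 spec.
 */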

static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(&proxy->modern_as, off,
                                       cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(&proxy->modern_as, off,
                                      cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}
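
/*
 * MSI-X vector <-> KVM irqfd plumbing.  Each vector in use gets a KVM MSI
 * route (refcounted through VirtIOIRQFD.users); when the backend supports
 * guest notifier masking, the per-queue guest notifier is attached to that
 * route with an irqfd so interrupts are injected without a userspace exit.
 */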

static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                 unsigned int queue_no,
                                 unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                             unsigned int queue_no,
                                             unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}
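
/*
 * The callbacks below are registered with msix_set_vector_notifiers(): they
 * walk every virtqueue bound to a vector on mask/unmask, and vector_poll
 * reports interrupts that became pending while the vector was masked.
 */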

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

#ifdef CONFIG_VIRTFS
static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_9p_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);

    k->realize = virtio_9p_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = 0x2;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_9p_pci_properties;
}

static void virtio_9p_pci_instance_init(Object *obj)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_9P);
}

static const TypeInfo virtio_9p_pci_info = {
    .name          = TYPE_VIRTIO_9P_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(V9fsPCIState),
    .instance_init = virtio_9p_pci_instance_init,
    .class_init    = virtio_9p_pci_class_init,
};
#endif /* CONFIG_VIRTFS */

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}
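
/*
 * Add one virtio vendor-specific capability (struct virtio_pci_cap) to the
 * proxy's PCI config space; used to advertise where the modern common, ISR,
 * device and notify structures live inside their BARs.
 */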

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                   struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, cap->cap_len);
    assert(offset > 0);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}
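
/*
 * Handlers for the virtio 1.0 "common configuration" structure: feature
 * selection, status, MSI-X vectors and per-queue ring programming.  Ring
 * addresses written here are only latched in proxy->vqs[]; they are handed
 * to the core via virtio_queue_set_rings() when the guest sets Q_ENABLE.
 */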

static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}


static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = atomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}
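
/*
 * Build the modern MMIO structures (common config, ISR, device config,
 * notify and the optional notify-pio variant) as memory regions; they are
 * mapped into the modern memory/IO BARs and advertised through the vendor
 * capabilities added by virtio_pci_modern_*_region_map().
 */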

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };


    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify_pio.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);

}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuse guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return;
        }
        /* legacy and transitional */
        pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                     pci_get_word(config + PCI_VENDOR_ID));
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
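        /* Virtio 1.0 PCI device IDs are 0x1040 + the virtio device ID
         * (e.g. 0x1041 for virtio-net), with a non-zero revision. */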
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

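        /* The PCI_CFG capability is a window into the modern BAR driven from
         * PCI config space; leave its bar/offset/length/data fields writable
         * so the guest can steer the accesses. */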
        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it.  */
            if (err != -ENOTSUP) {
                error_report("unable to init msix vectors to %" PRIu32,
                             proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

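    /* The legacy I/O BAR covers the legacy virtio-pci registers plus the
     * device-specific config; round up since PCI BAR sizes must be powers
     * of two. */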
    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}

static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_dev->bus) &&
                     !pci_bus_is_root(pci_dev->bus);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

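    /* One notify slot per virtqueue: virtio_pci_queue_mem_mult() bytes each
     * (a full page with page-per-vq, only a few bytes otherwise). */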
    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

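    /* modern_cfg/modern_as alias the whole modern BAR so that accesses made
     * through the VIRTIO_PCI_CAP_PCI_CFG window can be replayed into it. */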
    memory_region_init_alias(&proxy->modern_cfg,
                             OBJECT(proxy),
                             "virtio-pci-cfg",
                             &proxy->modern_bar,
                             0,
                             memory_region_size(&proxy->modern_bar));

    address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
        assert(pos > 0);

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);

    msix_uninit_exclusive_bar(pci_dev);
    address_space_destroy(&proxy->modern_as);
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

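    /* Queues have to be re-enabled by the guest after reset. */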
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

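    /* Claim PCI Express before the parent realize sizes the config space;
     * virtio_pci_realize() drops the capability again when the device is not
     * plugged into a PCIe port. */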
    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    vpciklass->parent_dc_realize = dc->realize;
    dc->realize = virtio_pci_dc_realize;
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_blk_pci_properties;
    k->realize = virtio_blk_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_blk_pci_instance_init(Object *obj)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BLK);
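    /* Forward the backing virtio-blk device's iothread and bootindex
     * properties so they can be set directly on virtio-blk-pci. */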
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_blk_pci_info = {
    .name          = TYPE_VIRTIO_BLK_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBlkPCI),
    .instance_init = virtio_blk_pci_instance_init,
    .class_init    = virtio_blk_pci_class_init,
};

/* virtio-scsi-pci */

static Property virtio_scsi_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
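        /* one vector per request queue; the control and event queues plus the
         * config change interrupt account for the extra 3 */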
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    /*
     * For command line compatibility, this sets the virtio-scsi-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SCSI);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
}

static const TypeInfo virtio_scsi_pci_info = {
    .name          = TYPE_VIRTIO_SCSI_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init    = virtio_scsi_pci_class_init,
};

/* vhost-scsi-pci */

#ifdef CONFIG_VHOST_SCSI
static Property vhost_scsi_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = vhost_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_scsi_pci_instance_init(Object *obj)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_SCSI);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo vhost_scsi_pci_info = {
    .name          = TYPE_VHOST_SCSI_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VHostSCSIPCI),
    .instance_init = vhost_scsi_pci_instance_init,
    .class_init    = vhost_scsi_pci_class_init,
};
#endif

/* vhost-vsock-pci */

#ifdef CONFIG_VHOST_VSOCK
static Property vhost_vsock_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostVSockPCI *dev = VHOST_VSOCK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_vsock_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = vhost_vsock_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = vhost_vsock_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_VSOCK;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void vhost_vsock_pci_instance_init(Object *obj)
{
    VHostVSockPCI *dev = VHOST_VSOCK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_VSOCK);
}

static const TypeInfo vhost_vsock_pci_info = {
    .name          = TYPE_VHOST_VSOCK_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VHostVSockPCI),
    .instance_init = vhost_vsock_pci_instance_init,
    .class_init    = vhost_vsock_pci_class_init,
};
#endif

/* virtio-balloon-pci */

static Property virtio_balloon_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
        vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        vpci_dev->class_code = PCI_CLASS_OTHERS;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_balloon_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_balloon_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_balloon_pci_instance_init(Object *obj)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BALLOON);
    object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
                                  "guest-stats", &error_abort);
    object_property_add_alias(obj, "guest-stats-polling-interval",
                              OBJECT(&dev->vdev),
                              "guest-stats-polling-interval", &error_abort);
}

static const TypeInfo virtio_balloon_pci_info = {
    .name          = TYPE_VIRTIO_BALLOON_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBalloonPCI),
    .instance_init = virtio_balloon_pci_instance_init,
    .class_init    = virtio_balloon_pci_class_init,
};

/* virtio-serial-pci */

static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        vpci_dev->class_code != PCI_CLASS_OTHERS) {        /* qemu-kvm  */
            vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
    }

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
    }

    /*
     * For command line compatibility, this sets the virtio-serial-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_serial_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_serial_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    dc->props = virtio_serial_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void virtio_serial_pci_instance_init(Object *obj)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SERIAL);
}

static const TypeInfo virtio_serial_pci_info = {
    .name          = TYPE_VIRTIO_SERIAL_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSerialPCI),
    .instance_init = virtio_serial_pci_instance_init,
    .class_init    = virtio_serial_pci_class_init,
};

/* virtio-net-pci */

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    DeviceState *qdev = DEVICE(vpci_dev);
    VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    virtio_net_set_netclient_name(&dev->vdev, qdev->id,
                                  object_get_typename(OBJECT(qdev)));
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    k->romfile = "efi-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->props = virtio_net_properties;
    vpciklass->realize = virtio_net_pci_realize;
}

static void virtio_net_pci_instance_init(Object *obj)
{
    VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_NET);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_net_pci_info = {
    .name          = TYPE_VIRTIO_NET_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIONetPCI),
    .instance_init = virtio_net_pci_instance_init,
    .class_init    = virtio_net_pci_class_init,
};

/* virtio-rng-pci */

static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vrng->vdev);
    Error *err = NULL;

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_link(OBJECT(vrng),
                             OBJECT(vrng->vdev.conf.rng), "rng",
                             NULL);
}

static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_rng_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_rng_initfn(Object *obj)
{
    VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_RNG);
    object_property_add_alias(obj, "rng", OBJECT(&dev->vdev), "rng",
                              &error_abort);
}

static const TypeInfo virtio_rng_pci_info = {
    .name          = TYPE_VIRTIO_RNG_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIORngPCI),
    .instance_init = virtio_rng_initfn,
    .class_init    = virtio_rng_pci_class_init,
};

/* virtio-input-pci */

static Property virtio_input_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vinput->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
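    /* virtio-input only exists as a virtio 1.0 device, so force modern-only
     * operation before realizing it. */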
    virtio_pci_force_virtio_1(vpci_dev);
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_input_pci_properties;
    k->realize = virtio_input_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);

    pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
}

static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
}

static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
                                                  void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
}

static void virtio_keyboard_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_KEYBOARD);
}

static void virtio_mouse_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_MOUSE);
}

static void virtio_tablet_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_TABLET);
}

static const TypeInfo virtio_input_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOInputPCI),
    .class_init    = virtio_input_pci_class_init,
    .abstract      = true,
};

static const TypeInfo virtio_input_hid_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_HID_PCI,
    .parent        = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .abstract      = true,
};

static const TypeInfo virtio_keyboard_pci_info = {
    .name          = TYPE_VIRTIO_KEYBOARD_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init    = virtio_input_hid_kbd_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_keyboard_initfn,
};

static const TypeInfo virtio_mouse_pci_info = {
    .name          = TYPE_VIRTIO_MOUSE_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init    = virtio_input_hid_mouse_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_mouse_initfn,
};

static const TypeInfo virtio_tablet_pci_info = {
    .name          = TYPE_VIRTIO_TABLET_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_tablet_initfn,
};

#ifdef CONFIG_LINUX
static void virtio_host_initfn(Object *obj)
{
    VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_INPUT_HOST);
}

static const TypeInfo virtio_host_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_HOST_PCI,
    .parent        = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHostPCI),
    .instance_init = virtio_host_initfn,
};
#endif

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_rng_pci_info);
    type_register_static(&virtio_input_pci_info);
    type_register_static(&virtio_input_hid_pci_info);
    type_register_static(&virtio_keyboard_pci_info);
    type_register_static(&virtio_mouse_pci_info);
    type_register_static(&virtio_tablet_pci_info);
#ifdef CONFIG_LINUX
    type_register_static(&virtio_host_pci_info);
#endif
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_pci_info);
#endif
    type_register_static(&virtio_blk_pci_info);
    type_register_static(&virtio_scsi_pci_info);
    type_register_static(&virtio_balloon_pci_info);
    type_register_static(&virtio_serial_pci_info);
    type_register_static(&virtio_net_pci_info);
#ifdef CONFIG_VHOST_SCSI
    type_register_static(&vhost_scsi_pci_info);
#endif
#ifdef CONFIG_VHOST_VSOCK
    type_register_static(&vhost_vsock_pci_info);
#endif
}

type_init(virtio_pci_register_types)