/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-blk.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-input.h"
#include "hw/pci/pci.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
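/*
 * Note the asymmetry between the two macros above: VIRTIO_PCI_REGION_SIZE()
 * keys off msix_present() while VIRTIO_PCI_CONFIG_SIZE() keys off
 * msix_enabled().  The legacy register layout only gains the MSI-X vector
 * registers once the guest actually enables MSI-X, while the region itself
 * has to be sized for them whenever MSI-X is present at all.
 */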

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, vdev->config_vector);
}

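/*
 * Transport-level ("extra") migration state.  The modern_state subsection is
 * only emitted when the proxy is acting as a virtio-1 device (see the .needed
 * callback below): it records the device/guest feature select registers, the
 * guest feature words and, per queue, the programmed size, enable flag and
 * the desc/avail/used ring addresses as 32-bit halves.
 */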
static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

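/*
 * Host notifier (ioeventfd) wiring.  A queue's modern doorbell lives at
 * queue-index * virtio_pci_queue_mem_mult(proxy) within the notify region
 * (one page per queue with page-per-vq, a 4-byte stride otherwise) and is
 * matched with any access length when KVM fast MMIO is available, else as a
 * 2-byte write.  Legacy notifications are 2-byte writes of the queue index
 * to VIRTIO_PCI_QUEUE_NOTIFY in the legacy BAR, and the optional modern PIO
 * doorbell matches the queue index as the written value at offset 0.
 */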
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                              true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

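/*
 * Legacy (virtio 0.9) I/O port register block: feature negotiation, queue
 * PFN programming (writing 0 resets the device), queue selection and kick,
 * device status and the MSI-X vector registers.  Accesses below the
 * device-specific config offset are routed here by
 * virtio_pci_config_read/write().
 */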
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = atomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

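/* Map an offset within the modern capability layout onto the VirtIOPCIRegion
 * (common/isr/device/notify window) that fully contains [*off, *off + len)
 * and rebase *off to be relative to that region; returns NULL when no region
 * matches.
 */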
static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = cpu_to_le16(pci_get_word(buf));
        break;
    case 4:
        val = cpu_to_le32(pci_get_long(buf));
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, le16_to_cpu(val));
        break;
    case 4:
        pci_set_long(buf, le32_to_cpu(val));
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

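/*
 * PCI config space accessors.  Besides the default behaviour, two quirks are
 * implemented below: clearing PCI_COMMAND bus mastering stops ioeventfd and
 * drops DRIVER_OK from the device status, and accesses hitting the
 * pci_cfg_data field of the cfg access capability (when present, at
 * proxy->config_cap) are forwarded to the virtio region selected by the
 * capability's offset/length via virtio_address_space_write/read().
 */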
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

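/*
 * MSI-X vector <-> KVM irqchip plumbing.  One MSI route is shared by all
 * queues using a given vector: vq_vector_use() allocates the route on first
 * use and reference-counts it in VirtIOIRQFD.users, vq_vector_release() frees
 * it when the count drops to zero, and irqfd_use()/irqfd_release() attach or
 * detach a queue's guest notifier eventfd to that route.
 */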
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                 unsigned int queue_no,
                                 unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                             unsigned int queue_no,
                                             unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

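/*
 * Called back by the MSI-X core to re-sync pending bits while vectors are
 * masked: for each queue routed to a masked vector in [vector_start,
 * vector_end), check whether work is pending (via the device's
 * guest_notifier_pending callback when it exists, otherwise by
 * test-and-clearing the guest notifier) and set the vector's MSI-X pending
 * bit so the interrupt is not lost.
 */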
static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

#ifdef CONFIG_VIRTFS
static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_9p_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);

    k->realize = virtio_9p_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = 0x2;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_9p_pci_properties;
}

static void virtio_9p_pci_instance_init(Object *obj)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_9P);
}

static const VirtioPCIDeviceTypeInfo virtio_9p_pci_info = {
    .base_name              = TYPE_VIRTIO_9P_PCI,
    .generic_name           = "virtio-9p-pci",
    .transitional_name      = "virtio-9p-pci-transitional",
    .non_transitional_name  = "virtio-9p-pci-non-transitional",
    .instance_size = sizeof(V9fsPCIState),
    .instance_init = virtio_9p_pci_instance_init,
    .class_init    = virtio_9p_pci_class_init,
};
#endif /* CONFIG_VIRTFS */

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                   struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

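/*
 * Modern (virtio-1) common configuration accessors.  64-bit values are
 * exposed as 32-bit halves: the feature words are selected through
 * dfselect/gfselect, and each queue's descriptor/avail/used ring addresses
 * are staged in proxy->vqs[] as lo/hi pairs until the queue is enabled, at
 * which point virtio_pci_common_write() combines them and programs the rings
 * (see the Q_ENABLE case).
 */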
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}


static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

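/* MMIO doorbell: the queue being kicked is derived from the write offset
 * (one slot of virtio_pci_queue_mem_mult() bytes per queue), whereas the PIO
 * variant below takes the queue number from the written value instead.
 */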
static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = atomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify_pio.size);
}

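/* Map one VirtIOPCIRegion into the given BAR and advertise it to the guest
 * by emitting a vendor-specific PCI capability describing its bar, offset,
 * length and cfg_type (see virtio_pci_add_mem_cap() above).
 */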
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);

}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

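/*
 * Transport hook run ahead of the device plug: when the modern interface is
 * enabled, advertise VIRTIO_F_VERSION_1 in the host features.
 * VIRTIO_F_BAD_FEATURE is always added by this transport.
 */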
static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuse guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM is not supported by"
                       " legacy or transitional devices");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}

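/*
 * Transport hook for unplug: stop ioeventfd and drop the modern BAR
 * subregions that were mapped in virtio_pci_device_plugged().
 */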
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

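/*
 * Common PCI realize for all virtio-pci proxies: pick the default BAR layout
 * and capability region offsets, decide between legacy and modern operation,
 * add the PCI Express capabilities when plugged into an express bus, then
 * create the virtio bus and run the subclass realize hook.
 */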
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    msix_uninit_exclusive_bar(pci_dev);
}

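/*
 * Device reset: reset the virtio bus, release all MSI-X vectors, clear the
 * per-queue proxy state and, for express devices, reset the PCIe error
 * reporting, link control and power management registers.
 */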
static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

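/*
 * Runs in place of the parent realize: mark the device as PCI Express before
 * realizing it whenever the modern interface is enabled and PCIe support has
 * not been turned off with x-disable-pcie.
 */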
static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->props = virtio_pci_generic_properties;
}

/* Used when the generic type and the base type is the same */
static void virtio_pci_generic_base_class_init(ObjectClass *klass, void *data)
{
    virtio_pci_base_class_init(klass, data);
    virtio_pci_generic_class_init(klass, NULL);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

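/*
 * Register the QOM types described by @t: an abstract base type plus a
 * generic variant and, when names are provided, transitional and
 * non-transitional variants whose instance_init functions pick the matching
 * disable-legacy/disable-modern defaults.
 *
 * A minimal caller would look like the sketch below (illustrative only; the
 * "foo" names are hypothetical and not part of this file):
 *
 *     static const VirtioPCIDeviceTypeInfo foo_pci_info = {
 *         .base_name     = TYPE_FOO_PCI,
 *         .generic_name  = "foo-pci",
 *         .instance_size = sizeof(FooPCI),
 *         .instance_init = foo_pci_instance_init,
 *         .class_init    = foo_pci_class_init,
 *     };
 *
 *     virtio_pci_types_register(&foo_pci_info);
 */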
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_init    = virtio_pci_base_class_init,
        .class_data    = (void *)t,
        .abstract      = true,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        base_type_info.name = t->generic_name;
        base_type_info.class_init = virtio_pci_generic_base_class_init;
        base_type_info.interfaces = generic_type_info.interfaces;
        base_type_info.abstract = false;
        generic_type_info.name = NULL;
        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
}

/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

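/*
 * virtio-blk realize: when the vector count is left unspecified, default it
 * to the number of request queues plus one, then realize the embedded
 * virtio-blk device on the proxy's virtio bus.
 */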
static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.conf.num_queues + 1;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_blk_pci_properties;
    k->realize = virtio_blk_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_blk_pci_instance_init(Object *obj)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BLK);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const VirtioPCIDeviceTypeInfo virtio_blk_pci_info = {
    .base_name              = TYPE_VIRTIO_BLK_PCI,
    .generic_name           = "virtio-blk-pci",
    .transitional_name      = "virtio-blk-pci-transitional",
    .non_transitional_name  = "virtio-blk-pci-non-transitional",
    .instance_size = sizeof(VirtIOBlkPCI),
    .instance_init = virtio_blk_pci_instance_init,
    .class_init    = virtio_blk_pci_class_init,
};

#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
/* vhost-user-blk */

static Property vhost_user_blk_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.num_queues + 1;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_user_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_user_blk_pci_properties;
    k->realize = vhost_user_blk_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_user_blk_pci_instance_init(Object *obj)
{
    VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_USER_BLK);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const VirtioPCIDeviceTypeInfo vhost_user_blk_pci_info = {
    .base_name               = TYPE_VHOST_USER_BLK_PCI,
    .generic_name            = "vhost-user-blk-pci",
    .transitional_name       = "vhost-user-blk-pci-transitional",
    .non_transitional_name   = "vhost-user-blk-pci-non-transitional",
    .instance_size  = sizeof(VHostUserBlkPCI),
    .instance_init  = vhost_user_blk_pci_instance_init,
    .class_init     = vhost_user_blk_pci_class_init,
};
#endif

/* virtio-scsi-pci */

static Property virtio_scsi_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

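/*
 * virtio-scsi realize: when unspecified, default the vector count to the
 * number of request queues plus three, keep the historical "<id>.0" bus name
 * for command-line compatibility, then realize the embedded virtio-scsi
 * device.
 */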
static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    /*
     * For command line compatibility, this sets the virtio-scsi-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SCSI);
}

static const VirtioPCIDeviceTypeInfo virtio_scsi_pci_info = {
    .base_name              = TYPE_VIRTIO_SCSI_PCI,
    .generic_name           = "virtio-scsi-pci",
    .transitional_name      = "virtio-scsi-pci-transitional",
    .non_transitional_name  = "virtio-scsi-pci-non-transitional",
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init    = virtio_scsi_pci_class_init,
};

/* vhost-scsi-pci */

#ifdef CONFIG_VHOST_SCSI
static Property vhost_scsi_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = vhost_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_scsi_pci_instance_init(Object *obj)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_SCSI);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const VirtioPCIDeviceTypeInfo vhost_scsi_pci_info = {
    .base_name             = TYPE_VHOST_SCSI_PCI,
    .generic_name          = "vhost-scsi-pci",
    .transitional_name     = "vhost-scsi-pci-transitional",
    .non_transitional_name = "vhost-scsi-pci-non-transitional",
    .instance_size = sizeof(VHostSCSIPCI),
    .instance_init = vhost_scsi_pci_instance_init,
    .class_init    = vhost_scsi_pci_class_init,
};
#endif

#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
/* vhost-user-scsi-pci */
static Property vhost_user_scsi_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_user_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_user_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = vhost_user_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_user_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_user_scsi_pci_instance_init(Object *obj)
{
    VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_USER_SCSI);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const VirtioPCIDeviceTypeInfo vhost_user_scsi_pci_info = {
    .base_name             = TYPE_VHOST_USER_SCSI_PCI,
    .generic_name          = "vhost-user-scsi-pci",
    .transitional_name     = "vhost-user-scsi-pci-transitional",
    .non_transitional_name = "vhost-user-scsi-pci-non-transitional",
    .instance_size = sizeof(VHostUserSCSIPCI),
    .instance_init = vhost_user_scsi_pci_instance_init,
    .class_init    = vhost_user_scsi_pci_class_init,
};
#endif

/* virtio-balloon-pci */

static Property virtio_balloon_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
        vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        vpci_dev->class_code = PCI_CLASS_OTHERS;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_balloon_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_balloon_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_balloon_pci_instance_init(Object *obj)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BALLOON);
    object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
                              "guest-stats", &error_abort);
    object_property_add_alias(obj, "guest-stats-polling-interval",
                              OBJECT(&dev->vdev),
                              "guest-stats-polling-interval", &error_abort);
}

static const VirtioPCIDeviceTypeInfo virtio_balloon_pci_info = {
    .base_name             = TYPE_VIRTIO_BALLOON_PCI,
    .generic_name          = "virtio-balloon-pci",
    .transitional_name     = "virtio-balloon-pci-transitional",
    .non_transitional_name = "virtio-balloon-pci-non-transitional",
    .instance_size = sizeof(VirtIOBalloonPCI),
    .instance_init = virtio_balloon_pci_instance_init,
    .class_init    = virtio_balloon_pci_class_init,
};

/* virtio-serial-pci */

static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        vpci_dev->class_code != PCI_CLASS_OTHERS) {        /* qemu-kvm  */
            vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
    }

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
    }

    /*
     * For command line compatibility, this sets the virtio-serial-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_serial_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_serial_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    dc->props = virtio_serial_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void virtio_serial_pci_instance_init(Object *obj)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SERIAL);
}

static const VirtioPCIDeviceTypeInfo virtio_serial_pci_info = {
    .base_name             = TYPE_VIRTIO_SERIAL_PCI,
    .generic_name          = "virtio-serial-pci",
    .transitional_name     = "virtio-serial-pci-transitional",
    .non_transitional_name = "virtio-serial-pci-non-transitional",
    .instance_size = sizeof(VirtIOSerialPCI),
    .instance_init = virtio_serial_pci_instance_init,
    .class_init    = virtio_serial_pci_class_init,
};

/* virtio-net-pci */

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_PROP_END_OF_LIST(),
};

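/*
 * virtio-net realize: hand the proxy's qdev id and type name to the
 * netclient before realizing the embedded virtio-net device.
 */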
static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    DeviceState *qdev = DEVICE(vpci_dev);
    VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    virtio_net_set_netclient_name(&dev->vdev, qdev->id,
                                  object_get_typename(OBJECT(qdev)));
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    k->romfile = "efi-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->props = virtio_net_properties;
    vpciklass->realize = virtio_net_pci_realize;
}

static void virtio_net_pci_instance_init(Object *obj)
{
    VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_NET);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const VirtioPCIDeviceTypeInfo virtio_net_pci_info = {
    .base_name             = TYPE_VIRTIO_NET_PCI,
    .generic_name          = "virtio-net-pci",
    .transitional_name     = "virtio-net-pci-transitional",
    .non_transitional_name = "virtio-net-pci-non-transitional",
    .instance_size = sizeof(VirtIONetPCI),
    .instance_init = virtio_net_pci_instance_init,
    .class_init    = virtio_net_pci_class_init,
};

/* virtio-rng-pci */

static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vrng->vdev);
    Error *err = NULL;

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_link(OBJECT(vrng),
                             OBJECT(vrng->vdev.conf.rng), "rng",
                             NULL);
}

static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_rng_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_rng_initfn(Object *obj)
{
    VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_RNG);
}

static const VirtioPCIDeviceTypeInfo virtio_rng_pci_info = {
    .base_name             = TYPE_VIRTIO_RNG_PCI,
    .generic_name          = "virtio-rng-pci",
    .transitional_name     = "virtio-rng-pci-transitional",
    .non_transitional_name = "virtio-rng-pci-non-transitional",
    .instance_size = sizeof(VirtIORngPCI),
    .instance_init = virtio_rng_initfn,
    .class_init    = virtio_rng_pci_class_init,
};

/* virtio-input-pci */

static Property virtio_input_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vinput->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    virtio_pci_force_virtio_1(vpci_dev);
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_input_pci_properties;
    k->realize = virtio_input_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);

    pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
}

static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
}

static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
                                                  void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
}

static void virtio_keyboard_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_KEYBOARD);
}

static void virtio_mouse_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_MOUSE);
}

static void virtio_tablet_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_TABLET);
}

static const TypeInfo virtio_input_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOInputPCI),
    .class_init    = virtio_input_pci_class_init,
    .abstract      = true,
};

static const TypeInfo virtio_input_hid_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_HID_PCI,
    .parent        = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .abstract      = true,
};

static const VirtioPCIDeviceTypeInfo virtio_keyboard_pci_info = {
    .generic_name  = TYPE_VIRTIO_KEYBOARD_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init    = virtio_input_hid_kbd_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_keyboard_initfn,
};

static const VirtioPCIDeviceTypeInfo virtio_mouse_pci_info = {
    .generic_name  = TYPE_VIRTIO_MOUSE_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init    = virtio_input_hid_mouse_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_mouse_initfn,
};

static const VirtioPCIDeviceTypeInfo virtio_tablet_pci_info = {
    .generic_name  = TYPE_VIRTIO_TABLET_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_tablet_initfn,
};

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

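/*
 * Wire up the VirtioBusClass callbacks that implement the PCI transport:
 * guest notification, config/queue migration helpers, guest notifiers,
 * ioeventfd handling, plug/unplug hooks and the DMA address space lookup.
 */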
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
    type_register_static(&virtio_input_pci_info);
    type_register_static(&virtio_input_hid_pci_info);

    /* Implementations: */
    virtio_pci_types_register(&virtio_rng_pci_info);
    virtio_pci_types_register(&virtio_keyboard_pci_info);
    virtio_pci_types_register(&virtio_mouse_pci_info);
    virtio_pci_types_register(&virtio_tablet_pci_info);
#ifdef CONFIG_VIRTFS
    virtio_pci_types_register(&virtio_9p_pci_info);
#endif
    virtio_pci_types_register(&virtio_blk_pci_info);
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
    virtio_pci_types_register(&vhost_user_blk_pci_info);
#endif
    virtio_pci_types_register(&virtio_scsi_pci_info);
    virtio_pci_types_register(&virtio_balloon_pci_info);
    virtio_pci_types_register(&virtio_serial_pci_info);
    virtio_pci_types_register(&virtio_net_pci_info);
#ifdef CONFIG_VHOST_SCSI
    virtio_pci_types_register(&vhost_scsi_pci_info);
#endif
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
    virtio_pci_types_register(&vhost_user_scsi_pci_info);
#endif
}

type_init(virtio_pci_register_types)