/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <inttypes.h>

#include "hw/virtio.h"
#include "hw/virtio-blk.h"
#include "hw/virtio-net.h"
#include "hw/virtio-serial.h"
#include "hw/virtio-scsi.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "sysemu/blockdev.h"
#include "hw/virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio-bus.h"

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register.  */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24
#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT    12

/* Flags track per-device state like workarounds for quirks in older guests. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* HACK for virtio to determine if it's running a big endian guest */
bool virtio_is_big_endian(void);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

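/* Interrupt the guest: use the configured MSI-X vector when MSI-X is
 * enabled, otherwise drive the legacy INTx line from ISR bit 0. */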
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else
        qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, proxy->vdev->config_vector);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &proxy->vdev->config_vector);
    } else {
        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(proxy->vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}

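/* Bind (or unbind) the virtqueue's host notifier eventfd to guest writes of
 * the queue index at the QUEUE_NOTIFY ioport; optionally also install the fd
 * handler that services the queue when the notifier fires. */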
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign, bool set_handler)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}

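/* Switch all populated queues to ioeventfd: guest kicks are then delivered
 * through eventfds instead of being handled synchronously in the ioport
 * write path. */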
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    int n, r;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
}

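/* Undo virtio_pci_start_ioeventfd() for all populated queues. */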
static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    int r;
    int n;

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}

static void virtio_pci_reset(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_reset(proxy->vdev);
    msix_unuse_all_vectors(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}

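/* Guest write to the legacy virtio-pci register block: feature negotiation,
 * queue setup and notification, status updates and MSI-X vector selection. */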
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_stop_ioeventfd(proxy);
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 sets the device as OK without enabling
           the PCI device bus master bit. In this case we need to disable
           some safety checks. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

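/* Guest read of the legacy virtio-pci register block; unknown registers
 * return 0xFFFFFFFF. */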
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

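/* BAR 0 read dispatch: low addresses hit the legacy register block above,
 * the rest is the device-specific config space (target endian, hence the
 * byte swaps for big-endian guests). */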
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(proxy->vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(proxy->vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        virtio_config_writew(proxy->vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        virtio_config_writel(proxy->vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

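/* PCI config space write hook: when the guest clears bus mastering, also
 * drop DRIVER_OK so the device stops operating, unless the pre-2.6.34 guest
 * workaround (VIRTIO_PCI_FLAG_BUS_MASTER_BUG) is active. */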
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(proxy->vdev,
                          proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }
}

static unsigned virtio_pci_get_features(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return proxy->host_features;
}

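/* Reference-counted setup of the KVM MSI route backing a vector; the route
 * is shared by every queue that uses the same vector. */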
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                 unsigned int queue_no,
                                 unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    int ret;
    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
    return ret;
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

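/* Allocate MSI routes (and irqfds, when the device supports vector masking)
 * for the first nvqs queues; everything is rolled back on failure. */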
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = proxy->vdev;
    unsigned int vector;
    int ret, queue_no;
    MSIMessage msg;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        msg = msix_get_message(dev, vector);
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (proxy->vdev->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (proxy->vdev->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = proxy->vdev;
    unsigned int vector;
    int queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (proxy->vdev->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int kvm_virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret = 0;

    if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
        ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg);
        if (ret < 0) {
            return ret;
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (proxy->vdev->guest_notifier_mask) {
        proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (proxy->vdev->guest_notifier_pending &&
            proxy->vdev->guest_notifier_pending(proxy->vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                             unsigned int queue_no,
                                             unsigned int vector)
{
    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */ 
    if (proxy->vdev->guest_notifier_mask) {
        proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int kvm_virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                     MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int ret, queue_no;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_mask(proxy, queue_no, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_mask(proxy, queue_no, vector);
    }
}

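/* MSI-X poll callback: for masked vectors in [vector_start, vector_end),
 * re-check whether the queue has work pending and set the pending bit if so. */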
static void kvm_virtio_pci_vector_poll(PCIDevice *dev,
                                       unsigned int vector_start,
                                       unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (vdev->guest_notifier_pending) {
            if (vdev->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

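/* Assign or release guest notifiers for up to nvqs queues, using irqfd
 * based injection when MSI-X and a KVM irqchip are available. */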
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = proxy->vdev;
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if (proxy->vector_irqfd && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        kvm_virtio_pci_vector_release(proxy, nvqs);
        g_free(proxy->vector_irqfd);
        proxy->vector_irqfd = NULL;
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign,
                                          kvm_msi_via_irqfd_enabled());
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if (with_irqfd && assign) {
        proxy->vector_irqfd =
            g_malloc0(sizeof(*proxy->vector_irqfd) *
                      msix_nr_vectors_allocated(&proxy->pci_dev));
        r = kvm_virtio_pci_vector_use(proxy, nvqs);
        if (r < 0) {
            goto assign_error;
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      kvm_virtio_pci_vector_unmask,
                                      kvm_virtio_pci_vector_mask,
                                      kvm_virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    assert(assign);
    kvm_virtio_pci_vector_release(proxy, nvqs);

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}

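/* VM state change handler: restart ioeventfd when the guest resumes (also
 * re-detecting the bus-master quirk) and stop it when the VM is paused. */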
static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    if (running) {
        /* Try to find out if the guest has bus master disabled, but is
           in ready state. Then we have a buggy guest OS. */
        if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .query_guest_notifiers = virtio_pci_query_guest_notifiers,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
    .vmstate_change = virtio_pci_vmstate_change,
};

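/* Legacy binding entry point: fill in PCI config space, set up MSI-X,
 * register the legacy I/O BAR and negotiate host features for vdev. */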
void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
{
    uint8_t *config;
    uint32_t size;

    proxy->vdev = vdev;

    config = proxy->pci_dev.config;

    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
    config[PCI_INTERRUPT_PIN] = 1;

    if (vdev->nvectors &&
        msix_init_exclusive_bar(&proxy->pci_dev, vdev->nvectors, 1)) {
        vdev->nvectors = 0;
    }

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    if (size & (size-1))
        size = 1 << qemu_fls(size);

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_bind_device(vdev, &virtio_pci_bindings, DEVICE(proxy));
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
}

static void virtio_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    memory_region_destroy(&proxy->bar);
    msix_uninit_exclusive_bar(pci_dev);
}

static int virtio_serial_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        proxy->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        proxy->class_code != PCI_CLASS_OTHERS)          /* qemu-kvm  */
        proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;

    vdev = virtio_serial_init(&pci_dev->qdev, &proxy->serial);
    if (!vdev) {
        return -1;
    }

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                                        ? proxy->serial.max_virtserial_ports + 1
                                        : proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_serial_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_serial_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_net_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net,
                           proxy->host_features);

    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_net_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_net_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_balloon_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_OTHERS &&
        proxy->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        proxy->class_code = PCI_CLASS_OTHERS;
    }

    vdev = virtio_balloon_init(&pci_dev->qdev);
    if (!vdev) {
        return -1;
    }
    virtio_init_pci(proxy, vdev);
    return 0;
}

static void virtio_balloon_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_balloon_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_rng_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->rng.rng == NULL) {
        proxy->rng.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));

        object_property_add_child(OBJECT(pci_dev),
                                  "default-backend",
                                  OBJECT(proxy->rng.default_backend),
                                  NULL);

        object_property_set_link(OBJECT(pci_dev),
                                 OBJECT(proxy->rng.default_backend),
                                 "rng", NULL);
    }

    vdev = virtio_rng_init(&pci_dev->qdev, &proxy->rng);
    if (!vdev) {
        return -1;
    }
    virtio_init_pci(proxy, vdev);
    return 0;
}

static void virtio_rng_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_rng_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
    DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy, net.txtimer, TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_net_init_pci;
    k->exit = virtio_net_exit_pci;
    k->romfile = "efi-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_net_properties;
}

static const TypeInfo virtio_net_info = {
    .name          = "virtio-net-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_net_class_init,
};

static Property virtio_serial_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, serial.max_virtserial_ports, 31),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_serial_init_pci;
    k->exit = virtio_serial_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_serial_properties;
}

static const TypeInfo virtio_serial_info = {
    .name          = "virtio-serial-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_serial_class_init,
};

static Property virtio_balloon_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_balloon_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_balloon_init_pci;
    k->exit = virtio_balloon_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_balloon_properties;
}

static const TypeInfo virtio_balloon_info = {
    .name          = "virtio-balloon-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_balloon_class_init,
};

static void virtio_rng_initfn(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
                             (Object **)&proxy->rng.rng, NULL);
}

static Property virtio_rng_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s.  If
       you have an entropy source capable of generating more entropy than this
       and you can pass it through via virtio-rng, then hats off to you.  Until
       then, this is unlimited for all practical purposes.
    */
    DEFINE_PROP_UINT64("max-bytes", VirtIOPCIProxy, rng.max_bytes, INT64_MAX),
    DEFINE_PROP_UINT32("period", VirtIOPCIProxy, rng.period_ms, 1 << 16),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_rng_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_rng_init_pci;
    k->exit = virtio_rng_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_rng_properties;
}

static const TypeInfo virtio_rng_info = {
    .name          = "virtio-rng-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .instance_init = virtio_rng_initfn,
    .class_init    = virtio_rng_class_init,
};

#ifdef CONFIG_VIRTFS
static int virtio_9p_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf);
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static Property virtio_9p_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag),
    DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_9p_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_9p_init_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = 0x2;
    dc->props = virtio_9p_properties;
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_9p_info = {
    .name          = "virtio-9p-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_9p_class_init,
};
#endif

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    uint8_t *config;
    uint32_t size;

    proxy->vdev = bus->vdev;

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    config[PCI_INTERRUPT_PIN] = 1;

    if (proxy->nvectors &&
        msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
        proxy->nvectors = 0;
    }

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
         + virtio_bus_get_vdev_config_len(bus);
    if (size & (size - 1)) {
        size = 1 << qemu_fls(size);
    }

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = virtio_bus_get_vdev_features(bus,
                                                      proxy->host_features);
}

static int virtio_pci_init(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *dev = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    virtio_pci_bus_new(&dev->bus, dev);
    if (k->init != NULL) {
        return k->init(dev);
    }
    return 0;
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_exit_pci(pci_dev);
}

/*
 * This will be renamed virtio_pci_reset at the end of the series.
 * virtio_pci_reset is still in use at this moment.
 */
static void virtio_pci_rst(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_pci_init;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_rst;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    DEFINE_PROP_BIT("x-data-plane", VirtIOBlkPCI, blk.data_plane, 0, false),
#endif
    DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlkPCI, blk),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_blk_pci_init(VirtIOPCIProxy *vpci_dev)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    virtio_blk_set_conf(vdev, &(dev->blk));
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    if (qdev_init(vdev) < 0) {
        return -1;
    }
    return 0;
}

static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_blk_pci_properties;
    k->init = virtio_blk_pci_init;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_blk_pci_instance_init(Object *obj)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
    object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BLK);
    object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
}

static const TypeInfo virtio_blk_pci_info = {
    .name          = TYPE_VIRTIO_BLK_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBlkPCI),
    .instance_init = virtio_blk_pci_instance_init,
    .class_init    = virtio_blk_pci_class_init,
};

/* virtio-scsi-pci */

static Property virtio_scsi_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_VIRTIO_SCSI_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOSCSIPCI, vdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_scsi_pci_init_pci(VirtIOPCIProxy *vpci_dev)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    if (qdev_init(vdev) < 0) {
        return -1;
    }
    return 0;
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->init = virtio_scsi_pci_init_pci;
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);
    object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_SCSI);
    object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
}

static const TypeInfo virtio_scsi_pci_info = {
    .name          = TYPE_VIRTIO_SCSI_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init    = virtio_scsi_pci_class_init,
};

/* virtio-pci-bus */

void virtio_pci_bus_new(VirtioBusState *bus, VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    BusState *qbus;
    qbus_create_inplace((BusState *)bus, TYPE_VIRTIO_PCI_BUS, qdev, NULL);
    qbus = BUS(bus);
    qbus->allow_hotplug = 1;
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->get_features = virtio_pci_get_features;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_host_notifier = virtio_pci_set_host_notifier;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->device_plugged = virtio_pci_device_plugged;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_net_info);
    type_register_static(&virtio_serial_info);
    type_register_static(&virtio_balloon_info);
    type_register_static(&virtio_rng_info);
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_info);
#endif
    type_register_static(&virtio_blk_pci_info);
    type_register_static(&virtio_scsi_pci_info);
}

type_init(virtio_pci_register_types)