/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleOutput handle_aio_output;
    bool use_aio;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}

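/*
 * Worked example of the split-ring layout computed above (illustrative
 * only): for a 256-entry queue with the default 4096-byte alignment,
 *
 *     desc  occupies 256 * sizeof(VRingDesc) = 256 * 16 = 4096 bytes,
 *     avail starts at desc + 4096 and occupies 4 + 256 * 2 = 516 bytes,
 *     used  starts at the next vring_align() boundary after avail.
 */
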
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
                       (void *)uelem, sizeof(VRingUsedElem));
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

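/*
 * Typical use of the notification toggle (illustrative sketch, not a call
 * sequence taken from this file): a backend that polls the ring suppresses
 * guest notifications while it is draining the queue:
 *
 *     virtio_queue_set_notification(vq, 0);
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         ... process the request, then virtqueue_push() ...
 *     }
 *     virtio_queue_set_notification(vq, 1);
 */
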
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
}

void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
                       unsigned int len)
{
    vq->last_avail_idx--;
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_discard() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

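/*
 * Illustrative sketch: a device that completes several requests at once can
 * batch the used-ring update instead of calling virtqueue_push() per
 * element:
 *
 *     for (i = 0; i < n; i++) {
 *         virtqueue_fill(vq, elems[i], lens[i], i);
 *     }
 *     virtqueue_flush(vq, n);
 *     virtio_notify(vdev, vq);
 */
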
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    hwaddr desc_pa, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_pa, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    if (rc < 0) {
        goto err;
    }

done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
                                unsigned int *num_sg, unsigned int max_size,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    /* Note: this function MUST validate input, some callers
     * are passing in num_sg values received over the network.
     */
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(*num_sg <= max_size);

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtQueueElement *elem)
{
    virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
                        VIRTQUEUE_MAX_SIZE, 1);
    virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
                        VIRTQUEUE_MAX_SIZE, 0);
}

void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

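/*
 * Layout produced by virtqueue_alloc_element() (sketch): a single
 * allocation holds the element header followed by the variable-sized
 * arrays, each aligned to its element type:
 *
 *     | VirtQueueElement | in_addr[] | out_addr[] | in_sg[] | out_sg[] |
 *
 * The in_addr/out_addr/in_sg/out_sg pointers stored in the header point
 * back into this allocation, so one g_free(elem) releases everything.
 */
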
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        return NULL;
    }

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    return NULL;
}

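/*
 * Calling convention of virtqueue_pop() (illustrative sketch; MyDevReq is
 * a made-up wrapper type): the element is heap-allocated and must be freed
 * by the caller.  Passing sz > sizeof(VirtQueueElement) lets a device embed
 * the element at the start of its own request structure:
 *
 *     typedef struct MyDevReq {
 *         VirtQueueElement elem;      // must be the first field
 *         uint32_t status;
 *     } MyDevReq;
 *
 *     MyDevReq *req = virtqueue_pop(vq, sizeof(MyDevReq));
 *     if (req) {
 *         ... process req ...
 *         virtqueue_push(vq, &req->elem, len);
 *         g_free(req);
 *     }
 */
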
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(elem);
    return elem;
}

void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout.  */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

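/*
 * Summary of the two config accessor families above (illustrative note):
 * the legacy accessors use the target-endian helpers while the _modern_
 * ones always use little-endian helpers, as VIRTIO 1.0 requires, e.g.
 *
 *     legacy:  val = lduw_p(vdev->config + addr);
 *     modern:  val = lduw_le_p(vdev->config + addr);
 */
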
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}

static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_aio_output(vdev, vq);
    }
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
                                            VirtIOHandleOutput handle_output,
                                            bool use_aio)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;
    vdev->vq[i].use_aio = use_aio;

    return &vdev->vq[i];
}

/* Add a virt queue and mark AIO.
 * An AIO queue will use the AioContext based event interface instead of the
 * default IOHandler and EventNotifier interface.
 */
VirtQueue *virtio_add_queue_aio(VirtIODevice *vdev, int queue_size,
                                VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, true);
}

/* Add a plain virt queue (as opposed to the AIO version above). */
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, false);
}

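/*
 * Illustrative sketch of registering a queue at device realize time (the
 * names below are placeholders, not taken from this file):
 *
 *     static void my_dev_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement *elem;
 *
 *         while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *             ... process elem->out_sg / elem->in_sg ...
 *             virtqueue_push(vq, elem, 0);
 *             g_free(elem);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 *
 *     s->vq = virtio_add_queue(vdev, 128, my_dev_handle_output);
 */
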
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

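/*
 * Note on the event-idx path above (sketch): vring_need_event(), from the
 * standard virtio ring headers, evaluates
 *
 *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *
 * so the interrupt is raised only if the guest's used_event value was
 * crossed while the used index moved from old to new.
 */
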
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static void put_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

/* A wrapper for use as a VMState .put function */
void virtio_vmstate_save(QEMUFile *f, void *opaque, size_t size)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);
}

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             */
            vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx;
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    return 0;
}

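/*
 * Undo the allocations made by virtio_init(): drop the VM change state
 * handler and free the config space, the virtqueue array and the
 * per-vector queue bookkeeping.
 */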
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

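/*
 * VM run-state callback.  vm_running is updated, and the status callback
 * and the transport's vmstate_change hook run in opposite orders for start
 * and stop: on start (with DRIVER_OK set) the device status is restored
 * before the transport resumes; on stop the transport is paused first and
 * the status callback runs afterwards.
 */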
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

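/*
 * Helper for transport proxy objects (e.g. the virtio-*-pci wrappers):
 * initialize the embedded VirtIODevice, attach it as the "virtio-backend"
 * child and alias its qdev properties onto the proxy object.
 */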
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}

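/*
 * Common initialization for concrete virtio devices: allocate the
 * VIRTIO_QUEUE_MAX virtqueue slots and the config space buffer, reset
 * status/ISR/queue selector, query the transport for MSI-X vectors and
 * register the VM run-state handler.  A device's realize function is
 * expected to call this before creating its queues, roughly as in the
 * sketch below (VIRTIO_ID_FOO, foo_config and foo_handle_output are
 * illustrative names, not definitions from this file):
 *
 *     virtio_init(vdev, "foo", VIRTIO_ID_FOO, sizeof(struct foo_config));
 *     vq = virtio_add_queue(vdev, 128, foo_handle_output);
 */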
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}

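/*
 * Accessors used by transports and backends (e.g. vhost) to locate and
 * size the split virtqueue rings in guest memory.  The layout is the
 * legacy one: descriptor table, then avail ring, then used ring; the
 * overall "ring" size runs from the start of the descriptor table to the
 * end of the used ring.
 */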
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

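/*
 * Guest notifier plumbing.  When irqfd is not in use, the guest notifier
 * is read here and turned into a virtio interrupt by
 * virtio_queue_guest_notifier_read(); with irqfd the event is consumed
 * elsewhere and no fd handler is installed.
 */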
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier, false,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, false, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

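/*
 * Attach or detach the queue's host notifier in a specific AioContext so
 * that queue processing can run outside the main loop (e.g. in an
 * IOThread).  A dataplane-style device would typically call this with its
 * IOThread's context when starting and again with a NULL handler when
 * stopping; the NULL case also drains any kick that raced with the detach.
 */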
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

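/*
 * Main-loop variant of the host notifier hookup: depending on vq->use_aio
 * the notifier is registered either with the main AioContext or as a plain
 * event handler.  As above, tearing the handler down re-reads the notifier
 * so that a kick which arrived in the meantime is not lost.
 */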
void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    AioContext *ctx = qemu_get_aio_context();
    if (assign && set_handler) {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true,
                                   virtio_queue_host_notifier_read);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true,
                                       virtio_queue_host_notifier_read);
        }
    } else {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true, NULL);
        }
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

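/*
 * Report a fatal, guest-triggered error and mark the device broken.  For
 * VIRTIO_F_VERSION_1 devices the VIRTIO_CONFIG_S_NEEDS_RESET status bit is
 * set and a config interrupt is raised so the guest can reset the device.
 * A typical (purely illustrative) use in a descriptor-parsing path:
 *
 *     if (desc.len > max_len) {
 *         virtio_error(vdev, "bogus descriptor length %u", desc.len);
 *         return;
 *     }
 */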
void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}

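/*
 * Realization glue: the device class realize hook runs first, then the
 * device is plugged into its virtio bus.  Unrealize does the reverse
 * (unplug from the bus, then the class unrealize hook) and finally drops
 * the bus name.
 */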
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)