/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleOutput handle_aio_output;
    bool use_aio;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
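/* Recompute the avail and used ring addresses from the descriptor table
 * base and the ring size, using the transport's vring alignment. */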
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}

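/* Read a descriptor from guest memory and byte-swap its fields to host
 * endianness according to the device's endianness. */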
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
                       (void *)uelem, sizeof(VRingUsedElem));
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
}

void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
                       unsigned int len)
{
    vq->last_avail_idx--;
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_discard() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}

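/* Publish a completed element in the used ring at offset @idx past the
 * current used index; the used index itself is only advanced later by
 * virtqueue_flush(). */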
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                         hwaddr desc_pa, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return max;
    }

    /* Check they're not leading us off end of descriptors. */
    next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    vring_desc_read(vdev, desc, desc_pa, next);
    return next;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
                                unsigned int *num_sg, unsigned int max_size,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    /* Note: this function MUST validate input, some callers
     * are passing in num_sg values received over the network.
     */
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(*num_sg <= max_size);

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtQueueElement *elem)
{
    virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
                        VIRTQUEUE_MAX_SIZE, 1);
    virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
                        VIRTQUEUE_MAX_SIZE, 0);
}

void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

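/* Pop the next available element: walk its descriptor chain (following an
 * indirect table if present), map the buffers into out_sg/in_sg, and keep
 * the element accounted as in use until it is pushed back or discarded. */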
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        return NULL;
    }

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }
    } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    return NULL;
}

/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(elem);
    return elem;
}

void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout.  */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}

static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_aio_output(vdev, vq);
    }
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
                                            VirtIOHandleOutput handle_output,
                                            bool use_aio)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;
    vdev->vq[i].use_aio = use_aio;

    return &vdev->vq[i];
}

/* Add a virt queue and mark AIO.
 * An AIO queue will use the AioContext based event interface instead of the
 * default IOHandler and EventNotifier interface.
 */
VirtQueue *virtio_add_queue_aio(VirtIODevice *vdev, int queue_size,
                                VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, true);
}

/* Add a normal virt queue (on the contrary to the AIO version above. */
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, false);
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static void put_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};

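/* Save the common virtio device state (status, ISR, queue selector, low
 * 32 feature bits, config space and per-queue state), then the device's
 * own state, and finally the vmstate subsections declared above. */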
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

/* A wrapper for use as a VMState .put function */
void virtio_vmstate_save(QEMUFile *f, void *opaque, size_t size)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);
}

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}

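/* Load the common virtio state written by virtio_save(), restoring each
 * virtqueue and re-checking the negotiated features against host_features
 * before handing control back to the device's own load hook. */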
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features_nocheck to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             */
            vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx;
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    return 0;
}

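/*
 * Release what virtio_init() set up: the VM change state handler, the config
 * space buffer, the VirtQueue array and the vector-to-queue map.
 */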
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

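/*
 * Propagate VM run state changes.  When the VM starts and the driver is
 * ready, the device status is replayed before the transport's vmstate_change
 * hook; when it stops, the hook runs first and the status write follows.
 */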
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

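/*
 * Hedged usage sketch: a transport proxy's instance_init typically embeds the
 * backend object and forwards to this helper, along the lines of (all names
 * below are hypothetical):
 *
 *     static void virtio_foo_pci_instance_init(Object *obj)
 *     {
 *         VirtIOFooPCI *dev = VIRTIO_FOO_PCI(obj);
 *         virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
 *                                     TYPE_VIRTIO_FOO);
 *     }
 */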
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}

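/*
 * Common initialisation shared by all virtio devices: allocate the VirtQueue
 * array and the optional vector-to-queue map, reset status/ISR/queue_sel and
 * register the VM change state handler.  A device's realize hook is expected
 * to call this before configuring its queues (typically via virtio_add_queue()).
 */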
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}

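/*
 * The accessors below expose the guest-physical layout of each vring to the
 * transports: a descriptor table of vring.num VRingDesc entries, an avail
 * ring of vring.num 16-bit entries and a used ring of vring.num VRingUsedElem
 * entries; the overall ring size is measured from the descriptor table to the
 * end of the used ring.
 */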
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

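/*
 * When an irqfd injects the interrupt directly, no userspace handler is
 * needed, so a read handler is only installed for assign && !with_irqfd.
 */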
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier, false,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, false, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

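/*
 * Attach or detach the virtqueue's host notifier in the given AioContext
 * (the dataplane/IOThread path).  Passing a NULL handler detaches it and
 * drains any pending event so that a late kick is not lost.
 */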
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

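/*
 * Wire the host notifier into the main loop: queues flagged use_aio are
 * registered with the global AioContext, the rest use a plain event notifier
 * handler; on deassign the notifier is drained one last time.
 */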
void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    AioContext *ctx = qemu_get_aio_context();
    if (assign && set_handler) {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true,
                                   virtio_queue_host_notifier_read);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true,
                                       virtio_queue_host_notifier_read);
        }
    } else {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true, NULL);
        }
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}
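
/*
 * Hedged usage sketch: device and core code report fatal guest bugs through
 * virtio_error() instead of aborting, e.g. (illustrative only):
 *
 *     if (desc.len > max_len) {
 *         virtio_error(vdev, "descriptor length %u out of range", desc.len);
 *         return;
 *     }
 *
 * For VIRTIO 1.0 devices this also sets NEEDS_RESET and raises a config
 * change notification; legacy devices are only marked broken.
 */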

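/*
 * Realize the child class first, then plug the device into its bus; unrealize
 * below reverses the order, unplugging before the child's unrealize runs.
 */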
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

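/*
 * The base class below wires up realize/unrealize and the common feature
 * properties.  Hedged sketch of how a concrete device fills in its own
 * VirtioDeviceClass (all "foo" names are hypothetical):
 *
 *     static void virtio_foo_class_init(ObjectClass *klass, void *data)
 *     {
 *         VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
 *
 *         vdc->realize = virtio_foo_device_realize;
 *         vdc->unrealize = virtio_foo_device_unrealize;
 *         vdc->get_features = virtio_foo_get_features;
 *     }
 */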
static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)