/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether the signalled_used value above is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleOutput handle_aio_output;
    bool use_aio;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
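/* Recompute the avail and used ring addresses from the descriptor table
 * base, using the legacy split-ring layout: the avail ring immediately
 * follows the descriptor table, and the used ring starts at the next
 * vring.align boundary after the avail ring. */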
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}

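/* Read one descriptor out of guest memory and convert its fields to host
 * byte order according to the device's endianness. */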
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
                       (void *)uelem, sizeof(VRingUsedElem));
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

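/* Tell the guest whether the device wants to be notified of new buffers on
 * this queue.  With VIRTIO_RING_F_EVENT_IDX negotiated this publishes an
 * avail event index; otherwise it toggles VRING_USED_F_NO_NOTIFY in the
 * used ring flags. */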
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * the guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
}

void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
                       unsigned int len)
{
    vq->last_avail_idx--;
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_discard() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}

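/* Completion is a two-step protocol: virtqueue_fill() writes an element
 * into the used ring at offset idx from used_idx without publishing it,
 * and virtqueue_flush() then advances the used index so that a whole batch
 * becomes visible to the guest at once.  virtqueue_push() is the common
 * fill-one-then-flush shortcut. */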
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    hwaddr desc_pa, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_pa, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

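/* Walk the available descriptor chains (including indirect tables) and add
 * up how many device-readable (out) and device-writable (in) bytes are
 * queued, stopping once both max_in_bytes and max_out_bytes are satisfied.
 * On a malformed ring both totals are reported as zero. */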
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

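/* Map a single descriptor's guest-physical buffer into host iovecs,
 * splitting it into several entries when cpu_physical_memory_map() cannot
 * map the range contiguously.  Returns false (after reporting the problem
 * through virtio_error()) on zero-sized buffers, table overflow or map
 * failures. */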
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
                                unsigned int *num_sg, unsigned int max_size,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    /* Note: this function MUST validate input, some callers
     * are passing in num_sg values received over the network.
     */
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(*num_sg <= max_size);

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtQueueElement *elem)
{
    virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
                        VIRTQUEUE_MAX_SIZE, 1);
    virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
                        VIRTQUEUE_MAX_SIZE, 0);
}

void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

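/* Pop the next available element: read its descriptor chain (direct or
 * indirect), map every segment into host memory and return a
 * VirtQueueElement allocated by virtqueue_alloc_element(sz, ...).  Returns
 * NULL when the queue is empty or the device is broken.
 *
 * Illustrative device-side usage (a sketch, not code from this file):
 *
 *     VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *     if (elem) {
 *         ... process elem->out_sg, fill elem->in_sg ...
 *         virtqueue_push(vq, elem, len);
 *         virtio_notify(vdev, vq);
 *         g_free(elem);
 *     }
 */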
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        return NULL;
    }

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    return NULL;
}

/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meantime, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(elem);
    return elem;
}

void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout.  */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

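/* Update the device status byte.  For a VIRTIO 1.0 device, the transition
 * that sets VIRTIO_CONFIG_S_FEATURES_OK first runs the device's
 * validate_features hook and is refused if validation fails. */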
int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

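/* Full device reset.  A guest-initiated reset (current_cpu is set) adopts
 * the endianness of the resetting CPU, while a system reset falls back to
 * the target's default endianness. */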
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}

static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_aio_output(vdev, vq);
    }
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
                                            VirtIOHandleOutput handle_output,
                                            bool use_aio)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;
    vdev->vq[i].use_aio = use_aio;

    return &vdev->vq[i];
}

/* Add a virt queue and mark AIO.
 * An AIO queue will use the AioContext based event interface instead of the
 * default IOHandler and EventNotifier interface.
 */
VirtQueue *virtio_add_queue_aio(VirtIODevice *vdev, int queue_size,
                                VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, true);
}

/* Add a normal virt queue (as opposed to the AIO version above). */
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, false);
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

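/* Decide whether the guest actually needs an interrupt for this queue,
 * implementing the VIRTIO_RING_F_EVENT_IDX suppression scheme via
 * vring_need_event(), with VRING_AVAIL_F_NO_INTERRUPT as the legacy
 * fallback. */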
bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static void put_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};

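/* Save the transport config, the generic virtio state (status, ISR, queue
 * selector, low feature bits, config space), the per-queue rings and
 * indices, the device-specific state, and finally the vmstate
 * subsections. */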
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

/* A wrapper for use as a VMState .put function */
void virtio_vmstate_save(QEMUFile *f, void *opaque, size_t size)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);
}

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}

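/* Counterpart of virtio_save(): restore the generic state and per-queue
 * rings, let the transport and device load their own state, load the
 * vmstate subsections, and re-validate the negotiated features against
 * host_features. */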
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             */
            vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx;
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    return 0;
}

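/*
 * Undo virtio_init(): unregister the VM state change handler and free the
 * config space, the virtqueue array and the vector bookkeeping.
 */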
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

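/*
 * VM run state change handler: propagate the new running state to the
 * device and the transport.  The device status is re-applied before the
 * transport's vmstate_change hook when the backend starts, and after it
 * when the backend stops.
 */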
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

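/*
 * Helper for transport proxies: initialize the embedded VirtIODevice,
 * attach it as the QOM child "virtio-backend" and alias all of its
 * properties onto the proxy object.
 */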
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}

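/*
 * Common initialization for all virtio devices: allocate the per-vector
 * queue bookkeeping (if the transport reports vectors), the fixed-size
 * virtqueue array and the config space, and register the VM state change
 * handler.
 */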
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

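/*
 * Install or remove the fd handler for the guest notifier.  No handler is
 * needed when an irqfd injects the interrupt directly; on deassign, a
 * pending event is drained so no notification is lost.
 */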
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier, false,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, false, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

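/*
 * Attach the queue's host notifier to an AioContext (handle_output set) or
 * detach it (handle_output NULL), so ioeventfd kicks are serviced in that
 * context.  On detach, the notifier is tested and cleared to catch a kick
 * that arrived before the handler was removed.
 */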
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    AioContext *ctx = qemu_get_aio_context();
    if (assign && set_handler) {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true,
                                   virtio_queue_host_notifier_read);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true,
                                       virtio_queue_host_notifier_read);
        }
    } else {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true, NULL);
        }
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

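/*
 * Report a device error: log the message, mark the device broken and, for
 * VIRTIO 1.0 devices, set NEEDS_RESET in the status and signal a config
 * change so the guest can notice.
 */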
void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}

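/*
 * DeviceClass realize: run the device-specific realize hook first, then
 * plug the device into its virtio bus/transport.
 */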
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)