/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif
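/* Example: VHOST_OPS_DEBUG("vhost_set_owner failed") expands to an
 * error_report() of the message plus strerror(errno) and errno while
 * _VHOST_DEBUG is defined, and to a no-op otherwise. */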

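/* Process-wide caches of the most recently allocated dirty log;
 * vhost_log_get() below reuses them when the requested size matches,
 * so multiple vhost devices can share one log allocation. */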
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

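/* Sync one section of the dirty log into QEMU's bitmap: every set bit
 * in a vhost_log_chunk_t marks one VHOST_LOG_PAGE-sized page written
 * by the backend; [mfirst, mlast] bounds the section in guest physical
 * space and [rfirst, rlast] bounds the logged range being scanned. */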
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

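/* Allocate a dirty log of the given size; when the backend must share
 * the log (vhost_requires_shm_log, e.g. an external vhost-user
 * process), it is backed by a sealed memfd whose fd can be handed
 * across the process boundary. */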
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

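/* Returns 0 when the ring part does not overlap the region or is
 * mapped contiguously inside it, -ENOMEM when the ring runs past the
 * end of the region, and -EBUSY when the ring's HVA changed (its
 * MemoryRegion was replaced). */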
static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static bool vhost_section(MemoryRegionSection *section)
{
    bool result;
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
                     ~(1 << DIRTY_MEMORY_MIGRATION);
    result = memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);

    /* Vhost doesn't handle any block which is doing dirty-tracking other
     * than migration; this typically fires on VGA areas.
     */
    result &= !log_dirty;

    trace_vhost_section(section->mr->name, result);
    return result;
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, let's check the contents */
        changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
                         n_old_sections * sizeof(old_sections[0])) != 0;
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    used_memslots = dev->mem->nregions;
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size     = int128_get64(mrs->size);
        cur_vmr->userspace_addr  =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding   = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, lets see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                               (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
                        (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
                        prev_sec->offset_within_region;
        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);

        if (prev_gpa_end + 1 == mrs_gpa &&
            prev_host_end + 1 == mrs_host &&
            section->mr == prev_sec->mr &&
            (!dev->vhost_ops->vhost_backend_can_merge ||
                dev->vhost_ops->vhost_backend_can_merge(dev,
                    mrs_host, mrs_size,
                    prev_host_start, prev_size))) {
            /* The two sections abut */
            need_add = false;
            prev_sec->size = int128_add(prev_sec->size, section->size);
            trace_vhost_region_add_section_abut(section->mr->name,
                                                mrs_size + prev_size);
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
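/* Example: a legacy (pre-VIRTIO_F_VERSION_1) device driven by a
 * big-endian guest on a little-endian host needs the backend told to
 * use big-endian vring accesses; a modern device is always
 * little-endian, so no hint is needed. */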
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

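/* Handle an IOTLB miss reported by the backend: translate the guest
 * IOVA through the virtio device's DMA address space, convert the
 * resulting GPA to a backend-visible HVA via the memory table above,
 * then push the mapping back down as an IOTLB update. */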
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Failed to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;
    int a;

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Don't stop a virtqueue that has not been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_addnop,
        .region_nop = vhost_region_addnop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        if (busyloop_timeout) {
            goto fail_busyloop;
        } else {
            goto fail;
        }
    }

    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert (e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert (r >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
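/* While masked, the backend's call events are steered to the internal
 * masked_notifier rather than the guest notifier; on unmask they are
 * routed back, and anything that fired meanwhile is picked up via
 * vhost_virtqueue_pending(). */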
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                         bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

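/* Typical backend usage (e.g. vhost-net): filter the advertised
 * feature set through vhost_get_features() at negotiation time, then
 * record what the guest accepted with vhost_ack_features() below. */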
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    assert(hdev->vhost_ops);
    hdev->config_ops = ops;
}

/* Host notifiers must be enabled at this point. */
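/* Bring-up order matters here: negotiate features first (so the
 * backend knows whether it must log), then install the memory table,
 * then start each virtqueue, and only then set the dirty log base if
 * logging is enabled. */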
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}