/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

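/* Scan the dirty log covering the intersection of [mfirst, mlast] and
 * [rfirst, rlast] and mark the corresponding pages of @section dirty. */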
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

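/* Sync dirty pages logged for one memory section, clamped to [first, last],
 * against every vhost memory region and every used ring of the device. */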
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

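/* Number of dirty log chunks needed to cover all guest memory regions and
 * all used rings of the device. */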
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

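/* Return a log of the requested size: reuse the cached one (bumping its
 * refcount) if it matches, otherwise allocate a fresh one. */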
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    /* Clear the device's view only after the old log has been synced */
    dev->log = NULL;
    dev->log_size = 0;
}

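/* Some backends require the dirty log to be shareable with the backend
 * process; in that case it is backed by a memfd (see vhost_log_alloc()). */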
static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

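/* Check that a ring part (descriptor table, avail or used ring) that overlaps
 * the changed range is still mapped at the same host address and size. */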
static int vhost_verify_ring_part_mapping(void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    hwaddr l;
    void *p;
    int r = 0;

    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
    l = part_size;
    p = cpu_physical_memory_map(part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
    if (p != part) {
        r = -EBUSY;
    }
    cpu_physical_memory_unmap(p, l, 0, 0);
    return r;
}

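/* After a memory layout change, verify that the rings of all virtqueues are
 * still mapped where the backend expects them. */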
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

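/* Find the vhost memory region overlapping the given guest-physical range,
 * if any. */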
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
						      uint64_t start_addr,
						      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

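/* Add or remove one memory section from the vhost memory map, tracking the
 * changed range for the next commit. */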
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

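/* Memory transaction commit: re-verify ring mappings, push the new memory
 * table to the backend and resize the dirty log if needed. */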
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

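/* Tell the backend where the descriptor table, avail and used rings of one
 * virtqueue are located in QEMU's address space. */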
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

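/* Enable or disable dirty logging while the device is running (called when
 * migration starts or stops). */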
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

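/* Set up one virtqueue in the backend: ring size, base index, ring addresses
 * and the kick/call eventfds. */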
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

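/* Tear down one virtqueue: read the ring base back from the backend into the
 * virtio device and unmap the rings. */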
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

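/* Set the backend's busy-polling timeout for one virtqueue, if supported. */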
static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

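/* One-time virtqueue setup: create the masked notifier and register it as the
 * backend's call eventfd. */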
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

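/* Initialise a vhost device: set up the backend, check memory slot limits,
 * negotiate features and initialise every virtqueue. */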
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}
1212 1213 1214 1215 1216
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
K
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert (e >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert (r >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
}
1274 1275 1276 1277 1278
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
J
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                         bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}
1336
/* Host notifiers must be enabled at this point. */
M
Michael S. Tsirkin 已提交
1337 1338 1339
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
1340

1341 1342 1343
    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

1344 1345
    hdev->started = true;

M
Michael S. Tsirkin 已提交
1346 1347
    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
1348
        goto fail_features;
M
Michael S. Tsirkin 已提交
1349
    }
1350
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
M
Michael S. Tsirkin 已提交
1351
    if (r < 0) {
1352
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
M
Michael S. Tsirkin 已提交
1353
        r = -errno;
1354
        goto fail_mem;
M
Michael S. Tsirkin 已提交
1355
    }
1356
    for (i = 0; i < hdev->nvqs; ++i) {
1357
        r = vhost_virtqueue_start(hdev,
J
Jason Wang 已提交
1358 1359 1360
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
1361 1362 1363 1364 1365
        if (r < 0) {
            goto fail_vq;
        }
    }

M
Michael S. Tsirkin 已提交
1366
    if (hdev->log_enabled) {
M
Michael S. Tsirkin 已提交
1367 1368
        uint64_t log_base;

M
Michael S. Tsirkin 已提交
1369
        hdev->log_size = vhost_get_log_size(hdev);
M
Marc-André Lureau 已提交
1370 1371
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
J
Jason Wang 已提交
1372
        log_base = (uintptr_t)hdev->log->log;
1373
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
1374 1375
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
M
Michael S. Tsirkin 已提交
1376
        if (r < 0) {
1377
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
M
Michael S. Tsirkin 已提交
1378
            r = -errno;
1379
            goto fail_log;
M
Michael S. Tsirkin 已提交
1380 1381
        }
    }
1382

M
Michael S. Tsirkin 已提交
1383
    return 0;
1384
fail_log:
1385
    vhost_log_put(hdev, false);
M
Michael S. Tsirkin 已提交
1386 1387
fail_vq:
    while (--i >= 0) {
1388
        vhost_virtqueue_stop(hdev,
J
Jason Wang 已提交
1389 1390 1391
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
M
Michael S. Tsirkin 已提交
1392
    }
J
Jason Wang 已提交
1393
    i = hdev->nvqs;
1394 1395
fail_mem:
fail_features:
1396 1397

    hdev->started = false;
M
Michael S. Tsirkin 已提交
1398 1399 1400
    return r;
}

1401
/* Host notifiers must be enabled at this point. */
M
Michael S. Tsirkin 已提交
1402 1403
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
J
Jason Wang 已提交
1404
    int i;
1405

1406 1407 1408
    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

M
Michael S. Tsirkin 已提交
1409
    for (i = 0; i < hdev->nvqs; ++i) {
1410
        vhost_virtqueue_stop(hdev,
J
Jason Wang 已提交
1411 1412 1413
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
M
Michael S. Tsirkin 已提交
1414
    }
1415

J
Jason Wang 已提交
1416
    vhost_log_put(hdev, true);
M
Michael S. Tsirkin 已提交
1417 1418
    hdev->started = false;
}
1419 1420 1421 1422 1423 1424 1425 1426 1427 1428

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}