/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

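/* Sync one region of the dirty log with QEMU's dirty bitmap: walk the
 * log chunks covering the intersection of the section range
 * [mfirst, mlast] and the vhost region range [rfirst, rlast], consume
 * the set bits atomically and mark the corresponding pages dirty. */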
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

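/* Allocate a dirty log of @size chunks: plain malloc'ed memory, or,
 * when the backend has to see the log too (e.g. vhost-user), a sealed
 * memfd mapping whose fd can be handed to the backend. */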
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

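/* Return the cached global log of the requested flavour, reallocating
 * it when the size changed; otherwise just take another reference. */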
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

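/* When the device goes through an IOMMU, ring addresses are IOVAs that
 * the backend translates itself, so hand them back untouched; otherwise
 * map the guest-physical range into our address space. */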
static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

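/* Re-check that a ring part overlapping the changed range still maps to
 * the same host address: -ENOMEM if it can no longer be mapped, -EBUSY
 * if it now maps somewhere else. */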
static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
                                          void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    hwaddr l;
    void *p;
    int r = 0;

    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
    l = part_size;
    p = vhost_memory_map(dev, part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
    if (p != part) {
        r = -EBUSY;
    }
    vhost_memory_unmap(dev, p, l, 0, 0);
    return r;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

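/* Return the tracked region overlapping [start_addr, start_addr + size),
 * if any. */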
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

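/* Add or remove one memory section in dev->mem, and widen the changed
 * range that vhost_commit() will push to the backend. */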
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

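/* Push the memory map changes accumulated since vhost_begin() to the
 * backend.  If dirty logging is active, grow the log before the table
 * update and only shrink it afterwards, so no write can miss the log. */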
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

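/* IOMMU notifier: forward guest IOTLB invalidations to the backend. */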
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Fail to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

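/* Tell the backend where the descriptor, available and used rings of
 * @vq live in our address space, and whether used-ring writes must be
 * logged. */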
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

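/* Switch dirty logging on or off: renegotiate features with
 * VHOST_F_LOG_ALL toggled and reprogram every ring, rolling the rings
 * back on failure. */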
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

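/* Handle an IOTLB miss reported by the backend: translate @iova through
 * the device's DMA address space and push the resulting mapping back as
 * an IOTLB update.  Returns -EFAULT if there is no valid mapping. */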
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Fail to lookup the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Fail to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}

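/* Program one virtqueue into the backend: ring size and last avail
 * index, legacy cross-endian setup if needed, ring memory mappings and
 * addresses, and the kick/call eventfds. */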
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

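/* Tear down one virtqueue: save the last avail index back into the
 * VirtIODevice (or restore it from the used ring if the backend is
 * gone), undo legacy cross-endian setup and unmap the rings. */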
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

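/* Configure the busy-polling timeout of one ring, if the backend
 * supports it. */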
static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

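/*
 * Typical call sequence for a vhost client, sketched from the entry
 * points below (error handling and client-specific setup omitted):
 *
 *     vhost_dev_init(&hdev, opaque, backend_type, busyloop_timeout);
 *     vhost_dev_enable_notifiers(&hdev, vdev);
 *     vhost_dev_start(&hdev, vdev);
 *     ...
 *     vhost_dev_stop(&hdev, vdev);
 *     vhost_dev_disable_notifiers(&hdev, vdev);
 *     vhost_dev_cleanup(&hdev);
 */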
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        for (i = 0; i < hdev->n_mem_sections; ++i) {
            MemoryRegionSection *section = &hdev->mem_sections[i];
            memory_region_unref(section->mr);
        }
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                         bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

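/* Mask @features down to the bits listed in @feature_bits that the
 * backend actually offers. */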
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}