/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

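/* Scan the dirty log over the intersection of the section range
 * [mfirst, mlast] and the log range [rfirst, rlast]: atomically consume
 * each dirty chunk from the backend's log and mark the corresponding
 * pages dirty in QEMU's own bitmap so migration picks them up. */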
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

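/* Sync dirty pages for every vhost memory region and used ring that
 * overlaps @section, clipped to the [first, last] address window. */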
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

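/* Allocate a dirty log of @size chunks: plain heap memory normally, or a
 * sealed memfd mapping when the backend needs to share the log (typically
 * an out-of-process vhost-user backend). */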
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    /* Clear only after the final sync above, which still needs
     * dev->log_size to know how much of the old log to flush. */
    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

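/* With a vIOMMU, the backend is given I/O virtual addresses and resolves
 * them itself through the IOTLB, so "mapping" a ring just passes the IOVA
 * through; only the no-IOMMU case maps a host pointer here. */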
static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

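/* Check that a ring part the backend already knows about is still mapped
 * at the same host address after a memory layout change. */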
static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
                                          void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    hwaddr l;
    void *p;
    int r = 0;

    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
    l = part_size;
    p = vhost_memory_map(dev, part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
    if (p != part) {
        r = -EBUSY;
    }
    vhost_memory_unmap(dev, p, l, 0, 0);
    return r;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
						      uint64_t start_addr,
						      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

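/* Fold one section add/remove into dev->mem: skip no-op changes, rebuild
 * the affected range (merging neighbours on add), and widen the changed
 * window that vhost_commit() will push to the backend. */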
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
        ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

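/* End of a memory transaction: verify the rings survived the layout
 * change, grow the dirty log before installing the new memory table and
 * shrink it only afterwards, so no write goes unlogged in between. */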
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

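/* Guest IOMMU mapping was torn down: forward the invalidation so the
 * backend drops its stale IOTLB entry. */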
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Fail to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

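/* Tell the backend where the descriptor table, available ring and used
 * ring live in its address space, and whether used-ring writes must be
 * logged for migration. */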
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

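/* Toggle dirty logging on a live device; invoked when migration starts or
 * stops through the log_global_start/stop listener callbacks below. */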
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

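/* Translate a guest physical address into the backend's userspace address
 * via the region table, returning the bytes left in the region. */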
static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

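/* Handle an IOTLB miss reported by the backend: translate the IOVA
 * through the device's DMA address space and push the resulting mapping
 * back down. */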
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Fail to lookup the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Fail to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}

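/* Wire one virtqueue into the backend: ring size and base index first,
 * then legacy cross-endian setup, then the mapped ring addresses and the
 * kick/call eventfds. */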
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

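/* One-time device setup: pick the backend, check the memslot limit, take
 * ownership, fetch features, initialise every virtqueue and register the
 * memory listener. All failure paths unwind what was already set up. */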
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

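/* Undo vhost_dev_init(); safe on a partially initialised device, which is
 * why each teardown step is guarded. */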
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        for (i = 0; i < hdev->n_mem_sections; ++i) {
            MemoryRegionSection *section = &hdev->mem_sections[i];
            memory_region_unref(section->mr);
        }
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                         bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}