// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
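
/*
 * Usage sketch (illustrative only, not part of the upstream file): a
 * backend embeds a vhost_work in its own state, initializes it once, queues
 * it from an event path, and uses vhost_work_flush() as a barrier that
 * waits for the handler to finish. The my_backend names are hypothetical.
 */
#if 0
struct my_backend {
	struct vhost_dev dev;
	struct vhost_virtqueue vq;
	struct vhost_work work;
};

static void my_backend_handle(struct vhost_work *work)
{
	struct my_backend *b = container_of(work, struct my_backend, work);
	/* runs on the vhost worker thread, with the owner's mm active */
}

static void my_backend_setup(struct my_backend *b)
{
	vhost_work_init(&b->work, my_backend_handle);
}

static void my_backend_kick(struct my_backend *b)
{
	vhost_work_queue(&b->dev, &b->work);	/* no-op if already queued */
	vhost_work_flush(&b->dev, &b->work);	/* wait until it has run */
}
#endif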

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->acked_backend_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	__vhost_vq_meta_reset(vq);
}

static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			kcov_remote_start_common(dev->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			if (need_resched())
				schedule();
		}
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
					  GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
	    pkts >= dev->weight) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
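
/*
 * Usage sketch (illustrative only): request handlers call
 * vhost_exceeds_weight() after each buffer to bound the time spent in one
 * worker invocation; when it returns true the vq has already been requeued
 * via vhost_poll_queue(), so the handler simply returns.
 * handle_one_request() is a hypothetical helper.
 */
#if 0
static void my_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq =
		container_of(work, struct vhost_virtqueue, poll.work);
	int pkts = 0, total_len = 0, len;

	do {
		len = handle_one_request(vq);	/* hypothetical */
		if (len <= 0)
			break;
		total_len += len;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
}
#endif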

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
				   unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->avail) +
	       sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->used) +
	       sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	return sizeof(*vq->desc) * num;
}
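
/*
 * Worked example (illustrative only): for a split ring with num = 256 and
 * 16-byte descriptors, the helpers above evaluate to
 *   desc:  256 * 16                = 4096 bytes,
 *   avail: 4 + 256 * 2 (+ 2 event) = 516 (518) bytes,
 *   used:  4 + 256 * 8 (+ 2 event) = 2052 (2054) bytes,
 * the event word being counted only when VIRTIO_RING_F_EVENT_IDX was
 * negotiated.
 */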

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight,
		    int (*msg_handler)(struct vhost_dev *dev,
				       struct vhost_iotlb_msg *msg))
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	dev->iov_limit = iov_limit;
	dev->weight = weight;
	dev->byte_weight = byte_weight;
	dev->msg_handler = msg_handler;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
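
/*
 * Setup sketch (illustrative only), reusing the hypothetical my_backend and
 * my_handle_kick from the sketches above: handle_kick must be assigned
 * before vhost_dev_init() so the vq's poll structure gets wired up. The
 * weight constants are hypothetical too.
 */
#if 0
#define MY_WEIGHT	256
#define MY_BYTE_WEIGHT	(64 * 1024)

static int my_open(struct my_backend *b)
{
	struct vhost_virtqueue **vqs;

	vqs = kmalloc_array(1, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return -ENOMEM;
	vqs[0] = &b->vq;
	b->vq.handle_kick = my_handle_kick;
	vhost_dev_init(&b->dev, vqs, 1, UIO_MAXIOV,
		       MY_WEIGHT, MY_BYTE_WEIGHT, NULL);
	return 0;
}
#endif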

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	dev->kcov_handle = kcov_common_handle();
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
	dev->kcov_handle = 0;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

static struct vhost_iotlb *iotlb_alloc(void)
{
	return vhost_iotlb_alloc(max_iotlb_entries,
				 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
	return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}

void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_iotlb_free(dev->umem);
	dev->umem = NULL;
	vhost_iotlb_free(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
		dev->kcov_handle = 0;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
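
/*
 * Teardown sketch (illustrative only), again using the hypothetical
 * my_backend: release order matters. Stop the kick pollers first, flush
 * any queued work, then clean up the device; vhost_dev_cleanup() frees the
 * iovec arrays, stops the worker thread and drops the owner's mm.
 */
#if 0
static void my_release(struct my_backend *b)
{
	vhost_dev_stop(&b->dev);
	vhost_poll_flush(&b->vq.poll);
	vhost_dev_cleanup(&b->dev);
	kfree(b->dev.vqs);
}
#endif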

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
				int log_all)
{
	struct vhost_iotlb_map *map;

	if (!umem)
		return false;

	list_for_each_entry(map, &umem->list, link) {
		unsigned long a = map->addr;

		if (vhost_overflow(map->addr, map->size))
			return false;

		if (!access_ok((void __user *)a, map->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   map->start,
						   map->size))
			return false;
	}
	return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

	if (!map)
		return NULL;

	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory could be accessed through the iotlb.
		 * So -EAGAIN should not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that vq
		 * memory could be accessed through the iotlb.
		 * So -EAGAIN should not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that vq
 * memory could be accessed through the iotlb.
 * So -EAGAIN should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void __user *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret = -EFAULT; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT;	\
	} \
	ret; \
})

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			      vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{
	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
				  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			      &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			      &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
				      __virtio16 *idx)
{
	return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{
	return vhost_get_avail(vq, *head,
			       &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{
	return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{
	return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{
	return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{
	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
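
/*
 * Usage sketch (illustrative only): handlers read the ring through the
 * wrappers above so the IOTLB path is handled transparently, e.g. checking
 * for a new available head. my_peek() is hypothetical.
 */
#if 0
static int my_peek(struct vhost_virtqueue *vq, __virtio16 *head)
{
	__virtio16 avail_idx;

	if (vhost_get_avail_idx(vq, &avail_idx))
		return -EFAULT;
	if (vq->last_avail_idx == vhost16_to_cpu(vq, avail_idx))
		return -EAGAIN;	/* ring is empty */
	return vhost_get_avail_head(vq, head, vq->last_avail_idx);
}
#endif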

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok((void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok((void __user *)a, size))
		return false;
	return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
					  msg->iova + msg->size - 1,
					  msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_iotlb_del_range(dev->iotlb, msg->iova,
				      msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type)) {
		ret = -EINVAL;
		goto done;
	}

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There may be a hole after the type field for the V1
		 * message type, so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg)) {
		ret = -EINVAL;
		goto done;
	}

	if (dev->msg_handler)
		ret = dev->msg_handler(dev, &msg);
	else
		ret = vhost_process_iotlb_msg(dev, &msg);
	if (ret) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);
done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
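
/*
 * Userspace sketch (illustrative only): with VHOST_BACKEND_F_IOTLB_MSG_V2
 * negotiated, an IOTLB update is a struct vhost_msg_v2 written to the
 * device fd; vhost_chr_write_iter() above is what parses it.
 * my_iotlb_update() is hypothetical.
 */
#if 0
static int my_iotlb_update(int vhost_fd, __u64 iova, __u64 size, __u64 uaddr)
{
	struct vhost_msg_v2 msg = {
		.type = VHOST_IOTLB_MSG_V2,
		.iotlb = {
			.iova  = iova,
			.size  = size,
			.uaddr = uaddr,
			.perm  = VHOST_ACCESS_RW,
			.type  = VHOST_IOTLB_UPDATE,
		},
	};

	return write(vhost_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}
#endif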

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 struct vring_desc __user *desc,
			 struct vring_avail __user *avail,
			 struct vring_used __user *used)
{
	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
	       access_ok(used, vhost_get_used_size(vq, num));
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_iotlb_map *map,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(map->perm & access))
		vq->meta_iotlb[type] = map;
}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_iotlb_map *map;
	struct vhost_iotlb *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		map = vhost_iotlb_itree_first(umem, addr, last);
		if (map == NULL || map->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(map->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = map->size - addr + map->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, map, type);

		s += size;
		addr += size;
	}

	return true;
}

int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
			       vhost_get_avail_size(vq, num),
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
				  vhost_get_used_size(vq, vq->num)));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	/* Access validation occurs at prefetch time with IOTLB */
	if (vq->iotlb)
		return true;

	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_iotlb *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
			GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = iotlb_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_iotlb_add_range(newumem,
					  region->guest_phys_addr,
					  region->guest_phys_addr +
					  region->memory_size - 1,
					  region->userspace_addr,
					  VHOST_MAP_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_iotlb_free(oldumem);
	return 0;

err:
	vhost_iotlb_free(newumem);
	kvfree(newmem);
	return -EFAULT;
}

static long vhost_vring_set_num(struct vhost_dev *d,
				struct vhost_virtqueue *vq,
				void __user *argp)
{
	struct vhost_vring_state s;

	/* Resizing ring with an active backend?
	 * You don't want to do that. */
	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof s))
		return -EFAULT;

	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
		return -EINVAL;
	vq->num = s.num;

	return 0;
}

static long vhost_vring_set_addr(struct vhost_dev *d,
				 struct vhost_virtqueue *vq,
				 void __user *argp)
{
	struct vhost_vring_addr a;

	if (copy_from_user(&a, argp, sizeof a))
		return -EFAULT;
	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
		return -EOPNOTSUPP;

	/* For 32bit, verify that the top 32bits of the user
	   data are set to zero. */
	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
		return -EFAULT;

	/* Make sure it's safe to cast pointers to vring types. */
	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
		return -EINVAL;

	/* We only verify access here if backend is configured.
	 * If it is not, we don't as size might not have been setup.
	 * We will verify when backend is configured. */
	if (vq->private_data) {
		if (!vq_access_ok(vq, vq->num,
			(void __user *)(unsigned long)a.desc_user_addr,
			(void __user *)(unsigned long)a.avail_user_addr,
			(void __user *)(unsigned long)a.used_user_addr))
			return -EINVAL;

		/* Also validate log access for used ring if enabled. */
		if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			!log_access_ok(vq->log_base, a.log_guest_addr,
				sizeof *vq->used +
				vq->num * sizeof *vq->used->ring))
			return -EINVAL;
	}

	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
	vq->log_addr = a.log_guest_addr;
	vq->used = (void __user *)(unsigned long)a.used_user_addr;

	return 0;
}

static long vhost_vring_set_num_addr(struct vhost_dev *d,
				     struct vhost_virtqueue *vq,
				     unsigned int ioctl,
				     void __user *argp)
{
	long r;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		r = vhost_vring_set_num(d, vq, argp);
		break;
	case VHOST_SET_VRING_ADDR:
		r = vhost_vring_set_addr(d, vq, argp);
		break;
	default:
		BUG();
	}

	mutex_unlock(&vq->mutex);

	return r;
}
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, d->nvqs);
	vq = d->vqs[idx];

	if (ioctl == VHOST_SET_VRING_NUM ||
	    ioctl == VHOST_SET_VRING_ADDR) {
		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
	}

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->call_ctx);
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->error_ctx);
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
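
/*
 * Userspace sketch (illustrative only): a typical call sequence against a
 * vhost device fd before starting a backend; error handling is elided and
 * my_setup_vring() is hypothetical.
 */
#if 0
static void my_setup_vring(int vhost_fd, int kick_fd, int call_fd,
			   struct vhost_vring_addr *addr)
{
	struct vhost_vring_state state = { .index = 0, .num = 256 };
	struct vhost_vring_file file = { .index = 0 };

	ioctl(vhost_fd, VHOST_SET_OWNER);
	ioctl(vhost_fd, VHOST_SET_VRING_NUM, &state);
	state.num = 0;
	ioctl(vhost_fd, VHOST_SET_VRING_BASE, &state);
	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, addr);
	file.fd = kick_fd;
	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &file);
	file.fd = call_fd;
	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &file);
}
#endif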

int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
	struct vhost_iotlb *niotlb, *oiotlb;
	int i;

	niotlb = iotlb_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->iotlb = niotlb;
		__vhost_vq_meta_reset(vq);
		mutex_unlock(&vq->mutex);
	}

	vhost_iotlb_free(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct eventfd_ctx *ctx;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, d->log_ctx);
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	unpin_user_pages_dirty_lock(&page, 1, true);
	return 0;
}

static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
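
/*
 * Worked example (illustrative only): with VHOST_PAGE_SIZE 4096, logging a
 * one-byte write at guest address 0x3000 gives write_page = 3, so the loop
 * sets bit (3 % 8) = 3 of byte (3 / 8) = 0 in the userspace bitmap; a write
 * that crosses into 0x4000 sets page 4's bit on the next iteration.
 */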

static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
	struct vhost_iotlb *umem = vq->umem;
	struct vhost_iotlb_map *u;
	u64 start, end, l, min;
	int r;
	bool hit = false;

	while (len) {
		min = len;
		/* More than one GPAs can be mapped into a single HVA. So
		 * iterate all possible umems here to be safe.
		 */
		list_for_each_entry(u, &umem->list, link) {
			if (u->addr > hva - 1 + len ||
			    u->addr - 1 + u->size < hva)
				continue;
			start = max(u->addr, hva);
			end = min(u->addr - 1 + u->size, hva - 1 + len);
			l = end - start + 1;
			r = log_write(vq->log_base,
				      u->start + start - u->addr,
				      l);
			if (r < 0)
				return r;
			hit = true;
			min = min(l, min);
		}

		if (!hit)
			return -EFAULT;

		len -= min;
		hva += min;
	}

	return 0;
}

static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
	struct iovec iov[64];
	int i, ret;

	if (!vq->iotlb)
		return log_write(vq->log_base, vq->log_addr + used_offset, len);

	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
			     len, iov, 64, VHOST_ACCESS_WO);
	if (ret < 0)
		return ret;

	for (i = 0; i < ret; i++) {
		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
				    iov[i].iov_len);
		if (ret)
			return ret;
	}

	return 0;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len, struct iovec *iov, int count)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();

	if (vq->iotlb) {
		for (i = 0; i < count; i++) {
			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
					  iov[i].iov_len);
			if (r < 0)
				return r;
		}
		return 0;
	}

	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);

static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (vhost_put_used_flags(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
1919
	if (vhost_put_avail_event(vq))
1920 1921 1922 1923 1924 1925 1926
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data)
		return 0;

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_used_idx(vq, &last_used_idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;
err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_iotlb_map *map;
	struct vhost_dev *dev = vq->dev;
	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
		if (map == NULL || map->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(map->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = map->size - addr + map->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
				 (map->addr + addr - map->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}

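/* Worked example (hypothetical numbers): with a single map
 * { start (GPA) = 0x1000, size = 0x2000, addr (HVA) = 0x7f0000000000 },
 * translate_desc(vq, 0x1800, 0x400, iov, n, VHOST_ACCESS_RO) produces one
 * entry: iov_base = (void __user *)0x7f0000000800, iov_len = 0x400.
 * A range crossing a map boundary yields one iovec per map; a hole yields
 * -EFAULT, or -EAGAIN plus an IOTLB miss report when an iotlb is in use.
 */
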
/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
	return next;
}

static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
					ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}

/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret, access;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;

	if (vq->avail_idx == vq->last_avail_idx) {
		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2154 2155 2156 2157 2158
			vq_err(vq, "Failed to access avail idx at %p\n",
				&vq->avail->idx);
			return -EFAULT;
		}
		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
			vq_err(vq, "Guest moved avail index from %u to %u",
				last_avail_idx, vq->avail_idx);
			return -EFAULT;
		}

		/* If there's nothing new since last we looked, return
		 * invalid.
		 */
		if (vq->avail_idx == last_avail_idx)
			return vq->num;
		/* Only get avail ring entries after they have been
		 * exposed by guest.
		 */
		smp_rmb();
	}

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_get_desc(vq, &desc, i);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
						"in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
					ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from the guest are disabled at this point;
	 * if they aren't, we would need to update the avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);

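#if 0	/* Illustrative sketch (hypothetical worker, not part of this file):
	 * the typical shape of a backend built on vhost_get_vq_desc() and
	 * the helpers below.  do_backend_io() stands in for the real I/O. */
static void example_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head, len;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;		/* e.g. -EAGAIN after an IOTLB miss */
		if (head == vq->num) {
			/* Ring drained: re-enable notification, then
			 * re-check to close the race with the guest. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		len = do_backend_io(vq->iov, out, in);
		vhost_add_used_and_signal(dev, vq, head, len);
	}

	mutex_unlock(&vq->mutex);
}
#endif
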
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			    struct vring_used_elem *heads,
			    unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (vhost_put_used(vq, heads, start, count)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_used(vq, ((void __user *)used - (void __user *)vq->used),
			 count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_used_idx(vq)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure used idx is seen before log. */
		smp_wmb();
		/* Log used index update. */
		log_used(vq, offsetof(struct vring_used, idx),
			 sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);

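/* Worked example (hypothetical numbers): with vq->num = 256 and
 * last_used_idx = 250, adding count = 10 used elements wraps the ring:
 * the first __vhost_add_used_n() call writes entries 250..255, the second
 * writes entries 0..3, and only then is used->idx advanced by 10.
 */
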
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;
	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();
	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_avail_flags(vq, &flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;
	if (unlikely(!v))
		return true;
	if (vhost_get_used_event(vq, &event)) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}

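/* For reference, vring_need_event() (quoted from
 * include/uapi/linux/virtio_ring.h) is:
 *
 *	static inline int vring_need_event(__u16 event_idx, __u16 new, __u16 old)
 *	{
 *		return (__u16)(new - event_idx - 1) < (__u16)(new - old);
 *	}
 *
 * i.e. signal only if the used index has just moved past the event index
 * the guest published, with 16-bit wrap handled by unsigned arithmetic.
 * Worked example: old = 10, new = 13, event_idx = 11 gives 1 < 3, so we
 * signal; event_idx = 14 gives 0xfffe < 3 == false, so we don't.
 */
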
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest to tell them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);

/* Return true if we're sure that the available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (vq->avail_idx != vq->last_avail_idx)
		return false;

	r = vhost_get_avail_idx(vq, &avail_idx);
	if (unlikely(r))
		return false;
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = vhost_get_avail_idx(vq, &avail_idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);

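#if 0	/* Illustrative sketch (hypothetical helper): bounding a busy-poll
	 * with notifications suppressed, similar in spirit to vhost-net's
	 * busyloop.  busy_clock() and "timeout" are assumptions, not APIs
	 * defined in this file. */
static void example_busy_poll(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			      unsigned long timeout)
{
	unsigned long endtime = busy_clock() + timeout;

	vhost_disable_notify(dev, vq);
	while (vhost_vq_avail_empty(dev, vq) &&
	       !need_resched() && !time_after(busy_clock(), endtime))
		cpu_relax();
	vhost_enable_notify(dev, vq);
}
#endif
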
/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
	if (!node)
		return NULL;

	/* Make sure all padding within the structure is initialized. */
	memset(&node->msg, 0, sizeof node->msg);
	node->vq = vq;
	node->msg.type = type;
	return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);

void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);

struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{
	struct vhost_msg_node *node = NULL;

	spin_lock(&dev->iotlb_lock);
	if (!list_empty(head)) {
		node = list_first_entry(head, struct vhost_msg_node,
					node);
		list_del(&node->node);
	}
	spin_unlock(&dev->iotlb_lock);

	return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);


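#if 0	/* Illustrative sketch (hypothetical helper): how an IOTLB miss can
	 * be reported to userspace with the message helpers above; compare
	 * vhost_iotlb_miss() earlier in this file.  Userspace reads the
	 * message from the vhost chardev and replies with an update. */
static int example_report_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MSG);

	if (!node)
		return -ENOMEM;

	node->msg.iotlb.type = VHOST_IOTLB_MISS;
	node->msg.iotlb.iova = iova;
	node->msg.iotlb.perm = access;

	/* vhost_enqueue_msg() wakes any reader blocked on dev->wait. */
	vhost_enqueue_msg(dev, &dev->read_list, node);
	return 0;
}
#endif
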
static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");