virtio_ring.c 33.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
21
#include <linux/virtio_config.h>
22
#include <linux/device.h>
23
#include <linux/slab.h>
24
#include <linux/module.h>
25
#include <linux/hrtimer.h>
26
#include <linux/kmemleak.h>
A
Andy Lutomirski 已提交
27
#include <linux/dma-mapping.h>
A
Andy Lutomirski 已提交
28
#include <xen/xen.h>
29 30 31

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
/* Mark the virtqueue as no longer in use; BUGs on unbalanced START/END. */
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
/* Production build: report the error and mark the ring broken instead of
 * crashing, so the rest of the system can keep running. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

A
Andy Lutomirski 已提交
59 60 61 62 63
/* Per-descriptor bookkeeping kept outside the ring itself, indexed by
 * descriptor-chain head. */
struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

64
/*
 * Driver-side state wrapped around the public struct virtqueue.
 * Obtain it from a struct virtqueue pointer with to_vvq().
 */
struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state (flexible array, vring.num entries). */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
147 148 149 150
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
A
Andy Lutomirski 已提交
151 152 153 154 155 156 157 158 159 160 161
	/*
	 * In theory, it's possible to have a buggy QEMU-supposed
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

162 163 164
	return false;
}

A
Andy Lutomirski 已提交
165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
/* Marked static: this helper is file-local and has no external users. */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry for device access in the given direction.
 * Returns the bus address to place in a descriptor. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	/* Bypassing the DMA API: bus address == physical address. */
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

/* Map a CPU-contiguous buffer (e.g. an indirect descriptor table).
 * Returns the bus address to place in a descriptor. */
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	/* Bypassing the DMA API: bus address == physical address. */
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

/* Check a bus address returned by one of the map helpers for failure. */
static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	/* When the DMA API is in use, defer to it for the error check. */
	if (vring_use_dma_api(vq->vq.vdev))
		return dma_mapping_error(vring_dma_dev(vq), addr);

	/* Without the DMA API a mapping can never fail. */
	return 0;
}

238 239
/*
 * Allocate an indirect descriptor table with room for total_sg entries
 * and pre-chain the 'next' links (entry i points at i + 1).
 *
 * Returns NULL on allocation failure; callers fall back to using the
 * ring's own descriptor table.
 */
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	/* kmalloc_array() checks total_sg * sizeof() for overflow. */
	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

260 261
/*
 * Core add path shared by all virtqueue_add_* wrappers.
 *
 * Maps each scatterlist entry, builds a descriptor chain (using an
 * indirect table when the host supports it and there is more than one
 * entry), publishes the chain head in the available ring, and bumps
 * avail->idx.  The device is not notified here; callers use
 * virtqueue_kick() for that.
 *
 * Returns 0, -EIO (broken ring or mapping failure) or -ENOSPC (no free
 * descriptors).  Caller must serialize against other virtqueue ops.
 */
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		/* Direct mode (or indirect allocation failed): chain
		 * through the ring's own descriptor table. */
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	/* Device-readable (out) entries first, per the virtio spec. */
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Then device-writable (in) entries, flagged VRING_DESC_F_WRITE. */
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	/*
	 * Mapping failed: walk the chain we built so far (head up to,
	 * but not including, the failing entry) and unmap each one.
	 *
	 * NOTE(review): several points here look suspect — confirm
	 * against upstream fixes:
	 *  - 'i = vq->vring.desc[i].next' reads the ring's descriptor
	 *    table even in indirect mode (the chain lives in desc[]),
	 *    and skips the virtio16_to_cpu() byte-swap used elsewhere;
	 *  - num_free is incremented by total_sg although it was never
	 *    decremented on any path that reaches this label;
	 *  - there is no END_USE(vq) before returning, unbalancing
	 *    START_USE in DEBUG builds.
	 */
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = vq->vring.desc[i].next;
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	return -EIO;
}
433 434 435 436 437 438 439 440 441 442 443 444 445

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int total_sg = 0;
	unsigned int n;

	/* Count the entries across every scatterlist before adding. */
	for (n = 0; n < out_sgs + in_sgs; n++) {
		struct scatterlist *sg = sgs[n];

		while (sg) {
			total_sg++;
			sg = sg_next(sg);
		}
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

467 468 469
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	/* Single device-readable scatterlist: one out_sgs, no in_sgs. */
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	/* Single device-writable scatterlist: no out_sgs, one in_sgs. */
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

511
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	/* Window of avail indices published since the last kick. */
	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		/* Event-index mode: kick only if the device's avail event
		 * falls inside [old, new). */
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		/* Flag mode: kick unless the device asked not to be notified. */
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
561 562
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
563
 */
564
bool virtqueue_notify(struct virtqueue *_vq)
565 566 567
{
	struct vring_virtqueue *vq = to_vvq(_vq);

568 569 570
	if (unlikely(vq->broken))
		return false;

571
	/* Prod other side to tell it about changes. */
572
	if (!vq->notify(_vq)) {
573 574 575 576
		vq->broken = true;
		return false;
	}
	return true;
577 578 579 580 581 582 583
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
584
 * After one or more virtqueue_add_* calls, invoke this to kick
585 586 587 588
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
589 590
 *
 * Returns false if kick failed, otherwise true.
591
 */
592
bool virtqueue_kick(struct virtqueue *vq)
593 594
{
	if (virtqueue_kick_prepare(vq))
595 596
		return virtqueue_notify(vq);
	return true;
597
}
598
EXPORT_SYMBOL_GPL(virtqueue_kick);
599 600 601

/*
 * Reclaim the descriptor chain starting at @head: unmap every
 * descriptor, splice the chain back onto the free list, and free the
 * indirect table if one was used.  Caller must serialize.
 */
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	/* Unmap the tail descriptor and link the whole chain in front of
	 * the current free list. */
	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}

/* Has the device published used entries we have not consumed yet? */
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

646 647 648 649 650 651 652 653 654 655 656 657 658 659
/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the driver wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	/* Sanity-check the device-supplied id before using it as an index. */
	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
718

719 720 721 722 723 724 725 726 727
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Only touch the ring if the shadow says the flag isn't set yet. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}

}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
739

740
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Full barrier: order the read of used->idx against whatever the
	 * caller did before polling (e.g. re-enabling callbacks). */
	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
791

792 793 794 795 796 797 798 799 800 801 802 803 804 805 806
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	/* Re-enable callbacks, then report "no work pending" only if
	 * nothing arrived in the meantime. */
	unsigned opaque = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, opaque);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
809

810 811 812 813 814 815 816 817 818 819 820 821 822
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	/* Ask for an interrupt only after ~3/4 of outstanding buffers
	 * have been used. */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	/* Device already passed the event point?  Tell the caller. */
	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

856 857 858 859
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	/* Scan the per-descriptor state for any still-pending token. */
	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i);
		/* Roll back the avail index we published for this buffer. */
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
890

891 892 893 894 895 896 897 898 899 900 901 902 903
/* IRQ handler: dispatch a device interrupt to the virtqueue's callback. */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	/* A broken queue still "owns" the interrupt, but does no work. */
	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
910

911 912 913 914 915 916 917
/*
 * Wrap an already-allocated vring in a new struct virtqueue.  The
 * caller owns the ring memory (we_own_ring = false); this only
 * allocates the bookkeeping structure.  Returns NULL on allocation
 * failure.
 */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	/* One vring_desc_state per descriptor, in the flexible array. */
	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

/*
 * Allocate memory for a ring: through the DMA API when the device needs
 * it, otherwise straight from the page allocator with the physical
 * address reported as the "DMA" handle.
 */
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	void *queue;
	phys_addr_t phys_addr;

	if (vring_use_dma_api(vdev))
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);

	queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
	if (!queue)
		return NULL;

	phys_addr = virt_to_phys(queue);
	*dma_handle = (dma_addr_t)phys_addr;

	/*
	 * Sanity check: make sure we didn't truncate the address.  The
	 * only arches I can find that have 64-bit phys_addr_t but
	 * 32-bit dma_addr_t are certain non-highmem MIPS and x86
	 * configurations, but these configurations should never
	 * allocate physical pages above 32 bits, so this is fine.
	 * Just in case, throw a warning and abort if we end up with an
	 * unrepresentable address.
	 */
	if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
		free_pages_exact(queue, PAGE_ALIGN(size));
		return NULL;
	}

	return queue;
}

/* Release ring memory obtained from vring_alloc_queue(). */
static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (!vring_use_dma_api(vdev)) {
		free_pages_exact(queue, PAGE_ALIGN(size));
		return;
	}

	dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
}

/*
 * vring_create_virtqueue - allocate ring memory and create a virtqueue
 * @index: index of this queue on the device
 * @num: requested number of descriptors (must be a power of 2)
 * @vring_align: alignment required by the transport
 * @vdev: the virtio device owning the queue
 * @weak_barriers: true if we can use weak memory barriers
 * @may_reduce_num: true if the caller tolerates a smaller ring when a
 *	contiguous allocation of the requested size fails
 * @notify: transport hook to kick the other side
 * @callback: driver callback for used buffers (may be NULL)
 * @name: queue name for diagnostics
 *
 * Returns the new virtqueue (which owns its ring memory), or NULL on
 * failure.
 */
struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		/*
		 * Fix: honour may_reduce_num.  Previously the loop kept
		 * halving num unconditionally, handing transports that
		 * cannot renegotiate the queue size a silently shrunken
		 * ring.
		 */
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				   notify, callback, name);
	if (!vq) {
		/* Don't leak the ring pages if the wrapper allocation failed. */
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	/* Record ownership so vring_del_virtqueue() frees the ring for us. */
	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);

/*
 * Legacy entry point: build a virtqueue over ring memory the caller has
 * already allocated in @pages.  Simply lays out the vring and delegates
 * to __vring_new_virtqueue(); the caller retains ownership of @pages.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

1089
void vring_del_virtqueue(struct virtqueue *_vq)
1090
{
1091 1092 1093 1094 1095 1096 1097 1098
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
1099
}
1100
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
1101

1102 1103 1104 1105 1106 1107 1108
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
1109 1110
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
1111 1112
		case VIRTIO_RING_F_EVENT_IDX:
			break;
1113 1114
		case VIRTIO_F_VERSION_1:
			break;
1115 1116
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
1117 1118
		default:
			/* We don't understand this bit. */
1119
			__virtio_clear_bit(vdev, i);
1120 1121 1122 1123 1124
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

1125 1126 1127 1128 1129 1130 1131
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
R
Rick Jones 已提交
1132 1133 1134 1135 1136 1137 1138 1139 1140
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{

	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

1141 1142 1143 1144 1145 1146 1147 1148
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163
/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

1164
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
1165 1166 1167
{
	struct vring_virtqueue *vq = to_vvq(_vq);

1168 1169 1170
	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
1171
}
1172
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
1173

1174
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
1175 1176 1177
{
	struct vring_virtqueue *vq = to_vvq(_vq);

1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198
	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

/*
 * DMA address of the used ring, derived from its offset within the
 * contiguous queue allocation.  Only valid for rings we own.
 */
dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	long used_offset;

	BUG_ON(!vq->we_own_ring);

	used_offset = (char *)vq->vring.used - (char *)vq->vring.desc;
	return vq->queue_dma_addr + used_offset;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

/* Expose the raw vring layout, e.g. for transports that need direct access. */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
MODULE_LICENSE("GPL");