/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

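/* Helpers for virtqueue_add(): walk the caller's scatterlists either as a
 * chained list (sg_next_chained(): the chain terminates itself, so the
 * remaining-entry count is unused) or as a bare array (sg_next_arr():
 * stop once the remaining count reaches zero). */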
static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}

/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	/* kmemleak gives a false positive, as it's hidden by virt_to_phys */
	kmemleak_ignore(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
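
/*
 * Example (an illustrative sketch, not part of this file's API): queue one
 * device-readable request followed by one device-writable status byte in a
 * single virtqueue_add_sgs() call.  The names "my_add_request" and "req" are
 * hypothetical, and error handling is trimmed.
 */
static int my_add_request(struct virtqueue *vq, void *req, size_t req_len,
			  u8 *status)
{
	struct scatterlist hdr, stat;
	struct scatterlist *sgs[2];

	sg_init_one(&hdr, req, req_len);
	sg_init_one(&stat, status, sizeof(*status));
	sgs[0] = &hdr;	/* readable by the other side */
	sgs[1] = &stat;	/* writable by the other side */

	/* req doubles as the token later returned by virtqueue_get_buf(). */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_KERNEL);
}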

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
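
/*
 * Example (an illustrative sketch with hypothetical names): post a receive
 * buffer with virtqueue_add_inbuf() and kick the host, the usual pattern
 * for refilling an rx ring.
 */
static int my_post_rx_buf(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);
	/* buf doubles as the token we will get back from virtqueue_get_buf(). */
	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vq);
	return err;
}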

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs to be
 * serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
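
/*
 * Example (an illustrative sketch; the lock is hypothetical): drivers that
 * serialize virtqueue operations with a lock can run only the prepare step
 * under that lock and issue the potentially slow notification outside it.
 */
static void my_locked_kick(struct virtqueue *vq, spinlock_t *lock)
{
	bool notify;

	spin_lock(lock);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	if (notify)
		virtqueue_notify(vq);
}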

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
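
/*
 * Example (an illustrative sketch): drain all completed buffers, typically
 * from the virtqueue callback.  my_complete() is a hypothetical stand-in
 * for whatever the driver does with a finished request token.
 */
static void my_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		my_complete(token, len);	/* hypothetical driver hook */
}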

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
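
/*
 * Example (an illustrative sketch with hypothetical names): the two-step
 * re-enable that a poll-driven (NAPI-style) driver can use.  Returns true
 * if callbacks were re-enabled with no work pending.
 */
static bool my_try_reenable(struct virtqueue *vq)
{
	unsigned last_used = virtqueue_enable_cb_prepare(vq);

	if (virtqueue_poll(vq, last_used)) {
		/* Raced with the device: keep callbacks off and poll again. */
		virtqueue_disable_cb(vq);
		return false;
	}
	return true;
}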

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
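
/*
 * Example (an illustrative sketch): the classic race-free callback loop
 * built on virtqueue_disable_cb()/virtqueue_enable_cb().  my_complete() is
 * a hypothetical driver hook.
 */
static void my_vq_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	do {
		virtqueue_disable_cb(vq);
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			my_complete(token, len);
	} while (!virtqueue_enable_cb(vq));	/* false: new buffers raced in */
}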

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
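
/*
 * Example (an illustrative sketch with hypothetical names): reclaim
 * transmit buffers but ask for an interrupt only once most of them are
 * used, as a network driver might on its tx ring.
 */
static void my_tx_reclaim(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		my_free_tx(token);		/* hypothetical */

	if (!virtqueue_enable_cb_delayed(vq))
		my_poll_again(vq);		/* hypothetical: work already pending */
}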

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
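
/*
 * Example (an illustrative sketch, assuming the buffers were kmalloc'ed
 * and used as their own tokens): free everything still in flight at
 * device shutdown.
 */
static void my_free_unused(struct virtqueue *vq)
{
	void *buf;

	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);
}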

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
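
/*
 * Example (an illustrative sketch of a hypothetical transport): allocate
 * the ring memory and wire up a virtqueue.  A real transport must
 * additionally tell the host where "pages" lives and supply a working
 * notify hook.
 */
static struct virtqueue *my_setup_vq(struct virtio_device *vdev,
				     bool (*notify)(struct virtqueue *),
				     void (*callback)(struct virtqueue *))
{
	unsigned int num = 256;		/* must be a power of 2 */
	size_t bytes = vring_size(num, PAGE_SIZE);
	void *pages;
	struct virtqueue *vq;

	pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
	if (!pages)
		return NULL;

	vq = vring_new_virtqueue(0, num, PAGE_SIZE, vdev,
				 true /* weak_barriers */, pages,
				 notify, callback, "my-vq");
	if (!vq)
		free_pages((unsigned long)pages, get_order(bytes));
	return vq;
}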

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

MODULE_LICENSE("GPL");