/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}

/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	/* kmemleak gives a false positive, as it's hidden by virt_to_phys */
	kmemleak_ignore(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
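
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * queuing a request with one readable header and one writable status
 * byte might build its sg lists like this; req and its fields are
 * illustrative names only.
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[2] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC) == 0)
 *		virtqueue_kick(vq);
 */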

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
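
/*
 * Usage sketch (hypothetical caller): unlike virtqueue_add_sgs(), these
 * helpers walk @sg by entry count rather than termination marker.  A
 * receive path refilling the ring with a single-entry buffer might do:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *		virtqueue_kick(vq);
 */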

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
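
/*
 * Usage sketch (hypothetical caller): the split form lets a driver drop
 * its lock before the potentially expensive notification (often a VM
 * exit); priv->lock is an illustrative name.
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */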

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
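
/*
 * Usage sketch (hypothetical caller): completion processing typically
 * drains the queue from the virtqueue callback, under the same lock as
 * the add path; complete_request() stands in for driver-specific
 * handling of the returned token.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(buf, len);
 */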

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
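
/*
 * Usage sketch (hypothetical caller): a polling driver can re-enable
 * callbacks and then close the race window before going back to sleep:
 *
 *	unsigned last = virtqueue_enable_cb_prepare(vq);
 *
 *	if (unlikely(virtqueue_poll(vq, last))) {
 *		virtqueue_disable_cb(vq);
 *		... more buffers arrived, keep processing ...
 *	}
 */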

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
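
/*
 * Usage sketch (hypothetical caller): the usual interrupt-driven pattern
 * disables callbacks while draining and only stops once enabling them
 * reports no race:
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */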

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
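
/*
 * Usage sketch (hypothetical caller): on device removal, after the
 * device has been reset so the queue is no longer active, leftover
 * buffers can be reclaimed before deleting the virtqueue:
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */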

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
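
/*
 * Usage sketch (hypothetical transport): the caller allocates the ring
 * pages itself and tells the device where they are before wrapping them
 * here; info and vp_notify are illustrative names.
 *
 *	info->queue = alloc_pages_exact(vring_size(num, VIRTIO_PCI_VRING_ALIGN),
 *					GFP_KERNEL | __GFP_ZERO);
 *	vq = vring_new_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, vdev,
 *				 true, info->queue, vp_notify, callback, name);
 */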

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");