/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}

/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg entries readable by the other side
 * @in: the number of sg entries which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct scatterlist *sgs[2];

	sgs[0] = sg;
	sgs[1] = sg + out;

	return virtqueue_add(_vq, sgs, sg_next_arr,
			     out, in, out ? 1 : 0, in ? 1 : 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
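
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * queueing one device-readable and one device-writable buffer through a
 * single scatterlist array.  "example_add_buf", "req", "resp" and "token"
 * are hypothetical names; sg_init_table()/sg_set_buf() come from
 * <linux/scatterlist.h>, which this file already relies on for sg_next().
 */
static int example_add_buf(struct virtqueue *vq,
			   void *req, size_t req_len,
			   void *resp, size_t resp_len, void *token)
{
	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], req, req_len);	/* readable by the other side */
	sg_set_buf(&sg[1], resp, resp_len);	/* writable by the other side */

	/* 1 out entry followed by 1 in entry. */
	return virtqueue_add_buf(vq, sg, 1, 1, token, GFP_KERNEL);
}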

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by the other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
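
/*
 * Usage sketch (illustrative only): a request split across three
 * scatterlists - an out header, an in payload and an in status byte -
 * in the style of virtio-blk.  All names here are hypothetical.
 */
static int example_add_sgs(struct virtqueue *vq,
			   void *hdr, size_t hdr_len,
			   void *payload, size_t payload_len,
			   u8 *status, void *token)
{
	struct scatterlist hdr_sg, payload_sg, status_sg;
	struct scatterlist *sgs[] = { &hdr_sg, &payload_sg, &status_sg };

	sg_init_one(&hdr_sg, hdr, hdr_len);
	sg_init_one(&payload_sg, payload, payload_len);
	sg_init_one(&status_sg, status, sizeof(*status));

	/* 1 readable (out) scatterlist, then 2 writable (in) ones. */
	return virtqueue_add_sgs(vq, sgs, 1, 2, token, GFP_ATOMIC);
}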

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
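
/*
 * Usage sketch (illustrative only): doing the serialized half under the
 * driver's lock and the notification - often an expensive exit to the
 * host - outside it.  "lock" is a hypothetical per-virtqueue spinlock.
 */
static void example_add_and_kick(struct virtqueue *vq, spinlock_t *lock,
				 struct scatterlist *sg, void *token)
{
	unsigned long flags;
	bool notify = false;

	spin_lock_irqsave(lock, flags);
	if (virtqueue_add_buf(vq, sg, 1, 0, token, GFP_ATOMIC) >= 0)
		notify = virtqueue_kick_prepare(vq);
	spin_unlock_irqrestore(lock, flags);

	if (notify)
		virtqueue_notify(vq);
}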

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
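
/*
 * Usage sketch (illustrative only): draining completed buffers, e.g. from
 * a virtqueue callback.  Each returned pointer is the "data" token that
 * was handed to virtqueue_add_buf()/virtqueue_add_sgs().
 */
static void example_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		pr_debug("token %p done, device wrote %u bytes\n", token, len);
}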

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq->weak_barriers);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
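
/*
 * Usage sketch (illustrative only): the canonical race-free processing
 * loop.  If virtqueue_enable_cb() returns false, a buffer arrived while
 * callbacks were off, so we go around again instead of losing it.
 */
static void example_poll(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	virtqueue_disable_cb(vq);
	do {
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			; /* process "token" here */
	} while (!virtqueue_enable_cb(vq));
}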

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
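
/*
 * Usage sketch (illustrative only): batching transmit completions.  After
 * reclaiming sent buffers, ask to be interrupted only once about 3/4 of
 * the outstanding buffers are used, instead of on the very next one.
 */
static bool example_tx_cleanup(struct virtqueue *vq)
{
	unsigned int len;

	while (virtqueue_get_buf(vq, &len))
		; /* release the transmitted buffer here */

	/* false: many completions already pending, caller should re-poll. */
	return virtqueue_enable_cb_delayed(vq);
}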

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
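
/*
 * Usage sketch (illustrative only): reclaiming unconsumed buffers when
 * tearing a device down, before deleting the queue.  Assumes the tokens
 * are pointers to kmalloc'ed buffers, which is purely hypothetical.
 */
static void example_shutdown_drain(struct virtqueue *vq)
{
	void *token;

	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(token);
}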

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
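
/*
 * Usage sketch (illustrative only): how a transport might allocate ring
 * memory and create a queue.  Assumes alloc_pages_exact() from
 * <linux/mm.h> is available; sharing the pages with the host is
 * transport-specific and omitted here.
 */
static struct virtqueue *example_new_vq(struct virtio_device *vdev,
					void (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *))
{
	unsigned int num = 256;	/* ring size: must be a power of 2 */
	void *pages;

	pages = alloc_pages_exact(vring_size(num, PAGE_SIZE),
				  GFP_KERNEL | __GFP_ZERO);
	if (!pages)
		return NULL;

	return vring_new_virtqueue(0, num, PAGE_SIZE, vdev,
				   true /* weak barriers */, pages,
				   notify, callback, "example-vq");
}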

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");