/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}

/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
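
/*
 * Illustrative sketch (not part of this file): a driver queuing one
 * device-readable and one device-writable buffer with virtqueue_add_sgs().
 * The request structure and its fields are hypothetical.
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[2] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC) < 0)
 *		return -ENOSPC;
 *	virtqueue_kick(vq);
 */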

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlists (need not be terminated!)
 * @num: the number of scatterlists readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlists (need not be terminated!)
 * @num: the number of scatterlists writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
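
/*
 * Illustrative sketch (not part of this file): posting a receive buffer
 * with virtqueue_add_inbuf().  Because sg_next_arr() walks the array by
 * count, the scatterlist need not be end-marked.  "buf" and "buf_len"
 * are hypothetical driver state.
 *
 *	struct scatterlist sg[1];
 *
 *	sg_init_one(sg, buf, buf_len);
 *	err = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_KERNEL);
 */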

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
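
/*
 * Illustrative sketch (not part of this file): the split kick lets a
 * driver drop its own lock before the (possibly expensive) notify.
 * "priv->lock" and "flags" are hypothetical driver state.
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_outbuf(vq, sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */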

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
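
/*
 * Illustrative sketch (not part of this file): draining used buffers,
 * e.g. from a virtqueue callback.  "handle_buffer" is a hypothetical
 * driver function.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_buffer(buf, len);
 */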

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
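
/*
 * Illustrative sketch (not part of this file): the prepare/poll pair
 * closes the race between the driver deciding there is no more work
 * and callbacks being re-enabled.
 *
 *	unsigned idx = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, idx)) {
 *		virtqueue_disable_cb(vq);
 *		... more used buffers arrived in the window; process them ...
 *	}
 */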

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
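
/*
 * Illustrative sketch (not part of this file): the classic re-check loop
 * around virtqueue_enable_cb() in a driver's callback handler.
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			handle_buffer(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */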

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
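
/*
 * Illustrative sketch (not part of this file): reclaiming tokens during
 * device teardown, after the device has been reset so the queue is no
 * longer active.
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */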

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
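
/*
 * Illustrative sketch (not part of this file): a transport creating a
 * 256-entry queue on page-aligned memory it has already allocated and
 * zeroed.  "pages" and "vp_notify" are hypothetical transport state.
 *
 *	vq = vring_new_virtqueue(0, 256, PAGE_SIZE, vdev, true, pages,
 *				 vp_notify, callback, "requests");
 *	if (!vq)
 *		goto out_free_pages;
 */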

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");