/*
 *	Definitions for the 'struct ptr_ring' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	This is a limited-size FIFO maintaining pointers in FIFO order, with
 *	one CPU producing entries and another consuming them.
 *
 *	This implementation tries to minimize cache-contention when there is a
 *	single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif

struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
	int consumer_tail; /* next entry to invalidate */
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	int batch; /* number of entries to consume in a batch */
	void **queue;
};

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().
 *
 * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
 * see e.g. ptr_ring_full.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}
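
/* Example (illustrative sketch, not part of this API): a lone producer in
 * process context checking for space before doing expensive work.  With a
 * single producer the ring cannot fill up between the check and the produce
 * call, so the hypothetical example_alloc() is never wasted.
 *
 *	if (ptr_ring_full(r))
 *		return -EBUSY;
 *	obj = example_alloc();
 *	if (!obj)
 *		return -ENOMEM;
 *	err = ptr_ring_produce(r, obj);
 *
 * The produce cannot return -ENOSPC here because we are the only producer
 * and the full check already passed.
 */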

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 * Callers are responsible for making sure the pointer that is being
 * queued points to valid data.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (unlikely(!r->size) || r->queue[r->producer])
		return -ENOSPC;

	/* Make sure the pointer we are storing points to valid data. */
	/* Pairs with the READ_ONCE in __ptr_ring_consume (via __ptr_ring_peek). */
	smp_wmb();

	WRITE_ONCE(r->queue[r->producer++], ptr);
	if (unlikely(r->producer >= r->size))
		r->producer = 0;
	return 0;
}
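
/* Example (illustrative sketch): pushing a burst of pointers while taking
 * producer_lock only once instead of calling ptr_ring_produce() per entry.
 * Assumes process context with no interrupt/BH users of this ring; "objs"
 * and "n" are made-up names for this sketch.
 *
 *	spin_lock(&r->producer_lock);
 *	for (i = 0; i < n; i++)
 *		if (__ptr_ring_produce(r, objs[i]))
 *			break;
 *	spin_unlock(&r->producer_lock);
 *
 * A non-zero return (-ENOSPC) means the ring is full and objs[i] was not
 * queued; the caller still owns it.
 */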

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * consume in interrupt or BH context, you must disable interrupts/BH when
 * calling this.
 */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	if (likely(r->size))
		return READ_ONCE(r->queue[r->consumer_head]);
	return NULL;
}

/*
 * Test ring empty status without taking any locks.
 *
 * NB: This is only safe to call if the ring is never resized.
 *
 * However, if some other CPU consumes ring entries at the same time, the value
 * returned is not guaranteed to be correct.
 *
 * In this case - to avoid incorrectly detecting the ring
 * as empty - the CPU consuming the ring entries is responsible
 * for either consuming all ring entries until the ring is empty,
 * or synchronizing with some other CPU and causing it to
 * re-test __ptr_ring_empty and/or consume the ring entries
 * after the synchronization point.
 *
 * Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().
 */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	if (likely(r->size))
		return !r->queue[READ_ONCE(r->consumer_head)];
	return true;
}
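
/* Example (illustrative sketch): a lone consumer thread draining the ring.
 * Because this thread is the only consumer and the ring is never resized
 * concurrently, the lockless test above is safe here; example_handle() is
 * a hypothetical callback.
 *
 *	while (!__ptr_ring_empty(r)) {
 *		void *obj = ptr_ring_consume(r);
 *
 *		if (obj)
 *			example_handle(obj);
 *	}
 */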

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	/* Fundamentally, what we want to do is update consumer
	 * index and zero out the entry so producer can reuse it.
	 * Doing it naively at each consume would be as simple as:
	 *       consumer = r->consumer;
	 *       r->queue[consumer++] = NULL;
	 *       if (unlikely(consumer >= r->size))
	 *               consumer = 0;
	 *       r->consumer = consumer;
	 * but that is suboptimal when the ring is full as producer is writing
	 * out new entries in the same cache line.  Defer these updates until a
	 * batch of entries has been consumed.
	 */
	/* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
	 * to work correctly.
	 */
	int consumer_head = r->consumer_head;
	int head = consumer_head++;

	/* Once we have processed enough entries invalidate them in
	 * the ring all at once so producer can reuse their space in the ring.
	 * We also do this when we reach end of the ring - not mandatory
	 * but helps keep the implementation simple.
	 */
	if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
		     consumer_head >= r->size)) {
		/* Zero out entries in reverse order: this way we touch the
		 * cache line that the producer might currently be reading last;
		 * the producer won't make progress and touch other cache lines
		 * besides the first one until we have written out all entries.
		 */
		while (likely(head >= r->consumer_tail))
			r->queue[head--] = NULL;
		r->consumer_tail = consumer_head;
	}
	if (unlikely(consumer_head >= r->size)) {
		consumer_head = 0;
		r->consumer_tail = 0;
	}
	/* matching READ_ONCE in __ptr_ring_empty for lockless tests */
	WRITE_ONCE(r->consumer_head, consumer_head);
}

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	/* The READ_ONCE in __ptr_ring_peek guarantees that anyone
	 * accessing data through the pointer is up to date. Pairs
	 * with smp_wmb in __ptr_ring_produce.
	 */
	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	return ptr;
}

static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
					     void **array, int n)
{
	void *ptr;
	int i;

	for (i = 0; i < n; i++) {
		ptr = __ptr_ring_consume(r);
		if (!ptr)
			break;
		array[i] = ptr;
	}

	return i;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * call this in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}

static inline int ptr_ring_consume_batched(struct ptr_ring *r,
					   void **array, int n)
{
	int ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r,
					       void **array, int n)
{
	int ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline int ptr_ring_consume_batched_any(struct ptr_ring *r,
					       void **array, int n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
					      void **array, int n)
{
	int ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}
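
/* Example (illustrative sketch): pulling up to a fixed-size batch per pass
 * to amortize the consumer_lock cost.  The batch size and example_handle()
 * are assumptions of this sketch.
 *
 *	void *batch[16];
 *	int i, n;
 *
 *	n = ptr_ring_consume_batched(r, batch, 16);
 *	for (i = 0; i < n; i++)
 *		example_handle(batch[i]);
 */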

/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f;\
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})
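
/* Example (illustrative sketch): report the length of the skb at the head
 * of a ring assumed to hold struct sk_buff pointers, without dequeueing it.
 * example_peek_len() is a hypothetical helper; it must cope with NULL since
 * __ptr_ring_peek() returns NULL when the ring is empty.
 *
 *	static int example_peek_len(void *ptr)
 *	{
 *		return ptr ? ((struct sk_buff *)ptr)->len : 0;
 *	}
 *
 *	int len = PTR_RING_PEEK_CALL(r, example_peek_len);
 */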

/* Not all gfp_t flags (besides GFP_KERNEL) are allowed here. See the
 * vmalloc documentation for which of them are legal.
 */
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
	if (size > KMALLOC_MAX_SIZE / sizeof(void *))
		return NULL;
	return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
}

static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
{
	r->size = size;
	r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
	/* We need to set batch to at least 1 to make the logic
	 * in __ptr_ring_discard_one work correctly.
	 * Batching too much (because the ring is small) would cause a lot of
	 * burstiness. This needs tuning; for now, fall back to a batch of 1
	 * (i.e. no batching) for small rings.
	 */
	if (r->batch > r->size / 2 || !r->batch)
		r->batch = 1;
}
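
/* Worked example: with 64-byte cache lines and 8-byte pointers the batch
 * computed above is 64 * 2 / 8 = 16, i.e. the consumer invalidates 16
 * entries at a time; for rings smaller than 32 entries it is clamped
 * back to 1.
 */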

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	__ptr_ring_set_size(r, size);
	r->producer = r->consumer_head = r->consumer_tail = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}
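
/* Example (illustrative sketch): lifecycle of a ring carrying kmalloc'd
 * objects, produced on one CPU and consumed on another, both in process
 * context.  example_destroy() is a hypothetical wrapper around kfree()
 * matching the destroy callback type; some_obj stands for a caller
 * allocated object.
 *
 *	static void example_destroy(void *ptr)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	struct ptr_ring ring;
 *	void *obj;
 *
 *	if (ptr_ring_init(&ring, 64, GFP_KERNEL))
 *		return -ENOMEM;
 *
 * On the producer CPU (freeing the object ourselves if the ring is full):
 *
 *	if (ptr_ring_produce(&ring, some_obj))
 *		example_destroy(some_obj);
 *
 * On the consumer CPU:
 *
 *	while ((obj = ptr_ring_consume(&ring)))
 *		example_destroy(obj);
 *
 * On teardown, once both sides have stopped:
 *
 *	ptr_ring_cleanup(&ring, example_destroy);
 */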

/*
 * Return entries into ring. Destroy entries that don't fit.
 *
 * Note: this is expected to be a rare slow path operation.
 *
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
				      void (*destroy)(void *))
{
	unsigned long flags;
	int head;

	spin_lock_irqsave(&r->consumer_lock, flags);
	spin_lock(&r->producer_lock);

	if (!r->size)
		goto done;

	/*
	 * Clean out buffered entries (for simplicity). This way the following
	 * code can test entries for NULL and, if not NULL, assume they are valid.
	 */
	head = r->consumer_head - 1;
	while (likely(head >= r->consumer_tail))
		r->queue[head--] = NULL;
	r->consumer_tail = r->consumer_head;

	/*
	 * Go over entries in batch, start moving head back and copy entries.
	 * Stop when we run into previously unconsumed entries.
	 */
	while (n) {
		head = r->consumer_head - 1;
		if (head < 0)
			head = r->size - 1;
		if (r->queue[head]) {
			/* This batch entry will have to be destroyed. */
			goto done;
		}
		r->queue[head] = batch[--n];
		r->consumer_tail = head;
		/* matching READ_ONCE in __ptr_ring_empty for lockless tests */
		WRITE_ONCE(r->consumer_head, head);
	}

done:
	/* Destroy all entries left in the batch. */
	while (n)
		destroy(batch[--n]);
	spin_unlock(&r->producer_lock);
	spin_unlock_irqrestore(&r->consumer_lock, flags);
}
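
/* Example (illustrative sketch): a consumer pulls a batch, stops part way
 * through, and returns the unprocessed tail to the ring so later consumers
 * still see the entries in FIFO order.  The batch size, example_process()
 * and example_destroy() are assumptions of this sketch; entries that can no
 * longer fit (e.g. because producers refilled the ring meanwhile) are passed
 * to example_destroy().
 *
 *	void *batch[16];
 *	int n, done;
 *
 *	n = ptr_ring_consume_batched(r, batch, 16);
 *	done = example_process(batch, n);
 *	if (done < n)
 *		ptr_ring_unconsume(r, batch + done, n - done, example_destroy);
 */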

static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	int producer = 0;
	void **old;
	void *ptr;

	while ((ptr = __ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	if (producer >= size)
		producer = 0;
	__ptr_ring_set_size(r, size);
	r->producer = producer;
	r->consumer_head = 0;
	r->consumer_tail = 0;
	old = r->queue;
	r->queue = queue;

	return old;
}

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&(r)->consumer_lock, flags);
	spin_lock(&(r)->producer_lock);

	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

	spin_unlock(&(r)->producer_lock);
	spin_unlock_irqrestore(&(r)->consumer_lock, flags);

	kvfree(old);

	return 0;
}
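
/* Example (illustrative sketch): grow a ring to 256 entries from process
 * context; example_destroy() is the hypothetical callback from the sketches
 * above and is only invoked for entries that no longer fit.
 *
 *	if (ptr_ring_resize(r, 256, GFP_KERNEL, example_destroy))
 *		return -ENOMEM;
 *
 * On allocation failure the old queue is left untouched.
 */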

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
					   unsigned int nrings,
					   int size,
					   gfp_t gfp, void (*destroy)(void *))
{
	unsigned long flags;
	void ***queues;
	int i;

	queues = kmalloc_array(nrings, sizeof(*queues), gfp);
	if (!queues)
		goto noqueues;

	for (i = 0; i < nrings; ++i) {
		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
		if (!queues[i])
			goto nomem;
	}

	for (i = 0; i < nrings; ++i) {
		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
		spin_lock(&(rings[i])->producer_lock);
		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
						  size, gfp, destroy);
		spin_unlock(&(rings[i])->producer_lock);
		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
	}

	for (i = 0; i < nrings; ++i)
		kvfree(queues[i]);

	kfree(queues);

	return 0;

nomem:
	while (--i >= 0)
		kvfree(queues[i]);

	kfree(queues);

noqueues:
	return -ENOMEM;
}
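
/* Example (illustrative sketch): resize the RX and TX rings of a
 * hypothetical device in one call, so that either every ring is resized or,
 * on allocation failure, none of them is touched.
 *
 *	struct ptr_ring *rings[2] = { &dev->rx_ring, &dev->tx_ring };
 *
 *	err = ptr_ring_resize_multiple(rings, 2, 256, GFP_KERNEL,
 *				       example_destroy);
 */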

static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
	void *ptr;

	if (destroy)
		while ((ptr = ptr_ring_consume(r)))
			destroy(ptr);
	kvfree(r->queue);
}

#endif /* _LINUX_PTR_RING_H  */