#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#ifdef __KERNEL__

#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/prefetch.h>
#include <asm/system.h>

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
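
/*
 * Example (illustrative sketch, not part of this header): the usual ways a
 * list head gets set up.  "my_list", "struct foo" and foo_setup() are
 * hypothetical names used only for illustration.
 *
 *	static LIST_HEAD(my_list);		// compile-time initialization
 *
 *	struct foo {
 *		struct list_head entries;	// head embedded in an object
 *	};
 *
 *	static void foo_setup(struct foo *f)
 *	{
 *		INIT_LIST_HEAD(&f->entries);	// run-time initialization
 *	}
 */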

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next);
#endif

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}
#else
extern void list_add(struct list_head *new, struct list_head *head);
#endif


/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
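
/*
 * Example (illustrative sketch): pushing a hypothetical "struct item" onto
 * two hypothetical lists.  list_add() gives LIFO (stack) order, while
 * list_add_tail() gives FIFO (queue) order.  An entry may sit on only one
 * list per embedded list_head, hence the separate helpers.
 *
 *	struct item {
 *		int val;
 *		struct list_head node;
 *	};
 *
 *	static LIST_HEAD(item_stack);
 *	static LIST_HEAD(item_queue);
 *
 *	static void item_push(struct item *it)
 *	{
 *		list_add(&it->node, &item_stack);	// becomes the first entry
 *	}
 *
 *	static void item_enqueue(struct item *it)
 *	{
 *		list_add_tail(&it->node, &item_queue);	// becomes the last entry
 *	}
 */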

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head * new,
		struct list_head * prev, struct list_head * next)
{
	new->next = next;
	new->prev = prev;
	smp_wmb();
	next->prev = new;
	prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
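
/*
 * Example (illustrative sketch): the writer side of an RCU-protected list,
 * reusing the hypothetical "struct item" above.  The spinlock name is made
 * up and assumes <linux/spinlock.h>; it serializes writers only, while
 * readers may traverse concurrently (see list_for_each_entry_rcu() below).
 *
 *	static LIST_HEAD(item_list);
 *	static DEFINE_SPINLOCK(item_lock);
 *
 *	static void item_insert(struct item *it)
 *	{
 *		spin_lock(&item_lock);
 *		list_add_rcu(&it->node, &item_list);
 *		spin_unlock(&item_lock);
 *	}
 */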

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif
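
/*
 * Example (illustrative sketch): unlinking an entry and releasing it, using
 * the hypothetical "struct item" above.  After list_del() the entry's
 * pointers are poisoned, so it must not be traversed from again.  Assumes
 * the entry was kmalloc()ed (<linux/slab.h>).
 *
 *	static void item_destroy(struct item *it)
 *	{
 *		list_del(&it->node);
 *		kfree(it);
 *	}
 */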

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->prev = LIST_POISON2;
}
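
/*
 * Example (illustrative sketch): removing an entry from the hypothetical
 * RCU-protected item_list above and deferring the free until a grace period
 * has elapsed, as required by the comment above.
 *
 *	static void item_remove(struct item *it)
 *	{
 *		spin_lock(&item_lock);
 *		list_del_rcu(&it->node);
 *		spin_unlock(&item_lock);
 *		synchronize_rcu();	// readers may still hold references
 *		kfree(it);
 *	}
 */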

/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 * Note: if 'old' was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
					struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	smp_wmb();
	new->next->prev = new;
	new->prev->next = new;
	old->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}
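
/*
 * Example (illustrative sketch): an LRU-style "touch" on the hypothetical
 * "struct item".  Moving the entry to the tail of @lru keeps the least
 * recently used entry at the head.
 *
 *	static void item_touch(struct item *it, struct list_head *lru)
 *	{
 *		list_move_tail(&it->node, lru);
 *	}
 */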

/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
				const struct list_head *head)
{
	return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
				 struct list_head *head)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	first->prev = head;
	head->next = first;

	last->next = at;
	at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}
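
/*
 * Example (illustrative sketch): draining a caller-local "pending" list into
 * a hypothetical global work_list in one step.  The spliced entries end up
 * at the front of work_list, and "pending" is left empty and reusable.
 *
 *	static LIST_HEAD(work_list);
 *
 *	static void flush_pending(struct list_head *pending)
 *	{
 *		list_splice_init(pending, &work_list);
 *	}
 */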

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the list to splice the first list into
 * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 *	prevent any other updates to @head.  In principle, it is possible
 *	to modify the list as soon as sync() begins execution.
 *	If this sort of thing becomes necessary, an alternative version
 *	based on call_rcu() could be created.  But only if -really-
 *	needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	if (list_empty(list))
		return;

	/*
	 * "first" and "last" still track the source list, so it is safe
	 * to reinitialize it here.
	 */

	INIT_LIST_HEAD(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = at;
	smp_wmb();
	head->next = first;
	first->prev = head;
	at->prev = last;
}

/**
 * list_entry - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)
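
/*
 * Example (illustrative sketch): recovering the containing object from an
 * embedded list_head, using the hypothetical "struct item" above.  Given a
 * struct list_head *pos known to point at some item's ->node:
 *
 *	struct item *it = list_entry(pos, struct item, node);
 */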

/**
 * list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
        	pos = pos->next)

/**
 * __list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev	-	iterate over a list backwards
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
        	pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)

/**
 * list_for_each_entry	-	iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head); 	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
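
/*
 * Example (illustrative sketch): summing a field across every entry of the
 * hypothetical item_list.  The loop must not remove entries; see
 * list_for_each_entry_safe() below for that.
 *
 *	static int item_total(void)
 *	{
 *		struct item *it;
 *		int sum = 0;
 *
 *		list_for_each_entry(it, &item_list, node)
 *			sum += it->val;
 *		return sum;
 *	}
 */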

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head); 	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue
 * @pos:	the type * to use as a start point
 * @head:	the head of the list
 * @member:	the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue.
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) 		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) 			\
	for (; prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
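
/*
 * Example (illustrative sketch): tearing down the hypothetical item_list.
 * The _safe variant is required because each entry is unlinked and freed
 * while the loop is still walking the list.
 *
 *	static void item_list_destroy(void)
 *	{
 *		struct item *it, *tmp;
 *
 *		list_for_each_entry_safe(it, tmp, &item_list, node) {
 *			list_del(&it->node);
 *			kfree(it);
 *		}
 *	}
 */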

/**
 * list_for_each_entry_safe_continue
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) 		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member), 		\
		n = list_entry(pos->member.next, typeof(*pos), member);		\
	     &pos->member != (head);						\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) 			\
	for (n = list_entry(pos->member.next, typeof(*pos), member);		\
	     &pos->member != (head);						\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member)		\
	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
		n = list_entry(pos->member.prev, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/**
 * list_for_each_rcu	-	iterate over an rcu-protected list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		prefetch(rcu_dereference(pos)->next), pos != (head); \
        	pos = pos->next)

#define __list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		rcu_dereference(pos) != (head); \
        	pos = pos->next)

/**
 * list_for_each_safe_rcu
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 *
 * Iterate over an rcu-protected list, safe against removal of list entry.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
	for (pos = (head)->next; \
		n = rcu_dereference(pos)->next, pos != (head); \
		pos = n)

/**
 * list_for_each_entry_rcu	-	iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
		prefetch(rcu_dereference(pos)->member.next), \
			&pos->member != (head); \
		pos = list_entry(pos->member.next, typeof(*pos), member))
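
/*
 * Example (illustrative sketch): the reader side matching the hypothetical
 * item_insert()/item_remove() writers above.  Assumes <linux/rcupdate.h>;
 * the traversal needs only rcu_read_lock() and may run concurrently with
 * list_add_rcu() and list_del_rcu().
 *
 *	static int item_present(int val)
 *	{
 *		struct item *it;
 *		int found = 0;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(it, &item_list, node) {
 *			if (it->val == val) {
 *				found = 1;
 *				break;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return found;
 *	}
 */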


/**
 * list_for_each_continue_rcu
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = (pos)->next; \
		prefetch(rcu_dereference((pos))->next), (pos) != (head); \
        	(pos) = (pos)->next)

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	smp_wmb();
	if (next)
		new->next->pprev = &new->next;
	*new->pprev = new;
	old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}
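
/*
 * Example (illustrative sketch): a small chained hash table built from
 * hlist_heads.  "struct obj", obj_hash[] and OBJ_HASH_SIZE are hypothetical;
 * the static array starts out zeroed, i.e. with every bucket empty.
 *
 *	#define OBJ_HASH_SIZE 16
 *	static struct hlist_head obj_hash[OBJ_HASH_SIZE];
 *
 *	struct obj {
 *		unsigned long key;
 *		struct hlist_node hash_node;
 *	};
 *
 *	static void obj_insert(struct obj *o)
 *	{
 *		hlist_add_head(&o->hash_node,
 *			       &obj_hash[o->key % OBJ_HASH_SIZE]);
 *	}
 */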


/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	n->pprev = &h->first;
	smp_wmb();
	if (first)
		first->pprev = &n->next;
	h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev  = &next->next;
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	smp_wmb();
	next->pprev = &n->next;
	*(n->pprev) = n;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
				       struct hlist_node *n)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	smp_wmb();
	prev->next = n;
	if (n->next)
		n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)			 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
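
/*
 * Example (illustrative sketch): looking up an entry in the hypothetical
 * obj_hash[] table above.  Note that this iterator needs both a type cursor
 * (@tpos) and a separate struct hlist_node cursor (@pos).
 *
 *	static struct obj *obj_find(unsigned long key)
 *	{
 *		struct obj *o;
 *		struct hlist_node *pos;
 *
 *		hlist_for_each_entry(o, pos, &obj_hash[key % OBJ_HASH_SIZE],
 *				     hash_node)
 *			if (o->key == key)
 *				return o;
 *		return NULL;
 *	}
 */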

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)		 \
	for (pos = (pos)->next;						 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)			 \
	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member)		 \
	for (pos = (head)->first;					 \
	     rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) &&	 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
#endif