Commit 8ad7b378 authored by Davidlohr Bueso, committed by Ingo Molnar

futex: Rename barrier references in ordering guarantees

Ingo suggested we rename how we reference barriers A and B
regarding futex ordering guarantees. This patch replaces,
for both barriers, MB (A) with smp_mb(); (A), such that:

 - We explicitly state that the barriers are SMP, and

 - We standardize how we reference these across futex.c
   helping readers follow what barrier does what and where.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Mason <clm@fb.com>
Cc: Darren Hart <dvhart@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: dave@stgolabs.net
Link: http://lkml.kernel.org/r/1455045314-8305-2-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent e2d6f8a5
@@ -124,16 +124,16 @@
  *   futex_wait(futex, val);
  *
  *   waiters++; (a)
- *   mb(); (A) <-- paired with -.
- *                              |
- *   lock(hash_bucket(futex));  |
- *                              |
- *   uval = *futex;             |
- *                              |        *futex = newval;
- *                              |        sys_futex(WAKE, futex);
- *                              |          futex_wake(futex);
- *                              |
- *                              `-------> mb(); (B)
+ *   smp_mb(); (A) <-- paired with -.
+ *                                  |
+ *   lock(hash_bucket(futex));      |
+ *                                  |
+ *   uval = *futex;                 |
+ *                                  |        *futex = newval;
+ *                                  |        sys_futex(WAKE, futex);
+ *                                  |          futex_wake(futex);
+ *                                  |
+ *                                  `--------> smp_mb(); (B)
  *   if (uval == val)
  *     queue();
  *     unlock(hash_bucket(futex));
@@ -334,7 +334,7 @@ static inline void futex_get_mm(union futex_key *key)
         /*
          * Ensure futex_get_mm() implies a full barrier such that
          * get_futex_key() implies a full barrier. This is relied upon
-         * as full barrier (B), see the ordering comment above.
+         * as smp_mb(); (B), see the ordering comment above.
          */
         smp_mb__after_atomic();
 }
@@ -407,10 +407,10 @@ static void get_futex_key_refs(union futex_key *key)
         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
         case FUT_OFF_INODE:
-                ihold(key->shared.inode); /* implies MB (B) */
+                ihold(key->shared.inode); /* implies smp_mb(); (B) */
                 break;
         case FUT_OFF_MMSHARED:
-                futex_get_mm(key); /* implies MB (B) */
+                futex_get_mm(key); /* implies smp_mb(); (B) */
                 break;
         default:
                 /*
@@ -418,7 +418,7 @@ static void get_futex_key_refs(union futex_key *key)
                  * mm, therefore the only purpose of calling get_futex_key_refs
                  * is because we need the barrier for the lockless waiter check.
                  */
-                smp_mb(); /* explicit MB (B) */
+                smp_mb(); /* explicit smp_mb(); (B) */
         }
 }
@@ -497,7 +497,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
         if (!fshared) {
                 key->private.mm = mm;
                 key->private.address = address;
-                get_futex_key_refs(key); /* implies MB (B) */
+                get_futex_key_refs(key); /* implies smp_mb(); (B) */
                 return 0;
         }
@@ -572,7 +572,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
                 key->shared.pgoff = basepage_index(page);
         }
-        get_futex_key_refs(key); /* implies MB (B) */
+        get_futex_key_refs(key); /* implies smp_mb(); (B) */
 out:
         unlock_page(page);
@@ -1864,7 +1864,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
         q->lock_ptr = &hb->lock;
-        spin_lock(&hb->lock); /* implies MB (A) */
+        spin_lock(&hb->lock); /* implies smp_mb(); (A) */
         return hb;
 }
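
For readers following the ordering comment in the first hunk, the sketch below is a minimal user-space illustration, not kernel code, of what the paired barriers (A) and (B) guarantee. C11 atomic_thread_fence(memory_order_seq_cst) stands in for the kernel's smp_mb(), a busy re-check replaces the real hash-bucket locking and futex syscalls, and every name here (futex_word, waiters, waiter_side, waker_side) is invented for the example. The asserted invariant is the "no lost wakeup" rule the comment describes: either the waiter observes the newly written futex value, or the waker observes the pending waiter, never neither.

/* barrier_pairing_sketch.c -- illustrative only, assumes C11 + pthreads.
 * Build: cc -std=c11 -pthread barrier_pairing_sketch.c
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int futex_word;  /* the user-space futex value              */
static atomic_int waiters;     /* waiter count, in the role of hb_waiters */
static int waiter_saw_store;   /* did the waiter see *futex == newval?    */
static int waker_saw_waiter;   /* did the waker see a pending waiter?     */

static void *waiter_side(void *arg)
{
        (void)arg;
        atomic_fetch_add_explicit(&waiters, 1, memory_order_relaxed); /* waiters++     */
        atomic_thread_fence(memory_order_seq_cst);                    /* smp_mb(); (A) */
        waiter_saw_store =
                atomic_load_explicit(&futex_word, memory_order_relaxed); /* uval = *futex */
        return NULL;
}

static void *waker_side(void *arg)
{
        (void)arg;
        atomic_store_explicit(&futex_word, 1, memory_order_relaxed);  /* *futex = newval */
        atomic_thread_fence(memory_order_seq_cst);                    /* smp_mb(); (B)   */
        waker_saw_waiter =
                atomic_load_explicit(&waiters, memory_order_relaxed); /* any waiters?    */
        return NULL;
}

int main(void)
{
        for (int i = 0; i < 100000; i++) {
                pthread_t wa, wk;

                atomic_store(&futex_word, 0);
                atomic_store(&waiters, 0);
                waiter_saw_store = waker_saw_waiter = 0;

                pthread_create(&wa, NULL, waiter_side, NULL);
                pthread_create(&wk, NULL, waker_side, NULL);
                pthread_join(wa, NULL);
                pthread_join(wk, NULL);

                /* The paired full barriers guarantee at least one side sees the other. */
                assert(waiter_saw_store || waker_saw_waiter);
        }
        puts("no lost wakeups observed");
        return 0;
}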