Commit 506458ef authored by Will Deacon, committed by Ingo Molnar

locking/barriers: Convert users of lockless_dereference() to READ_ONCE()

READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it
can be used instead of lockless_dereference() without any change in
semantics.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1508840570-22169-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 76ebbe78
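To make the conversion concrete, here is a minimal sketch (not part of this commit; the struct and the publish()/consume() helpers are made up for illustration) of the publication pattern these call sites rely on. A writer publishes an initialized object with smp_store_release(); after the parent commit, READ_ONCE() on the reader side implies smp_read_barrier_depends(), giving the same dependency ordering that lockless_dereference() provided:

struct foo {
	int data;
};

static struct foo *shared;

/* Writer: fully initialize the object, then publish the pointer. */
static void publish(struct foo *p)
{
	p->data = 42;
	smp_store_release(&shared, p);	/* orders the init before the publish */
}

/*
 * Reader: the dependent load of p->data is ordered after the pointer
 * load by READ_ONCE(), exactly as lockless_dereference() guaranteed.
 */
static int consume(void)
{
	struct foo *p = READ_ONCE(shared);	/* was: lockless_dereference(shared) */

	return p ? p->data : -1;
}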
@@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment)
 		struct ldt_struct *ldt;
 		/* IRQs are off, so this synchronizes with smp_store_release */
-		ldt = lockless_dereference(current->active_mm->context.ldt);
+		ldt = READ_ONCE(current->active_mm->context.ldt);
 		if (!ldt || idx >= ldt->nr_entries)
 			return 0;
@@ -72,8 +72,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
-	/* lockless_dereference synchronizes with smp_store_release */
-	ldt = lockless_dereference(mm->context.ldt);
+	/* READ_ONCE synchronizes with smp_store_release */
+	ldt = READ_ONCE(mm->context.ldt);
 	/*
 	 * Any change to mm->context.ldt is followed by an IPI to all
@@ -101,7 +101,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 static void install_ldt(struct mm_struct *current_mm,
 			struct ldt_struct *ldt)
 {
-	/* Synchronizes with lockless_dereference in load_mm_ldt. */
+	/* Synchronizes with READ_ONCE in load_mm_ldt. */
 	smp_store_release(&current_mm->context.ldt, ldt);
 	/* Activate the LDT for all CPUs using current_mm. */
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
 	pgpath = path_to_pgpath(path);
-	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 		/* Only update current_pgpath if pg changed */
 		spin_lock_irqsave(&m->lock, flags);
 		m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	}
 	/* Were we instructed to switch PG? */
-	if (lockless_dereference(m->next_pg)) {
+	if (READ_ONCE(m->next_pg)) {
 		spin_lock_irqsave(&m->lock, flags);
 		pg = m->next_pg;
 		if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	/* Don't change PG until it has no remaining paths */
 check_current_pg:
-	pg = lockless_dereference(m->current_pg);
+	pg = READ_ONCE(m->current_pg);
 	if (pg) {
 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 		if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	struct request *clone;
 	/* Do we need to select a new pgpath? */
-	pgpath = lockless_dereference(m->current_pgpath);
+	pgpath = READ_ONCE(m->current_pgpath);
 	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 		pgpath = choose_pgpath(m, nr_bytes);
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	bool queue_io;
 	/* Do we need to select a new pgpath? */
-	pgpath = lockless_dereference(m->current_pgpath);
+	pgpath = READ_ONCE(m->current_pgpath);
 	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 	if (!pgpath || !queue_io)
 		pgpath = choose_pgpath(m, nr_bytes);
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	struct pgpath *current_pgpath;
 	int r;
-	current_pgpath = lockless_dereference(m->current_pgpath);
+	current_pgpath = READ_ONCE(m->current_pgpath);
 	if (!current_pgpath)
 		current_pgpath = choose_pgpath(m, 0);
@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	}
 	if (r == -ENOTCONN) {
-		if (!lockless_dereference(m->current_pg)) {
+		if (!READ_ONCE(m->current_pg)) {
 			/* Path status changed, redo selection */
 			(void) choose_pgpath(m, 0);
 		}
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti)
 		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
 	/* Guess which priority_group will be used at next mapping time */
-	pg = lockless_dereference(m->current_pg);
-	next_pg = lockless_dereference(m->next_pg);
-	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+	pg = READ_ONCE(m->current_pg);
+	next_pg = READ_ONCE(m->next_pg);
+	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
 		pg = next_pg;
 	if (!pg) {
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 {
 	/*
 	 * Be careful about RCU walk racing with rename:
-	 * use 'lockless_dereference' to fetch the name pointer.
+	 * use 'READ_ONCE' to fetch the name pointer.
 	 *
 	 * NOTE! Even if a rename will mean that the length
 	 * was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 	 * early because the data cannot match (there can
 	 * be no NUL in the ct/tcount data)
 	 */
-	const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 	return dentry_string_cmp(cs, ct, tcount);
 }
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
 static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
 {
-	return lockless_dereference(oi->__upperdentry);
+	return READ_ONCE(oi->__upperdentry);
 }
@@ -754,7 +754,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
 	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
 		struct inode *inode = file_inode(file);
-		realfile = lockless_dereference(od->upperfile);
+		realfile = READ_ONCE(od->upperfile);
 		if (!realfile) {
 			struct path upperpath;
@@ -274,7 +274,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-	container_of(lockless_dereference(ptr), type, member)
+	container_of(READ_ONCE(ptr), type, member)
 /*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -367,7 +367,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * example is when items are added to the list, but never deleted.
  */
 #define list_entry_lockless(ptr, type, member) \
-	container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+	container_of((typeof(ptr))READ_ONCE(ptr), type, member)
 /**
  * list_for_each_entry_lockless - iterate over rcu list of given type
@@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define __rcu_dereference_check(p, c, space) \
 ({ \
 	/* Dependency order vs. p above. */ \
-	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+	typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
 	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
 	rcu_dereference_sparse(p, space); \
 	((typeof(*p) __force __kernel *)(________p1)); \
@@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_dereference_raw(p) \
 ({ \
 	/* Dependency order vs. p above. */ \
-	typeof(p) ________p1 = lockless_dereference(p); \
+	typeof(p) ________p1 = READ_ONCE(p); \
 	((typeof(*p) __force __kernel *)(________p1)); \
 })
@@ -4231,7 +4231,7 @@ static void perf_remove_from_owner(struct perf_event *event)
 	 * indeed free this event, otherwise we need to serialize on
 	 * owner->perf_event_mutex.
 	 */
-	owner = lockless_dereference(event->owner);
+	owner = READ_ONCE(event->owner);
 	if (owner) {
 		/*
 		 * Since delayed_put_task_struct() also drops the last
@@ -4328,7 +4328,7 @@ int perf_event_release_kernel(struct perf_event *event)
 		 * Cannot change, child events are not migrated, see the
 		 * comment with perf_event_ctx_lock_nested().
 		 */
-		ctx = lockless_dereference(child->ctx);
+		ctx = READ_ONCE(child->ctx);
 		/*
 		 * Since child_mutex nests inside ctx::mutex, we must jump
 		 * through hoops. We start by grabbing a reference on the ctx.
@@ -189,7 +189,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
 	u32 ret = SECCOMP_RET_ALLOW;
 	/* Make sure cross-thread synced filter points somewhere sane. */
 	struct seccomp_filter *f =
-			lockless_dereference(current->seccomp.filter);
+			READ_ONCE(current->seccomp.filter);
 	/* Ensure unexpected behavior doesn't result in failing open. */
 	if (unlikely(WARN_ON(f == NULL)))
@@ -67,7 +67,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	 * we raced with task_work_run(), *pprev == NULL/exited.
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	while ((work = lockless_dereference(*pprev))) {
+	while ((work = READ_ONCE(*pprev))) {
 		if (work->func != func)
 			pprev = &work->next;
 		else if (cmpxchg(pprev, work, work->next) == work)
@@ -258,7 +258,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
 	 * memcg_caches issues a write barrier to match this (see
 	 * memcg_create_kmem_cache()).
 	 */
-	cachep = lockless_dereference(arr->entries[idx]);
+	cachep = READ_ONCE(arr->entries[idx]);
 	rcu_read_unlock();
 	return cachep;
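For reference, a rough paraphrase of how lockless_dereference() was defined at the time (in include/linux/compiler.h); with smp_read_barrier_depends() folded into READ_ONCE() by the parent commit, the wrapper no longer adds anything:

#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})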