diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 79b99d653e030d113e4401fc26c7b47e81dcff8c..78abe15f7e660e64a967404c4c3ffe46ba66c3f8 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -44,7 +44,7 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
 	 * and that one the synchronize_sched() is done, the writer will see
 	 * anything we did within this RCU-sched read-size critical section.
 	 */
-	__this_cpu_inc(*sem->read_count);
+	this_cpu_inc(*sem->read_count);
 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
 	barrier();
@@ -68,7 +68,7 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	/*
 	 * Same as in percpu_down_read().
 	 */
-	__this_cpu_inc(*sem->read_count);
+	this_cpu_inc(*sem->read_count);
 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
 	preempt_enable();
@@ -94,7 +94,7 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
 	 * Same as in percpu_down_read().
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
 	else
 		__percpu_up_read(sem); /* Unconditional memory barrier */
 	preempt_enable();
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 883cf1b92d9084f30a21f699211d6cd2ca3b9362..6a2824fe7139eaa0911aa614a89487a1ce1a7122 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -99,7 +99,7 @@ void __percpu_up_read(struct percpu_rw_semaphore *sem)
 	 * zero, as that is the only time it matters) they will also see our
 	 * critical section.
 	 */
-	__this_cpu_dec(*sem->read_count);
+	this_cpu_dec(*sem->read_count);
 
 	/* Prod writer to recheck readers_active */
 	rcuwait_wake_up(&sem->writer);
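
Background on the accessor change, as a deliberately simplified sketch (the
real definitions live in include/linux/percpu-defs.h and are arch-overridable):
this_cpu_inc()/this_cpu_dec() are specified to be atomic with respect to both
preemption and interrupts, while the __this_cpu_*() variants assume the caller
already provides that protection and, in the generic fallback, compile to a
plain read-modify-write. The macro names below are hypothetical and only
illustrate the two shapes:

	/* Illustrative sketch of the generic fallbacks; not the kernel's code. */
	#define sketch___this_cpu_inc(pcp)					\
	do {									\
		/* plain RMW: caller must exclude preemption and interrupts */	\
		*raw_cpu_ptr(&(pcp)) += 1;					\
	} while (0)

	#define sketch_this_cpu_inc(pcp)					\
	do {									\
		unsigned long __flags;						\
		raw_local_irq_save(__flags);	/* safe vs. IRQs and preemption */ \
		*raw_cpu_ptr(&(pcp)) += 1;					\
		raw_local_irq_restore(__flags);					\
	} while (0)

The read-side fast paths above already run with preemption disabled, so the
switch presumably matters on architectures that rely on the generic fallback,
where an interrupt landing between the load and the store of the plain
read-modify-write could corrupt read_count; architectures with a
single-instruction per-cpu increment (e.g. x86's segment-prefixed inc) behave
the same either way.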