Commit f78f5b90 authored by Paul E. McKenney

rcu: Rename rcu_lockdep_assert() to RCU_LOCKDEP_WARN()

This commit renames rcu_lockdep_assert() to RCU_LOCKDEP_WARN() for
consistency with the WARN() series of macros.  This also requires
inverting the sense of the conditional, which this commit also does.
Reported-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Parent 46f00d18
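To illustrate the conversion pattern (a minimal sketch only; the lock name my_lock and the message text are hypothetical, not part of this commit): a call site that previously asserted that some form of protection was held now warns when none is held, with the condition negated accordingly.

    /* Old style: assert that at least one protective condition holds. */
    rcu_lockdep_assert(rcu_read_lock_held() || lockdep_is_held(&my_lock),
                       "need rcu_read_lock() or my_lock");

    /* New style: the sense is inverted, so warn when neither condition holds. */
    RCU_LOCKDEP_WARN(!rcu_read_lock_held() && !lockdep_is_held(&my_lock),
                     "need rcu_read_lock() or my_lock");

Negating the condition follows the WARN() convention of naming the error case, which is why every call site in the diff below inverts its test.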
@@ -883,7 +883,7 @@ All: lockdep-checked RCU-protected pointer access
         rcu_access_pointer
         rcu_dereference_raw
-        rcu_lockdep_assert
+        RCU_LOCKDEP_WARN
         rcu_sleep_check
         RCU_NONIDLE
@@ -54,8 +54,8 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
 #define rcu_dereference_check_mce(p) \
 ({ \
-        rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-                           lockdep_is_held(&mce_chrdev_read_mutex), \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+                         !lockdep_is_held(&mce_chrdev_read_mutex), \
                            "suspicious rcu_dereference_check_mce() usage"); \
         smp_load_acquire(&(p)); \
 })
@@ -136,7 +136,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
         preempt_count_add(HARDIRQ_OFFSET);
         /* This code is a bit fragile. Test it. */
-        rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
+        RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
         return prev_state;
 }
@@ -110,8 +110,8 @@ static DEFINE_MUTEX(dev_opp_list_lock);
 #define opp_rcu_lockdep_assert() \
 do { \
-        rcu_lockdep_assert(rcu_read_lock_held() || \
-                           lockdep_is_held(&dev_opp_list_lock), \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+                         !lockdep_is_held(&dev_opp_list_lock), \
                            "Missing rcu_read_lock() or " \
                            "dev_opp_list_lock protection"); \
 } while (0)
@@ -86,8 +86,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
 static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
 {
-        rcu_lockdep_assert(rcu_read_lock_held() ||
-                           lockdep_is_held(&files->file_lock),
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+                         !lockdep_is_held(&files->file_lock),
                            "suspicious rcu_dereference_check() usage");
         return __fcheck_files(files, fd);
 }
@@ -536,6 +536,11 @@ static inline int rcu_read_lock_sched_held(void)
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */
+static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
+{
+}
 #ifdef CONFIG_PROVE_RCU
 /**
@@ -546,16 +551,31 @@ static inline int rcu_read_lock_sched_held(void)
 #define rcu_lockdep_assert(c, s) \
         do { \
                 static bool __section(.data.unlikely) __warned; \
+                deprecate_rcu_lockdep_assert(); \
                 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
                         __warned = true; \
                         lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
                 } \
         } while (0)
+/**
+ * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
+ * @c: condition to check
+ * @s: informative message
+ */
+#define RCU_LOCKDEP_WARN(c, s) \
+        do { \
+                static bool __section(.data.unlikely) __warned; \
+                if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+                        __warned = true; \
+                        lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
+                } \
+        } while (0)
 #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
 static inline void rcu_preempt_sleep_check(void)
 {
-        rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+        RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
                          "Illegal context switch in RCU read-side critical section");
 }
 #else /* #ifdef CONFIG_PROVE_RCU */
@@ -567,15 +587,16 @@ static inline void rcu_preempt_sleep_check(void)
 #define rcu_sleep_check() \
         do { \
                 rcu_preempt_sleep_check(); \
-                rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
+                RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
                                  "Illegal context switch in RCU-bh read-side critical section"); \
-                rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
+                RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
                                  "Illegal context switch in RCU-sched read-side critical section"); \
         } while (0)
 #else /* #ifdef CONFIG_PROVE_RCU */
-#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
 #define rcu_sleep_check() do { } while (0)
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -606,13 +627,13 @@ static inline void rcu_preempt_sleep_check(void)
 ({ \
         /* Dependency order vs. p above. */ \
         typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
-        rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+        RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
         rcu_dereference_sparse(p, space); \
         ((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
-        rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+        RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
         rcu_dereference_sparse(p, space); \
         ((typeof(*p) __force __kernel *)(p)); \
 })
@@ -836,7 +857,7 @@ static inline void rcu_read_lock(void)
         __rcu_read_lock();
         __acquire(RCU);
         rcu_lock_acquire(&rcu_lock_map);
-        rcu_lockdep_assert(rcu_is_watching(),
+        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                          "rcu_read_lock() used illegally while idle");
 }
@@ -887,7 +908,7 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
-        rcu_lockdep_assert(rcu_is_watching(),
+        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                          "rcu_read_unlock() used illegally while idle");
         __release(RCU);
         __rcu_read_unlock();
@@ -916,7 +937,7 @@ static inline void rcu_read_lock_bh(void)
         local_bh_disable();
         __acquire(RCU_BH);
         rcu_lock_acquire(&rcu_bh_lock_map);
-        rcu_lockdep_assert(rcu_is_watching(),
+        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                          "rcu_read_lock_bh() used illegally while idle");
 }
@@ -927,7 +948,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-        rcu_lockdep_assert(rcu_is_watching(),
+        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                          "rcu_read_unlock_bh() used illegally while idle");
         rcu_lock_release(&rcu_bh_lock_map);
         __release(RCU_BH);
@@ -952,7 +973,7 @@ static inline void rcu_read_lock_sched(void)
         preempt_disable();
         __acquire(RCU_SCHED);
         rcu_lock_acquire(&rcu_sched_lock_map);
-        rcu_lockdep_assert(rcu_is_watching(),
+        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                          "rcu_read_lock_sched() used illegally while idle");
 }
@@ -970,7 +991,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-        rcu_lockdep_assert(rcu_is_watching(),
+        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                          "rcu_read_unlock_sched() used illegally while idle");
         rcu_lock_release(&rcu_sched_lock_map);
         __release(RCU_SCHED);
@@ -107,8 +107,8 @@ static DEFINE_SPINLOCK(release_agent_path_lock);
 struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
 #define cgroup_assert_mutex_or_rcu_locked() \
-        rcu_lockdep_assert(rcu_read_lock_held() || \
-                           lockdep_is_held(&cgroup_mutex), \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+                         !lockdep_is_held(&cgroup_mutex), \
                            "cgroup_mutex or RCU read lock required");
 /*
@@ -451,9 +451,8 @@ EXPORT_SYMBOL(pid_task);
  */
 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
-        rcu_lockdep_assert(rcu_read_lock_held(),
-                           "find_task_by_pid_ns() needs rcu_read_lock()"
-                           " protection");
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+                         "find_task_by_pid_ns() needs rcu_read_lock() protection");
         return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
 }
@@ -415,11 +415,11 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
         struct rcu_head *head = &rcu.head;
         bool done = false;
-        rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
-                           !lock_is_held(&rcu_bh_lock_map) &&
-                           !lock_is_held(&rcu_lock_map) &&
-                           !lock_is_held(&rcu_sched_lock_map),
-                           "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
+        RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+                         lock_is_held(&rcu_bh_lock_map) ||
+                         lock_is_held(&rcu_lock_map) ||
+                         lock_is_held(&rcu_sched_lock_map),
+                         "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
         might_sleep();
         init_completion(&rcu.completion);
@@ -191,9 +191,9 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 void synchronize_sched(void)
 {
-        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                           !lock_is_held(&rcu_lock_map) &&
-                           !lock_is_held(&rcu_sched_lock_map),
+        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                         lock_is_held(&rcu_lock_map) ||
+                         lock_is_held(&rcu_sched_lock_map),
                          "Illegal synchronize_sched() in RCU read-side critical section");
         cond_resched();
 }
@@ -649,11 +649,11 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
          * It is illegal to enter an extended quiescent state while
          * in an RCU read-side critical section.
          */
-        rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+        RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
                          "Illegal idle entry in RCU read-side critical section.");
-        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
+        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
                          "Illegal idle entry in RCU-bh read-side critical section.");
-        rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
+        RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
                          "Illegal idle entry in RCU-sched read-side critical section.");
 }
@@ -3161,9 +3161,9 @@ static inline int rcu_blocking_is_gp(void)
  */
 void synchronize_sched(void)
 {
-        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                           !lock_is_held(&rcu_lock_map) &&
-                           !lock_is_held(&rcu_sched_lock_map),
+        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                         lock_is_held(&rcu_lock_map) ||
+                         lock_is_held(&rcu_sched_lock_map),
                          "Illegal synchronize_sched() in RCU-sched read-side critical section");
         if (rcu_blocking_is_gp())
                 return;
@@ -3188,9 +3188,9 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
  */
 void synchronize_rcu_bh(void)
 {
-        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                           !lock_is_held(&rcu_lock_map) &&
-                           !lock_is_held(&rcu_sched_lock_map),
+        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                         lock_is_held(&rcu_lock_map) ||
+                         lock_is_held(&rcu_sched_lock_map),
                          "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
         if (rcu_blocking_is_gp())
                 return;
@@ -538,9 +538,9 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
-        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                           !lock_is_held(&rcu_lock_map) &&
-                           !lock_is_held(&rcu_sched_lock_map),
+        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                         lock_is_held(&rcu_lock_map) ||
+                         lock_is_held(&rcu_sched_lock_map),
                          "Illegal synchronize_rcu() in RCU read-side critical section");
         if (!rcu_scheduler_active)
                 return;
@@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
 void synchronize_rcu_tasks(void)
 {
         /* Complain if the scheduler has not started. */
-        rcu_lockdep_assert(!rcu_scheduler_active,
+        RCU_LOCKDEP_WARN(rcu_scheduler_active,
                          "synchronize_rcu_tasks called too soon");
         /* Wait for the grace period. */
@@ -2200,7 +2200,7 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
-        rcu_lockdep_assert(rcu_read_lock_sched_held(),
+        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                          "sched RCU must be held");
         return &cpu_rq(i)->rd->dl_bw;
 }
@@ -2210,7 +2210,7 @@ static inline int dl_bw_cpus(int i)
         struct root_domain *rd = cpu_rq(i)->rd;
         int cpus = 0;
-        rcu_lockdep_assert(rcu_read_lock_sched_held(),
+        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                          "sched RCU must be held");
         for_each_cpu_and(i, rd->span, cpu_active_mask)
                 cpus++;
@@ -338,19 +338,19 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 #include <trace/events/workqueue.h>
 #define assert_rcu_or_pool_mutex() \
-        rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-                           lockdep_is_held(&wq_pool_mutex), \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+                         !lockdep_is_held(&wq_pool_mutex), \
                            "sched RCU or wq_pool_mutex should be held")
 #define assert_rcu_or_wq_mutex(wq) \
-        rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-                           lockdep_is_held(&wq->mutex), \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+                         !lockdep_is_held(&wq->mutex), \
                            "sched RCU or wq->mutex should be held")
 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
-        rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-                           lockdep_is_held(&wq->mutex) || \
-                           lockdep_is_held(&wq_pool_mutex), \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+                         !lockdep_is_held(&wq->mutex) && \
+                         !lockdep_is_held(&wq_pool_mutex), \
                            "sched RCU, wq->mutex or wq_pool_mutex should be held")
 #define for_each_cpu_worker_pool(pool, cpu) \
@@ -400,7 +400,7 @@ static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
 {
         bool match = false;
-        rcu_lockdep_assert(rcu_read_lock_held() ||
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
                            lockdep_is_held(&devcgroup_mutex),
                            "device_cgroup:verify_new_ex called without proper synchronization");