Commit a649d25d authored by Paul E. McKenney

rcu: Add lockdep_assert_irqs_disabled() to rcu_sched_clock_irq() and callees

This commit adds a number of lockdep_assert_irqs_disabled() calls
to rcu_sched_clock_irq() and a number of the functions that it calls.
The point of this is to help track down a situation where lockdep appears
to be insisting that interrupts are enabled within these functions, which
should only ever be invoked from the scheduling-clock interrupt handler.

Link: https://lore.kernel.org/lkml/20201111133813.GA81547@elver.google.com/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Parent: c5586e32
@@ -2553,6 +2553,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 void rcu_sched_clock_irq(int user)
 {
 	trace_rcu_utilization(TPS("Start scheduler-tick"));
+	lockdep_assert_irqs_disabled();
 	raw_cpu_inc(rcu_data.ticks_this_gp);
 	/* The load-acquire pairs with the store-release setting to true. */
 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
@@ -2566,6 +2567,7 @@ void rcu_sched_clock_irq(int user)
 	rcu_flavor_sched_clock_irq(user);
 	if (rcu_pending(user))
 		invoke_rcu_core();
+	lockdep_assert_irqs_disabled();
 	trace_rcu_utilization(TPS("End scheduler-tick"));
 }
@@ -3690,6 +3692,8 @@ static int rcu_pending(int user)
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 
+	lockdep_assert_irqs_disabled();
+
 	/* Check for CPU stalls, if enabled. */
 	check_cpu_stall(rdp);
 ...
@@ -682,6 +682,7 @@ static void rcu_flavor_sched_clock_irq(int user)
 {
 	struct task_struct *t = current;
 
+	lockdep_assert_irqs_disabled();
 	if (user || rcu_is_cpu_rrupt_from_idle()) {
 		rcu_note_voluntary_context_switch(current);
 	}
 ...
@@ -260,6 +260,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 	struct task_struct *t;
 	struct task_struct *ts[8];
 
+	lockdep_assert_irqs_disabled();
 	if (!rcu_preempt_blocked_readers_cgp(rnp))
 		return 0;
 	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
@@ -284,6 +285,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 			".q"[rscr.rs.b.need_qs],
 			".e"[rscr.rs.b.exp_hint],
 			".l"[rscr.on_blkd_list]);
+		lockdep_assert_irqs_disabled();
 		put_task_struct(t);
 		ndetected++;
 	}
@@ -472,6 +474,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 	struct rcu_node *rnp;
 	long totqlen = 0;
 
+	lockdep_assert_irqs_disabled();
+
 	/* Kick and suppress, if so configured. */
 	rcu_stall_kick_kthreads();
 	if (rcu_stall_is_suppressed())
@@ -493,6 +497,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 		}
 	}
 	ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
+	lockdep_assert_irqs_disabled();
 	}
 
 	for_each_possible_cpu(cpu)
@@ -538,6 +543,8 @@ static void print_cpu_stall(unsigned long gps)
 	struct rcu_node *rnp = rcu_get_root();
 	long totqlen = 0;
 
+	lockdep_assert_irqs_disabled();
+
 	/* Kick and suppress, if so configured. */
 	rcu_stall_kick_kthreads();
 	if (rcu_stall_is_suppressed())
@@ -592,6 +599,7 @@ static void check_cpu_stall(struct rcu_data *rdp)
 	unsigned long js;
 	struct rcu_node *rnp;
 
+	lockdep_assert_irqs_disabled();
 	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
 	    !rcu_gp_in_progress())
 		return;
 ...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册