提交 dec98900 编写于 作者: Paul E. McKenney

rcu: Add ->dynticks field to rcu_dyntick trace event

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
上级 84585aa8
...@@ -436,24 +436,27 @@ TRACE_EVENT(rcu_fqs, ...@@ -436,24 +436,27 @@ TRACE_EVENT(rcu_fqs,
*/ */
/*
 * Tracepoint for dyntick-idle entry/exit events.  The "polarity" string
 * distinguishes the transition direction ("Start"/"End" for process-level
 * transitions, "Startirq"/"Endirq" and "--="/"++=" for NMI/irq nesting
 * adjustments, plus error strings for misuse from non-idle tasks).
 * oldnesting/newnesting record the nesting counter before and after the
 * transition; dynticks snapshots the per-CPU ->dynticks counter so traces
 * can be correlated with the EQS state (low bits only, masked to 12 bits
 * in the printk to keep the output compact).
 */
TRACE_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_STRUCT__entry(
		__field(const char *, polarity)
		__field(long, oldnesting)
		__field(long, newnesting)
		__field(int, dynticks)
	),

	TP_fast_assign(
		__entry->polarity = polarity;
		__entry->oldnesting = oldnesting;
		__entry->newnesting = newnesting;
		/* Snapshot the atomic counter at trace time. */
		__entry->dynticks = atomic_read(&dynticks);
	),

	TP_printk("%s %lx %lx %#3x", __entry->polarity,
		  __entry->oldnesting, __entry->newnesting,
		  __entry->dynticks & 0xfff)
);
/* /*
...@@ -801,7 +804,7 @@ TRACE_EVENT(rcu_barrier, ...@@ -801,7 +804,7 @@ TRACE_EVENT(rcu_barrier,
grplo, grphi, gp_tasks) do { } \ grplo, grphi, gp_tasks) do { } \
while (0) while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
/* No-op stub used when RCU tracing is compiled out; must mirror the
 * four-argument TP_PROTO of the real rcu_dyntick tracepoint. */
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_prep_idle(reason) do { } while (0) #define trace_rcu_prep_idle(reason) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0) #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \ #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
......
...@@ -761,13 +761,13 @@ static void rcu_eqs_enter_common(bool user) ...@@ -761,13 +761,13 @@ static void rcu_eqs_enter_common(bool user)
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
lockdep_assert_irqs_disabled(); lockdep_assert_irqs_disabled();
trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0); trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!user && !is_idle_task(current)) { !user && !is_idle_task(current)) {
struct task_struct *idle __maybe_unused = struct task_struct *idle __maybe_unused =
idle_task(smp_processor_id()); idle_task(smp_processor_id());
trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0); trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
rcu_ftrace_dump(DUMP_ORIG); rcu_ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm, current->pid, current->comm,
...@@ -880,15 +880,14 @@ void rcu_nmi_exit(void) ...@@ -880,15 +880,14 @@ void rcu_nmi_exit(void)
* leave it in non-RCU-idle state. * leave it in non-RCU-idle state.
*/ */
if (rdtp->dynticks_nmi_nesting != 1) { if (rdtp->dynticks_nmi_nesting != 1) {
trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
rdtp->dynticks_nmi_nesting - 2);
WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */ WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
rdtp->dynticks_nmi_nesting - 2); rdtp->dynticks_nmi_nesting - 2);
return; return;
} }
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0); trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
rcu_dynticks_eqs_enter(); rcu_dynticks_eqs_enter();
} }
...@@ -953,14 +952,13 @@ static void rcu_eqs_exit_common(long newval, int user) ...@@ -953,14 +952,13 @@ static void rcu_eqs_exit_common(long newval, int user)
rcu_dynticks_task_exit(); rcu_dynticks_task_exit();
rcu_dynticks_eqs_exit(); rcu_dynticks_eqs_exit();
rcu_cleanup_after_idle(); rcu_cleanup_after_idle();
trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, newval); trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, newval, rdtp->dynticks);
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!user && !is_idle_task(current)) { !user && !is_idle_task(current)) {
struct task_struct *idle __maybe_unused = struct task_struct *idle __maybe_unused =
idle_task(smp_processor_id()); idle_task(smp_processor_id());
trace_rcu_dyntick(TPS("Error on exit: not idle task"), trace_rcu_dyntick(TPS("Error on exit: not idle task"), rdtp->dynticks_nesting, newval, rdtp->dynticks);
rdtp->dynticks_nesting, newval);
rcu_ftrace_dump(DUMP_ORIG); rcu_ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm, current->pid, current->comm,
...@@ -1062,7 +1060,7 @@ void rcu_nmi_enter(void) ...@@ -1062,7 +1060,7 @@ void rcu_nmi_enter(void)
} }
trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting,
rdtp->dynticks_nmi_nesting + incby); rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */ WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
rdtp->dynticks_nmi_nesting + incby); rdtp->dynticks_nmi_nesting + incby);
barrier(); barrier();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册