diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 66c73a214cff8b6dd63f6a6e27e8035414ede87b..8e0711954bbff0bf2d17b10d1894e5b6caff4e6a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2691,7 +2691,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	/* Update counts and requeue any remaining callbacks. */
 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
-	rdp->n_cbs_invoked += count;
 	rcu_segcblist_insert_count(&rdp->cblist, &rcl);
 
 	/* Reinstate batch limit if we have worked down the excess. */
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 01abd1c4e5daabeadbc1aef5e723084f63a3787b..b258fac7352434890f9702d0045ca95f01fec3aa 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -211,8 +211,6 @@ struct rcu_data {
 					/* different grace periods. */
 	long		qlen_last_fqs_check;
 					/* qlen at last check for QS forcing */
-	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
-	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
 	unsigned long	n_force_qs_snap;
 					/* did other CPU force QS recently? */
 	long		blimit;		/* Upper limit on a processed batch */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0d552985b9051b9f1e47a7669d1e26f870d3d01d..1c2d58a85511342f05d6d8a22762efc372ac2852 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2243,7 +2243,6 @@ static int rcu_nocb_kthread(void *arg)
 		smp_mb__before_atomic(); /* _add after CB invocation. */
 		atomic_long_add(-c, &rdp->nocb_q_count);
 		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
-		rdp->n_nocbs_invoked += c;
 	}
 	return 0;
 }