diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index cd2442e13d8f5add724bb6be9de1c3dc2ae8f715..c9158a48ca058fc0aa98573efb58773070d49cc0 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -36,6 +36,7 @@ extern spinlock_t pcpu_lock;
 
 extern struct list_head *pcpu_slot;
 extern int pcpu_nr_slots;
+extern int pcpu_nr_empty_pop_pages;
 
 extern struct pcpu_chunk *pcpu_first_chunk;
 extern struct pcpu_chunk *pcpu_reserved_chunk;
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
index fa0f5de26c68083c085213bd0800ba35d4c9c2ba..44e561d3ab477733750198a23ebe3e44afb73d5d 100644
--- a/mm/percpu-stats.c
+++ b/mm/percpu-stats.c
@@ -164,6 +164,7 @@ static int percpu_stats_show(struct seq_file *m, void *v)
 	PU(nr_max_chunks);
 	PU(min_alloc_size);
 	PU(max_alloc_size);
+	P("empty_pop_pages", pcpu_nr_empty_pop_pages);
 	seq_putc(m, '\n');
 
 #undef PU
diff --git a/mm/percpu.c b/mm/percpu.c
index bd4130a69bbc9b6b631baf911e66aab31f08b6e0..9ec5fd498f1d529692f5222ec7d2226ea5d968ca 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -160,7 +160,7 @@ static LIST_HEAD(pcpu_map_extend_chunks);
  * The number of empty populated pages, protected by pcpu_lock. The
  * reserved chunk doesn't contribute to the count.
  */
-static int pcpu_nr_empty_pop_pages;
+int pcpu_nr_empty_pop_pages;
 
 /*
  * Balance work is used to populate or destroy chunks asynchronously. We
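
Note: the change itself is a pure visibility tweak. The counter `pcpu_nr_empty_pop_pages` loses its `static` qualifier in mm/percpu.c, gains an `extern` declaration in the shared internal header mm/percpu-internal.h, and is then printed by the stats code in mm/percpu-stats.c. The following single-file sketch illustrates that pattern in plain userspace C; the names, the initial value, and the printf format string are illustrative stand-ins, not the kernel's own code (the kernel prints through its `P()` seq_file macro, whose exact formatting may differ).

	#include <stdio.h>

	/* percpu-internal.h equivalent: the extern declaration makes the
	 * counter visible to every translation unit that includes it. */
	extern int nr_empty_pop_pages;

	/* percpu.c equivalent: before the patch this definition read
	 * `static int nr_empty_pop_pages;`, hiding it from sibling files. */
	int nr_empty_pop_pages = 4;	/* illustrative value */

	/* percpu-stats.c equivalent: can now read the counter directly. */
	static void stats_show(void)
	{
		printf("  %-20s: %12d\n", "empty_pop_pages",
		       nr_empty_pop_pages);
	}

	int main(void)
	{
		stats_show();
		return 0;
	}

In the kernel the same read stays safe because, per the comment in mm/percpu.c, the counter is protected by pcpu_lock, which the stats path takes before printing.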