diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index ddadf28037429a37473bced527391530e7886921..79228a1b2f0b8f21641794133baf499bbd3dab08 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -135,6 +135,14 @@ static inline void reliable_page_counter(struct page *page,
 	if (page_reliable(page))
 		atomic_long_add(val, &mm->reliable_nr_page);
 }
+
+/* Reset the per-mm reliable page counter for a freshly initialized mm. */
+static inline void reliable_clear_page_counter(struct mm_struct *mm)
+{
+	if (!mem_reliable_is_enabled())
+		return;
+
+	atomic_long_set(&mm->reliable_nr_page, 0);
+}
 #else
 #define reliable_enabled 0
 #define pagecache_use_reliable_mem 0
@@ -178,6 +186,7 @@ static inline void reliable_page_counter(struct page *page,
 					struct mm_struct *mm, int val) {}
 static inline void reliable_report_usage(struct seq_file *m,
 					 struct mm_struct *mm) {}
+static inline void reliable_clear_page_counter(struct mm_struct *mm) {}
 #endif
 
 #endif
diff --git a/kernel/fork.c b/kernel/fork.c
index f0aa2da990b827330b17f7d6752e6b82e17310a3..908c4e2e7896e821101b9048de46d337335eea5d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1049,6 +1049,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	atomic_set(&mm->has_pinned, 0);
 	atomic64_set(&mm->pinned_vm, 0);
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
+	reliable_clear_page_counter(mm);
 	spin_lock_init(&mm->page_table_lock);
 	spin_lock_init(&mm->arg_lock);
 	mm_init_cpumask(mm);