diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 6d57c36fb676281563fcf457e72d41bb6b5ac42d..aa3fe77c8a72892515901df28550d7be04d0a1d4 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -123,6 +123,13 @@ static inline bool mem_reliable_shmem_limit_check(void)
 		shmem_reliable_nr_page;
 }
 
+static inline void reliable_clear_page_counter(struct mm_struct *mm)
+{
+	if (!mem_reliable_is_enabled())
+		return;
+
+	atomic_long_set(&mm->reliable_nr_page, 0);
+}
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
@@ -171,6 +178,7 @@ static inline void reliable_lru_add_batch(int zid, enum lru_list lru,
 					  int val) {}
 
 static inline bool mem_reliable_counter_initialized(void) { return false; }
+static inline void reliable_clear_page_counter(struct mm_struct *mm) {}
 #endif
 
 #endif
diff --git a/kernel/fork.c b/kernel/fork.c
index b5453a26655e2ad6ca9287b0b95c41d3b02d4cbc..c256525d4ce5e333fe4aea4138f201c63f80f550 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1007,6 +1007,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	atomic_long_set(&mm->locked_vm, 0);
 	mm->pinned_vm = 0;
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
+	reliable_clear_page_counter(mm);
 	spin_lock_init(&mm->page_table_lock);
 	spin_lock_init(&mm->arg_lock);
 	mm_init_cpumask(mm);
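
For readers outside the kernel tree, the following is a minimal user-space sketch of the idea, not the kernel code: the per-mm reliable page counter is zeroed while the mm is being initialised, but only when the memory-reliable feature is enabled, so a freshly set up mm never starts with a stale count. Only reliable_nr_page and the shape of the helper come from the patch; struct fake_mm, mem_reliable_enabled, and the rest of the scaffolding are hypothetical stand-ins.

/*
 * Hypothetical stand-ins for struct mm_struct and mem_reliable_is_enabled();
 * only the counter name and the helper's behaviour mirror the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_mm {
	atomic_long reliable_nr_page;	/* pages charged as "reliable" for this mm */
};

static bool mem_reliable_enabled = true;	/* stand-in for mem_reliable_is_enabled() */

static void reliable_clear_page_counter(struct fake_mm *mm)
{
	if (!mem_reliable_enabled)
		return;			/* feature off: leave the field untouched */

	atomic_store(&mm->reliable_nr_page, 0);
}

int main(void)
{
	struct fake_mm mm;

	/* Pretend the memory backing this mm still holds an old count. */
	atomic_init(&mm.reliable_nr_page, 42);

	/* What mm_init() now does for every newly created mm. */
	reliable_clear_page_counter(&mm);

	printf("reliable_nr_page after init: %ld\n",
	       atomic_load(&mm.reliable_nr_page));	/* prints 0 */
	return 0;
}

Placing the reset in mm_init() keeps it next to the clearing of locked_vm and rss_stat a few lines above, so all per-mm counters are initialised in one place.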