From 6a2500485455d3d41fb6a21848a030ac61e1f5b7 Mon Sep 17 00:00:00 2001
From: Ma Wupeng
Date: Wed, 21 Jun 2023 10:10:17 +0800
Subject: [PATCH] mm: mem_reliable: Update reliable page counter to zero if
 underflows

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I77BDW
CVE: NA

--------------------------------

Reset the reliable page counter to zero if it underflows. Since the
reliable page counter is used for debugging purposes only, there is no
real functional problem in doing this.

Signed-off-by: Ma Wupeng
Reviewed-by: Kefeng Wang
Reviewed-by: Nanyong Sun
(cherry picked from commit a687d7a000efed25b0a5ca20c5710c3ecf768a44)
---
 include/linux/mem_reliable.h | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 79228a1b2f0b..dc1344b843b0 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -132,8 +132,19 @@ static inline bool reliable_allow_fb_enabled(void)
 static inline void reliable_page_counter(struct page *page,
 					 struct mm_struct *mm, int val)
 {
-	if (page_reliable(page))
-		atomic_long_add(val, &mm->reliable_nr_page);
+	if (!page_reliable(page))
+		return;
+
+	atomic_long_add(val, &mm->reliable_nr_page);
+
+	/*
+	 * Update reliable page counter to zero if underflows.
+	 *
+	 * Since reliable page counter is used for debug purpose only,
+	 * there is no real function problem by doing this.
+	 */
+	if (unlikely(atomic_long_read(&mm->reliable_nr_page) < 0))
+		atomic_long_set(&mm->reliable_nr_page, 0);
 }
 
 static inline void reliable_clear_page_counter(struct mm_struct *mm)
--
GitLab
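
For illustration only, below is a minimal userspace sketch of the same
clamp-on-underflow pattern used by reliable_page_counter() in the hunk
above. It is not kernel code: C11 <stdatomic.h> stands in for the
kernel's atomic_long_* helpers, and page_reliable() is stubbed to
always return true. The names mm_counter, reliable_page_counter_demo
and page_reliable_stub are hypothetical and exist only for this demo.

	/*
	 * Standalone sketch of "clamp the counter to zero if it
	 * underflows". As in the patch, the counter is debug-only, so
	 * the separate read/set pair after the add is acceptable even
	 * though it is not a single atomic operation.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct mm_counter {
		atomic_long counter;	/* plays the role of mm->reliable_nr_page */
	};

	static bool page_reliable_stub(void)
	{
		return true;		/* assume every page is "reliable" here */
	}

	static void reliable_page_counter_demo(struct mm_counter *mm, long val)
	{
		if (!page_reliable_stub())
			return;

		atomic_fetch_add(&mm->counter, val);

		/* Reset to zero on underflow, mirroring the patch. */
		if (atomic_load(&mm->counter) < 0)
			atomic_store(&mm->counter, 0);
	}

	int main(void)
	{
		struct mm_counter mm = { .counter = 0 };

		reliable_page_counter_demo(&mm, 2);	/* counter becomes 2 */
		reliable_page_counter_demo(&mm, -5);	/* underflow, clamped to 0 */

		printf("reliable_nr_page = %ld\n", atomic_load(&mm.counter));
		return 0;
	}

Running the sketch prints "reliable_nr_page = 0", showing that a net
negative update is clamped rather than left as a bogus negative count.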