From 456a929c284b5221a1106074e545f88aee8aeca4 Mon Sep 17 00:00:00 2001
From: Ma Wupeng
Date: Thu, 14 Apr 2022 09:33:43 +0000
Subject: [PATCH] mm: Drop reliable_reserve_size

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Commit 368d710d7f32 ("mm: Fallback to non-mirrored region below low
watermark") already set the default value of reliable_reserve_size to
zero, which disables the reliable watermark check by default. With this
patch, the code related to this mechanism is removed since no one uses
this watermark check.

Signed-off-by: Ma Wupeng
Reviewed-by: Kefeng Wang
Signed-off-by: Yongqiang Liu
---
 include/linux/mem_reliable.h | 17 -----------------
 mm/mem_reliable.c            | 33 ---------------------------------
 mm/page_alloc.c              |  4 ----
 3 files changed, 54 deletions(-)

diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 8340a24fe76d..6d57c36fb676 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -24,7 +24,6 @@ DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
 
 extern struct percpu_counter pagecache_reliable_pages;
 extern struct percpu_counter anon_reliable_pages;
-extern unsigned long nr_reliable_reserve_pages __read_mostly;
 extern long shmem_reliable_nr_page __read_mostly;
 
 extern void add_reliable_mem_size(long sz);
@@ -118,21 +117,6 @@ static inline void mem_reliable_buddy_counter(struct page *page, int nr_page)
 {
 	this_cpu_add(nr_reliable_buddy_pages, nr_page);
 }
-/* reserve mirrored memory for kernel usage */
-static inline bool mem_reliable_watermark_ok(int nr_page)
-{
-	long sum = 0;
-	int cpu;
-
-	if (!reliable_allow_fb_enabled())
-		return true;
-
-	for_each_possible_cpu(cpu)
-		sum += per_cpu(nr_reliable_buddy_pages, cpu);
-
-	return sum > nr_reliable_reserve_pages;
-}
-
 static inline bool mem_reliable_shmem_limit_check(void)
 {
 	return percpu_counter_read_positive(&reliable_shmem_used_nr_page) <
@@ -178,7 +162,6 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 static inline bool mem_reliable_status(void) { return false; }
 static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
-static inline bool mem_reliable_watermark_ok(int nr_page) { return true; }
 static inline bool mem_reliable_shmem_limit_check(void) { return true; }
 
 static inline void reliable_lru_add(enum lru_list lru, struct page *page,
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 6d4ab4bee3d5..83bfdf265273 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -11,8 +11,6 @@
 #include
 #include
 
-#define MEM_RELIABLE_RESERVE_MIN 0
-
 enum mem_reliable_types {
 	MEM_RELIABLE_ALL,
 	MEM_RELIABLE_FALLBACK,
@@ -31,7 +29,6 @@ bool reliable_allow_fallback __read_mostly = true;
 bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
 DEFINE_PER_CPU(long, nr_reliable_buddy_pages);
-unsigned long nr_reliable_reserve_pages = MEM_RELIABLE_RESERVE_MIN / PAGE_SIZE;
 long shmem_reliable_nr_page = LONG_MAX;
 
 bool pagecache_use_reliable_mem __read_mostly = true;
@@ -338,29 +335,6 @@ int reliable_debug_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-static unsigned long sysctl_reliable_reserve_size = MEM_RELIABLE_RESERVE_MIN;
-
-int reliable_reserve_size_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length, loff_t *ppos)
-{
-	unsigned long *data_ptr = (unsigned long *)(table->data);
-	unsigned long old = *data_ptr;
-	int ret;
-
-	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
-	if (ret == 0 && write) {
-		if (*data_ptr > total_reliable_mem_sz() ||
-		    *data_ptr < MEM_RELIABLE_RESERVE_MIN) {
-			*data_ptr = old;
-			return -EINVAL;
-		}
-
-		nr_reliable_reserve_pages = *data_ptr / PAGE_SIZE;
-	}
-
-	return ret;
-}
-
 #ifdef CONFIG_SHMEM
 static unsigned long sysctl_shmem_reliable_bytes_limit = ULONG_MAX;
 
@@ -417,13 +391,6 @@ static struct ctl_table reliable_ctl_table[] = {
 		.mode = 0600,
 		.proc_handler = reliable_debug_handler,
 	},
-	{
-		.procname = "reliable_reserve_size",
-		.data = &sysctl_reliable_reserve_size,
-		.maxlen = sizeof(sysctl_reliable_reserve_size),
-		.mode = 0644,
-		.proc_handler = reliable_reserve_size_handler,
-	},
 #ifdef CONFIG_SHMEM
 	{
 		.procname = "shmem_reliable_bytes_limit",
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9d4f75235420..38f2a84d3224 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4649,10 +4649,6 @@ static inline bool check_after_alloc(gfp_t *gfp_mask, unsigned int order,
 	if (*gfp_mask & __GFP_NOFAIL)
 		goto out;
 
-	/* check water mark, reserver mirrored mem for kernel */
-	if (!mem_reliable_watermark_ok(1 << order))
-		goto out_free_page;
-
 	/* percpu counter is not initialized, ignore limit check */
 	if (!mem_reliable_counter_initialized())
 		goto out;
--
GitLab