Commit 456a929c authored by Ma Wupeng, committed by Laibin Qiu

mm: Drop reliable_reserve_size

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Commit 368d710d ("mm: Fallback to non-mirrored region below low watermark")
already set the default value of reliable_reserve_size to zero, which
disables the reliable watermark check by default.

With this patch, the code related to this mechanism is removed, since no one
uses this watermark check.
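For context, reliable_reserve_size was exposed as a writable sysctl entry
(mode 0644) backed by reliable_reserve_size_handler, both deleted below. A
minimal sketch of how the knob could be exercised before this patch; the
exact /proc/sys path is an assumption (it depends on where
reliable_ctl_table is registered, which this diff does not show):

    # Assumed path; verify on the target kernel before relying on it.
    # Reserve 256 MiB of mirrored memory for kernel allocations:
    echo 268435456 > /proc/sys/vm/reliable_reserve_size
    # The handler rejected values larger than total_reliable_mem_sz(),
    # restoring the old value and returning -EINVAL to the writer.
    cat /proc/sys/vm/reliable_reserve_size

With the entry gone, any tooling that writes this knob now fails with
"No such file or directory".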
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
Parent 3ef161fb
@@ -24,7 +24,6 @@ DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
 extern struct percpu_counter pagecache_reliable_pages;
 extern struct percpu_counter anon_reliable_pages;
-extern unsigned long nr_reliable_reserve_pages __read_mostly;
 extern long shmem_reliable_nr_page __read_mostly;
 extern void add_reliable_mem_size(long sz);
@@ -118,21 +117,6 @@ static inline void mem_reliable_buddy_counter(struct page *page, int nr_page)
 	this_cpu_add(nr_reliable_buddy_pages, nr_page);
 }
 
-/* reserve mirrored memory for kernel usage */
-static inline bool mem_reliable_watermark_ok(int nr_page)
-{
-	long sum = 0;
-	int cpu;
-
-	if (!reliable_allow_fb_enabled())
-		return true;
-
-	for_each_possible_cpu(cpu)
-		sum += per_cpu(nr_reliable_buddy_pages, cpu);
-
-	return sum > nr_reliable_reserve_pages;
-}
-
 static inline bool mem_reliable_shmem_limit_check(void)
 {
 	return percpu_counter_read_positive(&reliable_shmem_used_nr_page) <
@@ -178,7 +162,6 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 static inline bool mem_reliable_status(void) { return false; }
 static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
-static inline bool mem_reliable_watermark_ok(int nr_page) { return true; }
 static inline bool mem_reliable_shmem_limit_check(void) { return true; }
 static inline void reliable_lru_add(enum lru_list lru,
 				    struct page *page,
...
@@ -11,8 +11,6 @@
 #include <linux/oom.h>
 #include <linux/crash_dump.h>
 
-#define MEM_RELIABLE_RESERVE_MIN	0
-
 enum mem_reliable_types {
 	MEM_RELIABLE_ALL,
 	MEM_RELIABLE_FALLBACK,
@@ -31,7 +29,6 @@ bool reliable_allow_fallback __read_mostly = true;
 bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
 DEFINE_PER_CPU(long, nr_reliable_buddy_pages);
-unsigned long nr_reliable_reserve_pages = MEM_RELIABLE_RESERVE_MIN / PAGE_SIZE;
 long shmem_reliable_nr_page = LONG_MAX;
 
 bool pagecache_use_reliable_mem __read_mostly = true;
@@ -338,29 +335,6 @@ int reliable_debug_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-static unsigned long sysctl_reliable_reserve_size = MEM_RELIABLE_RESERVE_MIN;
-
-int reliable_reserve_size_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length, loff_t *ppos)
-{
-	unsigned long *data_ptr = (unsigned long *)(table->data);
-	unsigned long old = *data_ptr;
-	int ret;
-
-	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
-	if (ret == 0 && write) {
-		if (*data_ptr > total_reliable_mem_sz() ||
-		    *data_ptr < MEM_RELIABLE_RESERVE_MIN) {
-			*data_ptr = old;
-			return -EINVAL;
-		}
-
-		nr_reliable_reserve_pages = *data_ptr / PAGE_SIZE;
-	}
-
-	return ret;
-}
-
 #ifdef CONFIG_SHMEM
 static unsigned long sysctl_shmem_reliable_bytes_limit = ULONG_MAX;
@@ -417,13 +391,6 @@ static struct ctl_table reliable_ctl_table[] = {
 		.mode = 0600,
 		.proc_handler = reliable_debug_handler,
 	},
-	{
-		.procname = "reliable_reserve_size",
-		.data = &sysctl_reliable_reserve_size,
-		.maxlen = sizeof(sysctl_reliable_reserve_size),
-		.mode = 0644,
-		.proc_handler = reliable_reserve_size_handler,
-	},
 #ifdef CONFIG_SHMEM
 	{
 		.procname = "shmem_reliable_bytes_limit",
...
@@ -4649,10 +4649,6 @@ static inline bool check_after_alloc(gfp_t *gfp_mask, unsigned int order,
 	if (*gfp_mask & __GFP_NOFAIL)
 		goto out;
 
-	/* check water mark, reserver mirrored mem for kernel */
-	if (!mem_reliable_watermark_ok(1 << order))
-		goto out_free_page;
-
 	/* percpu counter is not initialized, ignore limit check */
 	if (!mem_reliable_counter_initialized())
 		goto out;
...