Commit b309e3df authored by Chen Wandun, committed by Yang Yingliang

Revert "mm: add page cache fallback statistic"

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

This reverts commit 925368d8.
The page cache fallback statistic will be replaced by another method.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 99696395
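For context, the statistic being reverted counted page-cache allocations that requested reliable memory but received a non-reliable page, resetting the counter to zero if it ever wrapped negative. Below is a minimal userspace sketch of that behaviour, distilled from the removed code in the diff that follows; the gfp/page checks are reduced to two hypothetical booleans and C11 atomics stand in for the kernel's atomic_long_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long page_cache_fallback = 0;

/* Stand-in for the kernel predicate; assumed enabled here. */
static bool pagecache_reliable_is_enabled(void) { return true; }

/* A fallback is a request that wanted reliable memory but did not get it. */
static bool is_fallback_page(bool wanted_reliable, bool got_reliable)
{
	return wanted_reliable && !got_reliable;
}

static void page_cache_fallback_inc(bool wanted_reliable, bool got_reliable)
{
	long num;

	if (!pagecache_reliable_is_enabled())
		return;
	if (!is_fallback_page(wanted_reliable, got_reliable))
		return;

	/* fetch_add returns the old value; +1 mirrors atomic_long_inc_return() */
	num = atomic_fetch_add(&page_cache_fallback, 1) + 1;
	if (num < 0)	/* wrapped past LONG_MAX: restart at zero */
		atomic_store(&page_cache_fallback, 0);
}

int main(void)
{
	page_cache_fallback_inc(true, false);	/* counted: fell back        */
	page_cache_fallback_inc(true, true);	/* ignored: got reliable     */
	page_cache_fallback_inc(false, false);	/* ignored: never asked      */
	printf("fallbacks: %ld\n", atomic_load(&page_cache_fallback));
	return 0;
}

The reset-to-zero guard keeps the exported value non-negative even after the counter overflows, at the cost of losing the count on wraparound.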
@@ -21,11 +21,9 @@ extern bool reliable_allow_fallback;
 extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool pagecache_use_reliable_mem;
-extern atomic_long_t page_cache_fallback;
 DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
 extern unsigned long nr_reliable_reserve_pages __read_mostly;
 extern long shmem_reliable_nr_page __read_mostly;
-extern void page_cache_fallback_inc(gfp_t gfp, struct page *page);
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
@@ -166,7 +164,6 @@ static inline bool shmem_reliable_is_enabled(void) { return false; }
 static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 {
 }
-static inline void page_cache_fallback_inc(gfp_t gfp, struct page *page) {}
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 static inline bool mem_reliable_status(void) { return false; }
...
@@ -1050,13 +1050,9 @@ struct page *__page_cache_alloc(gfp_t gfp)
 			page = __alloc_pages_node(n, gfp, 0);
 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
-		page_cache_fallback_inc(gfp, page);
 
 		return page;
 	}
-	page = alloc_pages(gfp, 0);
-	page_cache_fallback_inc(gfp, page);
-
-	return page;
+	return alloc_pages(gfp, 0);
 }
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
...
@@ -35,7 +35,6 @@ unsigned long nr_reliable_reserve_pages = MEM_RELIABLE_RESERVE_MIN / PAGE_SIZE;
 long shmem_reliable_nr_page = LONG_MAX;
 bool pagecache_use_reliable_mem __read_mostly = true;
-atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
 DEFINE_PER_CPU(long, pagecache_reliable_pages);
 static unsigned long zero;
@@ -57,19 +56,6 @@ bool page_reliable(struct page *page)
 	return mem_reliable_is_enabled() && page_zonenum(page) < ZONE_MOVABLE;
 }
-static bool is_fallback_page(gfp_t gfp, struct page *page)
-{
-	bool ret = false;
-
-	if (!page)
-		return ret;
-
-	if ((gfp & ___GFP_RELIABILITY) && !page_reliable(page))
-		ret = true;
-
-	return ret;
-}
 static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
 {
 	if (!page || !page_reliable(page))
@@ -89,21 +75,6 @@ void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
 	this_cpu_add(pagecache_reliable_pages, val);
 }
-void page_cache_fallback_inc(gfp_t gfp, struct page *page)
-{
-	long num;
-
-	if (!pagecache_reliable_is_enabled())
-		return;
-
-	if (!is_fallback_page(gfp, page))
-		return;
-
-	num = atomic_long_inc_return(&page_cache_fallback);
-	if (num < 0)
-		atomic_long_set(&page_cache_fallback, 0);
-}
 static int reliable_mem_notifier(struct notifier_block *nb,
 				 unsigned long action, void *arg)
 {
...
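For reference, here is how __page_cache_alloc reads once the revert is applied. Only the lines appearing in the hunk above are taken from this commit; the surrounding declarations and the cpuset page-spread branch are assumed from the upstream implementation of this function in mm/filemap.c, so treat this as a reconstruction rather than authoritative source:

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;

		/* Retry on a different node if the cpuset changed under us. */
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

With the fallback bookkeeping gone, the non-spread path collapses back to a direct return of alloc_pages(gfp, 0), exactly as the +line in the hunk shows.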