Commit cc5993bd, authored by Minchan Kim, committed by Linus Torvalds

mm: rename deactivate_page to deactivate_file_page

"deactivate_page" was created for file invalidation so it has too
specific logic for file-backed pages.  So, let's change the name of the
function and date to a file-specific one and yield the generic name.
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Wang, Yalin <Yalin.Wang@sonymobile.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 922c0551
@@ -307,7 +307,7 @@ extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_all(void);
 extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_page(struct page *page);
+extern void deactivate_file_page(struct page *page);
 extern void swap_setup(void);
 extern void add_page_to_unevictable_list(struct page *page);

@@ -42,7 +42,7 @@ int page_cluster;
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);

 /*
  * This path almost never happens for VM activity - pages are normally
@@ -743,7 +743,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
  * be write it out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
 	int lru, file;
@@ -811,36 +811,36 @@ void lru_add_drain_cpu(int cpu)
 		local_irq_restore(flags);
 	}

-	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
 	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

 	activate_page_drain(cpu);
 }

 /**
- * deactivate_page - forcefully deactivate a page
+ * deactivate_file_page - forcefully deactivate a file page
  * @page: page to deactivate
  *
  * This function hints the VM that @page is a good reclaim candidate,
  * for example if its invalidation fails due to the page being dirty
  * or under writeback.
  */
-void deactivate_page(struct page *page)
+void deactivate_file_page(struct page *page)
 {
 	/*
-	 * In a workload with many unevictable page such as mprotect, unevictable
-	 * page deactivation for accelerating reclaim is pointless.
+	 * In a workload with many unevictable page such as mprotect,
+	 * unevictable page deactivation for accelerating reclaim is pointless.
 	 */
 	if (PageUnevictable(page))
 		return;

 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

 		if (!pagevec_add(pvec, page))
-			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+		put_cpu_var(lru_deactivate_file_pvecs);
 	}
 }

@@ -872,7 +872,7 @@ void lru_add_drain_all(void)
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
-		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			schedule_work_on(cpu, work);
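A small, self-contained sketch can make the batching pattern behind
lru_deactivate_file_pvecs easier to follow: callers queue pages into a
short per-CPU vector, and a "move" callback runs over the whole batch once
the vector fills or an explicit drain is requested.  This is a userland
analogue under assumptions, not kernel code; struct batch, batch_add(),
batch_flush() and BATCH_SIZE are hypothetical names (BATCH_SIZE only
loosely mirrors the kernel's PAGEVEC_SIZE).

/* Illustrative analogue of the deactivate_file_page() batching; not kernel code. */
#include <stdio.h>
#include <stddef.h>

#define BATCH_SIZE 14                    /* illustrative, in the spirit of PAGEVEC_SIZE */

struct batch {
	size_t nr;
	int items[BATCH_SIZE];           /* stand-ins for struct page pointers */
};

/* stand-in for pagevec_lru_move_fn(): process and empty the whole batch */
static void batch_flush(struct batch *b, void (*move_fn)(int item))
{
	for (size_t i = 0; i < b->nr; i++)
		move_fn(b->items[i]);
	b->nr = 0;
}

/* stand-in for deactivate_file_page(): queue now, process in bulk later */
static void batch_add(struct batch *b, int item, void (*move_fn)(int item))
{
	b->items[b->nr++] = item;
	if (b->nr == BATCH_SIZE)         /* like pagevec_add() reporting "vector full" */
		batch_flush(b, move_fn);
}

static void deactivate(int item)
{
	printf("deactivating item %d\n", item);
}

int main(void)
{
	struct batch b = { .nr = 0 };

	for (int i = 0; i < 20; i++)
		batch_add(&b, i, deactivate);   /* one flush happens when the batch fills */

	batch_flush(&b, deactivate);            /* like a drain: flush the leftovers */
	return 0;
}

The design point, as in the kernel's pagevec code, is amortization: the
fast path only appends to a per-CPU vector, and the comparatively expensive
LRU manipulation runs once per batch rather than once per page.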
@@ -490,7 +490,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 			 * of interest and try to speed up its reclaim.
 			 */
 			if (!ret)
-				deactivate_page(page);
+				deactivate_file_page(page);
 			count += ret;
 		}
 		pagevec_remove_exceptionals(&pvec);
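The hunk above shows the caller-side pattern in invalidate_mapping_pages():
try the cheap invalidation first and, only when it fails (for example
because the page is dirty or under writeback), fall back to
deactivate_file_page() as a reclaim hint.  Below is a minimal sketch of
that decision, assuming the hypothetical stand-ins try_invalidate() and
mark_eviction_candidate(); they are not kernel functions.

/* Sketch of the caller-side "invalidate or hint" pattern; not kernel code. */
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct cached_page {
	int  id;
	bool dirty;                  /* dirty or under writeback: cannot be dropped */
};

/* stand-in for the invalidation attempt: 1 if dropped from the cache, else 0 */
static int try_invalidate(struct cached_page *p)
{
	return p->dirty ? 0 : 1;
}

/* stand-in for deactivate_file_page(): keep the page but make it cheap to evict */
static void mark_eviction_candidate(struct cached_page *p)
{
	printf("page %d kept, marked as a reclaim candidate\n", p->id);
}

int main(void)
{
	struct cached_page pages[] = {
		{ .id = 0, .dirty = false },
		{ .id = 1, .dirty = true  },
		{ .id = 2, .dirty = false },
	};
	unsigned long count = 0;

	for (size_t i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		int ret = try_invalidate(&pages[i]);

		if (!ret)                /* mirrors "if (!ret) deactivate_file_page(page)" */
			mark_eviction_candidate(&pages[i]);
		count += ret;            /* mirrors "count += ret" in the hunk */
	}

	printf("%lu pages invalidated\n", count);
	return 0;
}

Because this fallback only makes sense for pages that were candidates for
file invalidation in the first place, the file-specific name chosen by the
commit makes that assumption explicit at the call site.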