Commit a6dc60f8 authored by Johannes Weiner, committed by Linus Torvalds

vmscan: rename sc.may_swap to may_unmap

sc.may_swap does not only influence the reclaiming of anon pages but of pages
mapped into pagetables in general, which also includes mapped file pages.

In shrink_page_list():

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

For anon pages, this makes sense as they are always mapped and reclaiming
them always requires swapping.

But mapped file pages are skipped here as well and it has nothing to do
with swapping.
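As a user-space illustration of that distinction (not part of this patch; the
file path is just an example, any readable file will do): both a file-backed
and an anonymous mmap() end up as pages mapped into pagetables, so
page_mapped() is true for either, but only the anonymous pages need swap to be
reclaimed; the mapped file pages only need to be unmapped and written back or
dropped.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
		if (fd < 0)
			return 1;

		/* Mapped *file* page: reclaim unmaps it and drops or writes
		 * back the page cache page, no swap involved. */
		char *file_map = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);

		/* Mapped *anon* page: reclaim must unmap it and write it to swap. */
		char *anon_map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (file_map == MAP_FAILED || anon_map == MAP_FAILED)
			return 1;
		memset(anon_map, 0x5a, 4096);	/* fault it in so it is mapped */

		printf("file-backed mapping at %p, anonymous mapping at %p\n",
		       (void *)file_map, (void *)anon_map);

		munmap(file_map, 4096);
		munmap(anon_map, 4096);
		close(fd);
		return 0;
	}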

The real effect of the knob is whether mapped pages are unmapped and
reclaimed or not.  Rename it to `may_unmap' to have its name match its
actual meaning more precisely.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: MinChan Kim <minchan.kim@gmail.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 9de1581e
mm/vmscan.c
@@ -60,8 +60,8 @@ struct scan_control {
 
 	int may_writepage;
 
-	/* Can pages be swapped as part of reclaim? */
-	int may_swap;
+	/* Can mapped pages be reclaimed? */
+	int may_unmap;
 
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
@@ -606,7 +606,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (unlikely(!page_evictable(page, NULL)))
 			goto cull_mlocked;
 
-		if (!sc->may_swap && page_mapped(page))
+		if (!sc->may_unmap && page_mapped(page))
 			goto keep_locked;
 
 		/* Double the slab pressure for mapped and swapcache pages */
@@ -1694,7 +1694,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.gfp_mask = gfp_mask,
 		.may_writepage = !laptop_mode,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
-		.may_swap = 1,
+		.may_unmap = 1,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
@@ -1713,7 +1713,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
-		.may_swap = 1,
+		.may_unmap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = swappiness,
 		.order = 0,
@@ -1723,7 +1723,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	struct zonelist *zonelist;
 
 	if (noswap)
-		sc.may_swap = 0;
+		sc.may_unmap = 0;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -1762,7 +1762,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
-		.may_swap = 1,
+		.may_unmap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = vm_swappiness,
 		.order = order,
@@ -2110,7 +2110,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	struct reclaim_state reclaim_state;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
-		.may_swap = 0,
+		.may_unmap = 0,
 		.swap_cluster_max = nr_pages,
 		.may_writepage = 1,
 		.isolate_pages = isolate_pages_global,
@@ -2147,7 +2147,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
 		if (pass > 2)
-			sc.may_swap = 1;
+			sc.may_unmap = 1;
 
 		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 			unsigned long nr_to_scan = nr_pages - ret;
@@ -2290,7 +2290,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	int priority;
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,