Commit 970a39a3 authored by Mel Gorman, committed by Linus Torvalds

mm, vmscan: avoid passing in classzone_idx unnecessarily to shrink_node

shrink_node receives all information it needs about classzone_idx from
sc->reclaim_idx so remove the aliases.

Link: http://lkml.kernel.org/r/1467970510-21195-25-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent a5f5f91d
...@@ -2428,8 +2428,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, ...@@ -2428,8 +2428,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
return true; return true;
} }
static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc, static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
enum zone_type classzone_idx)
{ {
struct reclaim_state *reclaim_state = current->reclaim_state; struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long nr_reclaimed, nr_scanned; unsigned long nr_reclaimed, nr_scanned;
...@@ -2658,7 +2657,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) ...@@ -2658,7 +2657,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
if (zone->zone_pgdat == last_pgdat) if (zone->zone_pgdat == last_pgdat)
continue; continue;
last_pgdat = zone->zone_pgdat; last_pgdat = zone->zone_pgdat;
shrink_node(zone->zone_pgdat, sc, classzone_idx); shrink_node(zone->zone_pgdat, sc);
} }
/* /*
...@@ -3082,7 +3081,6 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, ...@@ -3082,7 +3081,6 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
* This is used to determine if the scanning priority needs to be raised. * This is used to determine if the scanning priority needs to be raised.
*/ */
static bool kswapd_shrink_node(pg_data_t *pgdat, static bool kswapd_shrink_node(pg_data_t *pgdat,
int classzone_idx,
struct scan_control *sc) struct scan_control *sc)
{ {
struct zone *zone; struct zone *zone;
...@@ -3090,7 +3088,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, ...@@ -3090,7 +3088,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
/* Reclaim a number of pages proportional to the number of zones */ /* Reclaim a number of pages proportional to the number of zones */
sc->nr_to_reclaim = 0; sc->nr_to_reclaim = 0;
for (z = 0; z <= classzone_idx; z++) { for (z = 0; z <= sc->reclaim_idx; z++) {
zone = pgdat->node_zones + z; zone = pgdat->node_zones + z;
if (!populated_zone(zone)) if (!populated_zone(zone))
continue; continue;
...@@ -3102,7 +3100,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, ...@@ -3102,7 +3100,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
* Historically care was taken to put equal pressure on all zones but * Historically care was taken to put equal pressure on all zones but
* now pressure is applied based on node LRU order. * now pressure is applied based on node LRU order.
*/ */
shrink_node(pgdat, sc, classzone_idx); shrink_node(pgdat, sc);
/* /*
* Fragmentation may mean that the system cannot be rebalanced for * Fragmentation may mean that the system cannot be rebalanced for
...@@ -3164,7 +3162,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) ...@@ -3164,7 +3162,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
if (!populated_zone(zone)) if (!populated_zone(zone))
continue; continue;
classzone_idx = i; sc.reclaim_idx = i;
break; break;
} }
} }
...@@ -3177,12 +3175,12 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) ...@@ -3177,12 +3175,12 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* zone was balanced even under extreme pressure when the * zone was balanced even under extreme pressure when the
* overall node may be congested. * overall node may be congested.
*/ */
for (i = classzone_idx; i >= 0; i--) { for (i = sc.reclaim_idx; i >= 0; i--) {
zone = pgdat->node_zones + i; zone = pgdat->node_zones + i;
if (!populated_zone(zone)) if (!populated_zone(zone))
continue; continue;
if (zone_balanced(zone, sc.order, classzone_idx)) if (zone_balanced(zone, sc.order, sc.reclaim_idx))
goto out; goto out;
} }
...@@ -3213,7 +3211,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) ...@@ -3213,7 +3211,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* enough pages are already being scanned that that high * enough pages are already being scanned that that high
* watermark would be met at 100% efficiency. * watermark would be met at 100% efficiency.
*/ */
if (kswapd_shrink_node(pgdat, classzone_idx, &sc)) if (kswapd_shrink_node(pgdat, &sc))
raise_priority = false; raise_priority = false;
/* /*
...@@ -3676,7 +3674,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in ...@@ -3676,7 +3674,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
* priorities until we have enough memory freed. * priorities until we have enough memory freed.
*/ */
do { do {
shrink_node(pgdat, &sc, classzone_idx); shrink_node(pgdat, &sc);
} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册