From dac1d27bc8d5ca636d3014ecfdf94407031d1970 Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Mon, 28 Apr 2008 02:12:12 -0700
Subject: [PATCH] mm: use zonelists instead of zones when direct reclaiming pages

The following patches replace multiple zonelists per node with two
zonelists that are filtered based on the GFP flags.  The patches as a set
fix a bug with regard to the use of MPOL_BIND and ZONE_MOVABLE.  With this
patchset, the MPOL_BIND policy will apply to the two highest zones when
the highest zone is ZONE_MOVABLE.  This should be considered as an
alternative fix for the MPOL_BIND+ZONE_MOVABLE issue in 2.6.23 to the
previously discussed hack that filters only custom zonelists.

The first patch cleans up an inconsistency where direct reclaim uses
zonelist->zones where other places use zonelist.  The second patch
introduces a helper function node_zonelist() for looking up the
appropriate zonelist for a GFP mask, which simplifies patches later in the
set.  The third patch defines/remembers the "preferred zone" for NUMA
statistics, as it is no longer always the first zone in a zonelist.  The
fourth patch replaces multiple zonelists with two zonelists that are
filtered.  The two zonelists are due to the fact that the memoryless
patchset introduces a second set of zonelists for __GFP_THISNODE.  The
fifth patch introduces helper macros for retrieving the zone and node
indices of entries in a zonelist.  The final patch introduces filtering of
the zonelists based on a nodemask.  Two zonelists exist per node, one for
normal allocations and one for __GFP_THISNODE.

Performance results varied depending on the machine configuration.  In
real workloads the gain/loss will depend on how much the userspace portion
of the benchmark benefits from having more cache available due to reduced
referencing of zonelists.

These are the ranges of performance losses/gains when running against
2.6.24-rc4-mm1.  The machines are a mix of i386, x86_64 and ppc64, both
NUMA and non-NUMA.

                             loss   to  gain
Total CPU time on Kernbench: -0.86% to  1.13%
Elapsed   time on Kernbench: -0.79% to  0.76%
page_test from aim9:         -4.37% to  0.79%
brk_test  from aim9:         -0.71% to  4.07%
fork_test from aim9:         -1.84% to  4.60%
exec_test from aim9:         -0.71% to  1.08%

This patch:

The allocator deals with zonelists, which indicate the order in which
zones should be targeted for an allocation.  Similarly, direct reclaim of
pages iterates over an array of zones.  For consistency, this patch
converts direct reclaim to use a zonelist.  No functionality is changed by
this patch.  This simplifies zonelist iterators in the next patch.
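Illustrative sketch (not part of the patch): the change only moves the
"unpack zonelist->zones" step from the callers into the reclaim path
itself.  The simplified, standalone userspace model below shows that
relationship; the struct definitions are stand-ins for the real ones in
include/linux/mmzone.h (which carry many more fields), shrink_zones_model()
is a hypothetical name, and the loop body is a placeholder rather than
actual reclaim logic.

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the kernel structures; the real definitions are far
 * richer than this. */
struct zone {
	const char *name;
};

struct zonelist {
	/* NULL-terminated list of zones, in allocation/reclaim order */
	struct zone *zones[4];
};

/* After the patch, reclaim receives the zonelist and unpacks ->zones
 * internally, mirroring shrink_zones()/do_try_to_free_pages() in the
 * diff below. */
static unsigned long shrink_zones_model(struct zonelist *zonelist)
{
	struct zone **zones = zonelist->zones;
	unsigned long nr_reclaimed = 0;
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		printf("would reclaim from zone %s\n", zones[i]->name);
		nr_reclaimed++;	/* placeholder for per-zone reclaim work */
	}
	return nr_reclaimed;
}

int main(void)
{
	struct zone dma = { "DMA" }, normal = { "Normal" };
	struct zonelist zl = { { &dma, &normal, NULL } };

	/* Callers now pass &zl rather than zl.zones, matching the new
	 * try_to_free_pages() signature. */
	printf("reclaimed from %lu zones\n", shrink_zones_model(&zl));
	return 0;
}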
Signed-off-by: Mel Gorman
Acked-by: Christoph Lameter
Signed-off-by: Lee Schermerhorn
Cc: KAMEZAWA Hiroyuki
Cc: Mel Gorman
Cc: Christoph Lameter
Cc: Hugh Dickins
Cc: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/buffer.c          |  8 ++++----
 include/linux/swap.h |  2 +-
 mm/page_alloc.c      |  2 +-
 mm/vmscan.c          | 21 ++++++++++++---------
 4 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 8b9807523efe..1dae94acb3fe 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,16 +360,16 @@ void invalidate_bdev(struct block_device *bdev)
  */
 static void free_more_memory(void)
 {
-	struct zone **zones;
+	struct zonelist *zonelist;
 	pg_data_t *pgdat;
 
 	wakeup_pdflush(1024);
 	yield();
 
 	for_each_online_pgdat(pgdat) {
-		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
-		if (*zones)
-			try_to_free_pages(zones, 0, GFP_NOFS);
+		zonelist = &pgdat->node_zonelists[gfp_zone(GFP_NOFS)];
+		if (zonelist->zones[0])
+			try_to_free_pages(zonelist, 0, GFP_NOFS);
 	}
 }
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 878459ae0454..4286e7ac2b00 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -181,7 +181,7 @@ extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
 							gfp_t gfp_mask);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 32e796af12a1..1bda771a072a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1569,7 +1569,7 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
 	p->reclaim_state = NULL;
 	p->flags &= ~PF_MEMALLOC;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f80a5b7c057f..ef8551e0d2d0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1246,10 +1246,11 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	unsigned long nr_reclaimed = 0;
+	struct zone **zones = zonelist->zones;
 	int i;
 
@@ -1301,8 +1302,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
-					struct scan_control *sc)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+					gfp_t gfp_mask, struct scan_control *sc)
 {
 	int priority;
 	int ret = 0;
@@ -1310,6 +1311,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 	unsigned long nr_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
+	struct zone **zones = zonelist->zones;
 	int i;
 
 	if (scan_global_lru(sc))
@@ -1333,7 +1335,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
-		nr_reclaimed += shrink_zones(priority, zones, sc);
+		nr_reclaimed += shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
@@ -1397,7 +1399,8 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 	return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+								gfp_t gfp_mask)
 {
 	struct scan_control sc = {
 		.gfp_mask = gfp_mask,
@@ -1410,7 +1413,7 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
 		.isolate_pages = isolate_pages_global,
 	};
 
-	return do_try_to_free_pages(zones, gfp_mask, &sc);
+	return do_try_to_free_pages(zonelist, gfp_mask, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -1428,11 +1431,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.mem_cgroup = mem_cont,
 		.isolate_pages = mem_cgroup_isolate_pages,
 	};
-	struct zone **zones;
+	struct zonelist *zonelist;
 	int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
 
-	zones = NODE_DATA(numa_node_id())->node_zonelists[target_zone].zones;
-	if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
+	zonelist = &NODE_DATA(numa_node_id())->node_zonelists[target_zone];
+	if (do_try_to_free_pages(zonelist, sc.gfp_mask, &sc))
 		return 1;
 	return 0;
 }
-- 
GitLab