diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7fe317164b73d7d7ac46564a48e7a7fef16b0277..a703527e2b45297306c6756a27a374f942ab2ba6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -169,7 +169,7 @@ struct zone {
 	/*
 	 * zone reclaim becomes active if more unmapped pages exist.
 	 */
-	unsigned long		min_unmapped_ratio;
+	unsigned long		min_unmapped_pages;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f7ea020c23ea886c63111c2bdd9ce40ee866da97..5da6bc4e0a6b8a42268b00d61bd7e32ceb6eb7a9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2002,7 +2002,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
 #ifdef CONFIG_NUMA
-		zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
+		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
 						/ 100;
 #endif
 		zone->name = zone_names[j];
@@ -2313,7 +2313,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;
 
 	for_each_zone(zone)
-		zone->min_unmapped_ratio = (zone->present_pages *
+		zone->min_unmapped_pages = (zone->present_pages *
 				sysctl_min_unmapped_ratio) / 100;
 	return 0;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8f35d7d585cba31ee65abb70ab09926b75128e74..5154c25e844056b95c147c367dc9d01005dfcef5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1618,7 +1618,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * unmapped file backed pages.
 	 */
 	if (zone_page_state(zone, NR_FILE_PAGES) -
-	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio)
+	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages)
 		return 0;
 
 	/*
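
The rename is mechanical, but the unit it documents is the point: the per-zone field holds an absolute page count, while the percentage lives only in sysctl_min_unmapped_ratio and is converted once per zone (at init and again from the sysctl handler). The following standalone userspace sketch mirrors that conversion and the bail-out check in zone_reclaim(); it is not kernel code, and the zone_stub struct and helper names are hypothetical, made up for illustration only.

/*
 * Illustration of the arithmetic behind min_unmapped_pages:
 * percentage (sysctl) -> absolute per-zone page threshold,
 * then the "enough unmapped file pages?" check from zone_reclaim().
 * zone_stub, set_min_unmapped and would_skip_zone_reclaim are
 * hypothetical names, not kernel symbols.
 */
#include <stdio.h>

struct zone_stub {
	unsigned long present_pages;      /* pages managed by the zone */
	unsigned long min_unmapped_pages; /* cached absolute threshold */
};

/* Mirrors the sysctl handler's update: ratio in percent -> page count. */
static void set_min_unmapped(struct zone_stub *zone,
			     unsigned long min_unmapped_ratio)
{
	zone->min_unmapped_pages =
		(zone->present_pages * min_unmapped_ratio) / 100;
}

/*
 * Mirrors the check in zone_reclaim(): skip reclaim when the number of
 * unmapped file-backed pages is at or below the cached threshold.
 */
static int would_skip_zone_reclaim(const struct zone_stub *zone,
				   unsigned long nr_file_pages,
				   unsigned long nr_file_mapped)
{
	return nr_file_pages - nr_file_mapped <= zone->min_unmapped_pages;
}

int main(void)
{
	/* 262144 pages of 4 KiB = 1 GiB zone (example figure). */
	struct zone_stub zone = { .present_pages = 262144 };

	set_min_unmapped(&zone, 1);	/* 1% is the kernel's default ratio */
	printf("threshold: %lu pages\n", zone.min_unmapped_pages);
	printf("skip reclaim: %d\n",
	       would_skip_zone_reclaim(&zone, 4000, 2000));
	return 0;
}

With these example numbers the threshold is 2621 pages, and 4000 - 2000 = 2000 unmapped file pages falls below it, so reclaim would be skipped; the same comparison, against the renamed zone->min_unmapped_pages field, is what the vmscan.c hunk above updates.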