提交 f3a310bc 编写于 作者: M Mel Gorman 提交者: Linus Torvalds

mm: vmscan: rename lumpy_mode to reclaim_mode

With compaction being used instead of lumpy reclaim, the name lumpy_mode
and its associated variables are a bit misleading.  Rename lumpy_mode to
reclaim_mode, which is a better fit.  There is no functional change.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 9927af74
...@@ -25,13 +25,13 @@ ...@@ -25,13 +25,13 @@
#define trace_reclaim_flags(page, sync) ( \ #define trace_reclaim_flags(page, sync) ( \
(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
) )
#define trace_shrink_flags(file, sync) ( \ #define trace_shrink_flags(file, sync) ( \
(sync & LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \ (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
(sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
) )
TRACE_EVENT(mm_vmscan_kswapd_sleep, TRACE_EVENT(mm_vmscan_kswapd_sleep,
......
...@@ -53,22 +53,22 @@ ...@@ -53,22 +53,22 @@
#include <trace/events/vmscan.h> #include <trace/events/vmscan.h>
/* /*
* lumpy_mode determines how the inactive list is shrunk * reclaim_mode determines how the inactive list is shrunk
* LUMPY_MODE_SINGLE: Reclaim only order-0 pages * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
* LUMPY_MODE_ASYNC: Do not block * RECLAIM_MODE_ASYNC: Do not block
* LUMPY_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
* LUMPY_MODE_CONTIGRECLAIM: For high-order allocations, take a reference * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
* page from the LRU and reclaim all pages within a * page from the LRU and reclaim all pages within a
* naturally aligned range * naturally aligned range
* LUMPY_MODE_COMPACTION: For high-order allocations, reclaim a number of * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
* order-0 pages and then compact the zone * order-0 pages and then compact the zone
*/ */
typedef unsigned __bitwise__ lumpy_mode; typedef unsigned __bitwise__ reclaim_mode_t;
#define LUMPY_MODE_SINGLE ((__force lumpy_mode)0x01u) #define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u)
#define LUMPY_MODE_ASYNC ((__force lumpy_mode)0x02u) #define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u)
#define LUMPY_MODE_SYNC ((__force lumpy_mode)0x04u) #define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u)
#define LUMPY_MODE_CONTIGRECLAIM ((__force lumpy_mode)0x08u) #define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u)
#define LUMPY_MODE_COMPACTION ((__force lumpy_mode)0x10u) #define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u)
struct scan_control { struct scan_control {
/* Incremented by the number of inactive pages that were scanned */ /* Incremented by the number of inactive pages that were scanned */
...@@ -101,7 +101,7 @@ struct scan_control { ...@@ -101,7 +101,7 @@ struct scan_control {
* Intend to reclaim enough continuous memory rather than reclaim * Intend to reclaim enough continuous memory rather than reclaim
* enough amount of memory. i.e, mode for high order allocation. * enough amount of memory. i.e, mode for high order allocation.
*/ */
lumpy_mode lumpy_reclaim_mode; reclaim_mode_t reclaim_mode;
/* Which cgroup do we reclaim from */ /* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup; struct mem_cgroup *mem_cgroup;
...@@ -284,10 +284,10 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, ...@@ -284,10 +284,10 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
return ret; return ret;
} }
static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc, static void set_reclaim_mode(int priority, struct scan_control *sc,
bool sync) bool sync)
{ {
lumpy_mode syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC; reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
/* /*
* Initially assume we are entering either lumpy reclaim or * Initially assume we are entering either lumpy reclaim or
...@@ -295,9 +295,9 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc, ...@@ -295,9 +295,9 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
* sync mode or just reclaim order-0 pages later. * sync mode or just reclaim order-0 pages later.
*/ */
if (COMPACTION_BUILD) if (COMPACTION_BUILD)
sc->lumpy_reclaim_mode = LUMPY_MODE_COMPACTION; sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
else else
sc->lumpy_reclaim_mode = LUMPY_MODE_CONTIGRECLAIM; sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
/* /*
* Avoid using lumpy reclaim or reclaim/compaction if possible by * Avoid using lumpy reclaim or reclaim/compaction if possible by
...@@ -305,16 +305,16 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc, ...@@ -305,16 +305,16 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
* under memory pressure * under memory pressure
*/ */
if (sc->order > PAGE_ALLOC_COSTLY_ORDER) if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
sc->lumpy_reclaim_mode |= syncmode; sc->reclaim_mode |= syncmode;
else if (sc->order && priority < DEF_PRIORITY - 2) else if (sc->order && priority < DEF_PRIORITY - 2)
sc->lumpy_reclaim_mode |= syncmode; sc->reclaim_mode |= syncmode;
else else
sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC; sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
} }
static void disable_lumpy_reclaim_mode(struct scan_control *sc) static void reset_reclaim_mode(struct scan_control *sc)
{ {
sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC; sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
} }
static inline int is_page_cache_freeable(struct page *page) static inline int is_page_cache_freeable(struct page *page)
...@@ -445,7 +445,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, ...@@ -445,7 +445,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* first attempt to free a range of pages fails. * first attempt to free a range of pages fails.
*/ */
if (PageWriteback(page) && if (PageWriteback(page) &&
(sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC)) (sc->reclaim_mode & RECLAIM_MODE_SYNC))
wait_on_page_writeback(page); wait_on_page_writeback(page);
if (!PageWriteback(page)) { if (!PageWriteback(page)) {
...@@ -453,7 +453,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, ...@@ -453,7 +453,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
ClearPageReclaim(page); ClearPageReclaim(page);
} }
trace_mm_vmscan_writepage(page, trace_mm_vmscan_writepage(page,
trace_reclaim_flags(page, sc->lumpy_reclaim_mode)); trace_reclaim_flags(page, sc->reclaim_mode));
inc_zone_page_state(page, NR_VMSCAN_WRITE); inc_zone_page_state(page, NR_VMSCAN_WRITE);
return PAGE_SUCCESS; return PAGE_SUCCESS;
} }
...@@ -638,7 +638,7 @@ static enum page_references page_check_references(struct page *page, ...@@ -638,7 +638,7 @@ static enum page_references page_check_references(struct page *page,
referenced_page = TestClearPageReferenced(page); referenced_page = TestClearPageReferenced(page);
/* Lumpy reclaim - ignore references */ /* Lumpy reclaim - ignore references */
if (sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM) if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
return PAGEREF_RECLAIM; return PAGEREF_RECLAIM;
/* /*
...@@ -755,7 +755,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, ...@@ -755,7 +755,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* for any page for which writeback has already * for any page for which writeback has already
* started. * started.
*/ */
if ((sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC) && if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
may_enter_fs) may_enter_fs)
wait_on_page_writeback(page); wait_on_page_writeback(page);
else { else {
...@@ -911,7 +911,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, ...@@ -911,7 +911,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
try_to_free_swap(page); try_to_free_swap(page);
unlock_page(page); unlock_page(page);
putback_lru_page(page); putback_lru_page(page);
disable_lumpy_reclaim_mode(sc); reset_reclaim_mode(sc);
continue; continue;
activate_locked: activate_locked:
...@@ -924,7 +924,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, ...@@ -924,7 +924,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
keep_locked: keep_locked:
unlock_page(page); unlock_page(page);
keep: keep:
disable_lumpy_reclaim_mode(sc); reset_reclaim_mode(sc);
keep_lumpy: keep_lumpy:
list_add(&page->lru, &ret_pages); list_add(&page->lru, &ret_pages);
VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
...@@ -1340,7 +1340,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken, ...@@ -1340,7 +1340,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
return false; return false;
/* Only stall on lumpy reclaim */ /* Only stall on lumpy reclaim */
if (sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE) if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
return false; return false;
/* If we have relaimed everything on the isolated list, no stall */ /* If we have relaimed everything on the isolated list, no stall */
...@@ -1384,14 +1384,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, ...@@ -1384,14 +1384,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
return SWAP_CLUSTER_MAX; return SWAP_CLUSTER_MAX;
} }
set_lumpy_reclaim_mode(priority, sc, false); set_reclaim_mode(priority, sc, false);
lru_add_drain(); lru_add_drain();
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
if (scanning_global_lru(sc)) { if (scanning_global_lru(sc)) {
nr_taken = isolate_pages_global(nr_to_scan, nr_taken = isolate_pages_global(nr_to_scan,
&page_list, &nr_scanned, sc->order, &page_list, &nr_scanned, sc->order,
sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM ? sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
ISOLATE_BOTH : ISOLATE_INACTIVE, ISOLATE_BOTH : ISOLATE_INACTIVE,
zone, 0, file); zone, 0, file);
zone->pages_scanned += nr_scanned; zone->pages_scanned += nr_scanned;
...@@ -1404,7 +1404,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, ...@@ -1404,7 +1404,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
} else { } else {
nr_taken = mem_cgroup_isolate_pages(nr_to_scan, nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
&page_list, &nr_scanned, sc->order, &page_list, &nr_scanned, sc->order,
sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM ? sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
ISOLATE_BOTH : ISOLATE_INACTIVE, ISOLATE_BOTH : ISOLATE_INACTIVE,
zone, sc->mem_cgroup, zone, sc->mem_cgroup,
0, file); 0, file);
...@@ -1427,7 +1427,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, ...@@ -1427,7 +1427,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
/* Check if we should syncronously wait for writeback */ /* Check if we should syncronously wait for writeback */
if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) { if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
set_lumpy_reclaim_mode(priority, sc, true); set_reclaim_mode(priority, sc, true);
nr_reclaimed += shrink_page_list(&page_list, zone, sc); nr_reclaimed += shrink_page_list(&page_list, zone, sc);
} }
...@@ -1442,7 +1442,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, ...@@ -1442,7 +1442,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
zone_idx(zone), zone_idx(zone),
nr_scanned, nr_reclaimed, nr_scanned, nr_reclaimed,
priority, priority,
trace_shrink_flags(file, sc->lumpy_reclaim_mode)); trace_shrink_flags(file, sc->reclaim_mode));
return nr_reclaimed; return nr_reclaimed;
} }
...@@ -1836,7 +1836,7 @@ static inline bool should_continue_reclaim(struct zone *zone, ...@@ -1836,7 +1836,7 @@ static inline bool should_continue_reclaim(struct zone *zone,
unsigned long inactive_lru_pages; unsigned long inactive_lru_pages;
/* If not in reclaim/compaction mode, stop */ /* If not in reclaim/compaction mode, stop */
if (!(sc->lumpy_reclaim_mode & LUMPY_MODE_COMPACTION)) if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
return false; return false;
/* /*
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册