diff --git a/include/linux/swap.h b/include/linux/swap.h
index 893096e67bdbb2f8f12e81d52ce635ce5789c543..117add066f00d186ce7435167526ef8877245a23 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -178,7 +178,9 @@ extern int vm_swappiness;
 extern int isolate_lru_page(struct page *p);
 extern int putback_lru_pages(struct list_head *l);
 
+#ifdef CONFIG_MIGRATION
 extern int migrate_pages(struct list_head *l, struct list_head *t);
+#endif
 
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
diff --git a/mm/Kconfig b/mm/Kconfig
index b3db11f137e006d937e5d16199ae4506a0771095..a9cb80ae6409df599cc3823a7a072a158c41112c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -132,3 +132,10 @@ config SPLIT_PTLOCK_CPUS
 	default "4096" if ARM && !CPU_CACHE_VIPT
 	default "4096" if PARISC && !PA20
 	default "4"
+
+#
+# support for page migration
+#
+config MIGRATION
+	def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM
+	depends on SWAP
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a537a7f1635782fbfaff538dd669c0c22c15d920..58270aea669a8e1202ccdf6cc900b26ccb10ba56 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -568,6 +568,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 	return reclaimed;
 }
 
+#ifdef CONFIG_MIGRATION
 /*
  * swapout a single page
  * page is locked upon entry, unlocked on exit
@@ -656,8 +657,9 @@ int migrate_pages(struct list_head *l, struct list_head *t)
 
 		/*
 		 * Skip locked pages during the first two passes to give the
-		 * functions holding the lock time to release the page. Later we use
-		 * lock_page to have a higher chance of acquiring the lock.
+		 * functions holding the lock time to release the page. Later we
+		 * use lock_page() to have a higher chance of acquiring the
+		 * lock.
 		 */
 		if (pass > 2)
 			lock_page(page);
@@ -669,15 +671,15 @@ int migrate_pages(struct list_head *l, struct list_head *t)
 		 * Only wait on writeback if we have already done a pass where
 		 * we we may have triggered writeouts for lots of pages.
 		 */
-		if (pass > 0)
+		if (pass > 0) {
 			wait_on_page_writeback(page);
-		else
+		} else {
 			if (PageWriteback(page)) {
 				unlock_page(page);
 				goto retry_later;
 			}
+		}
 
-#ifdef CONFIG_SWAP
 		if (PageAnon(page) && !PageSwapCache(page)) {
 			if (!add_to_swap(page)) {
 				unlock_page(page);
@@ -686,16 +688,15 @@ int migrate_pages(struct list_head *l, struct list_head *t)
 				continue;
 			}
 		}
-#endif /* CONFIG_SWAP */
 
 		/*
 		 * Page is properly locked and writeback is complete.
 		 * Try to migrate the page.
 		 */
-		if (swap_page(page)) {
+		if (!swap_page(page))
+			continue;
 retry_later:
-			retry++;
-		}
+		retry++;
 	}
 	if (retry && pass++ < 10)
 		goto redo;
@@ -708,6 +709,7 @@ int migrate_pages(struct list_head *l, struct list_head *t)
 
 	return nr_failed + retry;
 }
+#endif
 
 /*
  * zone->lru_lock is heavily contended. Some of the functions that
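
A note on the swap.h change: guarding the migrate_pages() declaration with
#ifdef CONFIG_MIGRATION means any caller built with migration disabled now
fails at compile time. A common kernel idiom for configured-out interfaces,
sketched below purely for illustration (the static inline stub and its
-ENOSYS return value are NOT part of this patch), is to provide a fallback
in the !CONFIG_MIGRATION branch:

#include <linux/errno.h>
#include <linux/list.h>

#ifdef CONFIG_MIGRATION
extern int migrate_pages(struct list_head *l, struct list_head *t);
#else
/*
 * Hypothetical fallback, not in this patch: lets callers compile when
 * migration support is configured out; reports failure unconditionally.
 */
static inline int migrate_pages(struct list_head *l, struct list_head *t)
{
	return -ENOSYS;
}
#endif

Whether such a stub is wanted depends on whether any code path reachable
without CONFIG_MIGRATION actually calls migrate_pages(); as posted, the
patch assumes none does.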