diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 47762ca695a59de5e0b3b55c3b7cbd9255f07088..49cc68af01f8e6d971d870fd952d229d31810628 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -38,3 +38,25 @@ del_page_from_lru(struct zone *zone, struct page *page)
 		zone->nr_inactive--;
 	}
 }
+
+/*
+ * Isolate one page from the LRU lists.
+ *
+ * - zone->lru_lock must be held
+ */
+static inline int __isolate_lru_page(struct page *page)
+{
+	if (unlikely(!TestClearPageLRU(page)))
+		return 0;
+
+	if (get_page_testone(page)) {
+		/*
+		 * It is being freed elsewhere
+		 */
+		__put_page(page);
+		SetPageLRU(page);
+		return -ENOENT;
+	}
+
+	return 1;
+}
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 556617bcf7accb79a763eda248e4075df6330e1a..a49112536c02c519a6ccd77b8552dfc15af9cc8f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -175,6 +175,9 @@ extern int try_to_free_pages(struct zone **, gfp_t);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
+extern int isolate_lru_page(struct page *p);
+extern int putback_lru_pages(struct list_head *l);
+
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 428c5801d4b45cf19be318079ed23c8c55a7ccfd..261a56ee11b690b8e182fbbe84a0b42c097c446f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -593,20 +593,18 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It is being freed elsewhere
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, src);
-			continue;
-		} else {
-			list_add(&page->lru, dst);
+		switch (__isolate_lru_page(page)) {
+		case 1:
+			/* Succeeded to isolate page */
+			list_move(&page->lru, dst);
 			nr_taken++;
+			break;
+		case -ENOENT:
+			/* Not possible to isolate */
+			list_move(&page->lru, src);
+			break;
+		default:
+			BUG();
 		}
 	}
 
@@ -614,6 +612,48 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 	return nr_taken;
 }
 
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists and put it on the
+ * indicated list. Do necessary cache draining if the
+ * page is not on the LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on LRU list
+ *  1 = page removed from LRU list and added to the specified list.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+	int rc = 0;
+	struct zone *zone = page_zone(page);
+
+redo:
+	spin_lock_irq(&zone->lru_lock);
+	rc = __isolate_lru_page(page);
+	if (rc == 1) {
+		if (PageActive(page))
+			del_page_from_active_list(zone, page);
+		else
+			del_page_from_inactive_list(zone, page);
+	}
+	spin_unlock_irq(&zone->lru_lock);
+	if (rc == 0) {
+		/*
+		 * Maybe this page is still waiting for a cpu to drain it
+		 * from one of the lru lists?
+		 */
+		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+		if (rc == 0 && PageLRU(page))
+			goto redo;
+	}
+	return rc;
+}
+
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
@@ -679,6 +719,40 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
 	pagevec_release(&pvec);
 }
 
+static inline void move_to_lru(struct page *page)
+{
+	list_del(&page->lru);
+	if (PageActive(page)) {
+		/*
+		 * lru_cache_add_active checks that
+		 * the PG_active bit is off.
+		 */
+		ClearPageActive(page);
+		lru_cache_add_active(page);
+	} else {
+		lru_cache_add(page);
+	}
+	put_page(page);
+}
+
+/*
+ * Add isolated pages on the list back to the LRU
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+	struct page *page;
+	struct page *page2;
+	int count = 0;
+
+	list_for_each_entry_safe(page, page2, l, lru) {
+		move_to_lru(page);
+		count++;
+	}
+	return count;
+}
+
 /*
  * This moves pages from the active list to the inactive list.
  *
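
For reference, a rough sketch of how a caller might use the two new entry points: isolate a page onto a caller-private list, work on it with the extra reference held, then return the whole list to the LRU. The wrapper function and its name below are hypothetical illustrations, not part of this patch; only isolate_lru_page(), putback_lru_pages() and the standard list helpers come from the kernel.

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/list.h>

/*
 * Hypothetical example (not in this patch): take one page off the LRU,
 * keep it on a private list while it is being worked on, then hand the
 * list back to the LRU.
 */
static int example_isolate_and_putback(struct page *page)
{
	LIST_HEAD(pagelist);
	int rc;

	/*
	 * isolate_lru_page() clears PG_lru, takes an extra reference and
	 * unlinks the page from the zone's active/inactive list.
	 */
	rc = isolate_lru_page(page);
	if (rc != 1)
		return rc;	/* 0: not on the LRU, -ENOENT: being freed */

	/* The caller keeps isolated pages on its own private list. */
	list_add_tail(&page->lru, &pagelist);

	/* ... operate on the isolated page here ... */

	/*
	 * putback_lru_pages() re-adds each page on the list to the LRU
	 * (via lru_cache_add/lru_cache_add_active) and drops the extra
	 * reference taken at isolation time.
	 */
	return putback_lru_pages(&pagelist);
}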