/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/*
 * How many pages do we try to swap or page in/out together?
 * page_cluster is the log2 of the batch size (tunable via vm.page-cluster).
 */
int page_cluster;

47
static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
48
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
49
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
50
static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
51 52 53
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif
54

A
Adrian Bunk 已提交
55 56 57 58
/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(zone_lru_lock(zone), flags);
		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
	}
	__ClearPageWaiters(page);
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for THP, not for
	 * hugetlb.  A hugetlb page never has PageLRU set (it is never placed
	 * on any LRU list) and no memcg routines should be called for
	 * hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

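/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical put_pages_list() caller collects pages on a local list
 * threaded through page->lru, then releases them in one call.  The page
 * source is hypothetical.
 *
 *	LIST_HEAD(pages_to_free);
 *
 *	// for each page that is no longer needed:
 *	list_add(&page->lru, &pages_to_free);
 *
 *	put_pages_list(&pages_to_free);	// drops one reference per page
 */
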
/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must have space for at least one page.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
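
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * pinning one page of a kernel buffer with get_kernel_page().  The
 * buffer name kbuf is hypothetical.
 *
 *	struct page *page;
 *	int ret = get_kernel_page((unsigned long)kbuf, 0, &page);
 *
 *	if (ret == 1) {
 *		// ... operate on the pinned page ...
 *		put_page(page);
 *	}
 */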

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	page = compound_head(page);
	spin_lock_irq(zone_lru_lock(zone));
	__activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL);
	spin_unlock_irq(zone_lru_lock(zone));
}
#endif
static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
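
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a read path typically calls mark_page_accessed() on each page it
 * touches, so repeated use promotes the page toward the active LRU.
 * The mapping and index are hypothetical.
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		mark_page_accessed(page);
 *		// ... copy data out of the page ...
 *		put_page(page);
 *	}
 */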

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}
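
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual pattern for a freshly allocated page that should become
 * reclaimable.  The GFP flags and surrounding setup are hypothetical.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 *	if (page) {
 *		// ... map or fill the page ...
 *		lru_cache_add(page);	// queued; drained to the LRU later
 *	}
 */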

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per-cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		SetPageActive(page);
	else if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	lru_cache_add(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects the flusher threads to write it out, which is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim can race with end_page_writeback().  That can
		 * confuse readahead, but the race window is _really_ small
		 * and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have
		 * the SwapBacked flag cleared, to distinguish them from
		 * normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
		count_memcg_page_event(page, PGLAZYFREE);
		update_page_reclaim_stat(lruvec, 1, 0);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as one making
	 * heavy use of mprotect), deactivating unevictable pages to
	 * accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}
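
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * an invalidation path can hint reclaim when a page refuses to go away.
 * This mirrors the pattern used by invalidate_mapping_pages().
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_file_page(page);	// could not drop it; deactivate
 */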

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	mutex_lock(&lock);
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	mutex_unlock(&lock);
}

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same pgdat. The lock is held only if pgdat != NULL.
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		if (is_huge_zero_page(page))
			continue;

		/* A device public page cannot be a huge page */
		if (is_device_public_page(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
						       flags);
				locked_pgdat = NULL;
			}
			put_devmap_managed_page(page);
			continue;
		}

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
									flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
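
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * dropping the references on a batch of pages in one call instead of
 * looping over put_page().  The array and the collect_pages() helper
 * are hypothetical.
 *
 *	struct page *pages[16];
 *	int nr = collect_pages(pages);	// hypothetical helper
 *
 *	release_pages(pages, nr);
 */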

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
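
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the common pagevec batching pattern: fill the vector, let
 * pagevec_add() report when it is full, then release the batch.
 *
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec);
 *	// for each page of interest (each with a reference held):
 *	if (!pagevec_add(&pvec, page))
 *		pagevec_release(&pvec);	// vector full, drop the batch
 *	...
 *	pagevec_release(&pvec);		// release any remainder
 */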

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	/*
	 * Page becomes evictable in two ways:
	 * 1) Within LRU lock [munlock_vma_pages() and __munlock_pagevec()].
	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
	 *   a) do PageLRU check with lock [check_move_unevictable_pages]
	 *   b) do PageLRU check before lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
	 * following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 *
	 * if '#1' does not observe setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier will make sure that page_evictable
	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
	 * can be reordered after PageMlocked check and can make '#1' to fail
	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
	 * looking at the same page) and the evictable page will be stranded
	 * in an unevictable LRU.
	 */
	smp_mb();

	if (page_evictable(page)) {
		lru = page_lru(page);
		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
					 PageActive(page));
		if (was_unevictable)
			count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			count_vm_event(UNEVICTABLE_PGCULLED);
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}
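
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the lookup/prune pattern used by truncate-style loops.  The mapping
 * and the per-page work are hypothetical.
 *
 *	pgoff_t indices[PAGEVEC_SIZE];
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_entries(&pvec, mapping, index,
 *				      PAGEVEC_SIZE, indices)) {
 *		int nr = pagevec_count(&pvec);
 *
 *		index = indices[nr - 1] + 1;		// next search start
 *		pagevec_remove_exceptionals(&pvec);	// keep real pages only
 *		// ... process the remaining pages in pvec ...
 *		pagevec_release(&pvec);
 *	}
 */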

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
 * pages in the mapping starting from index @start and up to index @end
 * (inclusive).  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If this
 * number is smaller than PAGEVEC_SIZE, the end of specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);
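
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * walking all cached pages in a mapping with pagevec_lookup_range(),
 * which advances the index itself.  The mapping and the per-page work
 * are hypothetical.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, (pgoff_t)-1)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			// ... inspect pvec.pages[i] ...
 *		}
 *		pagevec_release(&pvec);
 *	}
 */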
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		int tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		int tag, unsigned max_pages)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
1027
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
P
Peter Zijlstra 已提交
1028

L
Linus Torvalds 已提交
1029 1030 1031 1032 1033 1034 1035 1036 1037 1038
	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
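
/*
 * Editor's note (illustrative, not part of the original file): page_cluster
 * is a log2 value, so the defaults above mean swap I/O batches of
 * 1 << 2 = 4 pages on small-memory machines and 1 << 3 = 8 pages
 * otherwise.  It remains tunable at runtime via /proc/sys/vm/page-cluster.
 */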