/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
bool swap_vma_readahead = true;

#define SWAP_RA_MAX_ORDER_DEFAULT	3

static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT;

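/*
 * Per-VMA readahead state is packed into the single atomic_long
 * vma->swap_readahead_info: the page-aligned fault address occupies the
 * bits covered by PAGE_MASK, while the readahead window size and the
 * recent hit count share the low PAGE_SHIFT bits.
 */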
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

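/*
 * Sum the cached pages of every per-type swap address space.  The
 * swapper_spaces arrays are walked under RCU because swapoff may free
 * them concurrently (see exit_swap_address_space()).
 */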
unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces will be reused only after at least
		 * one grace period, so the nr and spaces values read
		 * here always describe the same swap type.
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
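	/*
	 * Each subpage of a THP gets its own swap cache slot: consecutive
	 * radix tree indices and consecutive swap entry values in private.
	 */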
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->page_tree,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only a context that has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->page_tree, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	spin_unlock_irq(&address_space->tree_lock);

	return error;
}


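/*
 * Like __add_to_swap_cache(), but preloads radix tree nodes with the
 * given gfp_mask before taking the tree lock for the insertion.
 */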
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->page_tree, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock. 
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry))
		goto fail;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
	/* -ENOMEM radix-tree allocation failure */
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

/* 
 * If we are the only user, then try to free up the swap cache. 
 * 
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/* 
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	unsigned long ra_info;
	int win, hits, readahead;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	INC_CACHE_INFO(find_total);
	if (page) {
		INC_CACHE_INFO(find_success);
		if (unlikely(PageTransCompound(page)))
			return page;
		readahead = TestClearPageReadahead(page);
		if (vma) {
			ra_info = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_info);
			hits = SWAP_RA_HITS(ra_info);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}
		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma)
				atomic_inc(&swapin_readahead_hits);
		}
	}
	return page;
}

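/*
 * Look up or create the swap cache page for @entry.  On return,
 * *new_page_allocated tells the caller whether the page was allocated
 * here and therefore still needs to be read in from swap.
 */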
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

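/*
 * Turn the recent readahead hit count into a readahead window size,
 * capped at 1 << page_cluster pages and smoothed against the previous
 * window so it does not shrink too quickly.
 */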
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		prev_offset = offset;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;
	bool do_poll = true, page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

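/*
 * Each swap type gets an array of address_spaces, one per
 * SWAP_ADDRESS_SPACE_PAGES chunk of the device, so that lookups for
 * different regions hit different tree_locks.
 */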
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}

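/*
 * Clamp the readahead PFN window [lpfn, rpfn) to the VMA and to the PMD
 * covering the faulting address, so the PTE scan never leaves one page
 * table page.
 */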
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

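/*
 * Decide whether VMA based swap readahead should be used for this fault.
 * If the page is already in the swap cache it is returned directly;
 * otherwise the chosen window and the PTEs to scan are recorded in
 * @swap_ra and NULL is returned.
 */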
struct page *swap_readahead_detect(struct vm_fault *vmf,
				   struct vma_swap_readahead *swap_ra)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long swap_ra_info;
	struct page *page;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	faddr = vmf->address;
	entry = pte_to_swp_entry(vmf->orig_pte);
	if ((unlikely(non_swap_entry(entry))))
		return NULL;
	page = lookup_swap_cache(entry, vma, faddr);
	if (page)
		return page;

	max_win = 1 << READ_ONCE(swap_ra_max_order);
	if (max_win == 1) {
		swap_ra->win = 1;
		return NULL;
	}

	fpfn = PFN_DOWN(faddr);
	swap_ra_info = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
	prev_win = SWAP_RA_WIN(swap_ra_info);
	hits = SWAP_RA_HITS(swap_ra_info);
	swap_ra->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1)
		return NULL;

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	swap_ra->nr_pte = end - start;
	swap_ra->offset = fpfn - start;
	pte = vmf->pte - swap_ra->offset;
#ifdef CONFIG_64BIT
	swap_ra->ptes = pte;
#else
	tpte = swap_ra->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif

	return NULL;
}

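/*
 * Issue asynchronous reads for the swap entries found in the PTE window
 * recorded by swap_readahead_detect(), then read the faulting entry
 * itself.
 */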
struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				    struct vm_fault *vmf,
				    struct vma_swap_readahead *swap_ra)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;

	if (swap_ra->win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = swap_ra->ptes; i < swap_ra->nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != swap_ra->offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     swap_ra->win == 1);
}