/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
bool swap_vma_readahead = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
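
/*
 * Example layout with 4KB pages (PAGE_SHIFT == 12, SWAP_RA_WIN_SHIFT == 6):
 * the hit count occupies bits 0-5, the readahead window size bits 6-11,
 * and the page-aligned fault address bits 12 and up, so all three
 * per-VMA readahead statistics fit in one atomic_long_t.
 */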

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces will be reused only after at least
		 * one grace period.  So it is impossible for them
		 * to belong to different usages.
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
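	/*
	 * For a THP, all nr subpages are inserted at consecutive radix tree
	 * offsets, and each subpage's private field is set to its own swap
	 * entry value, so the swap cache sees nr consecutive slots backed by
	 * one compound page.
	 */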
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->page_tree,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->page_tree, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	spin_unlock_irq(&address_space->tree_lock);

	return error;
}


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->page_tree, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock. 
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry))
		goto fail;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
	/* -ENOMEM radix-tree allocation failure */
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is a MADV_FREE page: its pte could have the
	 * dirty bit cleared while its SwapBacked bit is still set, because
	 * clearing the dirty bit and the SwapBacked bit is not protected by
	 * a lock. For such a page, unmap will not set the dirty bit, so page
	 * reclaim will not write the page out. This can cause data corruption
	 * when the page is swapped in later. Always setting the dirty bit for
	 * the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

/* 
 * If we are the only user, then try to free up the swap cache. 
 * 
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/* 
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	unsigned long ra_info;
	int win, hits, readahead;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	INC_CACHE_INFO(find_total);
	if (page) {
		INC_CACHE_INFO(find_success);
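		/*
		 * A hit on a PageReadahead page means the previous window
		 * was profitable, so bump the hit count to let the next
		 * window grow.  With VMA-based readahead the statistics are
		 * kept in the per-VMA swap_readahead_info word; otherwise
		 * the global swapin_readahead_hits counter is used.
		 */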
		if (unlikely(PageTransCompound(page)))
			return page;
		readahead = TestClearPageReadahead(page);
		if (vma) {
			ra_info = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_info);
			hits = SWAP_RA_HITS(ra_info);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}
		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma)
				atomic_inc(&swapin_readahead_hits);
		}
	}
	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled,
		 * we have to handle the race between putting a
		 * swap entry in the swap cache and marking the swap
		 * slot as SWAP_HAS_CACHE.  That's done later in this
		 * function, or else swapoff would be aborted if we
		 * returned NULL here.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
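	/*
	 * For example, hits == 5 gives pages == 7, rounded up to 8 below.
	 * With no hits, pages == 2, and the window is kept only if the fault
	 * is adjacent to the previous offset; otherwise it drops to one page.
	 */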
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

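/*
 * prev_offset and last_readahead_pages are static: they carry the previous
 * fault offset and window size over to the next call, so a sequential fault
 * pattern can be detected even when no readahead hits have been recorded.
 */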
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		prev_offset = offset;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;
	bool do_poll = true, page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
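	/*
	 * For example, a fault at offset 0x123 with an 8-page window
	 * (mask == 0x7) reads offsets 0x120 through 0x127.
	 */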
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

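/*
 * Each swap device gets one address_space per SWAP_ADDRESS_SPACE_PAGES
 * pages of swap space, so that radix tree updates are spread over several
 * tree_locks instead of contending on a single lock per device.
 */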
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}

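/*
 * Clamp a proposed readahead window [lpfn, rpfn) to the VMA boundaries and
 * to the PMD that covers the faulting address, since only the PTEs of a
 * single page table page are copied for the readahead scan.
 */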
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

struct page *swap_readahead_detect(struct vm_fault *vmf,
				   struct vma_swap_readahead *swap_ra)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long swap_ra_info;
	struct page *page;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		swap_ra->win = 1;
		return NULL;
	}

	faddr = vmf->address;
	entry = pte_to_swp_entry(vmf->orig_pte);
	if ((unlikely(non_swap_entry(entry))))
		return NULL;
	page = lookup_swap_cache(entry, vma, faddr);
	if (page)
		return page;

	fpfn = PFN_DOWN(faddr);
	swap_ra_info = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
	prev_win = SWAP_RA_WIN(swap_ra_info);
	hits = SWAP_RA_HITS(swap_ra_info);
	swap_ra->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1)
		return NULL;

	/* Copy the PTEs because the page table may be unmapped */
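	/*
	 * Window placement: if the previous fault was one page before this
	 * one, assume a forward scan and read ahead; if it was one page
	 * after, assume a backward scan and read behind; otherwise center
	 * the window on the faulting page.
	 */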
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	swap_ra->nr_pte = end - start;
	swap_ra->offset = fpfn - start;
	pte = vmf->pte - swap_ra->offset;
#ifdef CONFIG_64BIT
	swap_ra->ptes = pte;
#else
	tpte = swap_ra->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif

	return NULL;
}

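/*
 * Walk the PTEs copied by swap_readahead_detect() and start asynchronous
 * reads for every swap entry found in the window, then read the faulting
 * entry itself, polling for completion only when the window is one page.
 */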
struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				    struct vm_fault *vmf,
				    struct vma_swap_readahead *swap_ra)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;

	if (swap_ra->win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = swap_ra->ptes; i < swap_ra->nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != swap_ra->offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     swap_ra->win == 1);
}

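/*
 * VMA-based swap readahead can be toggled at run time via
 * /sys/kernel/mm/swap/vma_ra_enabled.
 */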
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		swap_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		swap_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif