/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17
 * Contributions by Hugh Dickins 2003, 2004
18 19 20 21 22
 */

/*
 * Lock ordering in mm:
 *
23
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
24 25
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
26 27 28 29
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
30
 *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
31 32 33 34 35 36 37 38 39 40 41
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     mapping->tree_lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   mapping->tree_lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
42
 *
43
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
44
 *   ->tasklist_lock
45
 *     pte map lock
46 47 48
 */

#include <linux/mm.h>
49
#include <linux/sched/mm.h>
50
#include <linux/sched/task.h>
51 52 53 54 55
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
56
#include <linux/ksm.h>
57 58
#include <linux/rmap.h>
#include <linux/rcupdate.h>
59
#include <linux/export.h>
60
#include <linux/memcontrol.h>
61
#include <linux/mmu_notifier.h>
62
#include <linux/migrate.h>
63
#include <linux/hugetlb.h>
64
#include <linux/backing-dev.h>
65
#include <linux/page_idle.h>
66 67 68

#include <asm/tlbflush.h>

69 70
#include <trace/events/tlb.h>

71 72
#include "internal.h"

73
static struct kmem_cache *anon_vma_cachep;
74
static struct kmem_cache *anon_vma_chain_cachep;
75 76 77

static inline struct anon_vma *anon_vma_alloc(void)
{
78 79 80 81 82
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
83 84
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
85 86 87 88 89 90 91 92
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
93 94
}

95
static inline void anon_vma_free(struct anon_vma *anon_vma)
96
{
97
	VM_BUG_ON(atomic_read(&anon_vma->refcount));
98 99

	/*
100
	 * Synchronize against page_lock_anon_vma_read() such that
101 102 103 104 105
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
106
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
107
	 *
108 109
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
110
	 *   LOCK				  MB
111
	 *   atomic_read()			  rwsem_is_locked()
112 113 114 115
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
116
	might_sleep();
117
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
118
		anon_vma_lock_write(anon_vma);
119
		anon_vma_unlock_write(anon_vma);
120 121
	}

122 123
	kmem_cache_free(anon_vma_cachep, anon_vma);
}
124

125
static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
126
{
127
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
128 129
}

130
static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
131 132 133 134
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

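/*
 * Link an anon_vma_chain entry both ways: onto the vma's anon_vma_chain
 * list and into the anon_vma's interval tree, so that rmap walks can get
 * from the anon_vma to every vma that might map its pages.
 */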
static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
142
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
143 144
}

145
/**
146
 * __anon_vma_prepare - attach an anon_vma to a memory region
147 148 149 150 151 152
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
153 154
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
155
 * not we either need to find an adjacent mapping that we
156 157 158 159 160
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
161
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
162 163 164 165 166 167 168 169 170 171 172
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
173
int __anon_vma_prepare(struct vm_area_struct *vma)
174
{
175 176
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
177
	struct anon_vma_chain *avc;
178 179 180

	might_sleep();

181 182 183 184 185 186 187 188 189 190 191 192
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}
193

194 195 196 197 198 199 200 201
	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
202
		allocated = NULL;
203 204 205 206
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);
207

208 209 210 211
	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);
212

213
	return 0;
214 215 216 217 218

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
219 220
}

221 222 223 224 225 226 227 228 229 230 231 232 233
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
234
			up_write(&root->rwsem);
235
		root = new_root;
236
		down_write(&root->rwsem);
237 238 239 240 241 242 243
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
244
		up_write(&root->rwsem);
245 246
}

247 248 249
/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
250 251 252 253 254 255 256 257
 *
 * If dst->anon_vma is NULL this function tries to find and reuse existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of the anon_vma hierarchy into an endless linear chain in the
 * case of a constantly forking task. On the other hand, an anon_vma with more
 * than one child is not reused even if there is no live vma, so the rmap
 * walker has a good chance of avoiding a scan of the whole hierarchy when it
 * searches where a page is mapped.
258 259
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
260
{
261
	struct anon_vma_chain *avc, *pavc;
262
	struct anon_vma *root = NULL;
263

264
	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
265 266
		struct anon_vma *anon_vma;

267 268 269 270 271 272 273 274
		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
275 276 277
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);
278 279 280 281 282 283 284 285 286 287 288 289

		/*
		 * Reuse the existing anon_vma if its degree is lower than two,
		 * that means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
290
	}
291 292
	if (dst->anon_vma)
		dst->anon_vma->degree++;
293
	unlock_anon_vma_root(root);
294
	return 0;
295

296
 enomem_failure:
297 298 299 300 301 302 303
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
304 305
	unlink_anon_vmas(dst);
	return -ENOMEM;
306 307
}

308 309 310 311 312 313
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
314
{
315 316
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
317
	int error;
318

319 320 321 322
	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

323 324 325
	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

326 327 328 329
	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
330 331 332
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;
333

334 335 336 337
	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

338 339 340 341
	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
342
	avc = anon_vma_chain_alloc(GFP_KERNEL);
343 344
	if (!avc)
		goto out_error_free_anon_vma;
345 346 347 348 349 350

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
351
	anon_vma->parent = pvma->anon_vma;
352
	/*
353 354 355
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
356 357
	 */
	get_anon_vma(anon_vma->root);
358 359
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
360
	anon_vma_lock_write(anon_vma);
361
	anon_vma_chain_link(vma, avc, anon_vma);
362
	anon_vma->parent->degree++;
363
	anon_vma_unlock_write(anon_vma);
364 365 366 367

	return 0;

 out_error_free_anon_vma:
368
	put_anon_vma(anon_vma);
369
 out_error:
370
	unlink_anon_vmas(vma);
371
	return -ENOMEM;
372 373
}

374 375 376
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
377
	struct anon_vma *root = NULL;
378

379 380 381 382
	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
383
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
384 385 386
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
387
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
388 389 390 391 392

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
393 394
		if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
			anon_vma->parent->degree--;
395
			continue;
396
		}
397 398 399 400

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
401 402
	if (vma->anon_vma)
		vma->anon_vma->degree--;
403 404 405 406 407
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
408
	 * needing to write-acquire the anon_vma->root->rwsem.
409 410 411 412
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

413
		VM_WARN_ON(anon_vma->degree);
414 415
		put_anon_vma(anon_vma);

416 417 418 419 420
		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

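/*
 * Slab constructor: runs when an anon_vma object is first created, not on
 * every allocation.  Because the cache is SLAB_DESTROY_BY_RCU, a concurrent
 * page_lock_anon_vma_read() may still poke at an object that has been freed
 * and reallocated, so the rwsem and refcount are initialised only here and
 * stay valid across such reuse.
 */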
static void anon_vma_ctor(void *data)
{
423
	struct anon_vma *anon_vma = data;
424

425
	init_rwsem(&anon_vma->rwsem);
426
	atomic_set(&anon_vma->refcount, 0);
427
	anon_vma->rb_root = RB_ROOT;
428 429 430 431 432
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
433 434 435 436
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
437 438 439
}

/*
440 441 442 443 444 445 446 447 448
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap(),
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
449 450 451 452 453
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
454 455 456 457 458 459 460
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
461
 */
462
struct anon_vma *page_get_anon_vma(struct page *page)
463
{
464
	struct anon_vma *anon_vma = NULL;
465 466 467
	unsigned long anon_mapping;

	rcu_read_lock();
468
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
469
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
470 471 472 473 474
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
475 476 477 478
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}
479 480 481

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
482 483 484 485
	 * freed.  But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
486
	 */
487
	if (!page_mapped(page)) {
488
		rcu_read_unlock();
489
		put_anon_vma(anon_vma);
490
		return NULL;
491
	}
492 493
out:
	rcu_read_unlock();
494 495 496 497

	return anon_vma;
}

498 499 500 501 502 503 504
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
505
struct anon_vma *page_lock_anon_vma_read(struct page *page)
506
{
507
	struct anon_vma *anon_vma = NULL;
508
	struct anon_vma *root_anon_vma;
509
	unsigned long anon_mapping;
510

511
	rcu_read_lock();
512
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
513 514 515 516 517 518
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
519
	root_anon_vma = READ_ONCE(anon_vma->root);
520
	if (down_read_trylock(&root_anon_vma->rwsem)) {
521
		/*
522 523
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
524
		 * not go away, see anon_vma_free().
525
		 */
526
		if (!page_mapped(page)) {
527
			up_read(&root_anon_vma->rwsem);
528 529 530 531
			anon_vma = NULL;
		}
		goto out;
	}
532

533 534 535 536 537 538 539
	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
540
		rcu_read_unlock();
541
		put_anon_vma(anon_vma);
542
		return NULL;
543 544 545 546
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
547
	anon_vma_lock_read(anon_vma);
548 549 550 551 552

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
553
		 * we'll deadlock on the anon_vma_lock_write() recursion.
554
		 */
555
		anon_vma_unlock_read(anon_vma);
556 557 558 559 560 561 562 563
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
564
	return anon_vma;
565 566
}

567
void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
568
{
569
	anon_vma_unlock_read(anon_vma);
570 571
}

572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int cpu;

	if (!tlb_ubc->flush_required)
		return;

	cpu = get_cpu();

589 590 591 592
	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
593
	}
594 595 596

	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
597 598
	cpumask_clear(&tlb_ubc->cpumask);
	tlb_ubc->flush_required = false;
599
	tlb_ubc->writable = false;
600 601 602
	put_cpu();
}

603 604 605 606 607 608 609 610 611
/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

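/*
 * Note that this mm's CPUs may have stale TLB entries for a pte we just
 * cleared: accumulate mm_cpumask() into the current task's batch and mark
 * the flush as pending so that try_to_unmap_flush() knows there is work.
 */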
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
613 614 615 616 617
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
	tlb_ubc->flush_required = true;
618 619 620 621 622 623 624 625

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer batch the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}
#else
647
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
648 649 650 651 652 653 654 655 656
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

657
/*
658
 * At what user virtual address is page expected in vma?
659
 * Caller should check the page is actually part of the vma.
660 661 662
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
663
	unsigned long address;
664
	if (PageAnon(page)) {
665 666 667 668 669 670 671
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
672
			return -EFAULT;
673 674
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
675 676 677
			return -EFAULT;
	} else
		return -EFAULT;
678 679 680 681
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
682 683
}

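/*
 * Walk the page tables of @mm and return the pmd that maps @address, or
 * NULL if some level is not present or the pmd is transparent huge (in
 * which case there is no pte page to descend into).
 */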
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
687
	p4d_t *p4d;
688 689
	pud_t *pud;
	pmd_t *pmd = NULL;
690
	pmd_t pmde;
691 692 693 694 695

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

696 697 698 699 700
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
701 702 703 704
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
705
	/*
706
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
707 708 709
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
710 711
	pmde = *pmd;
	barrier();
712
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
713 714 715 716 717
		pmd = NULL;
out:
	return pmd;
}

718 719 720 721 722 723 724 725 726 727 728 729 730
struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: a page_referenced_arg is passed in via rmap_walk_control->arg
 */
static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
731 732 733 734 735
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
736 737
	int referenced = 0;

738 739
	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;
740

741 742 743 744 745
		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}
746

747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763
		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
764
				referenced++;
765 766 767
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
768
		}
769 770

		pra->mapcount--;
771 772
	}

773 774 775 776 777
	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

778 779 780
	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
781
	}
782

783 784 785 786
	if (!pra->mapcount)
		return SWAP_SUCCESS; /* To break the loop */

	return SWAP_AGAIN;
787 788
}

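/*
 * rmap_walk() callback: when counting references on behalf of a memcg,
 * skip vmas whose mm is not charged to that memcg.
 */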
static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
790
{
791 792
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;
793

794 795
	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;
796

797
	return false;
798 799 800 801 802 803
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
804
 * @memcg: target memory cgroup
805
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
806 807 808 809
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
810 811
int page_referenced(struct page *page,
		    int is_locked,
812
		    struct mem_cgroup *memcg,
813
		    unsigned long *vm_flags)
814
{
815
	int we_locked = 0;
816
	struct page_referenced_arg pra = {
817
		.mapcount = total_mapcount(page),
818 819 820 821 822 823 824
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};
825

826
	*vm_flags = 0;
827 828 829 830 831 832 833 834 835 836
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
837
	}
838 839 840 841 842 843 844 845 846 847

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

848
	rmap_walk(page, &rwc);
849 850 851 852 853 854
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
855 856
}

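/*
 * Write-protect and clean every pte/pmd mapping @page in @vma so that the
 * next write faults and re-dirties the page; the number of entries cleaned
 * is accumulated in the int that @arg points to.
 */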
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
858
			    unsigned long address, void *arg)
859
{
860 861 862 863 864 865
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
866
	int *cleaned = arg;
867

868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902
	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;
		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_huge_clear_flush(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}
903

904 905 906 907
		if (ret) {
			mmu_notifier_invalidate_page(vma->vm_mm, address);
			(*cleaned)++;
		}
908
	}
909

910
	return SWAP_AGAIN;
911 912
}

913
static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
914
{
915
	if (vma->vm_flags & VM_SHARED)
916
		return false;
917

918
	return true;
919 920 921 922
}

int page_mkclean(struct page *page)
{
923 924 925 926 927 928 929
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};
930 931 932

	BUG_ON(!PageLocked(page));

933 934 935 936 937 938 939 940
	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);
941

942
	return cleaned;
943
}
944
EXPORT_SYMBOL_GPL(page_mkclean);
945

946 947 948 949 950 951 952 953 954 955
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
956
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
957 958 959
{
	struct anon_vma *anon_vma = vma->anon_vma;

960 961
	page = compound_head(page);

962
	VM_BUG_ON_PAGE(!PageLocked(page), page);
963
	VM_BUG_ON_VMA(!anon_vma, vma);
964 965

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
966 967 968 969 970 971
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
972 973
}

974
/**
975 976 977 978
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page to add to rmap	
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping	
979
 * @exclusive:	the page is exclusively owned by the current process
980 981
 */
static void __page_set_anon_rmap(struct page *page,
982
	struct vm_area_struct *vma, unsigned long address, int exclusive)
983
{
984
	struct anon_vma *anon_vma = vma->anon_vma;
985

986
	BUG_ON(!anon_vma);
987

988 989 990
	if (PageAnon(page))
		return;

991
	/*
992 993 994
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
995
	 */
996
	if (!exclusive)
997
		anon_vma = anon_vma->root;
998 999 1000 1001 1002 1003

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

1004
/**
1005
 * __page_check_anon_rmap - sanity check anonymous rmap addition
1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
1026
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
1027
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
1028 1029 1030
#endif
}

1031 1032 1033 1034 1035
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
1036
 * @compound:	charge the page as compound or small page
1037
 *
1038
 * The caller needs to hold the pte lock, and the page must be locked in
1039 1040 1041
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
1042 1043
 */
void page_add_anon_rmap(struct page *page,
1044
	struct vm_area_struct *vma, unsigned long address, bool compound)
1045
{
1046
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
1047 1048 1049 1050 1051 1052 1053 1054
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
1055
	struct vm_area_struct *vma, unsigned long address, int flags)
1056
{
1057 1058 1059
	bool compound = flags & RMAP_COMPOUND;
	bool first;

1060 1061
	if (compound) {
		atomic_t *mapcount;
1062
		VM_BUG_ON_PAGE(!PageLocked(page), page);
1063 1064 1065
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
1066 1067 1068 1069
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

1070
	if (first) {
1071
		int nr = compound ? hpage_nr_pages(page) : 1;
1072 1073 1074 1075 1076 1077
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock(a spinlock) is held, which implies preemption
		 * disabled.
		 */
1078
		if (compound)
1079
			__inc_node_page_state(page, NR_ANON_THPS);
1080
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1081
	}
1082 1083 1084
	if (unlikely(PageKsm(page)))
		return;

1085
	VM_BUG_ON_PAGE(!PageLocked(page), page);
1086

1087
	/* address might be in next vma when migration races vma_adjust */
1088
	if (first)
1089 1090
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
1091
	else
1092
		__page_check_anon_rmap(page, vma, address);
1093 1094
}

1095
/**
1096 1097 1098 1099
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
1100
 * @compound:	charge the page as compound or small page
1101 1102 1103
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
1104
 * Page does not have to be locked.
1105 1106
 */
void page_add_new_anon_rmap(struct page *page,
1107
	struct vm_area_struct *vma, unsigned long address, bool compound)
1108
{
1109 1110
	int nr = compound ? hpage_nr_pages(page) : 1;

1111
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1112
	__SetPageSwapBacked(page);
1113 1114
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1115 1116
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
1117
		__inc_node_page_state(page, NR_ANON_THPS);
1118 1119 1120 1121 1122
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
1123
	}
1124
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1125
	__page_set_anon_rmap(page, vma, address, 1);
1126 1127
}

1128 1129 1130 1131
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
1132
 * The caller needs to hold the pte lock.
1133
 */
1134
void page_add_file_rmap(struct page *page, bool compound)
1135
{
1136 1137 1138
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1139
	lock_page_memcg(page);
1140 1141 1142 1143 1144 1145 1146
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
1147
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1148
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
1149
	} else {
1150 1151 1152
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

1153 1154 1155 1156
			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
1157 1158
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
1159
	}
1160
	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
1161
	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
1162
out:
1163
	unlock_page_memcg(page);
1164 1165
}

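/*
 * Undo page_add_file_rmap(): drop the relevant mapcount(s) and the
 * NR_FILE_MAPPED accounting for one pte- or pmd-level unmap of a file or
 * shmem page.  hugetlb pages only adjust their compound mapcount here.
 */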
static void page_remove_file_rmap(struct page *page, bool compound)
1167
{
1168 1169
	int i, nr = 1;

1170
	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1171
	lock_page_memcg(page);
1172

1173 1174 1175 1176
	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
1177
		goto out;
1178
	}
1179

1180
	/* page still mapped by someone else? */
1181 1182 1183 1184 1185 1186 1187
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
1188
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1189
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
1190 1191 1192 1193
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}
1194 1195

	/*
1196
	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
1197 1198 1199
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
1200
	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
1201
	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
1202 1203 1204 1205

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
1206
	unlock_page_memcg(page);
1207 1208
}

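/*
 * Drop the compound (pmd-level) mapping of an anonymous THP.  When the page
 * is also pte-mapped (PageDoubleMap), subpages that are still mapped by a
 * pte keep their NR_ANON_MAPPED accounting; whatever did become unmapped is
 * subtracted and the page is queued for deferred splitting.
 */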
static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

1223
	__dec_node_page_state(page, NR_ANON_THPS);
1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

1238 1239 1240
	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

1241
	if (nr) {
1242
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
1243 1244
		deferred_split_huge_page(page);
	}
1245 1246
}

1247 1248
/**
 * page_remove_rmap - take down pte mapping from a page
1249 1250
 * @page:	page to remove mapping from
 * @compound:	uncharge the page as compound or small page
1251
 *
1252
 * The caller needs to hold the pte lock.
1253
 */
1254
void page_remove_rmap(struct page *page, bool compound)
1255
{
1256 1257
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);
1258

1259 1260 1261
	if (compound)
		return page_remove_anon_compound_rmap(page);

1262 1263
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
1264 1265
		return;

1266
	/*
1267 1268 1269
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
1270
	 */
1271
	__dec_node_page_state(page, NR_ANON_MAPPED);
1272

1273 1274
	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
1275

1276 1277 1278
	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

1279 1280 1281 1282 1283 1284 1285 1286 1287
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
1288 1289 1290
}

/*
1291
 * @arg: enum ttu_flags will be passed to this argument
1292
 */
1293
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1294
		     unsigned long address, void *arg)
1295 1296
{
	struct mm_struct *mm = vma->vm_mm;
1297 1298 1299 1300 1301
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
1302
	pte_t pteval;
1303
	struct page *subpage;
1304
	int ret = SWAP_AGAIN;
1305
	enum ttu_flags flags = (enum ttu_flags)arg;
1306

1307 1308
	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
1309
		return SWAP_AGAIN;
1310

1311 1312 1313 1314 1315
	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_MIGRATION, page);
	}

1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331
	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
1332
				ret = SWAP_FAIL;
1333 1334
				page_vma_mapped_walk_done(&pvmw);
				break;
1335
			}
1336 1337
			if (flags & TTU_MUNLOCK)
				continue;
1338
		}
1339

1340 1341 1342 1343 1344 1345 1346
		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;


1347 1348 1349 1350 1351 1352 1353
		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = SWAP_FAIL;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
1354
		}
1355

1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372
		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}
1373

1374 1375 1376
		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);
1377

1378 1379
		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);
1380

1381 1382 1383 1384 1385 1386 1387
		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);
				hugetlb_count_sub(nr, mm);
			} else {
				dec_mm_counter(mm, mm_counter(page));
			}
1388

1389 1390 1391 1392 1393 1394 1395 1396
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			set_pte_at(mm, address, pvmw.pte, pteval);
		} else if (pte_unused(pteval)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 */
1397
			dec_mm_counter(mm, mm_counter(page));
1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & TTU_MIGRATION)) {
			swp_entry_t entry;
			pte_t swp_pte;
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
1420 1421 1422 1423 1424 1425
			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = SWAP_FAIL;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
1426

1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438
			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
1439 1440
				SetPageSwapBacked(page);
				ret = SWAP_FAIL;
1441 1442
				page_vma_mapped_walk_done(&pvmw);
				break;
1443
			}
1444

1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = SWAP_FAIL;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
1457
			dec_mm_counter(mm, MM_ANONPAGES);
1458 1459 1460 1461 1462 1463 1464
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else
			dec_mm_counter(mm, mm_counter_file(page));
1465
discard:
1466 1467
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
1468
		mmu_notifier_invalidate_page(mm, address);
1469
	}
1470
	return ret;
1471 1472
}

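/*
 * Detect the temporary stack vma that exec() sets up before moving it into
 * place: it already grows like a stack but still has the
 * VM_STACK_INCOMPLETE_SETUP bits set.
 */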
bool is_vma_temporary_stack(struct vm_area_struct *vma)
1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

1487 1488 1489 1490 1491
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

1492
static int page_mapcount_is_zero(struct page *page)
1493
{
1494
	return !total_mapcount(page);
1495
}
1496

1497 1498 1499
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
1500
 * @flags: action and flags
1501 1502 1503 1504 1505 1506 1507 1508
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_FAIL	- the page is unswappable
 */
1509
int try_to_unmap(struct page *page, enum ttu_flags flags)
1510
{
1511 1512
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
1513
		.arg = (void *)flags,
1514
		.done = page_mapcount_is_zero,
1515 1516
		.anon_lock = page_lock_anon_vma_read,
	};
1517

1518 1519 1520 1521 1522 1523 1524 1525
	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
1526
	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
1527 1528
		rwc.invalid_vma = invalid_migration_vma;

1529
	if (flags & TTU_RMAP_LOCKED)
1530
		rmap_walk_locked(page, &rwc);
1531
	else
1532
		rmap_walk(page, &rwc);
1533

1534
	return !page_mapcount(page) ? SWAP_SUCCESS : SWAP_FAIL;
1535
}
1536

1537 1538 1539 1540 1541
static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
};

1542 1543 1544 1545 1546 1547 1548 1549
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */
1550

1551 1552
void try_to_munlock(struct page *page)
{
1553 1554
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
1555
		.arg = (void *)TTU_MUNLOCK,
1556 1557 1558 1559 1560
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,

	};

1561
	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1562
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
1563

1564
	rmap_walk(page, &rwc);
1565
}
1566

1567
void __put_anon_vma(struct anon_vma *anon_vma)
1568
{
1569
	struct anon_vma *root = anon_vma->root;
1570

1571
	anon_vma_free(anon_vma);
1572 1573
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
1574 1575
}

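/*
 * Find and read-lock the anon_vma for an rmap walk: prefer the walk's own
 * anon_lock callback (e.g. page_lock_anon_vma_read) when one is supplied,
 * otherwise lock the anon_vma directly under the caller's pin.
 */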
static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
1578 1579 1580
{
	struct anon_vma *anon_vma;

1581 1582 1583
	if (rwc->anon_lock)
		return rwc->anon_lock(page);

1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597
	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

1598
/*
1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
1611
 */
1612 1613
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
1614 1615
{
	struct anon_vma *anon_vma;
1616
	pgoff_t pgoff_start, pgoff_end;
1617
	struct anon_vma_chain *avc;
1618 1619
	int ret = SWAP_AGAIN;

1620 1621 1622 1623 1624 1625 1626
	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
1627 1628
	if (!anon_vma)
		return ret;
1629

1630 1631 1632 1633
	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
1634
		struct vm_area_struct *vma = avc->vma;
1635
		unsigned long address = vma_address(page, vma);
1636

1637 1638
		cond_resched();

1639 1640 1641
		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

1642
		ret = rwc->rmap_one(page, vma, address, rwc->arg);
1643 1644
		if (ret != SWAP_AGAIN)
			break;
1645 1646
		if (rwc->done && rwc->done(page))
			break;
1647
	}
1648 1649 1650

	if (!locked)
		anon_vma_unlock_read(anon_vma);
1651 1652 1653
	return ret;
}

1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666
/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
1667 1668
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
1669
{
1670
	struct address_space *mapping = page_mapping(page);
1671
	pgoff_t pgoff_start, pgoff_end;
1672 1673 1674
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

1675 1676 1677 1678
	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
1679
	 * so we can safely take mapping->i_mmap_rwsem.
1680
	 */
1681
	VM_BUG_ON_PAGE(!PageLocked(page), page);
1682

1683 1684
	if (!mapping)
		return ret;
1685

1686 1687
	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
1688 1689
	if (!locked)
		i_mmap_lock_read(mapping);
1690 1691
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
1692
		unsigned long address = vma_address(page, vma);
1693

1694 1695
		cond_resched();

1696 1697 1698
		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

1699
		ret = rwc->rmap_one(page, vma, address, rwc->arg);
1700
		if (ret != SWAP_AGAIN)
1701 1702 1703
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
1704
	}
1705 1706

done:
1707 1708
	if (!locked)
		i_mmap_unlock_read(mapping);
1709 1710 1711
	return ret;
}

1712
int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1713 1714
{
	if (unlikely(PageKsm(page)))
1715
		return rmap_walk_ksm(page, rwc);
1716
	else if (PageAnon(page))
1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728
		return rmap_walk_anon(page, rwc, false);
	else
		return rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		return rmap_walk_anon(page, rwc, true);
1729
	else
1730
		return rmap_walk_file(page, rwc, true);
1731
}
1732

1733
#ifdef CONFIG_HUGETLB_PAGE
1734 1735 1736 1737 1738 1739 1740 1741 1742
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;
1743

1744
	BUG_ON(!anon_vma);
1745 1746 1747 1748 1749 1750

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

1751 1752 1753 1754 1755 1756 1757 1758 1759 1760
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;
1761 1762

	BUG_ON(!PageLocked(page));
1763
	BUG_ON(!anon_vma);
1764
	/* address might be in next vma when migration races vma_adjust */
1765
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
1766 1767 1768 1769 1770 1771 1772 1773
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1774
	atomic_set(compound_mapcount_ptr(page), 0);
1775 1776
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
1777
#endif /* CONFIG_HUGETLB_PAGE */