#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_sem is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}
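
/*
 * Illustrative sketch (not part of this file's code): a caller that
 * already holds mmap_sem for read could look up the page backing a
 * single user address roughly as follows; "vma" and "addr" are
 * hypothetical and must already have been validated by the caller.
 *
 *	struct page *page;
 *
 *	page = follow_page(vma, addr, FOLL_GET);
 *	if (IS_ERR_OR_NULL(page))
 *		return -EFAULT;
 *	...
 *	put_page(page);
 *
 * The final put_page() drops the reference taken on behalf of FOLL_GET.
 */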

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However, it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or crossing a vma boundary */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags)) {
				ret = -EFAULT;
				goto out;
			}
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current))) {
			ret = -ERESTARTSYS;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				/* FALLTHRU */
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}
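
/*
 * Illustrative sketch (not part of this file's code): a caller that can
 * tolerate mmap_sem being dropped would typically drive __get_user_pages()
 * through the @nonblocking argument roughly as follows (the surrounding
 * variables are hypothetical); populate_vma_page_range() below is a real
 * in-file caller of this kind.
 *
 *	int locked = 1;
 *	long ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = __get_user_pages(current, mm, start, nr_pages, gup_flags,
 *			       pages, NULL, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 *
 * If "locked" was cleared, __get_user_pages() already released mmap_sem
 * on the caller's behalf.
 */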

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section), that access fails with -EFAULT, and we want
 * to resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem, so it does not
 * have the same semantics wrt the @mm->mmap_sem as filemap_fault() does.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
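
/*
 * Illustrative sketch (not part of this file's code): the futex code uses
 * fixup_user_fault() roughly like this to resolve a fault hit during an
 * atomic user access before retrying it ("uaddr" is hypothetical):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *			       FAULT_FLAG_WRITE, NULL);
 *	up_read(&mm->mmap_sem);
 *
 * Passing NULL for @unlocked means the fault must be resolved without
 * dropping mmap_sem.
 */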

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
				      &locked, gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However, it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
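
/*
 * Illustrative sketch (not part of this file's code): pinning pages of
 * another process roughly follows this pattern; "mm" would come from
 * something like get_task_mm(), and "addr"/"pages" are hypothetical.
 *
 *	int locked = 1;
 *	long got;
 *
 *	down_read(&mm->mmap_sem);
 *	got = get_user_pages_remote(NULL, mm, addr, nr_pages, FOLL_WRITE,
 *				    pages, NULL, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 *
 * If "locked" was cleared, mmap_sem was already dropped inside the call
 * and must not be released again.
 */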

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's and don't allow
 * passing of a locked parameter.  We also obviously don't pass
 * FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
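
/*
 * Illustrative sketch (not part of this file's code): the classic "pin a
 * user buffer for IO" pattern described above might look roughly like
 * this; "start", "nr_pages" and "pages" are hypothetical.
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (got <= 0)
 *		return got ? got : -EFAULT;
 *	...
 *	for (i = 0; i < got; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 *
 * Calling set_page_dirty_lock() before put_page() follows the rule spelled
 * out in the documentation above for pages that have been written to.
 */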

#ifdef CONFIG_FS_DAX
/*
 * This is the same as get_user_pages() in that it assumes we are
 * operating on the current task's mm, but it goes further to validate
 * that the vmas associated with the address range are suitable for
 * longterm elevated page reference counts. For example, filesystem-dax
 * mappings are subject to the lifetime enforced by the filesystem and
 * we need guarantees that longterm users like RDMA and V4L2 only
 * establish mappings that have a kernel enforced revocation mechanism.
 *
 * "longterm" == userspace controlled elevated page count lifetime.
 * Contrast this to iov_iter_get_pages() usages which are transient.
 */
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas_arg)
{
	struct vm_area_struct **vmas = vmas_arg;
	struct vm_area_struct *vma_prev = NULL;
	long rc, i;

	if (!pages)
		return -EINVAL;

	if (!vmas) {
		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
			       GFP_KERNEL);
		if (!vmas)
			return -ENOMEM;
	}

	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);

	for (i = 0; i < rc; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			break;
	}

	/*
	 * Either get_user_pages() failed, or the vma validation
	 * succeeded, in either case we don't need to put_page() before
	 * returning.
	 */
	if (i >= rc)
		goto out;

	for (i = 0; i < rc; i++)
		put_page(pages[i]);
	rc = -EOPNOTSUPP;
out:
	if (vmas != vmas_arg)
		kfree(vmas);
	return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
#endif /* CONFIG_FS_DAX */
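
/*
 * Illustrative sketch (not part of this file's code): a driver that keeps
 * pages pinned for a long time (e.g. an RDMA memory registration) would
 * use the _longterm variant so that filesystem-dax mappings are refused:
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages_longterm(start, nr_pages, FOLL_WRITE,
 *				      pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *
 * A return of -EOPNOTSUPP means the range contained a filesystem-dax vma
 * and no pages were left pinned.
 */
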

1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224
/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: if non-NULL, mmap_sem may be released; *@nonblocking is
 *		  then set to 0 (see below)
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
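
/*
 * Illustrative sketch (not part of this file's code): a simplified
 * rendering of how the ELF core dumper consumes get_dump_page() for each
 * page-sized chunk of a vma; see fs/binfmt_elf.c for the real thing.
 *
 *	page = get_dump_page(addr);
 *	if (page) {
 *		stop = !dump_emit(cprm, kmap(page), PAGE_SIZE);
 *		kunmap(page);
 *		put_page(page);
 *	} else {
 *		stop = !dump_skip(cprm, PAGE_SIZE);
 *	}
 */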

/*
 * Generic Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 *  free pages containing page tables or TLB flushing requires IPI broadcast.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_GUP

#ifndef gup_get_pte
/*
 * We assume that the PTE can be read atomically. If this is not the case for
 * your architecture, please provide the helper.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif
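
/*
 * Illustrative sketch only: an architecture whose PTEs are wider than its
 * native word size (so that a plain load could tear) might provide a
 * helper along these lines; the pte_low/pte_high field names are
 * hypothetical and depend on the architecture's pte layout.
 *
 *	static inline pte_t gup_get_pte(pte_t *ptep)
 *	{
 *		pte_t pte;
 *
 *		do {
 *			pte.pte_low = ptep->pte_low;
 *			smp_rmb();
 *			pte.pte_high = ptep->pte_high;
 *			smp_rmb();
 *		} while (unlikely(pte.pte_low != ptep->pte_low));
 *
 *		return pte;
 *	}
 */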

static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}

#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, write))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);

		if (!page_cache_get_speculative(head))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);

		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	if (pgmap)
		put_dev_pagemap(pgmap);
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	if (pgmap)
		put_dev_pagemap(pgmap);
	return 1;
}

static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}

static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}

static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}
#endif

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pmd_access_permitted(orig, write))
		return 0;

	if (pmd_devmap(orig))
		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);

	refs = 0;
	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pmd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pud_access_permitted(orig, write))
		return 0;

	if (pud_devmap(orig))
		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);

	refs = 0;
	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pud_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (!pgd_access_permitted(orig, write))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));
	refs = 0;
	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pgd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * architectures may use a different format for the
			 * hugetlbfs pmd than for the THP pmd
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset(&pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

static void gup_pgd_range(unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, nr))
				return;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, nr))
				return;
		} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}

#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use __get_user_pages_fast() for the range, or
 * we need to fall back to the slow version:
 */
bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
{
	unsigned long len, end;

	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	return end >= start;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	unsigned long len, end;
	unsigned long flags;
	int nr = 0;

	start &= PAGE_MASK;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_save(flags);
		gup_pgd_range(start, end, write, pages, &nr);
		local_irq_restore(flags);
	}

	return nr;
}
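
/*
 * Illustrative sketch (not part of this file's code): because it neither
 * sleeps nor takes mmap_sem, __get_user_pages_fast() can be used from
 * contexts that cannot block, e.g. to opportunistically grab a single
 * page and fall back to a slower path otherwise ("addr" is hypothetical):
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(addr, 1, 1, &page) != 1)
 *		return -EFAULT;
 *	...
 *	put_page(page);
 */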

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	unsigned long addr, len, end;
	int nr = 0, ret = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (nr_pages <= 0)
		return 0;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return -EFAULT;

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_disable();
		gup_pgd_range(addr, end, write, pages, &nr);
		local_irq_enable();
		ret = nr;
	}

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
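
/*
 * Illustrative sketch (not part of this file's code): a typical
 * direct-IO-style caller pins a user buffer via the fast path and drops
 * the references when the IO completes; "buf", "n" and "pages" are
 * hypothetical.
 *
 *	got = get_user_pages_fast((unsigned long)buf, n, 1, pages);
 *	if (got < 0)
 *		return got;
 *	...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 *
 * Note that fewer than the requested number of pages may be pinned, so a
 * short result must be handled (or the remainder retried).
 */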

#endif /* CONFIG_HAVE_GENERIC_GUP */