#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		if (unlikely(!try_get_page(page))) {
			page = ERR_PTR(-ENOMEM);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_sem is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
			}
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}
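
/*
 * Illustrative sketch, not upstream code: one way a caller holding mmap_sem
 * could interpret the three possible results of follow_page() described in
 * the follow_page_mask() comment above -- a valid page, NULL (nothing
 * mapped, or a fault is needed), or an ERR_PTR() for a mapping with no
 * struct page.  The helper name peek_user_page() is hypothetical.
 *
 *	static struct page *peek_user_page(struct vm_area_struct *vma,
 *					   unsigned long addr)
 *	{
 *		struct page *page = follow_page(vma, addr, FOLL_GET);
 *
 *		if (!page || IS_ERR(page))
 *			return NULL;
 *		return page;	// caller must put_page() when done
 *	}
 */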

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	if (unlikely(!try_get_page(*page))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags)) {
				ret = -EFAULT;
				goto out;
			}
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				/* FALLTHRU */
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section); that access returns -EFAULT, and we want to
 * resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem. So it has not the
 * same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
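
/*
 * Illustrative sketch, not upstream code: the futex-style pattern this
 * function is meant for -- probe user memory with page faults disabled and,
 * on -EFAULT, resolve the fault with fixup_user_fault() before retrying.
 * The helper name probe_user_word() is hypothetical.
 *
 *	static int probe_user_word(u32 __user *uaddr, u32 *val)
 *	{
 *		struct mm_struct *mm = current->mm;
 *		bool unlocked = false;
 *		int ret;
 *
 *		down_read(&mm->mmap_sem);
 *	again:
 *		pagefault_disable();
 *		ret = __get_user(*val, uaddr);
 *		pagefault_enable();
 *		if (ret) {
 *			ret = fixup_user_fault(current, mm,
 *					       (unsigned long)uaddr, 0,
 *					       &unlocked);
 *			if (!ret)
 *				goto again;
 *		}
 *		up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 */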

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
				      &locked, gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
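
/*
 * Illustrative sketch, not upstream code: pinning a page-aligned user buffer
 * without the caller taking mmap_sem, copying it out through the kernel
 * mapping, and dropping the page references.  read_user_buf() and its
 * arguments are hypothetical.
 *
 *	static long read_user_buf(unsigned long uaddr, void *dst, long nr_pages)
 *	{
 *		struct page **pages;
 *		long got, i;
 *
 *		pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
 *		if (!pages)
 *			return -ENOMEM;
 *		got = get_user_pages_unlocked(uaddr, nr_pages, pages, 0);
 *		for (i = 0; i < got; i++) {
 *			void *kaddr = kmap(pages[i]);
 *
 *			memcpy(dst + i * PAGE_SIZE, kaddr, PAGE_SIZE);
 *			kunmap(pages[i]);
 *			put_page(pages[i]);
 *		}
 *		kfree(pages);
 *		return got;
 *	}
 */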

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
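
/*
 * Illustrative sketch, not upstream code: pinning one page in another
 * process's address space, roughly as a ptrace-style accessor would, with
 * the @locked handling described above.  pin_remote_page() is hypothetical.
 *
 *	static struct page *pin_remote_page(struct task_struct *tsk,
 *					    struct mm_struct *mm,
 *					    unsigned long addr)
 *	{
 *		struct page *page;
 *		int locked = 1;
 *		long ret;
 *
 *		down_read(&mm->mmap_sem);
 *		ret = get_user_pages_remote(tsk, mm, addr, 1, FOLL_FORCE,
 *					    &page, NULL, &locked);
 *		if (locked)
 *			up_read(&mm->mmap_sem);
 *		return ret == 1 ? page : NULL;
 *	}
 */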

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's and don't allow
 * passing of a locked parameter.  We also obviously don't pass
 * FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
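
/*
 * Illustrative sketch, not upstream code: the dirty/put protocol described
 * above for pages the kernel writes to after pinning them (for example with
 * get_user_pages(start, nr, FOLL_WRITE, pages, NULL)).  The helper name is
 * hypothetical.
 *
 *	static void zero_and_release(struct page **pages, long nr)
 *	{
 *		long i;
 *
 *		for (i = 0; i < nr; i++) {
 *			void *kaddr = kmap(pages[i]);
 *
 *			memset(kaddr, 0, PAGE_SIZE);
 *			kunmap(pages[i]);
 *			set_page_dirty_lock(pages[i]);
 *			put_page(pages[i]);
 *		}
 *	}
 */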

#ifdef CONFIG_FS_DAX
/*
 * This is the same as get_user_pages() in that it assumes we are
 * operating on the current task's mm, but it goes further to validate
 * that the vmas associated with the address range are suitable for
 * longterm elevated page reference counts. For example, filesystem-dax
 * mappings are subject to the lifetime enforced by the filesystem and
 * we need guarantees that longterm users like RDMA and V4L2 only
 * establish mappings that have a kernel enforced revocation mechanism.
 *
 * "longterm" == userspace controlled elevated page count lifetime.
 * Contrast this to iov_iter_get_pages() usages which are transient.
 */
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas_arg)
{
	struct vm_area_struct **vmas = vmas_arg;
	struct vm_area_struct *vma_prev = NULL;
	long rc, i;

	if (!pages)
		return -EINVAL;

	if (!vmas) {
		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
			       GFP_KERNEL);
		if (!vmas)
			return -ENOMEM;
	}

	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);

	for (i = 0; i < rc; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			break;
	}

	/*
	 * Either get_user_pages() failed, or the vma validation
	 * succeeded, in either case we don't need to put_page() before
	 * returning.
	 */
	if (i >= rc)
		goto out;

	for (i = 0; i < rc; i++)
		put_page(pages[i]);
	rc = -EOPNOTSUPP;
out:
	if (vmas != vmas_arg)
		kfree(vmas);
	return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
#endif /* CONFIG_FS_DAX */
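
/*
 * Illustrative sketch, not upstream code: a long-lived pinning path (in the
 * spirit of RDMA memory registration) that uses get_user_pages_longterm()
 * and treats -EOPNOTSUPP as "this mapping (e.g. filesystem-dax) cannot be
 * pinned long term".  pin_longterm_pages() is hypothetical.
 *
 *	static long pin_longterm_pages(unsigned long start, unsigned long nr,
 *				       struct page **pages)
 *	{
 *		long ret;
 *
 *		ret = get_user_pages_longterm(start, nr, FOLL_WRITE, pages,
 *					      NULL);
 *		if (ret == -EOPNOTSUPP)
 *			pr_debug("longterm pin refused for this mapping\n");
 *		return ret;
 *	}
 */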

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking:
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 *  free pages containing page tables or TLB flushing requires IPI broadcast.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_GUP

#ifndef gup_get_pte
/*
 * We assume that the PTE can be read atomically. If this is not the case for
 * your architecture, please provide the helper.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;
	return head;
}

#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, write))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		head = try_get_compound_head(page, 1);
		if (!head)
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);

		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	if (pgmap)
		put_dev_pagemap(pgmap);
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	if (pgmap)
		put_dev_pagemap(pgmap);
	return 1;
}

static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}

static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}

static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}
#endif

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pmd_access_permitted(orig, write))
		return 0;

	if (pmd_devmap(orig))
		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);

	refs = 0;
	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pmd_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pud_access_permitted(orig, write))
		return 0;

	if (pud_devmap(orig))
		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);

	refs = 0;
	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pud_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (!pgd_access_permitted(orig, write))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));
	refs = 0;
	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pgd_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
			     pmd_devmap(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * architectures may use a different format for
			 * hugetlbfs pmds than for THP pmds
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset(&pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

static void gup_pgd_range(unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, nr))
				return;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, nr))
				return;
		} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}

#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use __get_user_pages_fast() for the range, or
 * we need to fall back to the slow version:
 */
bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
{
	unsigned long len, end;

	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	return end >= start;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	unsigned long len, end;
	unsigned long flags;
	int nr = 0;

	start &= PAGE_MASK;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok((void __user *)start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_save(flags);
		gup_pgd_range(start, end, write, pages, &nr);
		local_irq_restore(flags);
	}

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	unsigned long addr, len, end;
	int nr = 0, ret = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (nr_pages <= 0)
		return 0;

	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_disable();
		gup_pgd_range(addr, end, write, pages, &nr);
		local_irq_enable();
		ret = nr;
	}
1904 1905 1906 1907 1908 1909

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
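
/*
 * Illustrative sketch, not upstream code: a typical get_user_pages_fast()
 * caller, pinning a user buffer for I/O-style access and releasing the
 * references afterwards.  pin_for_io() is hypothetical.
 *
 *	static int pin_for_io(unsigned long uaddr, int nr_pages,
 *			      struct page **pages)
 *	{
 *		int pinned;
 *
 *		pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
 *		if (pinned < 0)
 *			return pinned;	// no pages were pinned at all
 *
 *		// ... use pages[0..pinned-1] for I/O here ...
 *
 *		while (pinned--)
 *			put_page(pages[pinned]);
 *		return 0;
 *	}
 */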

#endif /* CONFIG_HAVE_GENERIC_GUP */