/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;
unsigned long vmalloc_earlyreserve;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
EXPORT_SYMBOL(vmalloc_earlyreserve);

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, page);
	dec_page_state(nr_page_table_pages);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
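	/*
	 * Worked example (illustrative, added for clarity; values assume
	 * x86-64's 2MB PMD_SIZE): freeing addr = 0x5ff000 up to
	 * end = 0x601000 with floor = 0x600000 first rounds addr down to
	 * 0x400000; since that is below floor, addr is bumped up by
	 * PMD_SIZE to 0x600000, preserving the page table still in use
	 * by whatever lives between floor and the old addr.  The
	 * "if (!addr)" and "if (!ceiling)" tests below are what reject
	 * the wrapped-to-zero cases described above.
	 */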

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);

	if (!(*tlb)->fullmm)
		flush_tlb_pgtables((*tlb)->mm, start, end);
}

void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
							HPAGE_SIZE)) {
				vma = next;
				next = vma->vm_next;
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	struct page *new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pmd_present(*pmd))		/* Another has populated it */
		pte_free(new);
	else {
		mm->nr_ptes++;
		inc_page_state(nr_page_table_pages);
		pmd_populate(mm, pmd, new);
	}
	spin_unlock(&mm->page_table_lock);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (pmd_present(*pmd))		/* Another has populated it */
		pte_free_kernel(new);
	else
		pmd_populate_kernel(&init_mm, pmd, new);
	spin_unlock(&init_mm.page_table_lock);
	return 0;
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}
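/*
 * Usage sketch (illustrative): callers below accumulate per-pte rss
 * deltas in a local pair of counters and flush them here in one call,
 * so the mm counters are updated once per batch of ptes rather than
 * once per page, e.g.
 *
 *	int rss[2] = { 0, 0 };		(rss[0] = file, rss[1] = anon)
 *	...
 *	rss[!!PageAnon(page)]++;
 *	...
 *	add_mm_rss(mm, rss[0], rss[1]);
 */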

/*
 * This function is called to print an error when a pte in a
 * !VM_RESERVED region is found pointing to an invalid pfn (which
 * is an error).
 *
 * The calling function must still handle the error.
 */
void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	unsigned long pfn;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swap_duplicate(pte_to_swp_entry(pte));
			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				list_add(&dst_mm->mmlist, &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
		}
		goto out_set_pte;
	}

	/* If the region is VM_RESERVED, the mapping is not
	 * mapped via rmap - duplicate the pte as is.
	 */
	if (vm_flags & VM_RESERVED)
		goto out_set_pte;

	pfn = pte_pfn(pte);
	/* If the pte points outside of valid memory but
	 * the region is not VM_RESERVED, we have a problem.
	 */
	if (unlikely(!pfn_valid(pfn))) {
		print_bad_pte(vma, pte, addr);
		goto out_set_pte; /* try to do something sane */
	}

	page = pfn_to_page(pfn);

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = *src_pte;
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);
	get_page(page);
	page_dup_rmap(page);
	rss[!!PageAnon(page)]++;

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = &src_mm->page_table_lock;
	spin_lock(src_ptl);

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    need_lockbreak(src_ptl) ||
			    need_lockbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	spin_unlock(src_ptl);
	pte_unmap_nested(src_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	pte_unmap_unlock(dst_pte - 1, dst_ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
	return 0;
}
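/*
 * Usage sketch (illustrative; the caller lives outside this file):
 * dup_mmap() on the fork path walks the parent's vma list and calls
 * copy_page_range(new_mm, old_mm, vma) for each vma.  The early
 * "return 0" above is what lets fork skip the page-table walk
 * entirely for file-backed mappings with no anonymous pages.
 */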

static void zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map(pmd, addr);
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent))
			continue;
		if (pte_present(ptent)) {
			struct page *page = NULL;
			if (!(vma->vm_flags & VM_RESERVED)) {
				unsigned long pfn = pte_pfn(ptent);
				if (unlikely(!pfn_valid(pfn)))
					print_bad_pte(vma, ptent, addr);
				else
					page = pfn_to_page(pfn);
			}
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent))
					mark_page_accessed(page);
				file_rss--;
			}
			page_remove_rmap(page);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, addr != end);

	add_mm_rss(mm, file_rss, anon_rss);
	pte_unmap(pte - 1);
}

static inline void zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		zap_pte_range(tlb, vma, pmd, addr, next, details);
	} while (pmd++, addr = next, addr != end);
}

static inline void zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		zap_pmd_range(tlb, vma, pud, addr, next, details);
	} while (pud++, addr = next, addr != end);
}

static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		zap_pud_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif
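/*
 * Worked numbers (illustrative, assuming 4K pages): with CONFIG_PREEMPT
 * each unmap_vmas() pass covers 8 * 4K = 32KB before the locks are
 * dropped and latency is rechecked; without preemption it is
 * 1024 * 4K = 4MB per pass, trading worst-case latency for
 * straight-line throughput.
 */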

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @mm: the controlling mm_struct
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.  Called under page_table_lock.
 *
 * We aim to not hold page_table_lock for too long (for scheduling latency
 * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;

	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		while (start != end) {
			unsigned long block;

			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (is_vm_hugetlb_page(vma)) {
				block = end - start;
				unmap_hugepage_range(vma, start, end);
			} else {
				block = min(zap_bytes, end - start);
				unmap_page_range(*tlbp, vma, start,
						start + block, details);
			}

			start += block;
			zap_bytes -= block;
			if ((long)zap_bytes > 0)
				continue;

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				need_lockbreak(&mm->page_table_lock) ||
				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					/* must reset count of rss freed */
					*tlbp = tlb_gather_mmu(mm, fullmm);
					goto out;
				}
				spin_unlock(&mm->page_table_lock);
				cond_resched();
				spin_lock(&mm->page_table_lock);
			}

			*tlbp = tlb_gather_mmu(mm, fullmm);
			tlb_start_valid = 0;
			zap_bytes = ZAP_BLOCK_SIZE;
		}
	}
out:
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	if (is_vm_hugetlb_page(vma)) {
		zap_hugepage_range(vma, address, size);
		return end;
	}

	lru_add_drain();
	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
	tlb_finish_mmu(tlb, address, end);
	spin_unlock(&mm->page_table_lock);
	return end;
}

/*
 * Do a quick page-table lookup for a single page.
 * mm->page_table_lock must be held.
 */
static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
			int read, int write, int accessed)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long pfn;
	struct page *page;

	page = follow_huge_addr(mm, address, write);
	if (! IS_ERR(page))
		return page;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;
	
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;
	if (pmd_huge(*pmd))
		return follow_huge_pmd(mm, address, pmd, write);

	ptep = pte_offset_map(pmd, address);
	if (!ptep)
		goto out;

	pte = *ptep;
	pte_unmap(ptep);
	if (pte_present(pte)) {
		if (write && !pte_write(pte))
			goto out;
		if (read && !pte_read(pte))
			goto out;
		pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (accessed) {
				if (write && !pte_dirty(pte) && !PageDirty(page))
					set_page_dirty(page);
				mark_page_accessed(page);
			}
			return page;
		}
	}

out:
	return NULL;
}

inline struct page *
follow_page(struct mm_struct *mm, unsigned long address, int write)
{
	return __follow_page(mm, address, 0, write, 1);
}

/*
 * check_user_page_readable() can be called from interrupt context by
 * oprofile, so we need to avoid taking any non-irq-safe locks
 */
int check_user_page_readable(struct mm_struct *mm, unsigned long address)
{
	return __follow_page(mm, address, 1, 0, 0) != NULL;
}
EXPORT_SYMBOL(check_user_page_readable);

static inline int
untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
			 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/* Check if the vma is for an anonymous mapping. */
	if (vma->vm_ops && vma->vm_ops->nopage)
		return 0;

	/* Check if page directory entry exists. */
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return 1;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return 1;

	/* Check if page middle directory entry exists. */
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 1;

	/* There is a pte slot for 'address' in 'mm'. */
	return 0;
}

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int flags;

	/* 
	 * Require read or write permissions.
	 * If 'force' is set, we only require the "MAY" flags.
	 */
	flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *	vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;
			if (write) /* user gate pages are read-only */
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				pages[i] = pte_page(*pte);
				get_page(pages[i]);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			len--;
			continue;
		}

		if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
				|| !(flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &len, i);
			continue;
		}
		spin_lock(&mm->page_table_lock);
		do {
			int write_access = write;
			struct page *page;

			cond_resched_lock(&mm->page_table_lock);
			while (!(page = follow_page(mm, start, write_access))) {
				int ret;

				/*
				 * Shortcut for anonymous pages. We don't want
				 * to force the creation of pages tables for
				 * insanely big anonymously mapped areas that
				 * nobody touched so far. This is important
				 * for doing a core dump for these mappings.
				 */
				if (!write && untouched_anonymous_page(mm, vma, start)) {
					page = ZERO_PAGE(start);
					break;
				}
				spin_unlock(&mm->page_table_lock);
				ret = __handle_mm_fault(mm, vma, start, write_access);

				/*
				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
				 * broken COW when necessary, even if maybe_mkwrite
				 * decided not to set pte_write. We can thus safely do
				 * subsequent page lookups as if they were reads.
				 */
				if (ret & VM_FAULT_WRITE)
					write_access = 0;

				switch (ret & ~VM_FAULT_WRITE) {
				case VM_FAULT_MINOR:
					tsk->min_flt++;
					break;
				case VM_FAULT_MAJOR:
					tsk->maj_flt++;
					break;
				case VM_FAULT_SIGBUS:
					return i ? i : -EFAULT;
				case VM_FAULT_OOM:
					return i ? i : -ENOMEM;
				default:
					BUG();
				}
				spin_lock(&mm->page_table_lock);
			}
			if (pages) {
				pages[i] = page;
				flush_dcache_page(page);
				page_cache_get(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
		spin_unlock(&mm->page_table_lock);
	} while (len);
	return i;
}
EXPORT_SYMBOL(get_user_pages);

static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = ZERO_PAGE(addr);
		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
		page_cache_get(page);
		page_add_file_rmap(page);
		inc_mm_counter(mm, file_rss);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, zero_pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (zeromap_pte_range(mm, pmd, addr, next, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (zeromap_pmd_range(mm, pud, addr, next, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int zeromap_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	struct mm_struct *mm = vma->vm_mm;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = zeromap_pud_range(mm, pgd, addr, next, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*  Note: this is only safe if the mm semaphore is held when called. */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED tells the core MM not to "manage" these pages
	 *	(e.g. refcount, mapcount, try to swap them out).
	 */
	vma->vm_flags |= VM_IO | VM_RESERVED;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
EXPORT_SYMBOL(remap_pfn_range);

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically.  Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_file_page
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page and do_no_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spin_lock(&mm->page_table_lock);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(&mm->page_table_lock);
	}
#endif
	pte_unmap(page_table);
	return same;
}
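/*
 * Illustrative scenario (added for clarity): on i386 with PAE enabled a
 * pte is 64 bits wide, but the non-atomic read described above may be
 * done as two 32-bit loads, so a concurrent fault on another CPU can
 * yield an orig_pte combining the low word of one entry with the high
 * word of another; re-reading under page_table_lock and comparing with
 * pte_same() rejects such a mismatched snapshot.
 */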

/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
{
	struct page *old_page, *new_page;
	unsigned long pfn = pte_pfn(orig_pte);
	pte_t entry;
	int ret = VM_FAULT_MINOR;

	BUG_ON(vma->vm_flags & VM_RESERVED);

	if (unlikely(!pfn_valid(pfn))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, orig_pte, address);
		ret = VM_FAULT_OOM;
		goto unlock;
	}
	old_page = pfn_to_page(pfn);

	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
		int reuse = can_share_swap_page(old_page);
		unlock_page(old_page);
		if (reuse) {
			flush_cache_page(vma, address, pfn);
			entry = pte_mkyoung(orig_pte);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			ptep_set_access_flags(vma, address, page_table, entry, 1);
			update_mmu_cache(vma, address, entry);
			lazy_mmu_prot_update(entry);
			ret |= VM_FAULT_WRITE;
			goto unlock;
		}
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	if (old_page == ZERO_PAGE(address)) {
		new_page = alloc_zeroed_user_highpage(vma, address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
		if (!new_page)
			goto oom;
		copy_user_highpage(new_page, old_page, address);
	}

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		page_remove_rmap(old_page);
		if (!PageAnon(old_page)) {
			inc_mm_counter(mm, anon_rss);
			dec_mm_counter(mm, file_rss);
		}
		flush_cache_page(vma, address, pfn);
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		ptep_establish(vma, address, page_table, entry);
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
		lru_cache_add_active(new_page);
		page_add_anon_rmap(new_page, vma, address);

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return ret;
oom:
	page_cache_release(old_page);
	return VM_FAULT_OOM;
}

/*
 * Helper functions for unmap_mapping_range().
 *
 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
 *
 * We have to restart searching the prio_tree whenever we drop the lock,
 * since the iterator is only valid while the lock is held, and anyway
 * a later vma might be split and reinserted earlier while lock dropped.
 *
 * The list of nonlinear vmas could be handled more efficiently, using
 * a placeholder, but handle it in the same way until a need is shown.
 * It is important to search the prio_tree before nonlinear list: a vma
 * may become nonlinear and be shifted from prio_tree to nonlinear list
 * while the lock is dropped; but never shifted from list to prio_tree.
 *
 * In order to make forward progress despite restarting the search,
 * vm_truncate_count is used to mark a vma as now dealt with, so we can
 * quickly skip it next time around.  Since the prio_tree search only
 * shows us those vmas affected by unmapping the range in question, we
 * can't efficiently keep all vmas in step with mapping->truncate_count:
 * so instead reset them all whenever it wraps back to 0 (then go to 1).
 * mapping->truncate_count and vma->vm_truncate_count are protected by
 * i_mmap_lock.
 *
 * In order to make forward progress despite repeatedly restarting some
 * large vma, note the restart_addr from unmap_vmas when it breaks out:
 * and restart from that address when we reach that vma again.  It might
 * have been split or merged, shrunk or extended, but never shifted: so
 * restart_addr remains valid so long as it remains in the vma's range.
 * unmap_mapping_range forces truncate_count to leap over page-aligned
 * values so we can save vma's restart_addr in its truncate_count field.
 */
#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
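/*
 * Example (illustrative, assuming 4K pages): restart addresses are
 * page-aligned, so values like 0x1000 or 0x2000 satisfy
 * is_restart_addr(); the wrap handling in unmap_mapping_range() below
 * increments truncate_count past exactly those values, leaving them
 * free to carry a saved restart address in vm_truncate_count.
 */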

static void reset_vma_truncate_counts(struct address_space *mapping)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
		vma->vm_truncate_count = 0;
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_truncate_count = 0;
}

static int unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	unsigned long restart_addr;
	int need_break;

again:
	restart_addr = vma->vm_truncate_count;
	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
		start_addr = restart_addr;
		if (start_addr >= end_addr) {
			/* Top of vma has been split off since last time */
			vma->vm_truncate_count = details->truncate_count;
			return 0;
		}
	}

	restart_addr = zap_page_range(vma, start_addr,
					end_addr - start_addr, details);

	/*
	 * We cannot rely on the break test in unmap_vmas:
	 * on the one hand, we don't want to restart our loop
	 * just because that broke out for the page_table_lock;
	 * on the other hand, it does no test when vma is small.
	 */
	need_break = need_resched() ||
			need_lockbreak(details->i_mmap_lock);

	if (restart_addr >= end_addr) {
		/* We have now completed this vma: mark it so */
		vma->vm_truncate_count = details->truncate_count;
		if (!need_break)
			return 0;
	} else {
		/* Note restart_addr in vma's truncate_count field */
		vma->vm_truncate_count = restart_addr;
		if (!need_break)
			goto again;
	}

	spin_unlock(details->i_mmap_lock);
	cond_resched();
	spin_lock(details->i_mmap_lock);
	return -EINTR;
}

static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	pgoff_t vba, vea, zba, zea;

restart:
	vma_prio_tree_foreach(vma, &iter, root,
			details->first_index, details->last_index) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;

		vba = vma->vm_pgoff;
		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		if (unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details) < 0)
			goto restart;
	}
}

static inline void unmap_mapping_range_list(struct list_head *head,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;

	/*
	 * In nonlinear VMAs there is no correspondence between virtual address
	 * offset and file offset.  So we must perform an exhaustive search
	 * across *all* the pages in each nonlinear VMA, not just the pages
	 * whose virtual address lies outside the file truncation point.
	 */
restart:
	list_for_each_entry(vma, head, shared.vm_set.list) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;
		details->nonlinear_vma = vma;
		if (unmap_mapping_range_vma(vma, vma->vm_start,
					vma->vm_end, details) < 0)
			goto restart;
	}
}

/**
 * unmap_mapping_range - unmap the portion of all mmaps
 * in the specified address_space corresponding to the specified
 * page range in the underlying file.
 * @mapping: the address space containing mmaps to be unmapped.
 * @holebegin: byte in first page to unmap, relative to the start of
 * the underlying file.  This will be rounded down to a PAGE_SIZE
 * boundary.  Note that this is different from vmtruncate(), which
 * must keep the partial page.  In contrast, we must get rid of
 * partial pages.
 * @holelen: size of prospective hole in bytes.  This will be rounded
 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
 * end of the file.
 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
 * but 0 when invalidating pagecache, don't throw away private data.
 */
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows)
{
	struct zap_details details;
	pgoff_t hba = holebegin >> PAGE_SHIFT;
	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Check for overflow. */
	if (sizeof(holelen) > sizeof(hlen)) {
		long long holeend =
			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (holeend & ~(long long)ULONG_MAX)
			hlen = ULONG_MAX - hba + 1;
	}

	details.check_mapping = even_cows? NULL: mapping;
	details.nonlinear_vma = NULL;
	details.first_index = hba;
	details.last_index = hba + hlen - 1;
	if (details.last_index < details.first_index)
		details.last_index = ULONG_MAX;
	details.i_mmap_lock = &mapping->i_mmap_lock;

	spin_lock(&mapping->i_mmap_lock);

	/* serialize i_size write against truncate_count write */
	smp_wmb();
	/* Protect against page faults, and endless unmapping loops */
	mapping->truncate_count++;
	/*
	 * For archs where spin_lock has inclusive semantics like ia64
	 * this smp_mb() will prevent to read pagetable contents
	 * before the truncate_count increment is visible to
	 * other cpus.
	 */
	smp_mb();
	if (unlikely(is_restart_addr(mapping->truncate_count))) {
		if (mapping->truncate_count == 0)
			reset_vma_truncate_counts(mapping);
		mapping->truncate_count++;
	}
	details.truncate_count = mapping->truncate_count;

	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
		unmap_mapping_range_tree(&mapping->i_mmap, &details);
	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
	spin_unlock(&mapping->i_mmap_lock);
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode * inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	/*
	 * truncation of in-use swapfiles is disallowed - it would cause
	 * subsequent swapout to scribble on the now-freed blocks.
	 */
	if (IS_SWAPFILE(inode))
		goto out_busy;
	i_size_write(inode, offset);
	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out_big;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
out_busy:
	return -ETXTBSY;
}

EXPORT_SYMBOL(vmtruncate);

/* 
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...  
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
{
#ifdef CONFIG_NUMA
	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
#endif
	int i, num;
	struct page *new_page;
	unsigned long offset;

	/*
	 * Get the number of handles we should do readahead io to.
	 */
	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		/* Ok, do the async read-ahead now */
		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
							   offset), vma, addr);
		if (!new_page)
			break;
		page_cache_release(new_page);
#ifdef CONFIG_NUMA
		/*
		 * Find the next applicable VMA for the NUMA policy.
		 */
		addr += PAGE_SIZE;
		if (addr == 0)
			vma = NULL;
		if (vma) {
			if (addr >= vma->vm_end) {
				vma = next_vma;
				next_vma = vma ? vma->vm_next : NULL;
			}
			if (vma && addr < vma->vm_start)
				vma = NULL;
		} else {
			if (next_vma && addr >= next_vma->vm_start) {
				vma = next_vma;
				next_vma = vma->vm_next;
			}
		}
#endif
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
}
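
/*
 * Worked example (editorial, not part of the original source): with the
 * default page_cluster of 3, a fault on swap offset 42 aligns to the
 * block [40, 48), so up to 1 << 3 = 8 entries, including the faulting
 * one, are queued in a single pass over the swap area.
 */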

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access, pte_t orig_pte)
{
	spinlock_t *ptl;
	struct page *page;
	swp_entry_t entry;
	pte_t pte;
	int ret = VM_FAULT_MINOR;

	if (!pte_unmap_same(mm, page_table, orig_pte))
		goto out;

	entry = pte_to_swp_entry(orig_pte);
	page = lookup_swap_cache(entry);
	if (!page) {
		swapin_readahead(entry, address, vma);
		page = read_swap_cache_async(entry, vma, address);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte
			 * while we released the pte lock.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
			if (likely(pte_same(*page_table, orig_pte)))
				ret = VM_FAULT_OOM;
			goto unlock;
		}

		/* Had to read the page from swap area: Major fault */
		ret = VM_FAULT_MAJOR;
		inc_page_state(pgmajfault);
		grab_swap_token();
	}

	mark_page_accessed(page);
	lock_page(page);

	/*
	 * Back out if somebody else already faulted in this pte.
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*page_table, orig_pte)))
		goto out_nomap;

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/* The page isn't present yet, go ahead with the fault. */

	inc_mm_counter(mm, anon_rss);
	pte = mk_pte(page, vma->vm_page_prot);
	if (write_access && can_share_swap_page(page)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		write_access = 0;
	}

	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);

	swap_free(entry);
	if (vm_swap_full())
		remove_exclusive_swap_page(page);
	unlock_page(page);

	if (write_access) {
		if (do_wp_page(mm, vma, address,
				page_table, pmd, ptl, pte) == VM_FAULT_OOM)
			ret = VM_FAULT_OOM;
		goto out;
	}

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	lazy_mmu_prot_update(pte);
unlock:
	pte_unmap_unlock(page_table, ptl);
out:
	return ret;
out_nomap:
	pte_unmap_unlock(page_table, ptl);
	unlock_page(page);
	page_cache_release(page);
	return ret;
}
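
/*
 * Editorial note (not part of the original source): the VM_FAULT_MAJOR
 * vs VM_FAULT_MINOR split above is what userspace sees as major/minor
 * fault counts: a swap-cache hit is a minor fault, an actual read from
 * the swap device a major one.  For example:
 *
 *	struct rusage ru;
 *	getrusage(RUSAGE_SELF, &ru);	... ru.ru_majflt, ru.ru_minflt ...
 */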

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	if (write_access) {
		/* Allocate our own private page. */
		pte_unmap(page_table);

		if (unlikely(anon_vma_prepare(vma)))
			goto oom;
		page = alloc_zeroed_user_highpage(vma, address);
		if (!page)
			goto oom;

		entry = mk_pte(page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);

		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
		if (!pte_none(*page_table))
			goto release;
		inc_mm_counter(mm, anon_rss);
		lru_cache_add_active(page);
		SetPageReferenced(page);
		page_add_anon_rmap(page, vma, address);
	} else {
		/* Map the ZERO_PAGE - vm_page_prot is readonly */
		page = ZERO_PAGE(address);
		page_cache_get(page);
		entry = mk_pte(page, vma->vm_page_prot);

		ptl = &mm->page_table_lock;
		spin_lock(ptl);
		if (!pte_none(*page_table))
			goto release;
		inc_mm_counter(mm, file_rss);
		page_add_file_rmap(page);
	}

	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return VM_FAULT_MINOR;
release:
	page_cache_release(page);
	goto unlock;
oom:
	return VM_FAULT_OOM;
}
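
/*
 * Illustrative example (editorial, not part of the original source):
 * the two branches above are what this userspace sequence exercises on
 * anonymous memory:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	char c = p[0];	... read fault: ZERO_PAGE mapped read-only ...
 *	p[0] = 1;	... write fault: do_wp_page() supplies a private
 *			    zeroed copy ...
 */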

/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access)
{
	spinlock_t *ptl;
	struct page *new_page;
	struct address_space *mapping = NULL;
	pte_t entry;
	unsigned int sequence = 0;
	int ret = VM_FAULT_MINOR;
	int anon = 0;

	pte_unmap(page_table);

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		sequence = mapping->truncate_count;
		smp_rmb(); /* serializes i_size against truncate_count */
	}
retry:
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
	/*
	 * No smp_rmb is needed here as long as there's a full
	 * spin_lock/unlock sequence inside the ->nopage callback
	 * (for the pagecache lookup) that acts as an implicit
	 * smp_mb() and prevents the i_size read to happen
	 * after the next truncate_count read.
	 */

	/* no page was available -- either SIGBUS or OOM */
	if (new_page == NOPAGE_SIGBUS)
		return VM_FAULT_SIGBUS;
	if (new_page == NOPAGE_OOM)
		return VM_FAULT_OOM;

	/*
	 * Should we do an early C-O-W break?
	 */
	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		struct page *page;

		if (unlikely(anon_vma_prepare(vma)))
			goto oom;
		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
		if (!page)
			goto oom;
		copy_user_highpage(page, new_page, address);
		page_cache_release(new_page);
		new_page = page;
		anon = 1;
	}

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	/*
	 * For a file-backed vma, someone could have truncated or otherwise
	 * invalidated this page.  If unmap_mapping_range got called,
	 * retry getting the page.
	 */
	if (mapping && unlikely(sequence != mapping->truncate_count)) {
		pte_unmap_unlock(page_table, ptl);
		page_cache_release(new_page);
		cond_resched();
		sequence = mapping->truncate_count;
		smp_rmb();
		goto retry;
	}

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (pte_none(*page_table)) {
		flush_icache_page(vma, new_page);
		entry = mk_pte(new_page, vma->vm_page_prot);
		if (write_access)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		set_pte_at(mm, address, page_table, entry);
		if (anon) {
			inc_mm_counter(mm, anon_rss);
			lru_cache_add_active(new_page);
			page_add_anon_rmap(new_page, vma, address);
		} else if (!(vma->vm_flags & VM_RESERVED)) {
			inc_mm_counter(mm, file_rss);
			page_add_file_rmap(new_page);
		}
	} else {
		/* One of our sibling threads was faster, back out. */
		page_cache_release(new_page);
		goto unlock;
	}

	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return ret;
oom:
	page_cache_release(new_page);
	return VM_FAULT_OOM;
}
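
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * minimal shape of a driver ->nopage() handler as called by do_no_page()
 * above.  All "mydrv" names are hypothetical.
 *
 *	static struct page *mydrv_nopage(struct vm_area_struct *vma,
 *					 unsigned long address, int *type)
 *	{
 *		struct mydrv *drv = vma->vm_private_data;
 *		unsigned long pgoff = vma->vm_pgoff +
 *			((address - vma->vm_start) >> PAGE_SHIFT);
 *
 *		if (pgoff >= drv->nr_pages)
 *			return NOPAGE_SIGBUS;
 *		get_page(drv->pages[pgoff]);	... caller expects a held
 *						    reference ...
 *		if (type)
 *			*type = VM_FAULT_MINOR;
 *		return drv->pages[pgoff];
 *	}
 */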

/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * nonlinear vmas.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access, pte_t orig_pte)
{
	pgoff_t pgoff;
	int err;

	if (!pte_unmap_same(mm, page_table, orig_pte))
		return VM_FAULT_MINOR;

	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, orig_pte, address);
		return VM_FAULT_OOM;
	}
	/* We can then assume vma->vm_ops && vma->vm_ops->populate */

	pgoff = pte_to_pgoff(orig_pte);
	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
					vma->vm_page_prot, pgoff, 0);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_MAJOR;
}
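
/*
 * Illustrative example (editorial, not part of the original source):
 * the nonlinear ptes handled above come from remap_file_pages(), e.g.
 *
 *	p = mmap(NULL, 4 * 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	remap_file_pages(p, 4096, 0, 3, 0);	... file page 3 at p ...
 *
 * Once such a page is unmapped under memory pressure, the pte keeps the
 * encoded file offset, and the next touch refaults it here via
 * ->populate().
 */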

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, pmd_t *pmd, int write_access)
{
	pte_t entry;
	spinlock_t *ptl;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
			if (!vma->vm_ops || !vma->vm_ops->nopage)
				return do_anonymous_page(mm, vma, address,
					pte, pmd, write_access);
			return do_no_page(mm, vma, address,
					pte, pmd, write_access);
		}
		if (pte_file(entry))
			return do_file_page(mm, vma, address,
					pte, pmd, write_access, entry);
		return do_swap_page(mm, vma, address,
					pte, pmd, write_access, entry);
	}

	ptl = &mm->page_table_lock;
	spin_lock(ptl);
	if (unlikely(!pte_same(*pte, entry)))
		goto unlock;
	if (write_access) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address,
					pte, pmd, ptl, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	ptep_set_access_flags(vma, address, pte, entry, write_access);
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
unlock:
	pte_unmap_unlock(pte, ptl);
	return VM_FAULT_MINOR;
}
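
/*
 * Dispatch summary for handle_pte_fault() (editorial aid, not part of
 * the original source):
 *
 *	pte state				handler
 *	------------------------------------	--------------------
 *	none, vma lacks ->nopage		do_anonymous_page()
 *	none, vma has ->nopage			do_no_page()
 *	!present, pte_file()			do_file_page()
 *	!present, otherwise (swap entry)	do_swap_page()
 *	present, write to read-only pte		do_wp_page()
 *	present, otherwise			dirty/young updates here
 */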

/*
 * By the time we get here, we already hold the mm semaphore
 */
int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, int write_access)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	__set_current_state(TASK_RUNNING);

	inc_page_state(pgfault);

	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, write_access);

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		return VM_FAULT_OOM;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		return VM_FAULT_OOM;
	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		return VM_FAULT_OOM;

	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
}
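
/*
 * Illustrative sketch (editorial, not part of the original source): how
 * an architecture's page fault handler typically gets here, through the
 * handle_mm_fault() wrapper in <linux/mm.h>; error paths are elided.
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, address);
 *	if (vma && vma->vm_start <= address) {
 *		switch (handle_mm_fault(mm, vma, address, write)) {
 *		case VM_FAULT_MINOR:	tsk->min_flt++; break;
 *		case VM_FAULT_MAJOR:	tsk->maj_flt++; break;
 *		case VM_FAULT_SIGBUS:	... force SIGBUS ...; break;
 *		case VM_FAULT_OOM:	... out-of-memory path ...; break;
 *		}
 *	}
 *	up_read(&mm->mmap_sem);
 */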

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		pud_free(new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
	if (pud_present(*pud))		/* Another has populated it */
		pmd_free(new);
	else
		pud_populate(mm, pud, new);
#else
	if (pgd_present(*pud))		/* Another has populated it */
		pmd_free(new);
	else
		pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */

int make_pages_present(unsigned long addr, unsigned long end)
{
	int ret, len, write;
	struct vm_area_struct * vma;

	vma = find_vma(current->mm, addr);
	if (!vma)
		return -1;
	write = (vma->vm_flags & VM_WRITE) != 0;
	if (addr >= end)
		BUG();
	if (end > vma->vm_end)
		BUG();
	len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
	ret = get_user_pages(current, current->mm, addr,
			len, write, 0, NULL, NULL);
	if (ret < 0)
		return ret;
	return ret == len ? 0 : -1;
}
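
/*
 * Editorial note (not part of the original source): mlock() is the main
 * user of make_pages_present(); mm/mlock.c calls roughly
 *
 *	make_pages_present(start, end);
 *
 * for each newly VM_LOCKED range, faulting every page in via
 * get_user_pages() as above.
 */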

/* 
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page * vmalloc_to_page(void * vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
  
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}

EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(void * vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}

EXPORT_SYMBOL(vmalloc_to_pfn);
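
/*
 * Illustrative use (editorial, not part of the original source): a
 * driver that mmap()s a vmalloc() buffer can back its ->nopage() with
 * these helpers; "buf" is the hypothetical vmalloc()ed base address.
 *
 *	page = vmalloc_to_page(buf + (address - vma->vm_start));
 *	get_page(page);
 *	return page;
 */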

#if !defined(__HAVE_ARCH_GATE_AREA)

#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_page_prot = PAGE_READONLY;
	gate_vma.vm_flags = VM_RESERVED;
	return 0;
}
__initcall(gate_vma_init);
#endif

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef AT_SYSINFO_EHDR
	return &gate_vma;
#else
	return NULL;
#endif
}

int in_gate_area_no_task(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
#endif
	return 0;
}

#endif	/* __HAVE_ARCH_GATE_AREA */