/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm-generic/sections.h>

/*
 * Page fault error code bits
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
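/*
 * Added note (illustrative, not from the original source): a user-mode
 * write to a not-present page arrives with error_code == 0x06
 * (PF_USER | PF_WRITE, PF_PROT clear), while a user-mode read of a
 * present but protected page arrives as 0x05 (PF_USER | PF_PROT).
 */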

static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
#else
	if (!user_mode(regs)) {
#endif
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
#else
	return 0;
#endif
}
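/*
 * Added note: a non-zero return from notify_page_fault() means a
 * kprobe fault handler consumed the fault, so do_page_fault() below
 * returns immediately without taking mmap_sem.
 */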

/*
 * X86_32
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

#ifdef CONFIG_X86_32
	if (!(__supported_pte_mask & _PAGE_NX))
		return 0;
#endif

	/* If it was an exec fault on an NX page, ignore */
	if (error_code & PF_INSTR)
		return 0;

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
#ifdef CONFIG_X86_64
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
			 * Need to figure out under what instruction mode the
			 * instruction was issued. Could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
#endif
		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;

			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
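/*
 * Added example (illustrative): "prefetchnta (%rax)" assembles to the
 * bytes 0f 18 00.  The scan above skips any prefixes, lands on the
 * 0x00-row opcode 0x0f (instr_lo == 0xF), fetches the following byte
 * (0x18) and reports the fault as a prefetch.
 */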

static void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

#ifdef CONFIG_X86_64
static int bad_address(void *p)
{
	unsigned long dummy;
	return probe_kernel_address((unsigned long *)p, dummy);
}
#endif

void dump_pagetable(unsigned long address)
{
#ifdef CONFIG_X86_32
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
		                                         & (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
		                                         & (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
#else /* CONFIG_X86_64 */
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd)) goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd)) goto ret;

	pud = pud_offset(pgd, address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud))	goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
#endif
}
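/*
 * Added sample (hypothetical values) of the 64-bit walk above:
 *
 *   PGD 203067 PUD 204067 PMD 205067 PTE 8000000002a3d063
 *
 * Each number is the raw table entry; "BAD" means the entry itself
 * could not be read.
 */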

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}
#endif

#ifdef CONFIG_X86_64
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/* Workaround for K8 erratum #93 & buggy BIOS.
   BIOS SMM functions are required to use a specific workaround
   to avoid corruption of the 64bit RIP register on C stepping K8.
   A lot of BIOSes that weren't tested properly miss this.
   The OS sees this as a page fault with the upper 32bits of RIP cleared.
   Try to work around it here.
   Note we only handle faults in the kernel here.
   Does nothing on X86_32.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	static int warned;
	if (address != regs->ip)
		return 0;
	if ((address >> 32) != 0)
		return 0;
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
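/*
 * Added example (illustrative): if the fault address and regs->ip both
 * read 0x00000000a0123456, execution is retried at 0xffffffffa0123456,
 * provided that address falls inside kernel text or the module range.
 */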

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps to illegal
 * addresses >4GB.  We catch this in the page fault handler because these
 * addresses are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
	    (address >> 32))
		return 1;
#endif
	return 0;
}

void do_invalid_op(struct pt_regs *, unsigned long);

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
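/*
 * Added background note: the F00F workaround maps the IDT read-only,
 * so the lockup turns into a page fault on the IDT itself.  IDT
 * entries are 8 bytes on 32-bit (hence the >> 3), and entry 6 is #UD,
 * which is why the fault is redispatched to do_invalid_op() above.
 */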

static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
			    unsigned long address)
{
#ifdef CONFIG_X86_32
	if (!oops_may_print())
		return;

#ifdef CONFIG_X86_PAE
	if (error_code & PF_INSTR) {
		int level;
		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(KERN_CRIT "kernel tried to execute "
				"NX-protected page - exploit attempt? "
				"(uid: %d)\n", current->uid);
	}
#endif
	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");
	printk(KERN_CONT " at %08lx\n", address);

	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);
	dump_pagetable(address);
#else /* CONFIG_X86_64 */
	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");
	printk(KERN_CONT " at %016lx\n", address);

	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);
	dump_pagetable(address);
#endif
}

#ifdef CONFIG_X86_64
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);
	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	if (__die("Bad pagetable", regs, error_code))
		regs = NULL;
	oops_end(flags, regs, SIGKILL);
}
#endif

/*
 * Handle a spurious fault caused by a stale TLB entry.  This allows
 * us to lazily refresh the TLB when increasing the permissions of a
 * kernel page (RO -> RW or NX -> X).  Doing it eagerly is very
 * expensive since that implies doing a full cross-processor TLB
 * flush, even if no stale TLB entries exist on other processors.
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static int spurious_fault(unsigned long address,
			  unsigned long error_code)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;
	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}
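/*
 * Added scenario (illustrative): CPU A upgrades a kernel PTE from RO
 * to RW and flushes only its own TLB.  CPU B, still holding the stale
 * RO entry, takes a write fault; the walk above sees pte_write() set,
 * reports the fault as spurious, and the access is simply retried.
 */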

/*
 * X86_32
 * Handle a fault on the vmalloc or module mapping area
 *
 * X86_64
 * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
#else
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Copy kernel mappings over when needed. This can also
	   happen within a race in a page table update. In the latter
	   case just flush. */

	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/* Below here mismatches are bugs because these lower tables
	   are shared */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);
	/* Don't use pte_page here, because the mappings can point
	   outside mem_map, and the NUMA hash lookup cannot handle
	   that. */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	return 0;
#endif
}
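/*
 * Added example (illustrative): a task's pgd was populated before a
 * driver vmalloc()ed a new region, so the task's top-level entry for
 * that region is still empty.  The first touch faults here and the
 * missing entry is copied from the init_mm reference tables instead
 * of forcing a global sync.
 */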

int show_unhandled_signals = 1;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;
#ifdef CONFIG_X86_64
	unsigned long flags;
#endif

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	if (notify_page_fault(regs))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
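	/*
	 * Added example: a kernel-mode read of a freshly vmalloc()ed
	 * page faults with error_code == 0 and is handled here, while
	 * error_code == 0x03 (PF_PROT|PF_WRITE on a present page)
	 * can never be a vmalloc fault.
	 */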
#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;

		/* Can handle a stale RO->RW TLB */
		if (spurious_fault(address, error_code))
			return;

		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;
#else /* CONFIG_X86_64 */
	if (unlikely(address >= TASK_SIZE64)) {
		/*
		 * Don't check for the module range here: its PML4
		 * is always initialized because it's shared with the main
		 * kernel text. Only vmalloc may need PML4 syncups.
		 */
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
			if (vmalloc_fault(address) >= 0)
				return;
		}

		/* Can handle a stale RO->RW TLB */
		if (spurious_fault(address, error_code))
			return;

		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}
	if (likely(regs->flags & X86_EFLAGS_IF))
		local_irq_enable();

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

	/*
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet.
	 */
	if (user_mode_vm(regs))
		error_code |= PF_USER;
again:
#endif
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
#ifdef CONFIG_X86_32
	if (vma->vm_start <= address)
#else
	if (likely(vma->vm_start <= address))
#endif
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work.  ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:		/* read, present */
		goto bad_area;
	case 0:			/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
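	/*
	 * Added example: a user write to a present page in a vma
	 * mapped PROT_READ arrives as error_code 0x07; the switch
	 * above takes the "write, present" arm, and with VM_WRITE
	 * clear the access falls to bad_area, delivering SIGSEGV
	 * with si_code SEGV_ACCERR.
	 */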

#ifdef CONFIG_X86_32
survive:
#endif
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#ifdef CONFIG_X86_32
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (v8086_mode(regs)) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
#endif
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (is_errata100(regs, address))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
#ifdef CONFIG_X86_32
			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
#else
			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
#endif
			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			tsk->comm, task_pid_nr(tsk), address, regs->ip,
			regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

	if (is_f00f_bug(regs, address))
		return;

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	/*
	 * X86_32
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 *
	 * X86_64
	 * Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
#ifdef CONFIG_X86_32
	bust_spinlocks(1);

	show_fault_oops(regs, error_code, address);

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
#else /* CONFIG_X86_64 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	if (__die("Oops", regs, error_code))
		regs = NULL;
	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);
	oops_end(flags, regs, SIGKILL);
#endif

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_X86_32
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
#else
	if (is_global_init(current)) {
		yield();
		goto again;
	}
#endif
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;
#ifdef CONFIG_X86_32
	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;
#endif
	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

#ifdef CONFIG_X86_64
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
#endif

void vmalloc_sync_all(void)
{
#ifdef CONFIG_X86_32
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
					(struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
								address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#else /* CONFIG_X86_64 */
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			const pgd_t *pgd_ref = pgd_offset_k(address);
			struct page *page;

			if (pgd_none(*pgd_ref))
				continue;
			spin_lock(&pgd_lock);
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd;
				pgd = (pgd_t *)page_address(page) + pgd_index(address);
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
				else
					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
			}
			spin_unlock(&pgd_lock);
			set_bit(pgd_index(address), insync);
		}
		if (address == start)
			start = address + PGDIR_SIZE;
	}
	/* Check that there is no need to do the same for the modules area. */
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
#endif
}