/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_counter.h>		/* perf_swcounter_event		*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

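/*
 * Scan up to 15 opcode bytes at the faulting instruction pointer and
 * decide whether the fault was (bogusly) raised by a prefetch
 * instruction, so the caller can ignore it.
 */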
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

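/*
 * Fill in a siginfo for the faulting address and deliver the signal
 * to the given task.
 */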
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

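/*
 * pgd_lock protects pgd_list, the list of page-global directories that
 * vmalloc_sync_all() walks to propagate kernel mappings.
 */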
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

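	/* Copy the kernel's pmd entry into this pgd if it is still missing: */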
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page), address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

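/*
 * Dump the page-table walk for 'address', starting from the pgd loaded
 * in CR3, to aid debugging of the oops that is about to be printed.
 */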
static void dump_pagetable(unsigned long address)
{
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							& (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {

		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							& (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	unsigned long address;

	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE) {

		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

static const char errata93_warning[] =
KERN_ERR 
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

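/* Check whether a page-table pointer can be read without faulting: */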
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

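/*
 * 64-bit version: walk and print the PGD/PUD/PMD/PTE entries that map
 * 'address', bailing out at the first level that cannot be read.
 */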
static void dump_pagetable(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);

	pgd += pgd_index(address);
	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

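/*
 * A reserved bit was set in a page-table entry (PF_RSVD): report the
 * corrupted page table and oops.
 */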
static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

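/*
 * A fault in kernel context with no exception fixup: after ruling out
 * the prefetch and erratum #93 quirks, oops and kill the current task.
 */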
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (*stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER))
		no_context(regs, error_code, address);

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

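/*
 * Dispatch the error value returned by handle_mm_fault(): either OOM
 * or SIGBUS.
 */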
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		out_of_memory(regs, error_code, address);
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else
			BUG();
	}
}

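/*
 * Check whether the permission bits of a (possibly huge) page-table
 * entry satisfy the access described by error_code.
 */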
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;

		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int write;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}

		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	write = error_code & PF_WRITE;

	if (unlikely(access_error(error_code, write, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
	} else {
		tsk->min_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}