// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/extable.h>		/* search_exception_tables	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

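/*
 * Give any registered kprobe fault handler a chance to service the
 * fault (trap 14 is the page-fault vector); returns 1 if it did:
 */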
static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
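/*
 * Returns non-zero as long as the opcode byte looks like a prefix and
 * the scan in is_prefetch() should continue; sets *prefetch once the
 * two-byte prefetch opcode (0x0F 0x0D or 0x0F 0x18) is recognized.
 */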
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * We need to figure out under what instruction mode the
		 * instruction was issued. We could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well-known segments or the kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 through 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & X86_PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

/*
 * A protection key fault means that the PKRU value did not allow
 * access to some PTE.  Userspace can figure out what PKRU was
 * from the XSAVE state, and this function fills out a field in
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
 * If we get here, we know that the hardware signaled a X86_PF_PK
 * fault and that there was a VMA once we got into the fault
 * handler.  It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
 *
 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 * 3. T1   : faults...
 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 *	     faulted on a pte with its pkey=4.
 */
static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
		u32 *pkey)
{
	/* This is effectively an #ifdef */
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	/* Fault not from Protection Keys: nothing to do */
	if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
		return;
	/*
	 * force_sig_info_fault() is called from a number of
	 * contexts, some of which have a VMA and some of which
	 * do not.  The X86_PF_PK handling happens after we have a
	 * valid VMA, so we should never reach this without a
	 * valid VMA.
	 */
	if (!pkey) {
		WARN_ONCE(1, "PKU fault with no VMA passed in");
		info->si_pkey = 0;
		return;
	}
	/*
	 * si_pkey should be thought of as a strong hint, but not
	 * absolutely guaranteed to be 100% accurate because of
	 * the race explained above.
	 */
	info->si_pkey = *pkey;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, u32 *pkey, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
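	/*
	 * For hardware-poison faults, si_addr_lsb tells the signal handler
	 * the granularity of the poisoned mapping: PAGE_SHIFT for a base
	 * page, the hstate shift for a hugetlb page.
	 */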
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	fill_sig_info_pkey(si_signo, si_code, &info, pkey);

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
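/*
 * Copy the kernel mapping for @address from the init_mm reference
 * page tables into @pgd; returns the kernel pmd on success, or NULL
 * if the reference entry is not present.
 */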
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_p4d/set_pud.
	 */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3_pa();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	if (pmd_large(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
	unsigned long bit;

	if (!v8086_mode(regs) || !tsk->thread.vm86)
		return;

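	/*
	 * One bit per page of the 128K VGA window at 0xA0000:
	 * 32 bits * 4K pages cover 0xA0000-0xBFFFF.
	 */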
	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(address)];
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
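	/* Under PAE the top level is the 4-entry page-directory-pointer table: */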
	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
	p4d = p4d_offset(pgd, address);
	pud = pud_offset(p4d, address);
	pmd = pmd_offset(pud, address);
	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	pr_cont("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen due to a race in a page-table update. In the latter
	 * case just flush:
	 */
	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
	pgd_k = pgd_offset_k(address);
	if (pgd_none(*pgd_k))
		return -1;

	if (pgtable_l5_enabled) {
		if (pgd_none(*pgd)) {
			set_pgd(pgd, *pgd_k);
			arch_flush_lazy_mmu_mode();
		} else {
			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
		}
	}

	/* With 4-level paging, copying happens on the p4d level. */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		return -1;

	if (p4d_none(*p4d) && !pgtable_l5_enabled) {
		set_p4d(p4d, *p4d_k);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
	}

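	/*
	 * Below the synced level the kernel page tables are shared with
	 * init_mm, so the rest of the walk only checks that the mapping
	 * actually exists:
	 */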
	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -1;

	if (pud_large(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -1;

	if (pmd_large(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = base + pgd_index(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	pr_info("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (bad_address(p4d))
		goto bad;

	pr_cont("P4D %lx ", p4d_val(*p4d));
	if (!p4d_present(*p4d) || p4d_large(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (bad_address(pud))
		goto bad;

	pr_cont("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	pr_cont("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	pr_cont("PTE %lx", pte_val(*pte));
out:
	pr_cont("\n");
	return;
bad:
	pr_info("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
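		/*
		 * The workaround maps the IDT read-only, so the lockup
		 * turns into a page fault on the IDT; a fault address
		 * within descriptor 6 means the CPU was raising #UD,
		 * so emulate it:
		 */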
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static const char smep_warning[] = KERN_CRIT
"unable to execute userspace code (SMEP?) (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & X86_PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3_pa());
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %px\n", (void *) address);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs, X86_TRAP_PF)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current->thread.sig_on_uaccess_err && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | X86_PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address,
					     tsk, NULL, 0);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

#ifdef CONFIG_VMAP_STACK
	/*
	 * Stack overflow?  During boot, we can fault near the initial
	 * stack in the direct map, but that's not an overflow -- check
	 * that we're in vmalloc space to avoid this.
	 */
	if (is_vmalloc_addr((void *)address) &&
	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
		unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
		/*
		 * We're likely to be running with very little stack space
		 * left.  It's plausible that we'd hit this condition but
		 * double-fault even before we get this far, in which case
		 * we're fine: the double-fault handler will deal with it.
		 *
		 * We don't want to make it all the way into the oops code
		 * and then double-fault, though, because we're likely to
		 * break the console driver and lose most of the stack dump.
		 */
		asm volatile ("movq %[stack], %%rsp\n\t"
			      "call handle_stack_overflow\n\t"
			      "1: jmp 1b"
			      : ASM_CALL_CONSTRAINT
			      : "D" ("kernel stack overflow (page fault)"),
				"S" (regs), "d" (address),
				[stack] "rm" (stack));
		unreachable();
	}
#endif

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch, fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(tsk))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, u32 *pkey, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & X86_PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & X86_PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif

		/*
		 * To avoid leaking information about the kernel page table
		 * layout, pretend that user-mode accesses to kernel addresses
		 * are always protection faults.
		 */
		if (address >= TASK_SIZE_MAX)
			error_code |= X86_PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address, u32 *pkey)
{
	__bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address,  struct vm_area_struct *vma, int si_code)
{
	struct mm_struct *mm = current->mm;
	u32 pkey;

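	/* Capture the pkey now: the VMA cannot be used once mmap_sem is dropped. */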
	if (vma)
		pkey = vma_pkey(vma);

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address,
			       (vma) ? &pkey : NULL, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	/* This code is always called on the current mm */
	bool foreign = false;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return false;
	if (error_code & X86_PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return true;
	return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma))
		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
	else
		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  u32 *pkey, unsigned int fault)
{
	struct task_struct *tsk = current;
	int code = BUS_ADRERR;

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, u32 *pkey, unsigned int fault)
{
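	/*
	 * A fatal signal is already pending and the fault came from kernel
	 * mode: don't deliver another signal, just clean up via no_context():
	 */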
	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & X86_PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, pkey, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address, pkey);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;
	/*
	 * Note: We do not do lazy flushing on protection key
	 * changes, so no spurious fault will ever set X86_PF_PK.
	 */
	if ((error_code & X86_PF_PK))
		return 1;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry.  Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/*
	 * Only writes to RO or instruction fetches from NX may cause
	 * spurious faults.
	 *
	 * These could be from user or supervisor accesses but the TLB
	 * is only lazily flushed after a kernel mapping protection
	 * change, so user accesses are not expected to cause spurious
	 * faults.
	 */
	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
	    error_code != (X86_PF_INSTR | X86_PF_PROT))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;

	if (p4d_large(*p4d))
		return spurious_fault_check(error_code, (pte_t *) p4d);

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in the PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
NOKPROBE_SYMBOL(spurious_fault);

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	/* This is only called for the current mm, so: */
	bool foreign = false;

	/*
	 * Read or write was blocked by protection keys.  This is
	 * always an unconditional error and can never result in
	 * a follow-up action to resolve the fault, like a COW.
	 */
	if (error_code & X86_PF_PK)
		return 1;

	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a X86_PF_PK as soon as we fill in a
	 * page.
	 */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return 1;

	if (error_code & X86_PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & X86_PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_X86_SMAP))
		return false;

	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;

	if (error_code & X86_PF_USER)
		return false;

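	/* A kernel-mode access with EFLAGS.AC set is a deliberate stac() region: */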
	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, major = 0;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	u32 pkey;

	tsk = current;
	mm = tsk->mm;

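	/* Warm up the mmap_sem cacheline for write; we will likely take it below: */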
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (kprobes_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fix up a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address, NULL);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(kprobes_fault(regs)))
		return;

	if (unlikely(error_code & X86_PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (unlikely(smap_violation(error_code, regs))) {
		bad_area_nosemaphore(regs, error_code, address, NULL);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled, then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address, NULL);
		return;
	}

	/*
	 * It's safe to allow IRQs after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode(regs)) {
		local_irq_enable();
		error_code |= X86_PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & X86_PF_WRITE)
		flags |= FAULT_FLAG_WRITE;
	if (error_code & X86_PF_INSTR)
		flags |= FAULT_FLAG_INSTRUCTION;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!(error_code & X86_PF_USER) &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address, NULL);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & X86_PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address, vma);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
	 *
	 * Note that handle_userfault() may also release and reacquire mmap_sem
	 * (and not return with VM_FAULT_RETRY), when returning to userland to
	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
	 * (potentially after handling any pending signal during the return to
	 * userland). The return to userland is identified whenever
	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
	 * Thus we have to be careful about not touching vma after handling the
	 * fault, so we read the pkey beforehand.
	 */
	pkey = vma_pkey(vma);
	fault = handle_mm_fault(vma, address, flags);
	major |= fault & VM_FAULT_MAJOR;

	/*
	 * If we need to retry, the mmap_sem has already been released,
	 * and if there is a fatal signal pending there is no guarantee
	 * that we made any progress. Handle this case first.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* Retry at most once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(tsk))
				goto retry;
		}

		/* User mode? Just return to handle the fatal exception */
		if (flags & FAULT_FLAG_USER)
			return;

		/* Not returning to user mode? Handle exceptions or die: */
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	up_read(&mm->mmap_sem);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, &pkey, fault);
		return;
	}

	/*
	 * Major/minor page fault accounting. If any of the events
	 * returned VM_FAULT_MAJOR, we account it as a major fault.
	 */
	if (major) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}

	check_v8086_mode(regs, address, tsk);
}
NOKPROBE_SYMBOL(__do_page_fault);

static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
			 unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}

/*
 * We must have this function blacklisted from kprobes, tagged with notrace
 * and call read_cr2() before calling anything else. To avoid calling any
 * kind of tracing machinery before we've observed the CR2 value.
 *
 * exception_{enter,exit}() contains all sorts of tracepoints.
 */
dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	prev_state = exception_enter();
	if (trace_pagefault_enabled())
		trace_page_fault_entries(address, regs, error_code);

	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);