// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/extable.h>		/* search_exception_tables	*/
#include <linux/memblock.h>		/* max_low_pfn			*/
#include <linux/kfence.h>		/* kfence_handle_page_fault	*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
#include <linux/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
#include <linux/mm_types.h>

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/
#include <asm/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
#include <asm/desc.h>			/* store_idt(), ...		*/
#include <asm/cpu_entry_area.h>		/* exception stack		*/
#include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
#include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
#include <asm/vdso.h>			/* fixup_vdso_exception()	*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.  This is AMD erratum #91.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In 64-bit mode 0x40..0x4F are valid REX prefixes
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (get_kernel_nofault(opcode, instr))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}
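
/*
 * Informational: the decoder above only has to accept 0x0F 0x0D
 * (PREFETCH/PREFETCHW) and 0x0F 0x18 (PREFETCHh), optionally preceded by
 * segment, operand-size or REX prefixes.  Since an x86 instruction is at
 * most 15 bytes long, is_prefetch() below never scans more than 15 bytes.
 */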

static bool is_amd_k8_pre_npt(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	return unlikely(IS_ENABLED(CONFIG_CPU_SUP_AMD) &&
			c->x86_vendor == X86_VENDOR_AMD &&
			c->x86 == 0xf && c->x86_model < 0x40);
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/* Erratum #91 affects AMD K8, pre-NPT CPUs */
	if (!is_amd_k8_pre_npt())
		return 0;

	/*
	 * If it was a exec (instruction fetch) fault on NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & X86_PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	/*
	 * This code has historically always bailed out if IP points to a
	 * not-present page (e.g. due to a race).  No one has ever
	 * complained about this.
	 */
	pagefault_disable();

	while (instr < max_instr) {
		unsigned char opcode;

		if (user_mode(regs)) {
			if (get_user(opcode, instr))
				break;
		} else {
			if (get_kernel_nofault(opcode, instr))
				break;
		}

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}

	pagefault_enable();
	return prefetch;
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_p4d/set_pud.
	 */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);

	if (pmd_present(*pmd) != pmd_present(*pmd_k))
		set_pmd(pmd, *pmd_k);

	if (!pmd_present(*pmd_k))
		return NULL;
	else
		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));

	return pmd_k;
}

/*
 *   Handle a fault on the vmalloc or module mapping area
 *
 *   This is needed because there is a race condition between the time
 *   when the vmalloc mapping code updates the PMD to the point in time
 *   where it synchronizes this update with the other page-tables in the
 *   system.
 *
 *   In this race window another thread/CPU can map an area on the same
 *   PMD, finds it already present and does not synchronize it with the
 *   rest of the system yet. As a result v[mz]alloc might return areas
 *   which are not mapped in every page-table in the system, causing an
 *   unhandled page-fault when they are accessed.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3_pa();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	if (pmd_large(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);
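
/*
 * Informational: arch_sync_kernel_mappings() below is the other half of
 * this scheme on 32-bit.  The core vmalloc code invokes it (via the
 * ARCH_PAGE_TABLE_SYNC_MASK mechanism) after modifying PMD entries so that
 * every pgd on pgd_list picks up the new mapping; vmalloc_fault() above
 * covers accesses that race with that synchronization.
 */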

void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start & PMD_MASK;
	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
	     addr += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			vmalloc_sync_one(page_address(page), addr);
			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(address)];
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
	p4d = p4d_offset(pgd, address);
	pud = pud_offset(p4d, address);
	pmd = pmd_offset(pud, address);
	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	pr_cont("\n");
}

#else /* CONFIG_X86_64: */

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR 
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = base + pgd_index(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	pr_info("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (bad_address(p4d))
		goto bad;

	pr_cont("P4D %lx ", p4d_val(*p4d));
	if (!p4d_present(*p4d) || p4d_large(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (bad_address(pud))
		goto bad;

	pr_cont("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	pr_cont("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	pr_cont("PTE %lx", pte_val(*pte));
out:
	pr_cont("\n");
	return;
bad:
	pr_info("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (user_mode(regs))
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100 K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

/* Pentium F0 0F C7 C8 bug workaround: */
static int is_f00f_bug(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	if (boot_cpu_has_bug(X86_BUG_F00F) && !(error_code & X86_PF_USER) &&
	    idt_is_f00f_address(address)) {
		handle_invalid_op(regs);
		return 1;
	}
#endif
	return 0;
}

static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
{
	u32 offset = (index >> 3) * sizeof(struct desc_struct);
	unsigned long addr;
	struct ldttss_desc desc;

	if (index == 0) {
		pr_alert("%s: NULL\n", name);
		return;
	}

	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
		return;
	}

	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
			      sizeof(struct ldttss_desc))) {
		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
			 name, index);
		return;
	}

	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
#ifdef CONFIG_X86_64
	addr |= ((u64)desc.base3 << 32);
#endif
	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & X86_PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3_pa());
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
	}

	if (address < PAGE_SIZE && !user_mode(regs))
		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
			(void *)address);
	else
		pr_alert("BUG: unable to handle page fault for address: %px\n",
			(void *)address);

	pr_alert("#PF: %s %s in %s mode\n",
		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
		 (error_code & X86_PF_WRITE) ? "write access" :
					       "read access",
			     user_mode(regs) ? "user" : "kernel");
	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
		 !(error_code & X86_PF_PROT) ? "not-present page" :
		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
		 (error_code & X86_PF_PK)    ? "protection keys violation" :
					       "permissions violation");

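	/*
	 * Informational: the hardware #PF error code bits decoded above are
	 * bit 0 = X86_PF_PROT, bit 1 = X86_PF_WRITE, bit 2 = X86_PF_USER,
	 * bit 3 = X86_PF_RSVD, bit 4 = X86_PF_INSTR, bit 5 = X86_PF_PK and
	 * bit 15 = X86_PF_SGX.
	 */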
	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
		struct desc_ptr idt, gdt;
		u16 ldtr, tr;

		/*
		 * This can happen for quite a few reasons.  The more obvious
		 * ones are faults accessing the GDT, or LDT.  Perhaps
		 * surprisingly, if the CPU tries to deliver a benign or
		 * contributory exception from user code and gets a page fault
		 * during delivery, the page fault can be delivered as though
		 * it originated directly from user code.  This could happen
		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
		 * kernel or IST stack.
		 */
		store_idt(&idt);

		/* Usable even on Xen PV -- it's just slow. */
		native_store_gdt(&gdt);

		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
			 idt.address, idt.size, gdt.address, gdt.size);

		store_ldt(ldtr);
		show_ldttss(&gdt, "LDTR", ldtr);

		store_tr(tr);
		show_ldttss(&gdt, "TR", tr);
	}

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static void sanitize_error_code(unsigned long address,
				unsigned long *error_code)
{
	/*
	 * To avoid leaking information about the kernel page
	 * table layout, pretend that user-mode accesses to
	 * kernel addresses are always protection faults.
	 *
	 * NB: This means that failed vsyscalls with vsyscall=none
	 * will have the PROT bit.  This doesn't leak any
	 * information and does not appear to cause any problems.
	 */
	if (address >= TASK_SIZE_MAX)
		*error_code |= X86_PF_PROT;
}

static void set_signal_archinfo(unsigned long address,
				unsigned long error_code)
{
	struct task_struct *tsk = current;

	tsk->thread.trap_nr = X86_TRAP_PF;
	tsk->thread.error_code = error_code | X86_PF_USER;
	tsk->thread.cr2 = address;
}

static noinline void
page_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	unsigned long flags;
	int sig;

	if (user_mode(regs)) {
		/*
		 * Implicit kernel access from user mode?  Skip the stack
		 * overflow and EFI special cases.
		 */
		goto oops;
	}

#ifdef CONFIG_VMAP_STACK
	/*
	 * Stack overflow?  During boot, we can fault near the initial
	 * stack in the direct map, but that's not an overflow -- check
	 * that we're in vmalloc space to avoid this.
	 */
	if (is_vmalloc_addr((void *)address) &&
	    (((unsigned long)current->stack - 1 - address < PAGE_SIZE) ||
	     address - ((unsigned long)current->stack + THREAD_SIZE) < PAGE_SIZE)) {
		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
		/*
		 * We're likely to be running with very little stack space
		 * left.  It's plausible that we'd hit this condition but
		 * double-fault even before we get this far, in which case
		 * we're fine: the double-fault handler will deal with it.
		 *
		 * We don't want to make it all the way into the oops code
		 * and then double-fault, though, because we're likely to
		 * break the console driver and lose most of the stack dump.
		 */
		asm volatile ("movq %[stack], %%rsp\n\t"
			      "call handle_stack_overflow\n\t"
			      "1: jmp 1b"
			      : ASM_CALL_CONSTRAINT
			      : "D" ("kernel stack overflow (page fault)"),
				"S" (regs), "d" (address),
				[stack] "rm" (stack));
		unreachable();
	}
#endif

	/*
	 * Buggy firmware could access regions which might page fault.  If
	 * this happens, EFI has a special OOPS path that will try to
	 * avoid hanging the system.
	 */
	if (IS_ENABLED(CONFIG_EFI))
		efi_crash_gracefully_on_page_fault(address);

	/* Only not-present faults should be handled by KFENCE. */
	if (!(error_code & X86_PF_PROT) &&
	    kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs))
		return;

oops:
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(current))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

static noinline void
kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
			 unsigned long address, int signal, int si_code)
{
	WARN_ON_ONCE(user_mode(regs));

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to a faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current->thread.sig_on_uaccess_err && signal) {
			sanitize_error_code(address, &error_code);

			set_signal_archinfo(address, error_code);

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_fault(signal, si_code, (void __user *)address);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

	/*
	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
	 * instruction.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	page_fault_oops(regs, error_code, address);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;

	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
		loglvl, tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");

	show_opcodes(regs, loglvl);
}

/*
 * The (legacy) vsyscall page is the long page in the kernel portion
 * of the address space that has user-accessible permissions.
 */
static bool is_vsyscall_vaddr(unsigned long vaddr)
{
	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, u32 pkey, int si_code)
{
	struct task_struct *tsk = current;

	if (!user_mode(regs)) {
		kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
		return;
	}

	if (!(error_code & X86_PF_USER)) {
		/* Implicit user access to kernel memory -- just oops */
		page_fault_oops(regs, error_code, address);
		return;
	}

	/*
	 * User mode accesses just cause a SIGSEGV.
	 * It's possible to have interrupts off here:
	 */
	local_irq_enable();

	/*
	 * Valid to do another page fault here because this one came
	 * from user space:
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata100(regs, address))
		return;

	sanitize_error_code(address, &error_code);

	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
		return;

	if (likely(show_unhandled_signals))
		show_signal_msg(regs, error_code, address, tsk);

	set_signal_archinfo(address, error_code);

	if (si_code == SEGV_PKUERR)
		force_sig_pkuerr((void __user *)address, pkey);

	force_sig_fault(SIGSEGV, si_code, (void __user *)address);

	local_irq_disable();
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, u32 pkey, int si_code)
{
	struct mm_struct *mm = current->mm;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	mmap_read_unlock(mm);

	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	/* This code is always called on the current mm */
	bool foreign = false;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return false;
	if (error_code & X86_PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return true;
	return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma)) {
		/*
		 * A protection key fault means that the PKRU value did not allow
		 * access to some PTE.  Userspace can figure out what PKRU was
		 * from the XSAVE state.  This function captures the pkey from
		 * the vma and passes it to userspace so userspace can discover
		 * which protection key was set on the PTE.
		 *
		 * If we get here, we know that the hardware signaled a X86_PF_PK
		 * fault and that there was a VMA once we got in the fault
		 * handler.  It does *not* guarantee that the VMA we find here
		 * was the one that we faulted on.
		 *
		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
		 * 3. T1   : faults...
		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
		 *	     faulted on a pte with its pkey=4.
		 */
		u32 pkey = vma_pkey(vma);

		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
	} else {
		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
	}
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  vm_fault_t fault)
{
	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs)) {
		kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	sanitize_error_code(address, &error_code);

	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
		return;

	set_signal_archinfo(address, error_code);

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		struct task_struct *tsk = current;
		unsigned lsb = 0;

		pr_err(
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;
		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return;
	}
#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}

static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permission than the page table entry.  Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
static noinline int
spurious_kernel_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/*
	 * Only writes to RO or instruction fetches from NX may cause
	 * spurious faults.
	 *
	 * These could be from user or supervisor accesses but the TLB
	 * is only lazily flushed after a kernel mapping protection
	 * change, so user accesses are not expected to cause spurious
	 * faults.
	 */
	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
	    error_code != (X86_PF_INSTR | X86_PF_PROT))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;

	if (p4d_large(*p4d))
		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_kernel_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_kernel_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
NOKPROBE_SYMBOL(spurious_kernel_fault);
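
/*
 * Example of the situation handled above: a kernel mapping is changed from
 * read-only to read-write and, as described in the comment before
 * spurious_kernel_fault(), the expensive cross-CPU TLB flush is skipped.  A
 * CPU still caching the old read-only translation then takes a #PF with
 * error_code == (X86_PF_WRITE | X86_PF_PROT); the walk above sees that the
 * page tables already permit the write, so the fault is simply dismissed
 * and the access is retried with a fresh TLB entry.
 */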

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	/* This is only called for the current mm, so: */
	bool foreign = false;

	/*
	 * Read or write was blocked by protection keys.  This is
	 * always an unconditional error and can never result in
	 * a follow-up action to resolve the fault, like a COW.
	 */
	if (error_code & X86_PF_PK)
		return 1;

	/*
	 * SGX hardware blocked the access.  This usually happens
	 * when the enclave memory contents have been destroyed, like
	 * after a suspend/resume cycle. In any case, the kernel can't
	 * fix the cause of the fault.  Handle the fault as an access
	 * error even in cases where no actual access violation
	 * occurred.  This allows userspace to rebuild the enclave in
	 * response to the signal.
	 */
	if (unlikely(error_code & X86_PF_SGX))
		return 1;

	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a X86_PF_PK as soon as we fill in a
	 * page.
	 */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return 1;

	if (error_code & X86_PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & X86_PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!vma_is_accessible(vma)))
		return 1;

	return 0;
}

bool fault_in_kernel_space(unsigned long address)
{
	/*
	 * On 64-bit systems, the vsyscall page is at an address above
	 * TASK_SIZE_MAX, but is not considered part of the kernel
	 * address space.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
		return false;

	return address >= TASK_SIZE_MAX;
}

/*
 * Called for all faults where 'address' is part of the kernel address
 * space.  Might get called for faults that originate from *code* that
 * ran in userspace or the kernel.
 */
static void
do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
		   unsigned long address)
{
	/*
	 * Protection keys exceptions only happen on user pages.  We
	 * have no user pages in the kernel portion of the address
	 * space, so do not expect them here.
	 */
	WARN_ON_ONCE(hw_error_code & X86_PF_PK);

#ifdef CONFIG_X86_32
	/*
	 * We can fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * Before doing this on-demand faulting, ensure that the
	 * fault is not any of the following:
	 * 1. A fault on a PTE with a reserved bit set.
	 * 2. A fault caused by a user-mode access.  (Do not demand-
	 *    fault kernel memory due to user-mode accesses).
	 * 3. A fault caused by a page-level protection violation.
	 *    (A demand fault would be on a non-present page which
	 *     would have X86_PF_PROT==0).
	 *
	 * This is only needed to close a race condition on x86-32 in
	 * the vmalloc mapping/unmapping code. See the comment above
	 * vmalloc_fault() for details. On x86-64 the race does not
	 * exist as the vmalloc mappings don't need to be synchronized
	 * there.
	 */
	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
		if (vmalloc_fault(address) >= 0)
			return;
	}
#endif

	if (is_f00f_bug(regs, hw_error_code, address))
		return;

	/* Was the fault spurious, caused by lazy TLB invalidation? */
	if (spurious_kernel_fault(hw_error_code, address))
		return;

	/* kprobes don't want to hook the spurious faults: */
	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
		return;

	/*
	 * Note, despite being a "bad area", there are quite a few
	 * acceptable reasons to get here, such as erratum fixups
	 * and handling kernel code that can fault, like get_user().
	 *
	 * Don't take the mm semaphore here. If we fixup a prefetch
	 * fault we could otherwise deadlock:
	 */
	bad_area_nosemaphore(regs, hw_error_code, address);
}
NOKPROBE_SYMBOL(do_kern_addr_fault);

/*
 * Handle faults in the user portion of the address space.  Nothing in here
 * should check X86_PF_USER without a specific justification: for almost
 * all purposes, we should treat a normal kernel access to user memory
 * (e.g. get_user(), put_user(), etc.) the same as the WRUSS instruction.
 * The one exception is AC flag handling, which is, per the x86
 * architecture, special for WRUSS.
 */
static inline
void do_user_addr_fault(struct pt_regs *regs,
			unsigned long error_code,
			unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;
	mm = tsk->mm;

	if (unlikely((error_code & (X86_PF_USER | X86_PF_INSTR)) == X86_PF_INSTR)) {
		/*
		 * Whoops, this is kernel mode code trying to execute from
		 * user memory.  Unless this is AMD erratum #93, which
		 * corrupts RIP such that it looks like a user address,
		 * this is unrecoverable.  Don't even try to look up the
		 * VMA or look for extable entries.
		 */
		if (is_errata93(regs, address))
			return;

		page_fault_oops(regs, error_code, address);
		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
		return;

	/*
	 * Reserved bits are never expected to be set on
	 * entries in the user portion of the page tables.
	 */
	if (unlikely(error_code & X86_PF_RSVD))
		pgtable_bad(regs, error_code, address);

	/*
	 * If SMAP is on, check for invalid kernel (supervisor) access to user
	 * pages in the user address space.  The odd case here is WRUSS,
	 * which, according to the preliminary documentation, does not respect
	 * SMAP and will have the USER bit set so, in all cases, SMAP
	 * enforcement appears to be consistent with the USER bit.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
		     !(error_code & X86_PF_USER) &&
		     !(regs->flags & X86_EFLAGS_AC))) {
		/*
		 * No extable entry here.  This was a kernel access to an
		 * invalid pointer.  get_kernel_nofault() will not get here.
		 */
		page_fault_oops(regs, error_code, address);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode(regs)) {
		local_irq_enable();
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & X86_PF_WRITE)
		flags |= FAULT_FLAG_WRITE;
	if (error_code & X86_PF_INSTR)
		flags |= FAULT_FLAG_INSTRUCTION;

#ifdef CONFIG_X86_64
	/*
	 * Faults in the vsyscall page might need emulation.  The
	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
	 * considered to be part of the user address space.
	 *
	 * The vsyscall page does not have a "real" VMA, so do this
	 * emulation before we go searching for VMAs.
	 *
	 * PKRU never rejects instruction fetches, so we don't need
	 * to consider the PF_PK bit.
	 */
	if (is_vsyscall_vaddr(address)) {
		if (emulate_vsyscall(error_code, regs, address))
			return;
	}
#endif

	/*
	 * Kernel-mode access to the user address space should only occur
	 * on well-defined single instructions listed in the exception
	 * tables.  But, an erroneous kernel fault occurring outside one of
	 * those areas which also holds mmap_lock might deadlock attempting
	 * to validate the fault against the address space.
	 *
	 * Only do the expensive exception table search when we might be at
	 * risk of a deadlock.  This happens if we
	 * 1. Failed to acquire mmap_lock, and
	 * 2. The access did not originate in userspace.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
			/*
			 * Fault from code in kernel from
			 * which we do not expect faults.
			 */
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address, vma);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
	 *
	 * Note that handle_userfault() may also release and reacquire mmap_lock
	 * (and not return with VM_FAULT_RETRY), when returning to userland to
	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
	 * (potentially after handling any pending signal during the return to
	 * userland). The return to userland is identified whenever
	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		/*
		 * Quick path to respond to signals.  The core mm code
		 * has unlocked the mm for us if we get here.
		 */
		if (!user_mode(regs))
			kernelmode_fixup_or_oops(regs, error_code, address,
						 SIGBUS, BUS_ADRERR);
		return;
	}

	/*
	 * If we need to retry the mmap_lock has already been released,
	 * and if there is a fatal signal pending there is no guarantee
	 * that we made any progress. Handle this case first.
	 */
	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(mm);
	if (likely(!(fault & VM_FAULT_ERROR)))
		return;

	if (fatal_signal_pending(current) && !user_mode(regs)) {
		kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			kernelmode_fixup_or_oops(regs, error_code, address,
						 SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return the
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address);
		else
			BUG();
	}
}
NOKPROBE_SYMBOL(do_user_addr_fault);
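
/*
 * Summary of the locking protocol used above: mmap_read_trylock() first,
 * falling back to an exception-table check for kernel-mode faults before
 * blocking on mmap_read_lock(); then handle_mm_fault() runs under the lock.
 * A VM_FAULT_RETRY result means the core VM already dropped mmap_lock, so
 * the handler sets FAULT_FLAG_TRIED and repeats the VMA lookup, and
 * fault_signal_pending() is checked first because in that case the lock has
 * been released as well.
 */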

static __always_inline void
trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
			 unsigned long address)
{
	if (!trace_pagefault_enabled())
		return;

	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}

static __always_inline void
handle_page_fault(struct pt_regs *regs, unsigned long error_code,
			      unsigned long address)
{
	trace_page_fault_entries(regs, error_code, address);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/* Was the fault on kernel-controlled part of the address space? */
	if (unlikely(fault_in_kernel_space(address))) {
		do_kern_addr_fault(regs, error_code, address);
	} else {
		do_user_addr_fault(regs, error_code, address);
		/*
		 * User address page fault handling might have reenabled
		 * interrupts. Fixing up all potential exit points of
		 * do_user_addr_fault() and its leaf functions is just not
		 * doable w/o creating an unholy mess or turning the code
		 * upside down.
		 */
		local_irq_disable();
	}
}
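
/*
 * Rough flow of the entry point below: exc_page_fault() reads CR2 before
 * doing anything that could itself fault (a nested #PF would overwrite it),
 * optionally hands the event to the KVM async-#PF code, and only then runs
 * handle_page_fault() above between irqentry_enter() and irqentry_exit().
 */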

DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
{
	unsigned long address = read_cr2();
	irqentry_state_t state;

	prefetchw(&current->mm->mmap_lock);

	/*
	 * KVM uses #PF vector to deliver 'page not present' events to guests
	 * (asynchronous page fault mechanism). The event happens when a
	 * userspace task is trying to access some valid (from guest's point of
	 * view) memory which is not currently mapped by the host (e.g. the
	 * memory is swapped out). Note, the corresponding "page ready" event
	 * which is injected when the memory becomes available, is delivered via
	 * an interrupt mechanism and not a #PF exception
	 * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
	 *
	 * We are relying on the interrupted context being sane (valid RSP,
	 * relevant locks not held, etc.), which is fine as long as the
	 * interrupted context had IF=1.  We are also relying on the KVM
	 * async pf type field and CR2 being read consistently instead of
	 * getting values from real and async page faults mixed up.
	 *
	 * Fingers crossed.
	 *
	 * The async #PF handling code takes care of idtentry handling
	 * itself.
	 */
	if (kvm_handle_async_pf(regs, (u32)address))
		return;

	/*
	 * Entry handling for valid #PF from kernel mode is slightly
	 * different: RCU is already watching and rcu_irq_enter() must not
	 * be invoked because a kernel fault on a user space address might
	 * sleep.
	 *
	 * In case the fault hit a RCU idle region the conditional entry
	 * code reenabled RCU to avoid subsequent wreckage which helps
	 * debuggability.
	 */
	state = irqentry_enter(regs);

	instrumentation_begin();
	handle_page_fault(regs, error_code, address);
	instrumentation_end();

	irqentry_exit(regs, state);
}