/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>

#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

struct fault_info {
	int	(*fn)(unsigned long addr, unsigned int esr,
		      struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

static const struct fault_info fault_info[];

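/*
 * The low six bits of the ESR hold the fault status code (DFSC/IFSC),
 * which indexes the fault_info[] table below.
 */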
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
	return fault_info + (esr & 63);
}

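/*
 * Give any registered kprobe fault handler a chance to claim the fault
 * first, e.g. one raised while single-stepping a probed instruction.
 */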
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, esr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
	return 0;
}
#endif

/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
void show_pte(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgd;

	if (addr < TASK_SIZE) {
		/* TTBR0 */
		mm = current->active_mm;
		if (mm == &init_mm) {
			pr_alert("[%016lx] user address but active_mm is swapper\n",
				 addr);
			return;
		}
	} else if (addr >= VA_START) {
		/* TTBR1 */
		mm = &init_mm;
	} else {
		pr_alert("[%016lx] address between user and kernel address ranges\n",
			 addr);
		return;
	}

	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgd = %p\n",
		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
		 VA_BITS, mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		pr_cont(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		pr_cont(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		pr_cont(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while (0);

	pr_cont("\n");
}

#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	pteval_t old_pteval;
	unsigned int tmp;

	if (pte_same(*ptep, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * PTE_RDONLY is cleared by default in the asm below, so set it back
	 * if necessary (read-only or clean PTE).
	 */
	if (!pte_write(entry) || !pte_sw_dirty(entry))
		pte_val(entry) |= PTE_RDONLY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state.
	 */
	asm volatile("//	ptep_set_access_flags\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_RDONLY\n"
	"	orr	%0, %0, %4		// set flags\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "L" (~PTE_RDONLY), "r" (pte_val(entry)));

	flush_tlb_fix_spurious_fault(vma, address);
	return 1;
}
#endif

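/* Instruction abort taken without a change in Exception level, i.e. at EL1. */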
static bool is_el1_instruction_abort(unsigned int esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
				       unsigned long addr)
{
	unsigned int ec       = ESR_ELx_EC(esr);
	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;

	if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
		return false;

	if (fsc_type == ESR_ELx_FSC_PERM)
		return true;

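	/*
	 * With the software TTBR0 PAN emulation, an unintended kernel access
	 * to user memory goes via the reserved (empty) TTBR0 tables and so
	 * surfaces as a translation fault with the saved PSTATE.PAN bit set.
	 */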
	if (addr < USER_DS && system_uses_ttbr0_pan())
		return fsc_type == ESR_ELx_FSC_FAULT &&
			(regs->pstate & PSR_PAN_BIT);

	return false;
}

/*
 * The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	const char *msg;

	/*
	 * Are we prepared to handle this kernel fault?
	 * We are almost certainly not prepared to handle instruction faults.
	 */
	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (is_permission_fault(esr, regs, addr)) {
		if (esr & ESR_ELx_WNR)
			msg = "write to read-only memory";
		else
			msg = "read from unreadable memory";
	} else if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		msg = "paging request";
	}

	pr_alert("Unable to handle kernel %s at virtual address %08lx\n", msg,
		 addr);

	show_pte(addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map. User mode
 * accesses just cause a SIGSEGV
 */
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
			    unsigned int esr, unsigned int sig, int code,
			    struct pt_regs *regs)
{
	struct siginfo si;
	const struct fault_info *inf;

	if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
		inf = esr_to_fault_info(esr);
		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x",
			tsk->comm, task_pid_nr(tsk), inf->name, sig,
			addr, esr);
		print_vma_addr(KERN_CONT ", in ", regs->pc);
		pr_cont("\n");
		__show_regs(regs);
	}

	tsk->thread.fault_address = addr;
	tsk->thread.fault_code = esr;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}

static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	const struct fault_info *inf;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs)) {
		inf = esr_to_fault_info(esr);
		__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
	} else
		__do_kernel_fault(addr, esr, regs);
}

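/*
 * arm64-private fault codes; they lie outside the generic VM_FAULT_* flag
 * space so they can be returned alongside it from __do_page_fault().
 */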
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

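/*
 * Returns VM_FAULT_BADMAP if no VMA covers the address, VM_FAULT_BADACCESS
 * if the VMA permissions don't allow the access, otherwise the result of
 * handle_mm_fault().
 */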
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
			   unsigned int mm_flags, unsigned long vm_flags,
			   struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
good_area:
	/*
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred.
	 */
	if (!(vma->vm_flags & vm_flags)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

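/* Instruction abort taken from a lower Exception level, i.e. from EL0. */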
static bool is_el0_instruction_abort(unsigned int esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}

static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned long vm_flags = VM_READ | VM_WRITE;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (notify_page_fault(regs, esr))
		return 0;

	tsk = current;
	mm  = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (is_el0_instruction_abort(esr)) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
		if (regs->orig_addr_limit == KERNEL_DS)
			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);

		if (is_el1_instruction_abort(esr))
			die("Attempting to execute userspace memory", regs, esr);

		if (!search_exception_tables(regs->pc))
			die("Accessing user space memory outside uaccess.h routines", regs, esr);
	}

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, esr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(addr, esr, regs);
	return 0;
}

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain a valid
 * entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are probably
 * faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant entry, we
 * copy it to this task.  If not, we send the process a signal, fix up the
 * exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
 * or a critical region, and should only copy the information from the master
 * page table, nothing more.
 */
static int __kprobes do_translation_fault(unsigned long addr,
					  unsigned int esr,
					  struct pt_regs *regs)
{
	if (addr < TASK_SIZE)
		return do_page_fault(addr, esr, regs);

	do_bad_area(addr, esr, regs);
	return 0;
}

static int do_alignment_fault(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	do_bad_area(addr, esr, regs);
	return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	return 1;
}

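/*
 * Synchronous abort handlers, indexed by the six-bit fault status code
 * (see esr_to_fault_info()).
 */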
static const struct fault_info fault_info[] = {
	{ do_bad,		SIGBUS,  0,		"ttbr address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 1 address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 2 address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 3 address size fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
	{ do_bad,		SIGBUS,  0,		"unknown 8"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
	{ do_bad,		SIGBUS,  0,		"unknown 12"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
	{ do_bad,		SIGBUS,  0,		"unknown 17"			},
	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
	{ do_bad,		SIGBUS,  0,		"unknown 25"			},
	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
	{ do_bad,		SIGBUS,  0,		"unknown 27"			},
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
	{ do_alignment_fault,	SIGBUS,  BUS_ADRALN,	"alignment fault"		},
	{ do_bad,		SIGBUS,  0,		"unknown 34"			},
	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
	{ do_bad,		SIGBUS,  0,		"unknown 37"			},
	{ do_bad,		SIGBUS,  0,		"unknown 38"			},
	{ do_bad,		SIGBUS,  0,		"unknown 39"			},
	{ do_bad,		SIGBUS,  0,		"unknown 40"			},
	{ do_bad,		SIGBUS,  0,		"unknown 41"			},
	{ do_bad,		SIGBUS,  0,		"unknown 42"			},
	{ do_bad,		SIGBUS,  0,		"unknown 43"			},
	{ do_bad,		SIGBUS,  0,		"unknown 44"			},
	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
	{ do_bad,		SIGBUS,  0,		"TLB conflict abort"		},
	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  0,		"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGBUS,  0,		"unknown 54"			},
	{ do_bad,		SIGBUS,  0,		"unknown 55"			},
	{ do_bad,		SIGBUS,  0,		"unknown 56"			},
	{ do_bad,		SIGBUS,  0,		"unknown 57"			},
	{ do_bad,		SIGBUS,  0,		"unknown 58"			},
	{ do_bad,		SIGBUS,  0,		"unknown 59"			},
	{ do_bad,		SIGBUS,  0,		"unknown 60"			},
	{ do_bad,		SIGBUS,  0,		"section domain fault"		},
	{ do_bad,		SIGBUS,  0,		"page domain fault"		},
	{ do_bad,		SIGBUS,  0,		"unknown 63"			},
};

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
					 struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_fault_info(esr);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return;

	pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);
}

/*
 * Handle stack alignment exceptions.
 */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	struct siginfo info;
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
				    tsk->comm, task_pid_nr(tsk),
				    esr_get_class_string(esr), (void *)regs->pc,
				    (void *)regs->sp);

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code  = BUS_ADRALN;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}

int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
};

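/*
 * Allows the debug exception code (e.g. debug-monitors.c) to install a
 * handler for one of the entries above.
 */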
void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn		= fn;
	debug_fault_info[nr].sig	= sig;
	debug_fault_info[nr].code	= code;
	debug_fault_info[nr].name	= name;
}

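/*
 * Debug exceptions are demultiplexed by the event type in the ESR
 * (DBG_ESR_EVT), which indexes debug_fault_info[].
 */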
asmlinkage int __exception do_debug_exception(unsigned long addr,
					      unsigned int esr,
					      struct pt_regs *regs)
{
	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
	struct siginfo info;
	int rv;

	/*
	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
	 * already disabled to preserve the last enabled/disabled addresses.
	 */
	if (interrupts_enabled(regs))
		trace_hardirqs_off();

	if (!inf->fn(addr, esr, regs)) {
		rv = 1;
	} else {
		pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
			 inf->name, esr, addr);

		info.si_signo = inf->sig;
		info.si_errno = 0;
		info.si_code  = inf->code;
		info.si_addr  = (void __user *)addr;
		arm64_notify_die("", regs, &info, 0);
		rv = 0;
	}

	if (interrupts_enabled(regs))
		trace_hardirqs_on();

	return rv;
}
NOKPROBE_SYMBOL(do_debug_exception);

#ifdef CONFIG_ARM64_PAN
int cpu_enable_pan(void *__unused)
{
	/*
	 * We modify PSTATE. This won't work from irq context as the PSTATE
	 * is discarded once we return from the exception.
	 */
	WARN_ON_ONCE(in_interrupt());

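	/*
	 * Clearing SCTLR_EL1.SPAN makes the CPU set PSTATE.PAN on every
	 * exception entry; the SET_PSTATE_PAN(1) below also enables it for
	 * the current context.
	 */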
	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
	asm(SET_PSTATE_PAN(1));
	return 0;
}
#endif /* CONFIG_ARM64_PAN */