traps.c 61.2 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
6
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
L
Linus Torvalds 已提交
7 8 9 10
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11
 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
12
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
13
 * Copyright (C) 2014, Imagination Technologies Ltd.
L
Linus Torvalds 已提交
14
 */
15
#include <linux/bitops.h>
16
#include <linux/bug.h>
17
#include <linux/compiler.h>
18
#include <linux/context_tracking.h>
19
#include <linux/cpu_pm.h>
R
Ralf Baechle 已提交
20
#include <linux/kexec.h>
L
Linus Torvalds 已提交
21
#include <linux/init.h>
22
#include <linux/kernel.h>
23
#include <linux/module.h>
24
#include <linux/extable.h>
L
Linus Torvalds 已提交
25 26 27 28 29
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
30
#include <linux/bootmem.h>
31
#include <linux/interrupt.h>
32
#include <linux/ptrace.h>
33 34
#include <linux/kgdb.h>
#include <linux/kdebug.h>
D
David Daney 已提交
35
#include <linux/kprobes.h>
R
Ralf Baechle 已提交
36
#include <linux/notifier.h>
37
#include <linux/kdb.h>
38
#include <linux/irq.h>
39
#include <linux/perf_event.h>
L
Linus Torvalds 已提交
40

41
#include <asm/addrspace.h>
L
Linus Torvalds 已提交
42 43 44
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
R
Ralf Baechle 已提交
45
#include <asm/cop2.h>
L
Linus Torvalds 已提交
46
#include <asm/cpu.h>
47
#include <asm/cpu-type.h>
48
#include <asm/dsp.h>
L
Linus Torvalds 已提交
49
#include <asm/fpu.h>
50
#include <asm/fpu_emulator.h>
51
#include <asm/idle.h>
52
#include <asm/mips-cm.h>
53
#include <asm/mips-r2-to-r6-emul.h>
54
#include <asm/mips-cm.h>
R
Ralf Baechle 已提交
55 56
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
L
Linus Torvalds 已提交
57
#include <asm/module.h>
58
#include <asm/msa.h>
L
Linus Torvalds 已提交
59 60 61
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
62
#include <asm/siginfo.h>
L
Linus Torvalds 已提交
63 64
#include <asm/tlbdebug.h>
#include <asm/traps.h>
65
#include <linux/uaccess.h>
66
#include <asm/watch.h>
L
Linus Torvalds 已提交
67 68
#include <asm/mmu_context.h>
#include <asm/types.h>
69
#include <asm/stacktrace.h>
70
#include <asm/uasm.h>
L
Linus Torvalds 已提交
71

72 73
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
74
extern asmlinkage void handle_int(void);
75 76 77
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
L
Linus Torvalds 已提交
78 79 80 81 82 83 84
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
85 86
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
L
Linus Torvalds 已提交
87 88 89
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
90
extern asmlinkage void handle_msa_fpe(void);
L
Linus Torvalds 已提交
91
extern asmlinkage void handle_fpe(void);
L
Leonid Yegoshin 已提交
92
extern asmlinkage void handle_ftlb(void);
93
extern asmlinkage void handle_msa(void);
L
Linus Torvalds 已提交
94 95
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
R
Ralf Baechle 已提交
96
extern asmlinkage void handle_mt(void);
97
extern asmlinkage void handle_dsp(void);
L
Linus Torvalds 已提交
98 99
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
100
extern void tlb_do_page_fault_0(void);
L
Linus Torvalds 已提交
101 102 103

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
104 105 106
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
K
Kevin Cernekee 已提交
107
void (*board_ebase_setup)(void);
108
void(*board_cache_error_setup)(void);
L
Linus Torvalds 已提交
109

F
Franck Bui-Huu 已提交
110
/*
 * Dump a raw (non-unwound) call trace by scanning the kernel stack
 * upward from @reg29 (the saved $29/sp), printing every word that
 * looks like a kernel text address.  Fallback used when the DWARF-less
 * unwinder cannot be trusted (see show_backtrace()).
 */
static void show_raw_backtrace(unsigned long reg29)
{
	/* Word-align the stack pointer before scanning. */
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	/* print_ip_sym() emits one line per symbol; start on a fresh line. */
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		/* __get_user: caller may have switched to KERNEL_DS. */
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		/* Only print words that point into kernel text. */
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

132
#ifdef CONFIG_KALLSYMS
133
/* Non-zero forces show_backtrace() to use the raw stack scan. */
int raw_show_trace;

/* Early-param handler for the "raw_show_trace" kernel command line flag. */
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
140
#endif
F
Franck Bui-Huu 已提交
141

R
Ralf Baechle 已提交
142
/*
 * Print a call trace for @task starting from the state in @regs.
 * Uses the frame unwinder when possible; falls back to the raw stack
 * scan when forced via "raw_show_trace", when @regs is a user-mode
 * frame, or when the PC is not in kernel text.
 */
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];	/* stack pointer */
	unsigned long ra = regs->regs[31];	/* return address */
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		/* unwind_stack() returns 0 when it runs out of frames. */
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	pr_cont("\n");
}

L
Linus Torvalds 已提交
163 164 165 166
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
R
Ralf Baechle 已提交
167 168
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 *
 * Hex-dumps up to 40 words of the stack (stopping at a page boundary
 * or a faulting address), then prints the unwound call trace.
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);	/* hex digits per word */
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	/* Stop at a page boundary; the stack cannot validly cross it here. */
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("       ");
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs);
}

/*
 * Arch implementation of show_stack(): build a minimal pt_regs frame
 * describing where to start the dump — from @sp if given, from the
 * saved thread state of a non-current @task, from KDB's saved regs
 * when stopped in the debugger, or from the current frame otherwise.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			/* Sleeping task: use the context saved at switch. */
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to access
	 * the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}

230
/*
 * Dump the instruction words around @pc (3 before through 5 after),
 * marking the faulting one with '<...>'.  A set low bit in @pc marks
 * microMIPS/MIPS16 mode, in which case 16-bit units are dumped instead.
 */
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("Code:");

	/* Low bit of EPC set => compressed ISA: dump halfwords. */
	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			pr_cont(" (Bad address in epc)\n");
			break;
		}
		/* Width 4 for 16-bit units, 8 for 32-bit instructions. */
		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
	pr_cont("\n");
}

R
Ralf Baechle 已提交
250
/*
 * Print the full register state from @regs: the 32 GPRs, HI/LO
 * (and ACX on SmartMIPS), EPC/RA with symbol names, decoded Status
 * bits for the R3000- or R4000-style exception model, the Cause
 * register with its ExcCode, BadVAddr for address-related exceptions
 * and the processor ID.
 */
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			/* $0 is hardwired to zero and not saved. */
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			/* k0/k1 are kernel scratch; values meaningless. */
			pr_cont(" %*s", field, "");
		else
			pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		/* R3000-style status: three-deep KU/IE stack. */
		if (regs->cp0_status & ST0_KUO)
			pr_cont("KUo ");
		if (regs->cp0_status & ST0_IEO)
			pr_cont("IEo ");
		if (regs->cp0_status & ST0_KUP)
			pr_cont("KUp ");
		if (regs->cp0_status & ST0_IEP)
			pr_cont("IEp ");
		if (regs->cp0_status & ST0_KUC)
			pr_cont("KUc ");
		if (regs->cp0_status & ST0_IEC)
			pr_cont("IEc ");
	} else if (cpu_has_4kex) {
		/* R4000-style status: address-space enables + mode field. */
		if (regs->cp0_status & ST0_KX)
			pr_cont("KX ");
		if (regs->cp0_status & ST0_SX)
			pr_cont("SX ");
		if (regs->cp0_status & ST0_UX)
			pr_cont("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			pr_cont("USER ");
			break;
		case KSU_SUPERVISOR:
			pr_cont("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			pr_cont("KERNEL ");
			break;
		default:
			pr_cont("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			pr_cont("ERL ");
		if (regs->cp0_status & ST0_EXL)
			pr_cont("EXL ");
		if (regs->cp0_status & ST0_IE)
			pr_cont("IE ");
	}
	pr_cont("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	/* ExcCodes 1-5 (TLB/address errors) latch a valid BadVAddr. */
	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

R
Ralf Baechle 已提交
346 347 348 349 350 351 352 353
/*
 * Generic show_regs() entry point.  The generic prototype takes a
 * non-const pt_regs pointer; our internal helper only reads from it,
 * so the implicit conversion to const suffices — no cast needed.
 * (The removed `(struct pt_regs *)` cast was an identity cast and
 * had no effect.)
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
}

D
David Daney 已提交
354
/*
 * Full oops-style dump: register state, loaded modules, the current
 * process identity and TLS, the stack trace and the code bytes around
 * EPC.  Called with the register frame of the faulting context.
 */
void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	      field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		/* Flag a mismatch between the saved and hardware TLS. */
		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

W
Wu Zhangjin 已提交
381
static DEFINE_RAW_SPINLOCK(die_lock);
L
Linus Torvalds 已提交
382

D
David Daney 已提交
383
/*
 * Terminal oops path: announce @str, dump registers, taint the kernel
 * and kill the current task (or panic when in interrupt context or
 * panic_on_oops is set).  Never returns.  A NOTIFY_STOP from the die
 * chain downgrades the exit signal from SIGSEGV to 0.
 */
void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	/* Serialize oops output between CPUs. */
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	/* Hand over to the crash kernel if one is loaded. */
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

417 418
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];
L
Linus Torvalds 已提交
419

420 421 422
__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");
L
Linus Torvalds 已提交
423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440

/*
 * Given an address, look for it in the bus-error exception tables:
 * first the kernel's __dbe_table section, then any module tables.
 * Returns the matching fixup entry or NULL.
 */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

/*
 * Bus error exception handler.  Tries, in order: a __dbe_table fixup
 * for kernel-mode data accesses, a board-specific handler, and the
 * Coherence Manager error report; otherwise treats the error as fatal
 * (oops in kernel mode, SIGBUS in user mode).
 */
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	/* Cause bit 2 distinguishes data (DBE) from instruction (IBE). */
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	/* The board handler may override the default action. */
	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			/* Resume at the fixup's continuation address. */
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

/*
487
 * ll/sc, rdhwr, sync emulation
L
Linus Torvalds 已提交
488 489 490 491 492 493 494 495
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
496
#define SPEC0  0x00000000
R
Ralf Baechle 已提交
497 498 499
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
500
#define SYNC   0x0000000f
R
Ralf Baechle 已提交
501
#define RDHWR  0x0000003b
L
Linus Torvalds 已提交
502

503 504 505 506 507 508
/*  microMIPS definitions   */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

L
Linus Torvalds 已提交
509 510 511 512
/*
 * The ll_bit is cleared by r*_switch.S
 */

513 514
unsigned int ll_bit;
struct task_struct *ll_task;
L
Linus Torvalds 已提交
515

516
/*
 * Emulate an LL (load-linked) instruction that trapped as a reserved
 * instruction on an ll/sc-less CPU.  Records the link in ll_bit/ll_task
 * for the matching SC and writes the loaded value into rt.
 * Returns 0 on success, or a signal number (SIGBUS/SIGSEGV) to deliver.
 */
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	/* LL requires a naturally aligned word address. */
	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	/* The link is only valid if no other task has intervened. */
	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

555
/*
 * Emulate an SC (store-conditional) instruction that trapped as a
 * reserved instruction.  Succeeds (stores and sets rt=1) only when the
 * link established by simulate_ll() is still held by this task;
 * otherwise sets rt=0 without storing.  Returns 0 on completion, or a
 * signal number (SIGBUS/SIGSEGV) to deliver.
 */
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	/* SC requires a naturally aligned word address. */
	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	/* Link broken (or never taken): report failure in rt. */
	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
603
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
L
Linus Torvalds 已提交
604
{
605 606
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
607
				1, regs, 0);
608
		return simulate_ll(regs, opcode);
609 610 611
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
612
				1, regs, 0);
613
		return simulate_sc(regs, opcode);
614
	}
L
Linus Torvalds 已提交
615

616
	return -1;			/* Must be something else ... */
L
Linus Torvalds 已提交
617 618
}

R
Ralf Baechle 已提交
619 620
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 *
 * @rd: hardware register number requested; @rt: GPR to receive it.
 * Returns 0 when the register was emulated, -1 for unknown registers.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			/* These CPUs count every cycle. */
			regs->regs[rt] = 1;
			break;
		default:
			/* Common case: Count increments every other cycle. */
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:		/* Read UserLocal register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
R
Ralf Baechle 已提交
660 661 662
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;
663 664 665 666 667 668 669 670 671

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

672
static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
673 674 675 676 677 678
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
R
Ralf Baechle 已提交
679 680
	}

D
Daniel Jacobowitz 已提交
681
	/* Not ours.  */
682 683
	return -1;
}
684

685 686
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
687 688
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
689
				1, regs, 0);
690
		return 0;
691
	}
692 693

	return -1;			/* Must be something else ... */
R
Ralf Baechle 已提交
694 695
}

L
Linus Torvalds 已提交
696 697
/*
 * Integer overflow exception handler: fatal in kernel mode, delivers
 * SIGFPE/FPE_INTOVF with the faulting EPC to user mode.
 */
asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info = {
		.si_signo = SIGFPE,
		.si_code = FPE_INTOVF,
		.si_addr = (void __user *)regs->cp0_epc,
	};

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}

712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737
/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 *
 * The checks run in decreasing priority: Invalid, DivByZero, Overflow,
 * Underflow, Inexact; anything else maps to __SI_FAULT.
 */
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };

	if (fcr31 & FPU_CSR_INV_X)
		si.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		si.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		si.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		si.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		si.si_code = FPE_FLTRES;
	else
		si.si_code = __SI_FAULT;
	force_sig_info(SIGFPE, &si, tsk);
}

738
/*
 * Deliver the signal the FPU emulator reported, filling in siginfo
 * appropriate to the signal type (@fault_addr for SIGBUS/SIGSEGV,
 * @fcr31 Cause bits for SIGFPE).  Returns 1 when a signal was sent,
 * 0 when @sig was 0 (nothing to deliver).
 */
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };
	struct vm_area_struct *vma;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		/* Distinguish ACCERR (mapped, no access) from MAPERR. */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)fault_addr);
		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}

P
Paul Burton 已提交
777 778 779 780
/*
 * Run a trapped FP instruction through the FPU emulator (used from the
 * RI handler on FPU-less configurations).  @old_epc/@old_ra restore the
 * pre-skip state so the emulator sees the instruction in place.
 * Returns 0 when the opcode was an FP instruction (signal, if any,
 * already delivered), -1 when it was not FP at all.
 */
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	/*
	 * We can't allow the emulated instruction to leave any
	 * enabled Cause bits set in $fcr31.
	 */
	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

L
Linus Torvalds 已提交
829 830 831 832 833
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 *
 * Floating-point exception handler.  @fcr31 carries the already-masked
 * FCSR Cause bits.  An Unimplemented Operation exception is handed to
 * the software FPU emulator; everything else becomes SIGFPE.
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' not overwrite saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any
		 * enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */
	} else {
		/* Ordinary enabled FP exception: straight SIGFPE. */
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

888
/*
 * Common handler for trap and break exceptions.  @code is the trap or
 * break code embedded in the instruction, @si_code an optional siginfo
 * code for the default SIGTRAP case, @str names the instruction type
 * for messages.  Special codes: overflow/div-zero become SIGFPE, BUG
 * assertions oops the kernel, and BRK_MEMU hands control back to the
 * FPU emulator's delay-slot frame.
 */
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	siginfo_t info = { 0 };
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			info.si_signo = SIGTRAP;
			info.si_code = si_code;
			force_sig_info(SIGTRAP, &info, current);
		} else {
			force_sig(SIGTRAP, current);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
957
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
958
	unsigned int opcode, bcode;
959
	enum ctx_state prev_state;
960 961 962 963 964
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
965

966
	prev_state = exception_enter();
967
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
968
	if (get_isa16_mode(regs->cp0_epc)) {
969 970 971 972 973 974
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
975
			/* MIPS16e mode */
976
			bcode = (instr[0] >> 5) & 0x3f;
977 978 979 980 981 982
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
983
				goto out_sigsegv;
984 985
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
986 987
		}
	} else {
988
		if (__get_user(opcode, (unsigned int __user *)epc))
989
			goto out_sigsegv;
990
		bcode = (opcode >> 6) & ((1 << 20) - 1);
991
	}
992 993 994 995 996 997 998 999

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */
	if (bcode >= (1 << 10))
1000
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1001

D
David Daney 已提交
1002 1003 1004 1005 1006
	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
R
Ralf Baechle 已提交
1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
D
David Daney 已提交
1019
	case BRK_KPROBE_BP:
1020
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
1021
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1022
			goto out;
D
David Daney 已提交
1023 1024 1025
		else
			break;
	case BRK_KPROBE_SSTEPBP:
1026
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1027
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1028
			goto out;
D
David Daney 已提交
1029 1030 1031 1032 1033 1034
		else
			break;
	default:
		break;
	}

1035
	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1036 1037

out:
1038
	set_fs(seg);
1039
	exception_exit(prev_state);
1040
	return;
1041 1042 1043

out_sigsegv:
	force_sig(SIGSEGV, current);
1044
	goto out;
L
Linus Torvalds 已提交
1045 1046 1047 1048
}

asmlinkage void do_tr(struct pt_regs *regs)
{
1049
	u32 opcode, tcode = 0;
1050
	enum ctx_state prev_state;
1051
	u16 instr[2];
1052
	mm_segment_t seg;
1053
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
L
Linus Torvalds 已提交
1054

1055 1056 1057 1058
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

1059
	prev_state = exception_enter();
1060
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1061 1062 1063
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
1064
			goto out_sigsegv;
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
1075
	}
L
Linus Torvalds 已提交
1076

1077
	do_trap_or_bp(regs, tcode, 0, "Trap");
1078 1079

out:
1080
	set_fs(seg);
1081
	exception_exit(prev_state);
1082
	return;
1083 1084 1085

out_sigsegv:
	force_sig(SIGSEGV, current);
1086
	goto out;
L
Linus Torvalds 已提交
1087 1088 1089 1090
}

asmlinkage void do_ri(struct pt_regs *regs)
{
1091 1092
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
1093
	unsigned long old31 = regs->regs[31];
1094
	enum ctx_state prev_state;
1095 1096
	unsigned int opcode = 0;
	int status = -1;
L
Linus Torvalds 已提交
1097

1098 1099 1100 1101 1102
	/*
	 * Avoid any kernel code. Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
1103 1104
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
1105 1106 1107
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
1108 1109 1110 1111 1112 1113 1114 1115
		switch (status) {
		case 0:
		case SIGEMT:
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
1116 1117
					     &current->thread.cp0_baduaddr,
					     fcr31);
1118
			return;
1119 1120 1121 1122 1123
		}
	}

no_r2_instr:

1124
	prev_state = exception_enter();
1125
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1126

1127
	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1128
		       SIGILL) == NOTIFY_STOP)
1129
		goto out;
1130

1131
	die_if_kernel("Reserved instruction in kernel code", regs);
L
Linus Torvalds 已提交
1132

1133
	if (unlikely(compute_return_epc(regs) < 0))
1134
		goto out;
R
Ralf Baechle 已提交
1135

1136
	if (!get_isa16_mode(regs->cp0_epc)) {
1137 1138
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;
1139

1140 1141 1142 1143 1144 1145 1146 1147
		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);
P
Paul Burton 已提交
1148 1149 1150

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
1163
	}
1164 1165 1166 1167 1168 1169

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1170
		regs->regs[31] = old31;
1171 1172
		force_sig(status, current);
	}
1173 1174 1175

out:
	exception_exit(prev_state);
L
Linus Torvalds 已提交
1176 1177
}

1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			/* Remember the original mask so it can be restored. */
			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

R
Ralf Baechle 已提交
1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
R
Ralf Baechle 已提交
1223
	void *data)
R
Ralf Baechle 已提交
1224 1225 1226
{
	struct pt_regs *regs = data;

1227
	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
R
Ralf Baechle 已提交
1228
			      "instruction", regs);
1229
	force_sig(SIGILL, current);
R
Ralf Baechle 已提交
1230 1231 1232 1233

	return NOTIFY_OK;
}

1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245
/* Callback for wait_on_atomic_t(): yield while an FP mode switch runs. */
static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this tasks FP context which
	 * make it unsafe to proceed with execution for the moment. Instead,
	 * schedule some other task.
	 */
	schedule();
	return 0;
}

1246 1247
static int enable_restore_fp_context(int msa)
{
1248
	int err, was_fpu_owner, prior_msa;
1249

1250 1251 1252 1253 1254 1255 1256
	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

1257 1258
	if (!used_math()) {
		/* First time FP context user. */
1259
		preempt_disable();
1260
		err = init_fpu();
1261
		if (msa && !err) {
1262
			enable_msa();
1263
			init_msa_upper();
1264 1265
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
1266
		}
1267
		preempt_enable();
1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
1307
	preempt_disable();
1308
	was_fpu_owner = is_fpu_owner();
1309
	err = own_fpu_inatomic(0);
1310
	if (err)
1311
		goto out;
1312 1313 1314 1315 1316 1317 1318 1319

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already nave
1320 1321 1322
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
1323
	 */
1324 1325
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
1326
		init_msa_upper();
1327 1328

		goto out;
1329
	}
1330

1331 1332 1333 1334 1335 1336
	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);
1337

1338 1339 1340 1341 1342
		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
1343
		init_msa_upper();
1344 1345 1346
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);
1347

1348 1349
		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
1350 1351
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
1352
	}
1353 1354 1355 1356

out:
	preempt_enable();

1357 1358 1359
	return 0;
}

L
Linus Torvalds 已提交
1360 1361
asmlinkage void do_cpu(struct pt_regs *regs)
{
1362
	enum ctx_state prev_state;
1363
	unsigned int __user *epc;
1364
	unsigned long old_epc, old31;
1365
	void __user *fault_addr;
1366
	unsigned int opcode;
1367
	unsigned long fcr31;
L
Linus Torvalds 已提交
1368
	unsigned int cpid;
1369
	int status, err;
1370
	int sig;
L
Linus Torvalds 已提交
1371

1372
	prev_state = exception_enter();
L
Linus Torvalds 已提交
1373 1374
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

1375 1376 1377
	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

L
Linus Torvalds 已提交
1378 1379
	switch (cpid) {
	case 0:
1380 1381
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
1382
		old31 = regs->regs[31];
1383 1384
		opcode = 0;
		status = -1;
L
Linus Torvalds 已提交
1385

1386
		if (unlikely(compute_return_epc(regs) < 0))
1387
			break;
R
Ralf Baechle 已提交
1388

1389
		if (!get_isa16_mode(regs->cp0_epc)) {
1390 1391 1392 1393 1394 1395
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}
1396 1397 1398 1399 1400 1401

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1402
			regs->regs[31] = old31;
1403 1404 1405
			force_sig(status, current);
		}

1406
		break;
L
Linus Torvalds 已提交
1407

1408 1409
	case 3:
		/*
1410 1411 1412 1413 1414 1415 1416 1417 1418 1419
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
		 * up the space has been reused for COP1X instructions, that
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.  Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
1420
		 */
1421
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1422
			force_sig(SIGILL, current);
1423
			break;
1424
		}
1425 1426
		/* Fall through.  */

L
Linus Torvalds 已提交
1427
	case 1:
1428
		err = enable_restore_fp_context(0);
L
Linus Torvalds 已提交
1429

1430 1431
		if (raw_cpu_has_fpu && !err)
			break;
L
Linus Torvalds 已提交
1432

1433 1434 1435 1436 1437
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave
1438
		 * any enabled Cause bits set in $fcr31.
1439
		 */
1440 1441
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;
1442 1443 1444 1445

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();
L
Linus Torvalds 已提交
1446

1447
		break;
L
Linus Torvalds 已提交
1448 1449

	case 2:
R
Ralf Baechle 已提交
1450
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1451
		break;
L
Linus Torvalds 已提交
1452 1453
	}

1454
	exception_exit(prev_state);
L
Linus Torvalds 已提交
1455 1456
}

1457
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1458 1459 1460 1461
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
1462
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1463
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1464
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1465 1466 1467 1468 1469 1470
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

1471 1472
	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
1473
out:
1474 1475 1476
	exception_exit(prev_state);
}

1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497
/*
 * Handle an MSA Disabled exception: enable MSA and restore the vector
 * context, or deliver SIGILL when MSA is unavailable or incompatible
 * with the task's 32-bit FP register mode.
 */
asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

L
Linus Torvalds 已提交
1498 1499
asmlinkage void do_mdmx(struct pt_regs *regs)
{
1500 1501 1502
	enum ctx_state prev_state;

	prev_state = exception_enter();
L
Linus Torvalds 已提交
1503
	force_sig(SIGILL, current);
1504
	exception_exit(prev_state);
L
Linus Torvalds 已提交
1505 1506
}

1507 1508 1509
/*
 * Called with interrupts disabled.
 */
L
Linus Torvalds 已提交
1510 1511
asmlinkage void do_watch(struct pt_regs *regs)
{
1512
	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
1513
	enum ctx_state prev_state;
1514

1515
	prev_state = exception_enter();
L
Linus Torvalds 已提交
1516
	/*
1517 1518
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
L
Linus Torvalds 已提交
1519
	 */
1520
	clear_c0_cause(CAUSEF_WP);
1521 1522 1523 1524 1525 1526 1527 1528

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
1529
		local_irq_enable();
1530
		force_sig_info(SIGTRAP, &info, current);
1531
	} else {
1532
		mips_clear_watch_registers();
1533 1534
		local_irq_enable();
	}
1535
	exception_exit(prev_state);
L
Linus Torvalds 已提交
1536 1537 1538 1539
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
1540
	int multi_match = regs->cp0_status & ST0_TS;
1541
	enum ctx_state prev_state;
1542
	mm_segment_t old_fs = get_fs();
1543

1544
	prev_state = exception_enter();
L
Linus Torvalds 已提交
1545
	show_regs(regs);
1546 1547

	if (multi_match) {
1548 1549
		dump_tlb_regs();
		pr_info("\n");
1550 1551 1552
		dump_tlb_all();
	}

1553 1554 1555
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

1556
	show_code((unsigned int __user *) regs->cp0_epc);
1557

1558 1559
	set_fs(old_fs);

L
Linus Torvalds 已提交
1560 1561 1562 1563 1564 1565
	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
1566
	      (multi_match) ? "" : "not ");
L
Linus Torvalds 已提交
1567 1568
}

R
Ralf Baechle 已提交
1569 1570
asmlinkage void do_mt(struct pt_regs *regs)
{
1571 1572 1573 1574 1575 1576
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
1577
		printk(KERN_DEBUG "Thread Underflow\n");
1578 1579
		break;
	case 1:
1580
		printk(KERN_DEBUG "Thread Overflow\n");
1581 1582
		break;
	case 2:
1583
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1584 1585
		break;
	case 3:
1586
		printk(KERN_DEBUG "Gating Storage Exception\n");
1587 1588
		break;
	case 4:
1589
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1590 1591
		break;
	case 5:
M
Masanari Iida 已提交
1592
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1593 1594
		break;
	default:
1595
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1596 1597 1598
			subcode);
		break;
	}
R
Ralf Baechle 已提交
1599 1600 1601 1602 1603 1604
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


1605 1606 1607
asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
1608
		panic("Unexpected DSP exception");
1609 1610 1611 1612

	force_sig(SIGILL, current);
}

L
Linus Torvalds 已提交
1613 1614 1615
/* Handler for exception codes with no assigned handler. */
asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639
/* Boot-time switches to disable L1/L2 cache parity checking. */
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

L
Linus Torvalds 已提交
1640 1641 1642 1643 1644 1645
/*
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it different ways.
 */
static inline void parity_protection_init(void)
{
1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems we need to ensure that the L1 & L2
		 * parity enables are set to the same value, since this
		 * is presumed by the hardware engineers.
		 *
		 * If the user disabled either of L1 or L2 ECC checking,
		 * disable both.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

1705
	switch (current_cpu_type()) {
L
Linus Torvalds 已提交
1706
	case CPU_24K:
1707
	case CPU_34K:
1708 1709
	case CPU_74K:
	case CPU_1004K:
1710
	case CPU_1074K:
1711
	case CPU_INTERAPTIV:
1712
	case CPU_PROAPTIV:
J
James Hogan 已提交
1713
	case CPU_P5600:
1714
	case CPU_QEMU_GENERIC:
1715
	case CPU_P6600:
1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767
		{
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

L
Linus Torvalds 已提交
1768
	case CPU_5KC:
L
Leonid Yegoshin 已提交
1769
	case CPU_5KE:
1770
	case CPU_LOONGSON1:
1771 1772 1773 1774 1775
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
L
Linus Torvalds 已提交
1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
1803
	if ((cpu_has_mips_r2_r6) &&
1804
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
L
Linus Torvalds 已提交
1824 1825
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

1826
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
L
Linus Torvalds 已提交
1827 1828 1829 1830 1831 1832 1833 1834 1835 1836
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

L
Leonid Yegoshin 已提交
1837 1838 1839 1840 1841 1842
asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
1843
	if ((cpu_has_mips_r2_r6) &&
1844 1845
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
L
Leonid Yegoshin 已提交
1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

L
Linus Torvalds 已提交
1866 1867 1868 1869 1870 1871 1872
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
1873
	unsigned long depc, old_epc, old_ra;
L
Linus Torvalds 已提交
1874 1875
	unsigned int debug;

1876
	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
L
Linus Torvalds 已提交
1877 1878
	depc = read_c0_depc();
	debug = read_c0_debug();
1879
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
L
Linus Torvalds 已提交
1880 1881 1882 1883 1884 1885 1886 1887
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
1888
		old_ra = regs->regs[31];
L
Linus Torvalds 已提交
1889
		regs->cp0_epc = depc;
1890
		compute_return_epc(regs);
L
Linus Torvalds 已提交
1891 1892
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
1893
		regs->regs[31] = old_ra;
L
Linus Torvalds 已提交
1894 1895 1896 1897 1898
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
1899
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
L
Linus Torvalds 已提交
1900 1901 1902 1903 1904 1905
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

/* Add a callback to be invoked (in NMI context) when an NMI is taken. */
int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

1915
void __noreturn nmi_exception_handler(struct pt_regs *regs)
L
Linus Torvalds 已提交
1916
{
1917 1918
	char str[100];

1919
	nmi_enter();
K
Kevin Cernekee 已提交
1920
	raw_notifier_call_chain(&nmi_chain, 0, regs);
1921
	bust_spinlocks(1);
1922 1923 1924 1925
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
1926
	nmi_exit();
L
Linus Torvalds 已提交
1927 1928
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

/* Base address of the exception vector area. */
unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
/* Per-exception-code handlers (indexed by ExcCode). */
unsigned long exception_handlers[32];
/* Per-vectored-interrupt handlers (EI/VI mode). */
unsigned long vi_handlers[64];

1936
void __init *set_except_vector(int n, void *addr)
L
Linus Torvalds 已提交
1937 1938
{
	unsigned long handler = (unsigned long) addr;
R
Ralf Baechle 已提交
1939
	unsigned long old_handler;
L
Linus Torvalds 已提交
1940

1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
R
Ralf Baechle 已提交
1952
	old_handler = xchg(&exception_handlers[n], handler);
L
Linus Torvalds 已提交
1953 1954

	if (n == 0 && cpu_has_divec) {
1955 1956 1957
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
1958
		unsigned long jump_mask = ~((1 << 28) - 1);
1959
#endif
1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1971 1972 1973 1974
	}
	return (void *)old_handler;
}

/* Default handler for an unexpected vectored interrupt: dump state and die. */
static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

1981
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1982 1983 1984
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
R
Ralf Baechle 已提交
1985
	int srssets = current_cpu_data.srsets;
1986
	u16 *h;
1987 1988
	unsigned char *b;

1989
	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1990 1991 1992 1993

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
1994
	} else
1995
		handler = (unsigned long) addr;
1996
	vi_handlers[n] = handler;
1997 1998 1999

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

R
Ralf Baechle 已提交
2000
	if (srs >= srssets)
2001 2002 2003 2004
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
2005
			board_bind_eic_interrupt(n, srs);
2006
	} else if (cpu_has_vint) {
2007
		/* SRSMap is only defined if shadow sets are implemented */
R
Ralf Baechle 已提交
2008
		if (srssets > 1)
2009
			change_c0_srsmap(0xf << n*4, srs << n*4);
2010 2011 2012 2013 2014
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
2015
		 * that does normal register saving and standard interrupt exit
2016 2017 2018
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
2019
		extern char rollback_except_vec_vi;
2020
		char *vec_start = using_rollback_handler() ?
2021
			&rollback_except_vec_vi : &except_vec_vi;
2022 2023 2024 2025
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
2026 2027
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
2028 2029
#endif
		const int handler_len = &except_vec_vi_end - vec_start;
2030 2031 2032 2033 2034 2035

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably not configured :(
			 */
2036
			panic("VECTORSPACING too small");
2037 2038
		}

2039 2040 2041 2042 2043 2044 2045 2046 2047 2048
		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
2049 2050
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
2051 2052 2053
	}
	else {
		/*
2054 2055 2056
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
2057
		 */
2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
2071 2072
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
L
Linus Torvalds 已提交
2073
	}
2074

L
Linus Torvalds 已提交
2075 2076 2077
	return (void *)old_handler;
}

2078
void *set_vi_handler(int n, vi_handler_t addr)
2079
{
R
Ralf Baechle 已提交
2080
	return set_vi_srs_handler(n, addr, 0);
2081
}
extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

2104
static int noulri;
2105 2106 2107 2108 2109 2110 2111 2112 2113 2114

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

2115 2116
/* configure STATUS register */
static void configure_status(void)
L
Linus Torvalds 已提交
2117 2118 2119 2120 2121 2122 2123
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
2124
	unsigned int status_set = ST0_CU0;
2125
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
2126 2127
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
2128
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
L
Linus Torvalds 已提交
2129
		status_set |= ST0_XX;
2130 2131 2132
	if (cpu_has_dsp)
		status_set |= ST0_MX;

R
Ralf Baechle 已提交
2133
	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
L
Linus Torvalds 已提交
2134
			 status_set);
2135 2136
}

2137 2138 2139
unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

2140 2141 2142
/* configure HWRENA register */
static void configure_hwrena(void)
{
2143
	hwrena = cpu_hwrena_impl_bits;
L
Linus Torvalds 已提交
2144

2145
	if (cpu_has_mips_r2_r6)
J
James Hogan 已提交
2146 2147 2148 2149
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;
2150

2151
	if (!noulri && cpu_has_userlocal)
J
James Hogan 已提交
2152
		hwrena |= MIPS_HWRENA_ULR;
2153

2154 2155
	if (hwrena)
		write_c0_hwrena(hwrena);
2156
}
2157

2158 2159
static void configure_exception_vector(void)
{
2160
	if (cpu_has_veic || cpu_has_vint) {
2161
		unsigned long sr = set_c0_status(ST0_BEV);
2162 2163 2164 2165 2166 2167 2168 2169
		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
2170
		write_c0_ebase(ebase);
2171
		write_c0_status(sr);
2172
		/* Setting vector spacing enables EI/VI mode  */
2173
		change_c0_intctl(0x3e0, VECTORSPACING);
2174
	}
R
Ralf Baechle 已提交
2175 2176 2177 2178 2179 2180 2181 2182
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
2183 2184 2185 2186 2187 2188 2189 2190 2191 2192
}

/*
 * Per-CPU trap state initialisation, run on every CPU as it comes up.
 * @is_boot_cpu: true for the boot CPU (whose caches are already set up
 * by setup_arch() and whose EBASE value is taken as authoritative).
 */
void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust a secondary core has a sane EBASE register
		 * so use the one calculated by the boot CPU.
		 */
		if (!is_boot_cpu) {
			/* If available, use WG to set top bits of EBASE */
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
				write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
			}
			write_c0_ebase(ebase);
		}

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	/* This CPU runs the init address space until a real mm is switched in. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

2247
/* Install CPU exception handler */
2248
void set_handler(unsigned long offset, void *addr, unsigned long size)
2249
{
2250 2251 2252
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
2253
	memcpy((void *)(ebase + offset), addr, size);
2254
#endif
2255
	local_flush_icache_range(ebase + offset, ebase + offset + size);
2256 2257
}

2258
static char panic_null_cerr[] =
2259 2260
	"Trying to set NULL cache error exception handler";

2261 2262 2263 2264 2265
/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
2266
void set_uncached_handler(unsigned long offset, void *addr,
2267
	unsigned long size)
2268
{
2269
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2270

2271 2272 2273
	if (!addr)
		panic(panic_null_cerr);

2274 2275 2276
	memcpy((void *)(uncached_ebase + offset), addr, size);
}

2277 2278 2279 2280 2281 2282 2283 2284 2285
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

L
Linus Torvalds 已提交
2286 2287
void __init trap_init(void)
{
2288
	extern char except_vec3_generic;
L
Linus Torvalds 已提交
2289
	extern char except_vec4;
2290
	extern char except_vec3_r4000;
L
Linus Torvalds 已提交
2291
	unsigned long i;
2292 2293

	check_wait();
L
Linus Torvalds 已提交
2294

2295 2296
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
J
James Hogan 已提交
2297 2298
		phys_addr_t ebase_pa;

2299 2300
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
J
James Hogan 已提交
2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315

		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of physical,
		 * in which case we should be able to convert to KSeg0.
		 *
		 * EVA is special though as it allows segments to be rearranged
		 * and to become uncached during cache error handling.
		 */
		ebase_pa = __pa(ebase);
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
2316
	} else {
2317 2318
		ebase = CAC_BASE;

2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329
		if (cpu_has_mips_r2_r6) {
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				ebase = (read_c0_ebase_64() & ~0xfff);
#else
				ebase = (read_c0_ebase() & ~0xfff);
#endif
			} else {
				ebase += (read_c0_ebase() & 0x3ffff000);
			}
		}
2330
	}
2331

2332 2333 2334 2335 2336 2337 2338 2339 2340
	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

K
Kevin Cernekee 已提交
2341 2342
	if (board_ebase_setup)
		board_ebase_setup();
2343
	per_cpu_trap_init(true);
L
Linus Torvalds 已提交
2344 2345 2346

	/*
	 * Copy the generic exception handlers to their final destination.
2347
	 * This will be overridden later as suitable for a particular
L
Linus Torvalds 已提交
2348 2349
	 * configuration.
	 */
2350
	set_handler(0x180, &except_vec3_generic, 0x80);
L
Linus Torvalds 已提交
2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to it's final
	 * destination.
	 */
2362
	if (cpu_has_ejtag && board_ejtag_handler_setup)
2363
		board_ejtag_handler_setup();
L
Linus Torvalds 已提交
2364 2365 2366 2367 2368

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
2369
		set_except_vector(EXCCODE_WATCH, handle_watch);
L
Linus Torvalds 已提交
2370 2371

	/*
2372
	 * Initialise interrupt handlers
L
Linus Torvalds 已提交
2373
	 */
2374 2375 2376
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
R
Ralf Baechle 已提交
2377
			set_vi_handler(i, NULL);
2378 2379 2380
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);
L
Linus Torvalds 已提交
2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395

	/*
	 * Some CPUs can enable/disable for cache parity detection, but does
	 * it different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

2396 2397 2398 2399 2400
	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);
L
Linus Torvalds 已提交
2401

2402 2403
	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);
L
Linus Torvalds 已提交
2404

2405 2406
	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);
L
Linus Torvalds 已提交
2407

2408 2409 2410
	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);
	set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
2411 2412
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
2413 2414 2415 2416
	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
L
Linus Torvalds 已提交
2417

2418 2419
	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
L
Linus Torvalds 已提交
2420 2421 2422 2423
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
R
Ralf Baechle 已提交
2424
		 * written yet.	 Well, anyway there is no R6000 machine on the
L
Linus Torvalds 已提交
2425 2426 2427 2428 2429 2430 2431
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

2432 2433 2434 2435

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

2436
	if (cpu_has_fpu && !cpu_has_nofpuex)
2437
		set_except_vector(EXCCODE_FPE, handle_fpe);
2438

2439
	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2440 2441

	if (cpu_has_rixiex) {
2442 2443
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2444 2445
	}

2446 2447
	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2448 2449

	if (cpu_has_mcheck)
2450
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2451

R
Ralf Baechle 已提交
2452
	if (cpu_has_mipsmt)
2453
		set_except_vector(EXCCODE_THREAD, handle_mt);
R
Ralf Baechle 已提交
2454

2455
	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2456

2457 2458 2459
	if (board_cache_error_setup)
		board_cache_error_setup();

2460 2461
	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
2462
		set_handler(0x180, &except_vec3_r4000, 0x100);
2463
	else if (cpu_has_4kex)
2464
		set_handler(0x180, &except_vec3_generic, 0x80);
2465
	else
2466
		set_handler(0x080, &except_vec3_generic, 0x80);
2467

2468
	local_flush_icache_range(ebase, ebase + 0x400);
2469 2470

	sort_extable(__start___dbe_table, __stop___dbe_table);
R
Ralf Baechle 已提交
2471

2472
	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
L
Linus Torvalds 已提交
2473
}
2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502

/*
 * CPU power-management notifier.  When a CPU exits a low-power state
 * (or fails to enter one) its coprocessor-0 context may have been lost,
 * so reprogram Status, HWREna and the exception vector base.
 */
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

/* Hook the PM notifier up; runs once at arch_initcall time. */
static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);