提交 2a0b24f5 编写于 作者: Steven J. Hill 提交者: Ralf Baechle

MIPS: microMIPS: Add support for exception handling.

All exceptions must be taken in microMIPS mode, never in classic
MIPS mode or the kernel falls apart. A few NOP instructions are
used to maintain the correct alignment of microMIPS versions of
the exception vectors.
Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
上级 102cedc3
...@@ -596,6 +596,7 @@ ...@@ -596,6 +596,7 @@
#define MIPS_CONF3_RXI (_ULCAST_(1) << 12) #define MIPS_CONF3_RXI (_ULCAST_(1) << 12)
#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) #define MIPS_CONF3_ULRI (_ULCAST_(1) << 13)
#define MIPS_CONF3_ISA (_ULCAST_(3) << 14) #define MIPS_CONF3_ISA (_ULCAST_(3) << 14)
#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16)
#define MIPS_CONF3_VZ (_ULCAST_(1) << 23) #define MIPS_CONF3_VZ (_ULCAST_(1) << 23)
#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) #define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0)
......
...@@ -139,7 +139,7 @@ ...@@ -139,7 +139,7 @@
1: move ra, k0 1: move ra, k0
li k0, 3 li k0, 3
mtc0 k0, $22 mtc0 k0, $22
#endif /* CONFIG_CPU_LOONGSON2F */ #endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
lui k1, %hi(kernelsp) lui k1, %hi(kernelsp)
#else #else
...@@ -189,6 +189,7 @@ ...@@ -189,6 +189,7 @@
LONG_S $0, PT_R0(sp) LONG_S $0, PT_R0(sp)
mfc0 v1, CP0_STATUS mfc0 v1, CP0_STATUS
LONG_S $2, PT_R2(sp) LONG_S $2, PT_R2(sp)
LONG_S v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC #ifdef CONFIG_MIPS_MT_SMTC
/* /*
* Ideally, these instructions would be shuffled in * Ideally, these instructions would be shuffled in
...@@ -200,21 +201,20 @@ ...@@ -200,21 +201,20 @@
LONG_S k0, PT_TCSTATUS(sp) LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */ #endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp) LONG_S $4, PT_R4(sp)
LONG_S $5, PT_R5(sp)
LONG_S v1, PT_STATUS(sp)
mfc0 v1, CP0_CAUSE mfc0 v1, CP0_CAUSE
LONG_S $6, PT_R6(sp) LONG_S $5, PT_R5(sp)
LONG_S $7, PT_R7(sp)
LONG_S v1, PT_CAUSE(sp) LONG_S v1, PT_CAUSE(sp)
LONG_S $6, PT_R6(sp)
MFC0 v1, CP0_EPC MFC0 v1, CP0_EPC
LONG_S $7, PT_R7(sp)
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
LONG_S $8, PT_R8(sp) LONG_S $8, PT_R8(sp)
LONG_S $9, PT_R9(sp) LONG_S $9, PT_R9(sp)
#endif #endif
LONG_S v1, PT_EPC(sp)
LONG_S $25, PT_R25(sp) LONG_S $25, PT_R25(sp)
LONG_S $28, PT_R28(sp) LONG_S $28, PT_R28(sp)
LONG_S $31, PT_R31(sp) LONG_S $31, PT_R31(sp)
LONG_S v1, PT_EPC(sp)
ori $28, sp, _THREAD_MASK ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON #ifdef CONFIG_CPU_CAVIUM_OCTEON
......
...@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) ...@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_ULRI; c->options |= MIPS_CPU_ULRI;
if (config3 & MIPS_CONF3_ISA) if (config3 & MIPS_CONF3_ISA)
c->options |= MIPS_CPU_MICROMIPS; c->options |= MIPS_CPU_MICROMIPS;
#ifdef CONFIG_CPU_MICROMIPS
write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
#endif
if (config3 & MIPS_CONF3_VZ) if (config3 & MIPS_CONF3_VZ)
c->ases |= MIPS_ASE_VZ; c->ases |= MIPS_ASE_VZ;
......
...@@ -5,8 +5,8 @@ ...@@ -5,8 +5,8 @@
* *
* Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2002, 2007 Maciej W. Rozycki * Copyright (C) 2002, 2007 Maciej W. Rozycki
* Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
*/ */
#include <linux/init.h> #include <linux/init.h>
...@@ -21,8 +21,10 @@ ...@@ -21,8 +21,10 @@
#include <asm/war.h> #include <asm/war.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#ifdef CONFIG_MIPS_MT_SMTC
#define PANIC_PIC(msg) \ #define PANIC_PIC(msg) \
.set push; \ .set push; \
.set nomicromips; \
.set reorder; \ .set reorder; \
PTR_LA a0,8f; \ PTR_LA a0,8f; \
.set noat; \ .set noat; \
...@@ -31,17 +33,10 @@ ...@@ -31,17 +33,10 @@
9: b 9b; \ 9: b 9b; \
.set pop; \ .set pop; \
TEXT(msg) TEXT(msg)
#endif
__INIT __INIT
NESTED(except_vec0_generic, 0, sp)
PANIC_PIC("Exception vector 0 called")
END(except_vec0_generic)
NESTED(except_vec1_generic, 0, sp)
PANIC_PIC("Exception vector 1 called")
END(except_vec1_generic)
/* /*
* General exception vector for all other CPUs. * General exception vector for all other CPUs.
* *
...@@ -138,12 +133,19 @@ LEAF(r4k_wait) ...@@ -138,12 +133,19 @@ LEAF(r4k_wait)
nop nop
nop nop
nop nop
#ifdef CONFIG_CPU_MICROMIPS
nop
nop
nop
nop
#endif
.set mips3 .set mips3
wait wait
/* end of rollback region (the region size must be power of two) */ /* end of rollback region (the region size must be power of two) */
.set pop
1: 1:
jr ra jr ra
nop
.set pop
END(r4k_wait) END(r4k_wait)
.macro BUILD_ROLLBACK_PROLOGUE handler .macro BUILD_ROLLBACK_PROLOGUE handler
...@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp) ...@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp)
LONG_L s0, TI_REGS($28) LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28) LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq PTR_LA ra, ret_from_irq
j plat_irq_dispatch PTR_LA v0, plat_irq_dispatch
jr v0
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
END(handle_int) END(handle_int)
__INIT __INIT
...@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp) ...@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp)
/* /*
* EJTAG debug exception handler. * EJTAG debug exception handler.
* The EJTAG debug exception entry point is 0xbfc00480, which * The EJTAG debug exception entry point is 0xbfc00480, which
* normally is in the boot PROM, so the boot PROM must do a * normally is in the boot PROM, so the boot PROM must do an
* unconditional jump to this vector. * unconditional jump to this vector.
*/ */
NESTED(except_vec_ejtag_debug, 0, sp) NESTED(except_vec_ejtag_debug, 0, sp)
j ejtag_debug_handler j ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
END(except_vec_ejtag_debug) END(except_vec_ejtag_debug)
__FINIT __FINIT
...@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp) ...@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp)
FEXPORT(except_vec_vi_mori) FEXPORT(except_vec_vi_mori)
ori a0, $0, 0 ori a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */ #endif /* CONFIG_MIPS_MT_SMTC */
PTR_LA v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui) FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */ lui v0, 0 /* Patched */
j except_vec_vi_handler jr v1
FEXPORT(except_vec_vi_ori) FEXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */ ori v0, 0 /* Patched */
.set pop .set pop
...@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer) ...@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer)
*/ */
NESTED(except_vec_nmi, 0, sp) NESTED(except_vec_nmi, 0, sp)
j nmi_handler j nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
END(except_vec_nmi) END(except_vec_nmi)
__FINIT __FINIT
...@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp) ...@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set push .set push
.set noat .set noat
.set noreorder .set noreorder
/* 0x7c03e83b: rdhwr v1,$29 */ /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
MFC0 k1, CP0_EPC MFC0 k1, CP0_EPC
lui k0, 0x7c03 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
lw k1, (k1) and k0, k1, 1
ori k0, 0xe83b beqz k0, 1f
.set reorder xor k1, k0
lhu k0, (k1)
lhu k1, 2(k1)
ins k1, k0, 16, 16
lui k0, 0x007d
b docheck
ori k0, 0x6b3c
1:
lui k0, 0x7c03
lw k1, (k1)
ori k0, 0xe83b
#else
andi k0, k1, 1
bnez k0, handle_ri
lui k0, 0x7c03
lw k1, (k1)
ori k0, 0xe83b
#endif
.set reorder
docheck:
bne k0, k1, handle_ri /* if not ours */ bne k0, k1, handle_ri /* if not ours */
isrdhwr:
/* The insn is rdhwr. No need to check CAUSE.BD here. */ /* The insn is rdhwr. No need to check CAUSE.BD here. */
get_saved_sp /* k1 := current_thread_info */ get_saved_sp /* k1 := current_thread_info */
.set noreorder .set noreorder
......
...@@ -138,9 +138,18 @@ stackargs: ...@@ -138,9 +138,18 @@ stackargs:
5: jr t1 5: jr t1
sw t5, 16(sp) # argument #5 to ksp sw t5, 16(sp) # argument #5 to ksp
#ifdef CONFIG_CPU_MICROMIPS
sw t8, 28(sp) # argument #8 to ksp sw t8, 28(sp) # argument #8 to ksp
nop
sw t7, 24(sp) # argument #7 to ksp sw t7, 24(sp) # argument #7 to ksp
nop
sw t6, 20(sp) # argument #6 to ksp sw t6, 20(sp) # argument #6 to ksp
nop
#else
sw t8, 28(sp) # argument #8 to ksp
sw t7, 24(sp) # argument #7 to ksp
sw t6, 20(sp) # argument #6 to ksp
#endif
6: j stack_done # go back 6: j stack_done # go back
nop nop
.set pop .set pop
......
...@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? ...@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
.text .text
.align 5 .align 5
FEXPORT(__smtc_ipi_vector) FEXPORT(__smtc_ipi_vector)
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
.set noat .set noat
/* Disable thread scheduling to make Status update atomic */ /* Disable thread scheduling to make Status update atomic */
DMT 27 # dmt k1 DMT 27 # dmt k1
......
...@@ -8,8 +8,8 @@ ...@@ -8,8 +8,8 @@
* Copyright (C) 1998 Ulf Carlsson * Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 01 MIPS Technologies, Inc.
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
*/ */
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/compiler.h> #include <linux/compiler.h>
...@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void); ...@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void); extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void); extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
void (*board_be_init)(void); void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup); int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void); void (*board_nmi_handler_setup)(void);
...@@ -495,6 +491,12 @@ asmlinkage void do_be(struct pt_regs *regs) ...@@ -495,6 +491,12 @@ asmlinkage void do_be(struct pt_regs *regs)
#define SYNC 0x0000000f #define SYNC 0x0000000f
#define RDHWR 0x0000003b #define RDHWR 0x0000003b
/* microMIPS definitions */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR 0x00006b3c
#define MM_RS 0x001f0000
#define MM_RT 0x03e00000
/* /*
* The ll_bit is cleared by r*_switch.S * The ll_bit is cleared by r*_switch.S
*/ */
...@@ -609,42 +611,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) ...@@ -609,42 +611,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
* Simulate trapping 'rdhwr' instructions to provide user accessible * Simulate trapping 'rdhwr' instructions to provide user accessible
* registers not implemented in hardware. * registers not implemented in hardware.
*/ */
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{ {
struct thread_info *ti = task_thread_info(current); struct thread_info *ti = task_thread_info(current);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
switch (rd) {
case 0: /* CPU number */
regs->regs[rt] = smp_processor_id();
return 0;
case 1: /* SYNCI length */
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
return 0;
case 2: /* Read count register */
regs->regs[rt] = read_c0_count();
return 0;
case 3: /* Count register resolution */
switch (current_cpu_data.cputype) {
case CPU_20KC:
case CPU_25KF:
regs->regs[rt] = 1;
break;
default:
regs->regs[rt] = 2;
}
return 0;
case 29:
regs->regs[rt] = ti->tp_value;
return 0;
default:
return -1;
}
}
static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11; int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16; int rt = (opcode & RT) >> 16;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0); simulate_rdhwr(regs, rd, rt);
switch (rd) { return 0;
case 0: /* CPU number */ }
regs->regs[rt] = smp_processor_id();
return 0; /* Not ours. */
case 1: /* SYNCI length */ return -1;
regs->regs[rt] = min(current_cpu_data.dcache.linesz, }
current_cpu_data.icache.linesz);
return 0; static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
case 2: /* Read count register */ {
regs->regs[rt] = read_c0_count(); if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
return 0; int rd = (opcode & MM_RS) >> 16;
case 3: /* Count register resolution */ int rt = (opcode & MM_RT) >> 21;
switch (current_cpu_data.cputype) { simulate_rdhwr(regs, rd, rt);
case CPU_20KC: return 0;
case CPU_25KF:
regs->regs[rt] = 1;
break;
default:
regs->regs[rt] = 2;
}
return 0;
case 29:
regs->regs[rt] = ti->tp_value;
return 0;
default:
return -1;
}
} }
/* Not ours. */ /* Not ours. */
...@@ -826,9 +848,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, ...@@ -826,9 +848,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
asmlinkage void do_bp(struct pt_regs *regs) asmlinkage void do_bp(struct pt_regs *regs)
{ {
unsigned int opcode, bcode; unsigned int opcode, bcode;
unsigned long epc;
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) u16 instr[2];
goto out_sigsegv;
if (get_isa16_mode(regs->cp0_epc)) {
/* Calculate EPC. */
epc = exception_epc(regs);
if (cpu_has_mmips) {
if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
(__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
goto out_sigsegv;
opcode = (instr[0] << 16) | instr[1];
} else {
/* MIPS16e mode */
if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
goto out_sigsegv;
bcode = (instr[0] >> 6) & 0x3f;
do_trap_or_bp(regs, bcode, "Break");
return;
}
} else {
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
}
/* /*
* There is the ancient bug in the MIPS assemblers that the break * There is the ancient bug in the MIPS assemblers that the break
...@@ -869,13 +911,22 @@ asmlinkage void do_bp(struct pt_regs *regs) ...@@ -869,13 +911,22 @@ asmlinkage void do_bp(struct pt_regs *regs)
asmlinkage void do_tr(struct pt_regs *regs) asmlinkage void do_tr(struct pt_regs *regs)
{ {
unsigned int opcode, tcode = 0; unsigned int opcode, tcode = 0;
u16 instr[2];
unsigned long epc = exception_epc(regs);
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
goto out_sigsegv; (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
goto out_sigsegv;
opcode = (instr[0] << 16) | instr[1];
/* Immediate versions don't provide a code. */ /* Immediate versions don't provide a code. */
if (!(opcode & OPCODE)) if (!(opcode & OPCODE)) {
tcode = ((opcode >> 6) & ((1 << 10) - 1)); if (get_isa16_mode(regs->cp0_epc))
/* microMIPS */
tcode = (opcode >> 12) & 0x1f;
else
tcode = ((opcode >> 6) & ((1 << 10) - 1));
}
do_trap_or_bp(regs, tcode, "Trap"); do_trap_or_bp(regs, tcode, "Trap");
return; return;
...@@ -888,6 +939,7 @@ asmlinkage void do_ri(struct pt_regs *regs) ...@@ -888,6 +939,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
{ {
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
unsigned long old_epc = regs->cp0_epc; unsigned long old_epc = regs->cp0_epc;
unsigned long old31 = regs->regs[31];
unsigned int opcode = 0; unsigned int opcode = 0;
int status = -1; int status = -1;
...@@ -900,23 +952,37 @@ asmlinkage void do_ri(struct pt_regs *regs) ...@@ -900,23 +952,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
if (unlikely(compute_return_epc(regs) < 0)) if (unlikely(compute_return_epc(regs) < 0))
return; return;
if (unlikely(get_user(opcode, epc) < 0)) if (get_isa16_mode(regs->cp0_epc)) {
status = SIGSEGV; unsigned short mmop[2] = { 0 };
if (!cpu_has_llsc && status < 0) if (unlikely(get_user(mmop[0], epc) < 0))
status = simulate_llsc(regs, opcode); status = SIGSEGV;
if (unlikely(get_user(mmop[1], epc) < 0))
status = SIGSEGV;
opcode = (mmop[0] << 16) | mmop[1];
if (status < 0) if (status < 0)
status = simulate_rdhwr(regs, opcode); status = simulate_rdhwr_mm(regs, opcode);
} else {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (status < 0) if (!cpu_has_llsc && status < 0)
status = simulate_sync(regs, opcode); status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr_normal(regs, opcode);
if (status < 0)
status = simulate_sync(regs, opcode);
}
if (status < 0) if (status < 0)
status = SIGILL; status = SIGILL;
if (unlikely(status > 0)) { if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */ regs->cp0_epc = old_epc; /* Undo skip-over. */
regs->regs[31] = old31;
force_sig(status, current); force_sig(status, current);
} }
} }
...@@ -986,7 +1052,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, ...@@ -986,7 +1052,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
asmlinkage void do_cpu(struct pt_regs *regs) asmlinkage void do_cpu(struct pt_regs *regs)
{ {
unsigned int __user *epc; unsigned int __user *epc;
unsigned long old_epc; unsigned long old_epc, old31;
unsigned int opcode; unsigned int opcode;
unsigned int cpid; unsigned int cpid;
int status; int status;
...@@ -1000,26 +1066,41 @@ asmlinkage void do_cpu(struct pt_regs *regs) ...@@ -1000,26 +1066,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
case 0: case 0:
epc = (unsigned int __user *)exception_epc(regs); epc = (unsigned int __user *)exception_epc(regs);
old_epc = regs->cp0_epc; old_epc = regs->cp0_epc;
old31 = regs->regs[31];
opcode = 0; opcode = 0;
status = -1; status = -1;
if (unlikely(compute_return_epc(regs) < 0)) if (unlikely(compute_return_epc(regs) < 0))
return; return;
if (unlikely(get_user(opcode, epc) < 0)) if (get_isa16_mode(regs->cp0_epc)) {
status = SIGSEGV; unsigned short mmop[2] = { 0 };
if (!cpu_has_llsc && status < 0) if (unlikely(get_user(mmop[0], epc) < 0))
status = simulate_llsc(regs, opcode); status = SIGSEGV;
if (unlikely(get_user(mmop[1], epc) < 0))
status = SIGSEGV;
opcode = (mmop[0] << 16) | mmop[1];
if (status < 0) if (status < 0)
status = simulate_rdhwr(regs, opcode); status = simulate_rdhwr_mm(regs, opcode);
} else {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr_normal(regs, opcode);
}
if (status < 0) if (status < 0)
status = SIGILL; status = SIGILL;
if (unlikely(status > 0)) { if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */ regs->cp0_epc = old_epc; /* Undo skip-over. */
regs->regs[31] = old31;
force_sig(status, current); force_sig(status, current);
} }
...@@ -1333,7 +1414,7 @@ asmlinkage void cache_parity_error(void) ...@@ -1333,7 +1414,7 @@ asmlinkage void cache_parity_error(void)
void ejtag_exception_handler(struct pt_regs *regs) void ejtag_exception_handler(struct pt_regs *regs)
{ {
const int field = 2 * sizeof(unsigned long); const int field = 2 * sizeof(unsigned long);
unsigned long depc, old_epc; unsigned long depc, old_epc, old_ra;
unsigned int debug; unsigned int debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
...@@ -1348,10 +1429,12 @@ void ejtag_exception_handler(struct pt_regs *regs) ...@@ -1348,10 +1429,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
* calculation. * calculation.
*/ */
old_epc = regs->cp0_epc; old_epc = regs->cp0_epc;
old_ra = regs->regs[31];
regs->cp0_epc = depc; regs->cp0_epc = depc;
__compute_return_epc(regs); compute_return_epc(regs);
depc = regs->cp0_epc; depc = regs->cp0_epc;
regs->cp0_epc = old_epc; regs->cp0_epc = old_epc;
regs->regs[31] = old_ra;
} else } else
depc += 4; depc += 4;
write_c0_depc(depc); write_c0_depc(depc);
...@@ -1392,9 +1475,24 @@ void __init *set_except_vector(int n, void *addr) ...@@ -1392,9 +1475,24 @@ void __init *set_except_vector(int n, void *addr)
unsigned long handler = (unsigned long) addr; unsigned long handler = (unsigned long) addr;
unsigned long old_handler = exception_handlers[n]; unsigned long old_handler = exception_handlers[n];
#ifdef CONFIG_CPU_MICROMIPS
/*
* Only the TLB handlers are cache aligned with an even
* address. All other handlers are on an odd address and
* require no modification. Otherwise, MIPS32 mode will
* be entered when handling any TLB exceptions. That
* would be bad...since we must stay in microMIPS mode.
*/
if (!(handler & 0x1))
handler |= 1;
#endif
exception_handlers[n] = handler; exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) { if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
unsigned long jump_mask = ~((1 << 27) - 1);
#else
unsigned long jump_mask = ~((1 << 28) - 1); unsigned long jump_mask = ~((1 << 28) - 1);
#endif
u32 *buf = (u32 *)(ebase + 0x200); u32 *buf = (u32 *)(ebase + 0x200);
unsigned int k0 = 26; unsigned int k0 = 26;
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
...@@ -1421,17 +1519,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) ...@@ -1421,17 +1519,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
unsigned long handler; unsigned long handler;
unsigned long old_handler = vi_handlers[n]; unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets; int srssets = current_cpu_data.srsets;
u32 *w; u16 *h;
unsigned char *b; unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint); BUG_ON(!cpu_has_veic && !cpu_has_vint);
BUG_ON((n < 0) && (n > 9));
if (addr == NULL) { if (addr == NULL) {
handler = (unsigned long) do_default_vi; handler = (unsigned long) do_default_vi;
srs = 0; srs = 0;
} else } else
handler = (unsigned long) addr; handler = (unsigned long) addr;
vi_handlers[n] = (unsigned long) addr; vi_handlers[n] = handler;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
...@@ -1450,9 +1549,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) ...@@ -1450,9 +1549,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
if (srs == 0) { if (srs == 0) {
/* /*
* If no shadow set is selected then use the default handler * If no shadow set is selected then use the default handler
* that does normal register saving and a standard interrupt exit * that does normal register saving and standard interrupt exit
*/ */
extern char except_vec_vi, except_vec_vi_lui; extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end; extern char except_vec_vi_ori, except_vec_vi_end;
extern char rollback_except_vec_vi; extern char rollback_except_vec_vi;
...@@ -1465,11 +1563,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) ...@@ -1465,11 +1563,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
* Status.IM bit to be masked before going there. * Status.IM bit to be masked before going there.
*/ */
extern char except_vec_vi_mori; extern char except_vec_vi_mori;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int mori_offset = &except_vec_vi_mori - vec_start + 2;
#else
const int mori_offset = &except_vec_vi_mori - vec_start; const int mori_offset = &except_vec_vi_mori - vec_start;
#endif
#endif /* CONFIG_MIPS_MT_SMTC */ #endif /* CONFIG_MIPS_MT_SMTC */
const int handler_len = &except_vec_vi_end - vec_start; #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int lui_offset = &except_vec_vi_lui - vec_start + 2;
const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
const int lui_offset = &except_vec_vi_lui - vec_start; const int lui_offset = &except_vec_vi_lui - vec_start;
const int ori_offset = &except_vec_vi_ori - vec_start; const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
const int handler_len = &except_vec_vi_end - vec_start;
if (handler_len > VECTORSPACING) { if (handler_len > VECTORSPACING) {
/* /*
...@@ -1479,30 +1586,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) ...@@ -1479,30 +1586,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
panic("VECTORSPACING too small"); panic("VECTORSPACING too small");
} }
memcpy(b, vec_start, handler_len); set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
(handler_len - 1));
#else
handler_len);
#endif
#ifdef CONFIG_MIPS_MT_SMTC #ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
w = (u32 *)(b + mori_offset); h = (u16 *)(b + mori_offset);
*w = (*w & 0xffff0000) | (0x100 << n); *h = (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */ #endif /* CONFIG_MIPS_MT_SMTC */
w = (u32 *)(b + lui_offset); h = (u16 *)(b + lui_offset);
*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); *h = (handler >> 16) & 0xffff;
w = (u32 *)(b + ori_offset); h = (u16 *)(b + ori_offset);
*w = (*w & 0xffff0000) | ((u32)handler & 0xffff); *h = (handler & 0xffff);
local_flush_icache_range((unsigned long)b, local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len)); (unsigned long)(b+handler_len));
} }
else { else {
/* /*
* In other cases jump directly to the interrupt handler * In other cases jump directly to the interrupt handler. It
* * is the handler's responsibility to save registers if required
* It is the handlers responsibility to save registers if required * (eg hi/lo) and return from the exception using "eret".
* (eg hi/lo) and return from the exception using "eret"
*/ */
w = (u32 *)b; u32 insn;
*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
*w = 0; h = (u16 *)b;
/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
h[0] = (insn >> 16) & 0xffff;
h[1] = insn & 0xffff;
h[2] = 0;
h[3] = 0;
local_flush_icache_range((unsigned long)b, local_flush_icache_range((unsigned long)b,
(unsigned long)(b+8)); (unsigned long)(b+8));
} }
...@@ -1663,7 +1784,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) ...@@ -1663,7 +1784,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
/* Install CPU exception handler */ /* Install CPU exception handler */
void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
{ {
#ifdef CONFIG_CPU_MICROMIPS
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
memcpy((void *)(ebase + offset), addr, size); memcpy((void *)(ebase + offset), addr, size);
#endif
local_flush_icache_range(ebase + offset, ebase + offset + size); local_flush_icache_range(ebase + offset, ebase + offset + size);
} }
...@@ -1697,8 +1822,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt); ...@@ -1697,8 +1822,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void) void __init trap_init(void)
{ {
extern char except_vec3_generic, except_vec3_r4000; extern char except_vec3_generic;
extern char except_vec4; extern char except_vec4;
extern char except_vec3_r4000;
unsigned long i; unsigned long i;
int rollback; int rollback;
...@@ -1831,11 +1957,11 @@ void __init trap_init(void) ...@@ -1831,11 +1957,11 @@ void __init trap_init(void)
if (cpu_has_vce) if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */ /* Special exception: R4[04]00 uses also the divec space. */
memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); set_handler(0x180, &except_vec3_r4000, 0x100);
else if (cpu_has_4kex) else if (cpu_has_4kex)
memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); set_handler(0x180, &except_vec3_generic, 0x80);
else else
memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); set_handler(0x080, &except_vec3_generic, 0x80);
local_flush_icache_range(ebase, ebase + 0x400); local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers(); flush_tlb_handlers();
......
...@@ -2103,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void) ...@@ -2103,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_l_nopage_tlbl(&l, p); uasm_l_nopage_tlbl(&l, p);
build_restore_work_registers(&p); build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_0 & 1) {
uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
uasm_i_jr(&p, K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
...@@ -2150,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void) ...@@ -2150,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
uasm_l_nopage_tlbs(&l, p); uasm_l_nopage_tlbs(&l, p);
build_restore_work_registers(&p); build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
uasm_i_jr(&p, K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
...@@ -2198,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) ...@@ -2198,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
uasm_l_nopage_tlbm(&l, p); uasm_l_nopage_tlbm(&l, p);
build_restore_work_registers(&p); build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
uasm_i_jr(&p, K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
......
...@@ -65,7 +65,41 @@ static void __init mips_nmi_setup(void) ...@@ -65,7 +65,41 @@ static void __init mips_nmi_setup(void)
base = cpu_has_veic ? base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa80) : (void *)(CAC_BASE + 0xa80) :
(void *)(CAC_BASE + 0x380); (void *)(CAC_BASE + 0x380);
#ifdef CONFIG_CPU_MICROMIPS
/*
* Decrement the exception vector address by one for microMIPS.
*/
memcpy(base, (&except_vec_nmi - 1), 0x80);
/*
* This is a hack. We do not know if the boot loader was built with
* microMIPS instructions or not. If it was not, the NMI exception
* code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
* assembly below forces us into microMIPS mode if we are a pure
* microMIPS kernel. The assembly instructions are:
*
* 3C1A8000 lui k0,0x8000
* 375A0381 ori k0,k0,0x381
* 03400008 jr k0
* 00000000 nop
*
* The mode switch occurs by jumping to the unaligned exception
* vector address at 0x80000381 which would have been 0x80000380
* in MIPS32 mode. The jump to the unaligned address transitions
* us into microMIPS mode.
*/
if (!cpu_has_veic) {
void *base2 = (void *)(CAC_BASE + 0xa80);
*((unsigned int *)base2) = 0x3c1a8000;
*((unsigned int *)base2 + 1) = 0x375a0381;
*((unsigned int *)base2 + 2) = 0x03400008;
*((unsigned int *)base2 + 3) = 0x00000000;
flush_icache_range((unsigned long)base2,
(unsigned long)base2 + 0x10);
}
#else
memcpy(base, &except_vec_nmi, 0x80); memcpy(base, &except_vec_nmi, 0x80);
#endif
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
} }
...@@ -76,7 +110,21 @@ static void __init mips_ejtag_setup(void) ...@@ -76,7 +110,21 @@ static void __init mips_ejtag_setup(void)
base = cpu_has_veic ? base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa00) : (void *)(CAC_BASE + 0xa00) :
(void *)(CAC_BASE + 0x300); (void *)(CAC_BASE + 0x300);
#ifdef CONFIG_CPU_MICROMIPS
/* Deja vu... */
memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
if (!cpu_has_veic) {
void *base2 = (void *)(CAC_BASE + 0xa00);
*((unsigned int *)base2) = 0x3c1a8000;
*((unsigned int *)base2 + 1) = 0x375a0301;
*((unsigned int *)base2 + 2) = 0x03400008;
*((unsigned int *)base2 + 3) = 0x00000000;
flush_icache_range((unsigned long)base2,
(unsigned long)base2 + 0x10);
}
#else
memcpy(base, &except_vec_ejtag_debug, 0x80); memcpy(base, &except_vec_ejtag_debug, 0x80);
#endif
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册