Commit bc5378fc authored by Max Filippov, committed by Chris Zankel

xtensa: reorganize SR referencing

- reference SRs by names where possible, not by numbers;
- get rid of __stringify around SR names where possible;
- remove unneeded SR names from asm/regs.h;
- add SREG_ prefix to remaining SR names;

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
Parent f4349b6e
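
The change is mechanical: wherever inline assembly or .S code named a special register through a numeric macro (usually wrapped in __stringify()), it now uses the lower-case register name that the assembler resolves itself. A minimal before/after sketch of the pattern, modeled on the irqflags helper further below (the function names here are illustrative, not part of the patch):

    #include <linux/stringify.h>

    /* Before: the SR reaches the assembler as its number (PS == 230 in
     * asm/regs.h), pasted into the template via __stringify(). */
    static inline void restore_ps_old(unsigned long flags)
    {
            asm volatile("wsr %0, "__stringify(PS)" ; rsync"
                         :: "a" (flags) : "memory");
    }

    /* After: the assembler accepts the named special register directly. */
    static inline void restore_ps_new(unsigned long flags)
    {
            asm volatile("wsr %0, ps; rsync"
                         :: "a" (flags) : "memory");
    }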
......@@ -51,17 +51,17 @@ _start:
/* 'reset' window registers */
movi a4, 1
wsr a4, PS
wsr a4, ps
rsync
rsr a5, WINDOWBASE
rsr a5, windowbase
ssl a5
sll a4, a4
wsr a4, WINDOWSTART
wsr a4, windowstart
rsync
movi a4, 0x00040000
wsr a4, PS
wsr a4, ps
rsync
/* copy the loader to its address
......
......@@ -73,7 +73,7 @@ static inline void atomic_add(int i, atomic_t * v)
"l32i %0, %2, 0 \n\t"
"add %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
......@@ -97,7 +97,7 @@ static inline void atomic_sub(int i, atomic_t *v)
"l32i %0, %2, 0 \n\t"
"sub %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
......@@ -118,7 +118,7 @@ static inline int atomic_add_return(int i, atomic_t * v)
"l32i %0, %2, 0 \n\t"
"add %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
......@@ -137,7 +137,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
"l32i %0, %2, 0 \n\t"
"sub %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
......@@ -260,7 +260,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
"xor %1, %4, %3 \n\t"
"and %0, %0, %4 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval), "=a" (mask)
: "a" (v), "a" (all_f), "1" (mask)
......@@ -277,7 +277,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
"l32i %0, %2, 0 \n\t"
"or %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (mask), "a" (v)
......
......@@ -165,7 +165,7 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
static inline u32 xtensa_get_cacheattr(void)
{
u32 r;
asm volatile(" rsr %0, CACHEATTR" : "=a"(r));
asm volatile(" rsr %0, cacheattr" : "=a"(r));
return r;
}
......
......@@ -27,7 +27,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
"bne %0, %2, 1f \n\t"
"s32i %3, %1, 0 \n\t"
"1: \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"wsr a15, ps \n\t"
"rsync \n\t"
: "=&a" (old)
: "a" (p), "a" (old), "r" (new)
......@@ -97,7 +97,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %1, 0 \n\t"
"s32i %2, %1, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"wsr a15, ps \n\t"
"rsync \n\t"
: "=&a" (tmp)
: "a" (m), "a" (val)
......
......@@ -94,11 +94,10 @@
#if XCHAL_HAVE_CP
#define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
__asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" \
:: "a" (x)); \
__asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
} while(0);
#endif /* XCHAL_HAVE_CP */
......
......@@ -27,7 +27,7 @@ static inline void __delay(unsigned long loops)
static __inline__ u32 xtensa_get_ccount(void)
{
u32 ccount;
asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount));
asm volatile ("rsr %0, ccount\n" : "=r" (ccount));
return ccount;
}
......
......@@ -16,7 +16,7 @@
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile("rsr %0,"__stringify(PS) : "=a" (flags));
asm volatile("rsr %0, ps" : "=a" (flags));
return flags;
}
......@@ -41,7 +41,7 @@ static inline void arch_local_irq_enable(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
asm volatile("wsr %0, "__stringify(PS)" ; rsync"
asm volatile("wsr %0, ps; rsync"
:: "a" (flags) : "memory");
}
......
......@@ -51,14 +51,14 @@ extern unsigned long asid_cache;
static inline void set_rasid_register (unsigned long val)
{
__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
__asm__ __volatile__ (" wsr %0, rasid\n\t"
" isync\n" : : "a" (val));
}
static inline unsigned long get_rasid_register (void)
{
unsigned long tmp;
__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
return tmp;
}
......
......@@ -27,52 +27,15 @@
/* Special registers. */
#define LBEG 0
#define LEND 1
#define LCOUNT 2
#define SAR 3
#define BR 4
#define SCOMPARE1 12
#define ACCHI 16
#define ACCLO 17
#define MR 32
#define WINDOWBASE 72
#define WINDOWSTART 73
#define PTEVADDR 83
#define RASID 90
#define ITLBCFG 91
#define DTLBCFG 92
#define IBREAKENABLE 96
#define DDR 104
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
#define EPC 176
#define EPC_1 177
#define DEPC 192
#define EPS 192
#define EPS_1 193
#define EXCSAVE 208
#define EXCSAVE_1 209
#define INTERRUPT 226
#define INTENABLE 228
#define PS 230
#define THREADPTR 231
#define EXCCAUSE 232
#define DEBUGCAUSE 233
#define CCOUNT 234
#define PRID 235
#define ICOUNT 236
#define ICOUNTLEVEL 237
#define EXCVADDR 238
#define CCOMPARE 240
#define MISC_SR 244
/* Special names for read-only and write-only interrupt registers. */
#define INTREAD 226
#define INTSET 226
#define INTCLEAR 227
#define SREG_MR 32
#define SREG_IBREAKA 128
#define SREG_DBREAKA 144
#define SREG_DBREAKC 160
#define SREG_EPC 176
#define SREG_EPS 192
#define SREG_EXCSAVE 208
#define SREG_CCOMPARE 240
#define SREG_MISC 244
/* EXCCAUSE register fields */
......
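
Only the numeric definitions that still take part in register-number arithmetic keep a define, now under the SREG_ prefix: indexed or level-relative registers (DBREAKA/DBREAKC, IBREAKA, CCOMPARE, and EPC/EPS/EXCSAVE at a given level) are addressed as base + offset, which a bare name cannot express. A hedged sketch of that use, assuming XCHAL_DEBUGLEVEL comes from the core configuration header (the helper name is illustrative):

    #include <linux/stringify.h>

    /* The register number is SREG_EPS + XCHAL_DEBUGLEVEL, evaluated by the
     * assembler, so the numeric base from asm/regs.h is still needed. */
    static inline unsigned long get_debug_eps(void)
    {
            unsigned long v;
            asm volatile("rsr %0, "__stringify(SREG_EPS)"+"__stringify(XCHAL_DEBUGLEVEL)
                         : "=a" (v));
            return v;
    }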
......@@ -63,10 +63,10 @@ extern cycles_t cacheflush_time;
* Register access.
*/
#define WSR_CCOUNT(r) asm volatile ("wsr %0,"__stringify(CCOUNT) :: "a" (r))
#define RSR_CCOUNT(r) asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (r))
#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
#define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
#define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
static inline unsigned long get_ccount (void)
{
......
......@@ -86,26 +86,26 @@ static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
static inline void set_itlbcfg_register (unsigned long val)
{
__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
__asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
: : "a" (val));
}
static inline void set_dtlbcfg_register (unsigned long val)
{
__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
__asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
: : "a" (val));
}
static inline void set_ptevaddr_register (unsigned long val)
{
__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
__asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
: : "a" (val));
}
static inline unsigned long read_ptevaddr_register (void)
{
unsigned long tmp;
__asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
__asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
return tmp;
}
......
......@@ -170,15 +170,15 @@ ENTRY(fast_unaligned)
s32i a7, a2, PT_AREG7
s32i a8, a2, PT_AREG8
rsr a0, DEPC
xsr a3, EXCSAVE_1
rsr a0, depc
xsr a3, excsave1
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
/* Keep value of SAR in a0 */
rsr a0, SAR
rsr a8, EXCVADDR # load unaligned memory address
rsr a0, sar
rsr a8, excvaddr # load unaligned memory address
/* Now, identify one of the following load/store instructions.
*
......@@ -197,7 +197,7 @@ ENTRY(fast_unaligned)
/* Extract the instruction that caused the unaligned access. */
rsr a7, EPC_1 # load exception address
rsr a7, epc1 # load exception address
movi a3, ~3
and a3, a3, a7 # mask lower bits
......@@ -275,16 +275,16 @@ ENTRY(fast_unaligned)
1:
#if XCHAL_HAVE_LOOPS
rsr a5, LEND # check if we reached LEND
rsr a5, lend # check if we reached LEND
bne a7, a5, 1f
rsr a5, LCOUNT # and LCOUNT != 0
rsr a5, lcount # and LCOUNT != 0
beqz a5, 1f
addi a5, a5, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN
wsr a5, LCOUNT
rsr a7, lbeg # set PC to LBEGIN
wsr a5, lcount
#endif
1: wsr a7, EPC_1 # skip load instruction
1: wsr a7, epc1 # skip load instruction
extui a4, a4, INSN_T, 4 # extract target register
movi a5, .Lload_table
addx8 a4, a4, a5
......@@ -355,16 +355,16 @@ ENTRY(fast_unaligned)
1:
#if XCHAL_HAVE_LOOPS
rsr a4, LEND # check if we reached LEND
rsr a4, lend # check if we reached LEND
bne a7, a4, 1f
rsr a4, LCOUNT # and LCOUNT != 0
rsr a4, lcount # and LCOUNT != 0
beqz a4, 1f
addi a4, a4, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN
wsr a4, LCOUNT
rsr a7, lbeg # set PC to LBEGIN
wsr a4, lcount
#endif
1: wsr a7, EPC_1 # skip store instruction
1: wsr a7, epc1 # skip store instruction
movi a4, ~3
and a4, a4, a8 # align memory address
......@@ -406,7 +406,7 @@ ENTRY(fast_unaligned)
.Lexit:
movi a4, 0
rsr a3, EXCSAVE_1
rsr a3, excsave1
s32i a4, a3, EXC_TABLE_FIXUP
/* Restore working register */
......@@ -420,7 +420,7 @@ ENTRY(fast_unaligned)
/* restore SAR and return */
wsr a0, SAR
wsr a0, sar
l32i a0, a2, PT_AREG0
l32i a2, a2, PT_AREG2
rfe
......@@ -438,10 +438,10 @@ ENTRY(fast_unaligned)
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
l32i a4, a2, PT_AREG4
wsr a0, SAR
wsr a0, sar
mov a1, a2
rsr a0, PS
rsr a0, ps
bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
movi a0, _kernel_exception
......
......@@ -43,7 +43,7 @@
/* IO protection is currently unsupported. */
ENTRY(fast_io_protect)
wsr a0, EXCSAVE_1
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
......@@ -220,7 +220,7 @@ ENTRY(coprocessor_restore)
*/
ENTRY(fast_coprocessor_double)
wsr a0, EXCSAVE_1
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
......@@ -229,13 +229,13 @@ ENTRY(fast_coprocessor)
/* Save remaining registers a1-a3 and SAR */
xsr a3, EXCSAVE_1
xsr a3, excsave1
s32i a3, a2, PT_AREG3
rsr a3, SAR
rsr a3, sar
s32i a1, a2, PT_AREG1
s32i a3, a2, PT_SAR
mov a1, a2
rsr a2, DEPC
rsr a2, depc
s32i a2, a1, PT_AREG2
/*
......@@ -248,17 +248,17 @@ ENTRY(fast_coprocessor)
/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
rsr a3, EXCCAUSE
rsr a3, exccause
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
ssl a3 # SAR: 32 - coprocessor_number
movi a2, 1
rsr a0, CPENABLE
rsr a0, cpenable
sll a2, a2
or a0, a0, a2
wsr a0, CPENABLE
wsr a0, cpenable
rsync
/* Retrieve previous owner. (a3 still holds CP number) */
......@@ -291,7 +291,7 @@ ENTRY(fast_coprocessor)
/* Note that only a0 and a1 were preserved. */
2: rsr a3, EXCCAUSE
2: rsr a3, exccause
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
movi a0, coprocessor_owner
addx4 a0, a3, a0
......@@ -321,7 +321,7 @@ ENTRY(fast_coprocessor)
l32i a0, a1, PT_SAR
l32i a3, a1, PT_AREG3
l32i a2, a1, PT_AREG2
wsr a0, SAR
wsr a0, sar
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
......
......@@ -112,8 +112,8 @@ ENTRY(user_exception)
/* Save a2, a3, and depc, restore excsave_1 and set SP. */
xsr a3, EXCSAVE_1
rsr a0, DEPC
xsr a3, excsave1
rsr a0, depc
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
......@@ -125,16 +125,16 @@ _user_exception:
/* Save SAR and turn off single stepping */
movi a2, 0
rsr a3, SAR
xsr a2, ICOUNTLEVEL
rsr a3, sar
xsr a2, icountlevel
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
rsr a2, WINDOWBASE
rsr a3, WINDOWSTART
rsr a2, windowbase
rsr a3, windowstart
ssr a2
s32i a2, a1, PT_WINDOWBASE
s32i a3, a1, PT_WINDOWSTART
......@@ -205,12 +205,12 @@ _user_exception:
/* WINDOWBASE still in SAR! */
rsr a2, SAR # original WINDOWBASE
rsr a2, sar # original WINDOWBASE
movi a3, 1
ssl a2
sll a3, a3
wsr a3, WINDOWSTART # set corresponding WINDOWSTART bit
wsr a2, WINDOWBASE # and WINDOWSTART
wsr a3, windowstart # set corresponding WINDOWSTART bit
wsr a2, windowbase # and WINDOWSTART
rsync
/* We are back to the original stack pointer (a1) */
......@@ -252,8 +252,8 @@ ENTRY(kernel_exception)
/* Save a0, a2, a3, DEPC and set SP. */
xsr a3, EXCSAVE_1 # restore a3, excsave_1
rsr a0, DEPC # get a2
xsr a3, excsave1 # restore a3, excsave_1
rsr a0, depc # get a2
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
......@@ -265,16 +265,16 @@ _kernel_exception:
/* Save SAR and turn off single stepping */
movi a2, 0
rsr a3, SAR
xsr a2, ICOUNTLEVEL
rsr a3, sar
xsr a2, icountlevel
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
rsr a2, WINDOWBASE # don't need to save these, we only
rsr a3, WINDOWSTART # need shifted windowstart: windowmask
rsr a2, windowbase # don't need to save these, we only
rsr a3, windowstart # need shifted windowstart: windowmask
ssr a2
slli a2, a3, 32-WSBITS
src a2, a3, a2
......@@ -323,24 +323,24 @@ common_exception:
/* Save some registers, disable loops and clear the syscall flag. */
rsr a2, DEBUGCAUSE
rsr a3, EPC_1
rsr a2, debugcause
rsr a3, epc1
s32i a2, a1, PT_DEBUGCAUSE
s32i a3, a1, PT_PC
movi a2, -1
rsr a3, EXCVADDR
rsr a3, excvaddr
s32i a2, a1, PT_SYSCALL
movi a2, 0
s32i a3, a1, PT_EXCVADDR
xsr a2, LCOUNT
xsr a2, lcount
s32i a2, a1, PT_LCOUNT
/* It is now save to restore the EXC_TABLE_FIXUP variable. */
rsr a0, EXCCAUSE
rsr a0, exccause
movi a3, 0
rsr a2, EXCSAVE_1
rsr a2, excsave1
s32i a0, a1, PT_EXCCAUSE
s32i a3, a2, EXC_TABLE_FIXUP
......@@ -352,22 +352,22 @@ common_exception:
* (interrupts disabled) and if this exception is not an interrupt.
*/
rsr a3, PS
rsr a3, ps
addi a0, a0, -4
movi a2, 1
extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
rsr a0, EXCCAUSE
xsr a3, PS
rsr a0, exccause
xsr a3, ps
s32i a3, a1, PT_PS # save ps
/* Save LBEG, LEND */
/* Save lbeg, lend */
rsr a2, LBEG
rsr a3, LEND
rsr a2, lbeg
rsr a3, lend
s32i a2, a1, PT_LBEG
s32i a3, a1, PT_LEND
......@@ -432,7 +432,7 @@ common_exception_return:
load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
wsr a3, PS /* disable interrupts */
wsr a3, ps /* disable interrupts */
_bbci.l a3, PS_UM_BIT, kernel_exception_exit
......@@ -444,12 +444,12 @@ user_exception_exit:
l32i a2, a1, PT_WINDOWBASE
l32i a3, a1, PT_WINDOWSTART
wsr a1, DEPC # use DEPC as temp storage
wsr a3, WINDOWSTART # restore WINDOWSTART
wsr a1, depc # use DEPC as temp storage
wsr a3, windowstart # restore WINDOWSTART
ssr a2 # preserve user's WB in the SAR
wsr a2, WINDOWBASE # switch to user's saved WB
wsr a2, windowbase # switch to user's saved WB
rsync
rsr a1, DEPC # restore stack pointer
rsr a1, depc # restore stack pointer
l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
rotw -1 # we restore a4..a7
_bltui a6, 16, 1f # only have to restore current window?
......@@ -475,8 +475,8 @@ user_exception_exit:
/* Clear unrestored registers (don't leak anything to user-land */
1: rsr a0, WINDOWBASE
rsr a3, SAR
1: rsr a0, windowbase
rsr a3, sar
sub a3, a0, a3
beqz a3, 2f
extui a3, a3, 0, WBBITS
......@@ -556,7 +556,7 @@ kernel_exception_exit:
/* Test WINDOWSTART now. If spilled, do the movsp */
rsr a3, WINDOWSTART
rsr a3, windowstart
addi a0, a3, -1
and a3, a3, a0
_bnez a3, common_exception_exit
......@@ -604,24 +604,24 @@ common_exception_exit:
1: l32i a2, a1, PT_PC
l32i a3, a1, PT_SAR
wsr a2, EPC_1
wsr a3, SAR
wsr a2, epc1
wsr a3, sar
/* Restore LBEG, LEND, LCOUNT */
l32i a2, a1, PT_LBEG
l32i a3, a1, PT_LEND
wsr a2, LBEG
wsr a2, lbeg
l32i a2, a1, PT_LCOUNT
wsr a3, LEND
wsr a2, LCOUNT
wsr a3, lend
wsr a2, lcount
/* We control single stepping through the ICOUNTLEVEL register. */
l32i a2, a1, PT_ICOUNTLEVEL
movi a3, -2
wsr a2, ICOUNTLEVEL
wsr a3, ICOUNT
wsr a2, icountlevel
wsr a3, icount
/* Check if it was double exception. */
......@@ -636,7 +636,7 @@ common_exception_exit:
l32i a1, a1, PT_AREG1
rfe
1: wsr a0, DEPC
1: wsr a0, depc
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
rfde
......@@ -651,25 +651,25 @@ common_exception_exit:
ENTRY(debug_exception)
rsr a0, EPS + XCHAL_DEBUGLEVEL
rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
/* Set EPC_1 and EXCCAUSE */
/* Set EPC1 and EXCCAUSE */
wsr a2, DEPC # save a2 temporarily
rsr a2, EPC + XCHAL_DEBUGLEVEL
wsr a2, EPC_1
wsr a2, depc # save a2 temporarily
rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
wsr a2, epc1
movi a2, EXCCAUSE_MAPPED_DEBUG
wsr a2, EXCCAUSE
wsr a2, exccause
/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
movi a2, 1 << PS_EXCM_BIT
or a2, a0, a2
movi a0, debug_exception # restore a3, debug jump vector
wsr a2, PS
xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
wsr a2, ps
xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
/* Switch to kernel/user stack, restore jump vector, and save a0 */
......@@ -680,19 +680,19 @@ ENTRY(debug_exception)
movi a0, 0
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_DEPC # mark it as a regular exception
xsr a0, DEPC
xsr a0, depc
s32i a3, a2, PT_AREG3
s32i a0, a2, PT_AREG2
mov a1, a2
j _kernel_exception
2: rsr a2, EXCSAVE_1
2: rsr a2, excsave1
l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
s32i a0, a2, PT_AREG0
movi a0, 0
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_DEPC
xsr a0, DEPC
xsr a0, depc
s32i a3, a2, PT_AREG3
s32i a0, a2, PT_AREG2
mov a1, a2
......@@ -732,12 +732,12 @@ ENTRY(unrecoverable_exception)
movi a0, 1
movi a1, 0
wsr a0, WINDOWSTART
wsr a1, WINDOWBASE
wsr a0, windowstart
wsr a1, windowbase
rsync
movi a1, (1 << PS_WOE_BIT) | 1
wsr a1, PS
wsr a1, ps
rsync
movi a1, init_task
......@@ -793,7 +793,7 @@ ENTRY(fast_alloca)
l32i a0, a2, PT_DEPC
_bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
rsr a0, DEPC # get a2
rsr a0, depc # get a2
s32i a4, a2, PT_AREG4 # save a4 and
s32i a0, a2, PT_AREG2 # a2 to stack
......@@ -804,8 +804,8 @@ ENTRY(fast_alloca)
/* Restore a3, excsave_1 */
xsr a3, EXCSAVE_1 # make sure excsave_1 is valid for dbl.
rsr a4, EPC_1 # get exception address
xsr a3, excsave1 # make sure excsave_1 is valid for dbl.
rsr a4, epc1 # get exception address
s32i a3, a2, PT_AREG3 # save a3 to stack
#ifdef ALLOCA_EXCEPTION_IN_IRAM
......@@ -820,7 +820,7 @@ ENTRY(fast_alloca)
jx a3
.Lunhandled_double:
wsr a0, EXCSAVE_1
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
......@@ -852,7 +852,7 @@ ENTRY(fast_alloca)
#endif
addi a4, a4, 3 # step over movsp
_EXTUI_MOVSP_DST(a0) # extract destination register
wsr a4, EPC_1 # save new epc_1
wsr a4, epc1 # save new epc_1
_bnei a0, 1, 1f # no 'movsp a1, ax': jump
......@@ -953,14 +953,14 @@ ENTRY(fast_syscall_kernel)
/* Skip syscall. */
rsr a0, EPC_1
rsr a0, epc1
addi a0, a0, 3
wsr a0, EPC_1
wsr a0, epc1
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
rsr a0, DEPC # get syscall-nr
rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers
_beqi a0, __NR_xtensa, fast_syscall_xtensa
......@@ -970,14 +970,14 @@ ENTRY(fast_syscall_user)
/* Skip syscall. */
rsr a0, EPC_1
rsr a0, epc1
addi a0, a0, 3
wsr a0, EPC_1
wsr a0, epc1
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
rsr a0, DEPC # get syscall-nr
rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers
_beqi a0, __NR_xtensa, fast_syscall_xtensa
......@@ -988,10 +988,10 @@ ENTRY(fast_syscall_unrecoverable)
/* Restore all states. */
l32i a0, a2, PT_AREG0 # restore a0
xsr a2, DEPC # restore a2, depc
rsr a3, EXCSAVE_1
xsr a2, depc # restore a2, depc
rsr a3, excsave1
wsr a0, EXCSAVE_1
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
......@@ -1047,7 +1047,7 @@ ENTRY(fast_syscall_unrecoverable)
ENTRY(fast_syscall_xtensa)
xsr a3, EXCSAVE_1 # restore a3, excsave1
xsr a3, excsave1 # restore a3, excsave1
s32i a7, a2, PT_AREG7 # we need an additional register
movi a7, 4 # sizeof(unsigned int)
......@@ -1124,13 +1124,13 @@ ENTRY(fast_syscall_spill_registers)
movi a0, fast_syscall_spill_registers_fixup
s32i a0, a3, EXC_TABLE_FIXUP
rsr a0, WINDOWBASE
rsr a0, windowbase
s32i a0, a3, EXC_TABLE_PARAM
/* Save a3 and SAR on stack. */
rsr a0, SAR
xsr a3, EXCSAVE_1 # restore a3 and excsave_1
rsr a0, sar
xsr a3, excsave1 # restore a3 and excsave_1
s32i a3, a2, PT_AREG3
s32i a4, a2, PT_AREG4
s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
......@@ -1148,7 +1148,7 @@ ENTRY(fast_syscall_spill_registers)
l32i a3, a2, PT_AREG5
l32i a4, a2, PT_AREG4
l32i a0, a2, PT_AREG0
wsr a3, SAR
wsr a3, sar
l32i a3, a2, PT_AREG3
/* Restore clobbered registers. */
......@@ -1173,8 +1173,8 @@ ENTRY(fast_syscall_spill_registers)
fast_syscall_spill_registers_fixup:
rsr a2, WINDOWBASE # get current windowbase (a2 is saved)
xsr a0, DEPC # restore depc and a0
rsr a2, windowbase # get current windowbase (a2 is saved)
xsr a0, depc # restore depc and a0
ssl a2 # set shift (32 - WB)
/* We need to make sure the current registers (a0-a3) are preserved.
......@@ -1182,12 +1182,12 @@ fast_syscall_spill_registers_fixup:
* in WS, so that the exception handlers save them to the task stack.
*/
rsr a3, EXCSAVE_1 # get spill-mask
rsr a3, excsave1 # get spill-mask
slli a2, a3, 1 # shift left by one
slli a3, a2, 32-WSBITS
src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy......
wsr a2, WINDOWSTART # set corrected windowstart
wsr a2, windowstart # set corrected windowstart
movi a3, exc_table
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
......@@ -1201,7 +1201,7 @@ fast_syscall_spill_registers_fixup:
* excsave_1: a3
*/
wsr a3, WINDOWBASE
wsr a3, windowbase
rsync
/* We are now in the original frame when we entered _spill_registers:
......@@ -1227,7 +1227,7 @@ fast_syscall_spill_registers_fixup:
/* Jump to the exception handler. */
movi a3, exc_table
rsr a0, EXCCAUSE
rsr a0, exccause
addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
jx a0
......@@ -1236,28 +1236,28 @@ fast_syscall_spill_registers_fixup_return:
/* When we return here, all registers have been restored (a2: DEPC) */
wsr a2, DEPC # exception address
wsr a2, depc # exception address
/* Restore fixup handler. */
xsr a3, EXCSAVE_1
xsr a3, excsave1
movi a2, fast_syscall_spill_registers_fixup
s32i a2, a3, EXC_TABLE_FIXUP
rsr a2, WINDOWBASE
rsr a2, windowbase
s32i a2, a3, EXC_TABLE_PARAM
l32i a2, a3, EXC_TABLE_KSTK
/* Load WB at the time the exception occurred. */
rsr a3, SAR # WB is still in SAR
rsr a3, sar # WB is still in SAR
neg a3, a3
wsr a3, WINDOWBASE
wsr a3, windowbase
rsync
/* Restore a3 and return. */
movi a3, exc_table
xsr a3, EXCSAVE_1
xsr a3, excsave1
rfde
......@@ -1283,8 +1283,8 @@ ENTRY(_spill_registers)
* Rotate ws right so that a4 = yyxxxwww1.
*/
rsr a4, WINDOWBASE
rsr a3, WINDOWSTART # a3 = xxxwww1yy
rsr a4, windowbase
rsr a3, windowstart # a3 = xxxwww1yy
ssr a4 # holds WB
slli a4, a3, WSBITS
or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
......@@ -1302,7 +1302,7 @@ ENTRY(_spill_registers)
/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
wsr a3, WINDOWSTART # save shifted windowstart
wsr a3, windowstart # save shifted windowstart
neg a4, a3
and a3, a4, a3 # first bit set from right: 000010000
......@@ -1311,12 +1311,12 @@ ENTRY(_spill_registers)
sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
ssr a4 # save in SAR for later.
rsr a3, WINDOWBASE
rsr a3, windowbase
add a3, a3, a4
wsr a3, WINDOWBASE
wsr a3, windowbase
rsync
rsr a3, WINDOWSTART
rsr a3, windowstart
srl a3, a3 # shift windowstart
/* WB is now just one frame below the oldest frame in the register
......@@ -1364,11 +1364,11 @@ ENTRY(_spill_registers)
.Lexit: /* Done. Do the final rotation, set WS, and return. */
rotw 1
rsr a3, WINDOWBASE
rsr a3, windowbase
ssl a3
movi a3, 1
sll a3, a3
wsr a3, WINDOWSTART
wsr a3, windowstart
ret
.Lc4: s32e a4, a9, -16
......@@ -1429,7 +1429,7 @@ ENTRY(_spill_registers)
* however, this condition is unrecoverable in kernel space.
*/
rsr a0, PS
rsr a0, ps
_bbci.l a0, PS_UM_BIT, 1f
/* User space: Setup a dummy frame and kill application.
......@@ -1439,18 +1439,18 @@ ENTRY(_spill_registers)
movi a0, 1
movi a1, 0
wsr a0, WINDOWSTART
wsr a1, WINDOWBASE
wsr a0, windowstart
wsr a1, windowbase
rsync
movi a0, 0
movi a3, exc_table
l32i a1, a3, EXC_TABLE_KSTK
wsr a3, EXCSAVE_1
wsr a3, excsave1
movi a4, (1 << PS_WOE_BIT) | 1
wsr a4, PS
wsr a4, ps
rsync
movi a6, SIGSEGV
......@@ -1459,7 +1459,7 @@ ENTRY(_spill_registers)
1: /* Kernel space: PANIC! */
wsr a0, EXCSAVE_1
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0 # should not return
1: j 1b
......@@ -1524,7 +1524,7 @@ ENTRY(fast_second_level_miss)
/* We deliberately destroy a3 that holds the exception table. */
8: rsr a3, EXCVADDR # fault address
8: rsr a3, excvaddr # fault address
_PGD_OFFSET(a0, a3, a1)
l32i a0, a0, 0 # read pmdval
beqz a0, 2f
......@@ -1561,7 +1561,7 @@ ENTRY(fast_second_level_miss)
*/
extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
rsr a1, PTEVADDR
rsr a1, ptevaddr
addx2 a3, a3, a3 # -> 0,3,6,9
srli a1, a1, PAGE_SHIFT
extui a3, a3, 2, 2 # -> 0,0,1,2
......@@ -1583,18 +1583,18 @@ ENTRY(fast_second_level_miss)
l32i a0, a2, PT_AREG0
l32i a1, a2, PT_AREG1
l32i a2, a2, PT_DEPC
xsr a3, EXCSAVE_1
xsr a3, excsave1
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
/* Restore excsave1 and return. */
rsr a2, DEPC
rsr a2, depc
rfe
/* Return from double exception. */
1: xsr a2, DEPC
1: xsr a2, depc
esync
rfde
......@@ -1618,7 +1618,7 @@ ENTRY(fast_second_level_miss)
/* Make sure the exception originated in the special functions */
movi a0, __tlbtemp_mapping_start
rsr a3, EPC_1
rsr a3, epc1
bltu a3, a0, 2f
movi a0, __tlbtemp_mapping_end
bgeu a3, a0, 2f
......@@ -1626,7 +1626,7 @@ ENTRY(fast_second_level_miss)
/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
movi a3, TLBTEMP_BASE_1
rsr a0, EXCVADDR
rsr a0, excvaddr
bltu a0, a3, 2f
addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
......@@ -1635,7 +1635,7 @@ ENTRY(fast_second_level_miss)
/* Check if we have to restore an ITLB mapping. */
movi a1, __tlbtemp_mapping_itlb
rsr a3, EPC_1
rsr a3, epc1
sub a3, a3, a1
/* Calculate VPN */
......@@ -1671,13 +1671,13 @@ ENTRY(fast_second_level_miss)
2: /* Invalid PGD, default exception handling */
movi a3, exc_table
rsr a1, DEPC
xsr a3, EXCSAVE_1
rsr a1, depc
xsr a3, excsave1
s32i a1, a2, PT_AREG2
s32i a3, a2, PT_AREG3
mov a1, a2
rsr a2, PS
rsr a2, ps
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
......@@ -1712,7 +1712,7 @@ ENTRY(fast_store_prohibited)
l32i a0, a1, TASK_MM # tsk->mm
beqz a0, 9f
8: rsr a1, EXCVADDR # fault address
8: rsr a1, excvaddr # fault address
_PGD_OFFSET(a0, a1, a4)
l32i a0, a0, 0
beqz a0, 2f
......@@ -1725,7 +1725,7 @@ ENTRY(fast_store_prohibited)
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
or a4, a4, a1
rsr a1, EXCVADDR
rsr a1, excvaddr
s32i a4, a0, 0
/* We need to flush the cache if we have page coloring. */
......@@ -1749,15 +1749,15 @@ ENTRY(fast_store_prohibited)
/* Restore excsave1 and a3. */
xsr a3, EXCSAVE_1
xsr a3, excsave1
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
rsr a2, DEPC
rsr a2, depc
rfe
/* Double exception. Restore FIXUP handler and return. */
1: xsr a2, DEPC
1: xsr a2, depc
esync
rfde
......@@ -1766,14 +1766,14 @@ ENTRY(fast_store_prohibited)
2: /* If there was a problem, handle fault in C */
rsr a4, DEPC # still holds a2
xsr a3, EXCSAVE_1
rsr a4, depc # still holds a2
xsr a3, excsave1
s32i a4, a2, PT_AREG2
s32i a3, a2, PT_AREG3
l32i a4, a2, PT_AREG4
mov a1, a2
rsr a2, PS
rsr a2, ps
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
......@@ -1901,8 +1901,8 @@ ENTRY(_switch_to)
/* Disable ints while we manipulate the stack pointer. */
movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
xsr a14, PS
rsr a3, EXCSAVE_1
xsr a14, ps
rsr a3, excsave1
rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
......@@ -1910,7 +1910,7 @@ ENTRY(_switch_to)
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
l32i a3, a5, THREAD_CPENABLE
xsr a3, CPENABLE
xsr a3, cpenable
s32i a3, a4, THREAD_CPENABLE
#endif
......@@ -1924,7 +1924,7 @@ ENTRY(_switch_to)
* we return from kernel space.
*/
rsr a3, EXCSAVE_1 # exc_table
rsr a3, excsave1 # exc_table
movi a6, 0
addi a7, a5, PT_REGS_OFFSET
s32i a6, a3, EXC_TABLE_FIXUP
......@@ -1937,7 +1937,7 @@ ENTRY(_switch_to)
load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
wsr a14, PS
wsr a14, ps
mov a2, a12 # return 'prev'
rsync
......
......@@ -61,18 +61,18 @@ _startup:
/* Disable interrupts and exceptions. */
movi a0, LOCKLEVEL
wsr a0, PS
wsr a0, ps
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
wsr a2, EXCSAVE_1
wsr a2, excsave1
/* Start with a fresh windowbase and windowstart. */
movi a1, 1
movi a0, 0
wsr a1, WINDOWSTART
wsr a0, WINDOWBASE
wsr a1, windowstart
wsr a0, windowbase
rsync
/* Set a0 to 0 for the remaining initialization. */
......@@ -82,46 +82,46 @@ _startup:
/* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG
wsr a0, IBREAKENABLE
wsr a0, ICOUNT
wsr a0, ibreakenable
wsr a0, icount
movi a1, 15
wsr a0, ICOUNTLEVEL
wsr a0, icountlevel
.set _index, 0
.rept XCHAL_NUM_DBREAK - 1
wsr a0, DBREAKC + _index
wsr a0, SREG_DBREAKC + _index
.set _index, _index + 1
.endr
#endif
/* Clear CCOUNT (not really necessary, but nice) */
wsr a0, CCOUNT # not really necessary, but nice
wsr a0, ccount # not really necessary, but nice
/* Disable zero-loops. */
#if XCHAL_HAVE_LOOPS
wsr a0, LCOUNT
wsr a0, lcount
#endif
/* Disable all timers. */
.set _index, 0
.rept XCHAL_NUM_TIMERS - 1
wsr a0, CCOMPARE + _index
wsr a0, SREG_CCOMPARE + _index
.set _index, _index + 1
.endr
/* Interrupt initialization. */
movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
wsr a0, INTENABLE
wsr a2, INTCLEAR
wsr a0, intenable
wsr a2, intclear
/* Disable coprocessors. */
#if XCHAL_CP_NUM > 0
wsr a0, CPENABLE
wsr a0, cpenable
#endif
/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
......@@ -132,7 +132,7 @@ _startup:
*/
movi a1, 1
wsr a1, PS
wsr a1, ps
rsync
/* Initialize the caches.
......@@ -206,18 +206,18 @@ _startup:
addi a1, a1, KERNEL_STACK_SIZE
movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
wsr a2, PS # (enable reg-windows; progmode stack)
wsr a2, ps # (enable reg-windows; progmode stack)
rsync
/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
movi a2, debug_exception
wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
/* Set up EXCSAVE[1] to point to the exc_table. */
movi a6, exc_table
xsr a6, EXCSAVE_1
xsr a6, excsave1
/* init_arch kick-starts the linux kernel */
......
......@@ -72,13 +72,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
static void xtensa_irq_mask(struct irq_data *d)
{
cached_irq_mask &= ~(1 << d->irq);
set_sr (cached_irq_mask, INTENABLE);
set_sr (cached_irq_mask, intenable);
}
static void xtensa_irq_unmask(struct irq_data *d)
{
cached_irq_mask |= 1 << d->irq;
set_sr (cached_irq_mask, INTENABLE);
set_sr (cached_irq_mask, intenable);
}
static void xtensa_irq_enable(struct irq_data *d)
......@@ -95,7 +95,7 @@ static void xtensa_irq_disable(struct irq_data *d)
static void xtensa_irq_ack(struct irq_data *d)
{
set_sr(1 << d->irq, INTCLEAR);
set_sr(1 << d->irq, intclear);
}
static int xtensa_irq_retrigger(struct irq_data *d)
......
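
The C users in irq.c go through set_sr()/get_sr(), which paste their register argument straight into the asm template, so a lower-case name like intenable or intclear travels to the assembler unchanged (a numeric macro would simply have been expanded to its value first). A simplified model of that helper, stated as an assumption about its general shape rather than a copy of asm/processor.h:

    #include <linux/stringify.h>

    /* Illustrative model only: the second argument is stringified verbatim,
     * so both set_sr(m, 228) and set_sr(m, intenable) assemble. */
    #define set_sr(x, sr)                                                \
            ({ unsigned int __v = (unsigned int)(x);                     \
               asm volatile("wsr %0, "__stringify(sr) :: "a" (__v)); })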
......@@ -202,8 +202,8 @@ extern void do_IRQ(int, struct pt_regs *);
void do_interrupt (struct pt_regs *regs)
{
unsigned long intread = get_sr (INTREAD);
unsigned long intenable = get_sr (INTENABLE);
unsigned long intread = get_sr (interrupt);
unsigned long intenable = get_sr (intenable);
int i, mask;
/* Handle all interrupts (no priorities).
......@@ -213,7 +213,7 @@ void do_interrupt (struct pt_regs *regs)
for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
if (mask & (intread & intenable)) {
set_sr (mask, INTCLEAR);
set_sr (mask, intclear);
do_IRQ (i,regs);
}
}
......@@ -339,7 +339,7 @@ void __init trap_init(void)
/* Initialize EXCSAVE_1 to hold the address of the exception table. */
i = (unsigned long)exc_table;
__asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i));
}
/*
......@@ -386,16 +386,16 @@ static inline void spill_registers(void)
unsigned int a0, ps;
__asm__ __volatile__ (
"movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
"movi a14, " __stringify(PS_EXCM_BIT | 1) "\n\t"
"mov a12, a0\n\t"
"rsr a13," __stringify(SAR) "\n\t"
"xsr a14," __stringify(PS) "\n\t"
"rsr a13, sar\n\t"
"xsr a14, ps\n\t"
"movi a0, _spill_registers\n\t"
"rsync\n\t"
"callx0 a0\n\t"
"mov a0, a12\n\t"
"wsr a13," __stringify(SAR) "\n\t"
"wsr a14," __stringify(PS) "\n\t"
"wsr a13, sar\n\t"
"wsr a14, ps\n\t"
:: "a" (&a0), "a" (&ps)
: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
}
......
......@@ -69,11 +69,11 @@
ENTRY(_UserExceptionVector)
xsr a3, EXCSAVE_1 # save a3 and get dispatch table
wsr a2, DEPC # save a2
xsr a3, excsave1 # save a3 and get dispatch table
wsr a2, depc # save a2
l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
s32i a0, a2, PT_AREG0 # save a0 to ESF
rsr a0, EXCCAUSE # retrieve exception cause
rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
......@@ -93,11 +93,11 @@ ENTRY(_UserExceptionVector)
ENTRY(_KernelExceptionVector)
xsr a3, EXCSAVE_1 # save a3, and get dispatch table
wsr a2, DEPC # save a2
xsr a3, excsave1 # save a3, and get dispatch table
wsr a2, depc # save a2
addi a2, a1, -16-PT_SIZE # adjust stack pointer
s32i a0, a2, PT_AREG0 # save a0 to ESF
rsr a0, EXCCAUSE # retrieve exception cause
rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
......@@ -205,17 +205,17 @@ ENTRY(_DoubleExceptionVector)
/* Deliberately destroy excsave (don't assume it's value was valid). */
wsr a3, EXCSAVE_1 # save a3
wsr a3, excsave1 # save a3
/* Check for kernel double exception (usually fatal). */
rsr a3, PS
rsr a3, ps
_bbci.l a3, PS_UM_BIT, .Lksp
/* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */
xsr a0, DEPC # get DEPC, save a0
xsr a0, depc # get DEPC, save a0
movi a3, XCHAL_WINDOW_VECTORS_VADDR
_bltu a0, a3, .Lfixup
......@@ -243,21 +243,21 @@ ENTRY(_DoubleExceptionVector)
* Note: We can trash the current window frame (a0...a3) and depc!
*/
wsr a2, DEPC # save stack pointer temporarily
rsr a0, PS
wsr a2, depc # save stack pointer temporarily
rsr a0, ps
extui a0, a0, PS_OWB_SHIFT, 4
wsr a0, WINDOWBASE
wsr a0, windowbase
rsync
/* We are now in the previous window frame. Save registers again. */
xsr a2, DEPC # save a2 and get stack pointer
xsr a2, depc # save a2 and get stack pointer
s32i a0, a2, PT_AREG0
wsr a3, EXCSAVE_1 # save a3
wsr a3, excsave1 # save a3
movi a3, exc_table
rsr a0, EXCCAUSE
rsr a0, exccause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER
......@@ -290,14 +290,14 @@ ENTRY(_DoubleExceptionVector)
/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
xsr a3, DEPC
xsr a3, depc
s32i a0, a2, PT_DEPC
s32i a3, a2, PT_AREG0
/* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
movi a3, exc_table
rsr a0, EXCCAUSE
rsr a0, exccause
addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER
jx a0
......@@ -312,7 +312,7 @@ ENTRY(_DoubleExceptionVector)
.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
rsr a3, EXCCAUSE
rsr a3, exccause
beqi a3, EXCCAUSE_ITLB_MISS, 1f
addi a3, a3, -EXCCAUSE_DTLB_MISS
bnez a3, .Lunrecoverable
......@@ -328,11 +328,11 @@ ENTRY(_DoubleExceptionVector)
.Lunrecoverable_fixup:
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a0, DEPC
xsr a0, depc
.Lunrecoverable:
rsr a3, EXCSAVE_1
wsr a0, EXCSAVE_1
rsr a3, excsave1
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
......@@ -349,7 +349,7 @@ ENTRY(_DoubleExceptionVector)
.section .DebugInterruptVector.text, "ax"
ENTRY(_DebugInterruptVector)
xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
jx a0
......
......@@ -61,13 +61,13 @@ void platform_restart(void)
* jump to the reset vector. */
__asm__ __volatile__("movi a2, 15\n\t"
"wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
"wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
"wsr a2, " __stringify(ICOUNT) "\n\t"
"wsr a2, " __stringify(IBREAKENABLE) "\n\t"
"wsr a2, " __stringify(LCOUNT) "\n\t"
"wsr a2, icount\n\t"
"wsr a2, ibreakenable\n\t"
"wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
"wsr a2, " __stringify(PS) "\n\t"
"wsr a2, ps\n\t"
"isync\n\t"
"jx %0\n\t"
:
......
......@@ -66,13 +66,13 @@ void platform_restart(void)
* jump to the reset vector. */
__asm__ __volatile__ ("movi a2, 15\n\t"
"wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
"wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
"wsr a2, " __stringify(ICOUNT) "\n\t"
"wsr a2, " __stringify(IBREAKENABLE) "\n\t"
"wsr a2, " __stringify(LCOUNT) "\n\t"
"wsr a2, icount\n\t"
"wsr a2, ibreakenable\n\t"
"wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
"wsr a2, " __stringify(PS) "\n\t"
"wsr a2, ps\n\t"
"isync\n\t"
"jx %0\n\t"
:
......