提交 4da808c3 编写于 作者: David S. Miller 提交者: David S. Miller

[SPARC64]: Fix bogus flush instruction usage.

Some of the trap code was still assuming that alternate
global %g6 was hard coded with current_thread_info().
Let's just consistently flush at KERNBASE when we need
a pipeline synchronization.  That's locked into the TLB
and will always work.
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 4753eb2a
...@@ -72,7 +72,8 @@ etrap_irq: ...@@ -72,7 +72,8 @@ etrap_irq:
sethi %hi(sparc64_kern_pri_context), %g2 sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU stxa %g3, [%l4] ASI_DMMU
flush %l6 sethi %hi(KERNBASE), %l4
flush %l4
wr %g0, ASI_AIUS, %asi wr %g0, ASI_AIUS, %asi
2: wrpr %g0, 0x0, %tl 2: wrpr %g0, 0x0, %tl
mov %g4, %l4 mov %g4, %l4
...@@ -215,7 +216,8 @@ scetrap: ...@@ -215,7 +216,8 @@ scetrap:
sethi %hi(sparc64_kern_pri_context), %g2 sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU stxa %g3, [%l4] ASI_DMMU
flush %l6 sethi %hi(KERNBASE), %l4
flush %l4
mov ASI_AIUS, %l7 mov ASI_AIUS, %l7
2: mov %g4, %l4 2: mov %g4, %l4
......
...@@ -259,7 +259,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 ...@@ -259,7 +259,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
or %l0, %l1, %l0 or %l0, %l1, %l0
stxa %l0, [%l7] ASI_DMMU stxa %l0, [%l7] ASI_DMMU
flush %g6 sethi %hi(KERNBASE), %l7
flush %l7
rdpr %wstate, %l1 rdpr %wstate, %l1
rdpr %otherwin, %l2 rdpr %otherwin, %l2
srl %l1, 3, %l1 srl %l1, 3, %l1
......
...@@ -20,7 +20,8 @@ set_pcontext: ...@@ -20,7 +20,8 @@ set_pcontext:
ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1 ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1
mov PRIMARY_CONTEXT, %g1 mov PRIMARY_CONTEXT, %g1
stxa %l1, [%g1] ASI_DMMU stxa %l1, [%g1] ASI_DMMU
flush %g6 sethi %hi(KERNBASE), %l1
flush %l1
retl retl
nop nop
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/spitfire.h> #include <asm/spitfire.h>
#include <asm/head.h>
/* What we used to do was lock a TLB entry into a specific /* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then * TLB slot, clear the page with interrupts disabled, then
...@@ -66,7 +67,8 @@ clear_user_page: /* %o0=dest, %o1=vaddr */ ...@@ -66,7 +67,8 @@ clear_user_page: /* %o0=dest, %o1=vaddr */
wrpr %o4, PSTATE_IE, %pstate wrpr %o4, PSTATE_IE, %pstate
stxa %o0, [%g3] ASI_DMMU stxa %o0, [%g3] ASI_DMMU
stxa %g1, [%g0] ASI_DTLB_DATA_IN stxa %g1, [%g0] ASI_DTLB_DATA_IN
flush %g6 sethi %hi(KERNBASE), %g1
flush %g1
wrpr %o4, 0x0, %pstate wrpr %o4, 0x0, %pstate
mov 1, %o4 mov 1, %o4
......
...@@ -36,9 +36,10 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ ...@@ -36,9 +36,10 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
mov 0x50, %g3 mov 0x50, %g3
stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_DMMU_DEMAP
stxa %g0, [%g3] ASI_IMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP
sethi %hi(KERNBASE), %g3
flush %g3
retl retl
flush %g6 nop
nop
nop nop
nop nop
nop nop
...@@ -72,7 +73,8 @@ __flush_tlb_pending: ...@@ -72,7 +73,8 @@ __flush_tlb_pending:
brnz,pt %o1, 1b brnz,pt %o1, 1b
nop nop
stxa %g2, [%o4] ASI_DMMU stxa %g2, [%o4] ASI_DMMU
flush %g6 sethi %hi(KERNBASE), %o4
flush %o4
retl retl
wrpr %g7, 0x0, %pstate wrpr %g7, 0x0, %pstate
nop nop
...@@ -94,8 +96,10 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */ ...@@ -94,8 +96,10 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */
membar #Sync membar #Sync
brnz,pt %o3, 1b brnz,pt %o3, 1b
sub %o3, %o4, %o3 sub %o3, %o4, %o3
2: retl 2: sethi %hi(KERNBASE), %o3
flush %g6 flush %o3
retl
nop
__spitfire_flush_tlb_mm_slow: __spitfire_flush_tlb_mm_slow:
rdpr %pstate, %g1 rdpr %pstate, %g1
...@@ -105,7 +109,8 @@ __spitfire_flush_tlb_mm_slow: ...@@ -105,7 +109,8 @@ __spitfire_flush_tlb_mm_slow:
stxa %g0, [%g3] ASI_IMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP
flush %g6 flush %g6
stxa %g2, [%o1] ASI_DMMU stxa %g2, [%o1] ASI_DMMU
flush %g6 sethi %hi(KERNBASE), %o1
flush %o1
retl retl
wrpr %g1, 0, %pstate wrpr %g1, 0, %pstate
...@@ -181,7 +186,7 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ ...@@ -181,7 +186,7 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
.previous .previous
/* Cheetah specific versions, patched at boot time. */ /* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm: /* 18 insns */ __cheetah_flush_tlb_mm: /* 19 insns */
rdpr %pstate, %g7 rdpr %pstate, %g7
andn %g7, PSTATE_IE, %g2 andn %g7, PSTATE_IE, %g2
wrpr %g2, 0x0, %pstate wrpr %g2, 0x0, %pstate
...@@ -196,12 +201,13 @@ __cheetah_flush_tlb_mm: /* 18 insns */ ...@@ -196,12 +201,13 @@ __cheetah_flush_tlb_mm: /* 18 insns */
stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_DMMU_DEMAP
stxa %g0, [%g3] ASI_IMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP
stxa %g2, [%o2] ASI_DMMU stxa %g2, [%o2] ASI_DMMU
flush %g6 sethi %hi(KERNBASE), %o2
flush %o2
wrpr %g0, 0, %tl wrpr %g0, 0, %tl
retl retl
wrpr %g7, 0x0, %pstate wrpr %g7, 0x0, %pstate
__cheetah_flush_tlb_pending: /* 26 insns */ __cheetah_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7 rdpr %pstate, %g7
sllx %o1, 3, %o1 sllx %o1, 3, %o1
...@@ -225,7 +231,8 @@ __cheetah_flush_tlb_pending: /* 26 insns */ ...@@ -225,7 +231,8 @@ __cheetah_flush_tlb_pending: /* 26 insns */
brnz,pt %o1, 1b brnz,pt %o1, 1b
nop nop
stxa %g2, [%o4] ASI_DMMU stxa %g2, [%o4] ASI_DMMU
flush %g6 sethi %hi(KERNBASE), %o4
flush %o4
wrpr %g0, 0, %tl wrpr %g0, 0, %tl
retl retl
wrpr %g7, 0x0, %pstate wrpr %g7, 0x0, %pstate
...@@ -265,14 +272,14 @@ cheetah_patch_cachetlbops: ...@@ -265,14 +272,14 @@ cheetah_patch_cachetlbops:
sethi %hi(__cheetah_flush_tlb_mm), %o1 sethi %hi(__cheetah_flush_tlb_mm), %o1
or %o1, %lo(__cheetah_flush_tlb_mm), %o1 or %o1, %lo(__cheetah_flush_tlb_mm), %o1
call cheetah_patch_one call cheetah_patch_one
mov 18, %o2 mov 19, %o2
sethi %hi(__flush_tlb_pending), %o0 sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__cheetah_flush_tlb_pending), %o1 sethi %hi(__cheetah_flush_tlb_pending), %o1
or %o1, %lo(__cheetah_flush_tlb_pending), %o1 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
call cheetah_patch_one call cheetah_patch_one
mov 26, %o2 mov 27, %o2
#ifdef DCACHE_ALIASING_POSSIBLE #ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0 sethi %hi(__flush_dcache_page), %o0
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册