Commit ae8a8b95 authored by Will Deacon, committed by Russell King

ARM: 7691/1: mm: kill unused TLB_CAN_READ_FROM_L1_CACHE and use ALT_SMP instead

Many ARMv7 cores have hardware page table walkers that can read the L1
cache. This is discoverable from the ID_MMFR3 register, although this
can be expensive to access from the low-level set_pte functions and is a
pain to cache, particularly with multi-cluster systems.
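
For reference, the capability in question is advertised in the "coherent walk" field of ID_MMFR3, bits [23:20], where 0x1 means translation table updates do not require a D-cache clean. A minimal sketch of probing it; the CP15 encoding is architectural, everything else here is illustrative and not part of this patch:

	@ Sketch: read ID_MMFR3 and extract the coherent-walk field.
	mrc	p15, 0, r0, c0, c1, 7		@ read ID_MMFR3
	ubfx	r0, r0, #20, #4			@ bits [23:20]: coherent walk
	cmp	r0, #1				@ 1 => walker can read L1 cache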

A useful observation is that the multi-processing extensions for ARMv7
require coherent table walks, meaning that we can make use of ALT_SMP
patching in proc-v7-* to patch away the cache flush safely for these
cores.
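
Roughly, an ALT_SMP()/ALT_UP() pair assembles the SMP flavour of an instruction inline and records its address, together with the UP replacement, in the .alt.smp.init section; if an SMP kernel finds itself booting on a uniprocessor, early fixup code rewrites each recorded location. A condensed sketch of what one of the pairs below expands to (the real macros live in arch/arm/include/asm/assembler.h; this rendering is simplified):

	@ Simplified expansion of ALT_SMP(mov pc, lr) / ALT_UP(mcr ...).
9998:	mov	pc, lr				@ SMP variant, left in place
	.pushsection ".alt.smp.init", "a"
	.long	9998b				@ location to patch on UP
	mcr	p15, 0, r0, c7, c10, 1		@ UP replacement instruction
	.popsection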
Reported-by: Albin Tonnerre <Albin.Tonnerre@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent b0088480
arch/arm/include/asm/tlbflush.h
@@ -169,7 +169,7 @@
 # define v6wbi_always_flags	(-1UL)
 #endif
-#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \
 				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
 				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
......
arch/arm/mm/proc-v6.S
@@ -80,12 +80,10 @@ ENTRY(cpu_v6_do_idle)
 	mov	pc, lr

 ENTRY(cpu_v6_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	subs	r1, r1, #D_CACHE_LINE_SIZE
 	bhi	1b
-#endif
 	mov	pc, lr

 /*
......
arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,8 @@ ENTRY(cpu_v7_set_pte_ext)
  ARM(	str	r3, [r0, #2048]! )
  THUMB(	add	r0, r0, #2048 )
  THUMB(	str	r3, [r0] )
-	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+	ALT_SMP(mov	pc,lr)
+	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)	@ flush_pte
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
......
arch/arm/mm/proc-v7-3level.S
@@ -73,7 +73,8 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+	ALT_SMP(mov	pc, lr)
+	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)	@ flush_pte
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
......
arch/arm/mm/proc-v7.S
@@ -75,14 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)

 ENTRY(cpu_v7_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
+	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
+	ALT_UP(W(nop))
 	dcache_line_size r2, r3
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
 	bhi	1b
 	dsb
-#endif
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
......
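
For completeness, the records that the ALT_UP() lines above emit into .alt.smp.init are consumed once, during early boot. The sketch below shows the shape of that fixup pass; the real code is __do_fixup_smp_on_up in arch/arm/kernel/head.S, and the section-bound symbols and register choices here are hypothetical:

	@ Hypothetical sketch of the SMP-on-UP fixup loop: walk the
	@ (location, UP instruction) pairs and patch each site in place.
	adr	r4, __alt_begin			@ hypothetical start of records
	adr	r5, __alt_end			@ hypothetical end of records
1:	cmp	r4, r5
	bhs	2f
	ldmia	r4!, {r0, r6}			@ r0 = location, r6 = UP insn
	str	r6, [r0]			@ overwrite the SMP instruction
	b	1b
2:	mov	pc, lr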