提交 6790dae8 编写于 作者: C Christophe Leroy 提交者: Michael Ellerman

powerpc/hash32: use physical address directly in hash handlers.

Since commit c62ce9ef ("powerpc: remove remaining bits from
CONFIG_APUS"), tophys() has become a pure constant operation.
PAGE_OFFSET is known at compile time so the physical address
can be built in directly.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
上级 2c12393f
...@@ -47,14 +47,13 @@ mmu_hash_lock: ...@@ -47,14 +47,13 @@ mmu_hash_lock:
* Returns to the caller if the access is illegal or there is no * Returns to the caller if the access is illegal or there is no
* mapping for the address. Otherwise it places an appropriate PTE * mapping for the address. Otherwise it places an appropriate PTE
* in the hash table and returns from the exception. * in the hash table and returns from the exception.
* Uses r0, r3 - r8, r10, ctr, lr. * Uses r0, r3 - r6, r8, r10, ctr, lr.
*/ */
.text .text
_GLOBAL(hash_page) _GLOBAL(hash_page)
tophys(r7,0) /* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
addis r8,r7,mmu_hash_lock@h lis r8, (mmu_hash_lock - PAGE_OFFSET)@h
ori r8,r8,mmu_hash_lock@l ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
lis r0,0x0fff lis r0,0x0fff
b 10f b 10f
11: lwz r6,0(r8) 11: lwz r6,0(r8)
...@@ -76,7 +75,7 @@ _GLOBAL(hash_page) ...@@ -76,7 +75,7 @@ _GLOBAL(hash_page)
lis r5,swapper_pg_dir@ha /* if kernel address, use */ lis r5,swapper_pg_dir@ha /* if kernel address, use */
addi r5,r5,swapper_pg_dir@l /* kernel page table */ addi r5,r5,swapper_pg_dir@l /* kernel page table */
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */ rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
112: add r5,r5,r7 /* convert to phys addr */ 112: tophys(r5, r5)
#ifndef CONFIG_PTE_64BIT #ifndef CONFIG_PTE_64BIT
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */ rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
lwz r8,0(r5) /* get pmd entry */ lwz r8,0(r5) /* get pmd entry */
...@@ -143,25 +142,24 @@ retry: ...@@ -143,25 +142,24 @@ retry:
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
eieio eieio
addis r8,r7,mmu_hash_lock@ha lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
li r0,0 li r0,0
stw r0,mmu_hash_lock@l(r8) stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif #endif
/* Return from the exception */ /* Return from the exception */
lwz r5,_CTR(r11) lwz r5,_CTR(r11)
mtctr r5 mtctr r5
lwz r0,GPR0(r11) lwz r0,GPR0(r11)
lwz r7,GPR7(r11)
lwz r8,GPR8(r11) lwz r8,GPR8(r11)
b fast_exception_return b fast_exception_return
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
hash_page_out: hash_page_out:
eieio eieio
addis r8,r7,mmu_hash_lock@ha lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
li r0,0 li r0,0
stw r0,mmu_hash_lock@l(r8) stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
blr blr
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
...@@ -207,11 +205,9 @@ _GLOBAL(add_hash_page) ...@@ -207,11 +205,9 @@ _GLOBAL(add_hash_page)
SYNC_601 SYNC_601
isync isync
tophys(r7,0)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
addis r6,r7,mmu_hash_lock@ha lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha
addi r6,r6,mmu_hash_lock@l addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10: lwarx r0,0,r6 /* take the mmu_hash_lock */ 10: lwarx r0,0,r6 /* take the mmu_hash_lock */
cmpi 0,r0,0 cmpi 0,r0,0
bne- 11f bne- 11f
...@@ -256,8 +252,8 @@ _GLOBAL(add_hash_page) ...@@ -256,8 +252,8 @@ _GLOBAL(add_hash_page)
9: 9:
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
addis r6,r7,mmu_hash_lock@ha lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha
addi r6,r6,mmu_hash_lock@l addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
eieio eieio
li r0,0 li r0,0
stw r0,0(r6) /* clear mmu_hash_lock */ stw r0,0(r6) /* clear mmu_hash_lock */
...@@ -277,10 +273,8 @@ _GLOBAL(add_hash_page) ...@@ -277,10 +273,8 @@ _GLOBAL(add_hash_page)
* It is designed to be called with the MMU either on or off. * It is designed to be called with the MMU either on or off.
* r3 contains the VSID, r4 contains the virtual address, * r3 contains the VSID, r4 contains the virtual address,
* r5 contains the linux PTE, r6 contains the old value of the * r5 contains the linux PTE, r6 contains the old value of the
* linux PTE (before setting _PAGE_HASHPTE) and r7 contains the * linux PTE (before setting _PAGE_HASHPTE). r10 contains the
* offset to be added to addresses (0 if the MMU is on, * upper half of the PTE if CONFIG_PTE_64BIT.
* -KERNELBASE if it is off). r10 contains the upper half of
* the PTE if CONFIG_PTE_64BIT.
* On SMP, the caller should have the mmu_hash_lock held. * On SMP, the caller should have the mmu_hash_lock held.
* We assume that the caller has (or will) set the _PAGE_HASHPTE * We assume that the caller has (or will) set the _PAGE_HASHPTE
* bit in the linux PTE in memory. The value passed in r6 should * bit in the linux PTE in memory. The value passed in r6 should
...@@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) ...@@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
patch_site 1f, patch__hash_page_A1 patch_site 1f, patch__hash_page_A1
patch_site 2f, patch__hash_page_A2 patch_site 2f, patch__hash_page_A2
/* Get the address of the primary PTE group in the hash table (r3) */ /* Get the address of the primary PTE group in the hash table (r3) */
0: addis r0,r7,Hash_base@h /* base address of hash table */ 0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ 2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r3,r3,r0 /* make primary hash */ xor r3,r3,r0 /* make primary hash */
...@@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) ...@@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
beq+ 10f /* no PTE: go look for an empty slot */ beq+ 10f /* no PTE: go look for an empty slot */
tlbie r4 tlbie r4
addis r4,r7,htab_hash_searches@ha lis r4, (htab_hash_searches - PAGE_OFFSET)@ha
lwz r6,htab_hash_searches@l(r4) lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
addi r6,r6,1 /* count how many searches we do */ addi r6,r6,1 /* count how many searches we do */
stw r6,htab_hash_searches@l(r4) stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */ /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
mtctr r0 mtctr r0
...@@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) ...@@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
beq+ found_empty beq+ found_empty
/* update counter of times that the primary PTEG is full */ /* update counter of times that the primary PTEG is full */
addis r4,r7,primary_pteg_full@ha lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
lwz r6,primary_pteg_full@l(r4) lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
addi r6,r6,1 addi r6,r6,1
stw r6,primary_pteg_full@l(r4) stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
patch_site 0f, patch__hash_page_C patch_site 0f, patch__hash_page_C
/* Search the secondary PTEG for an empty slot */ /* Search the secondary PTEG for an empty slot */
...@@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) ...@@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
* lockup here but that shouldn't happen * lockup here but that shouldn't happen
*/ */
1: addis r4,r7,next_slot@ha /* get next evict slot */ 1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */
lwz r6,next_slot@l(r4) lwz r6, (next_slot - PAGE_OFFSET)@l(r4)
addi r6,r6,HPTE_SIZE /* search for candidate */ addi r6,r6,HPTE_SIZE /* search for candidate */
andi. r6,r6,7*HPTE_SIZE andi. r6,r6,7*HPTE_SIZE
stw r6,next_slot@l(r4) stw r6,next_slot@l(r4)
...@@ -500,8 +494,6 @@ htab_hash_searches: ...@@ -500,8 +494,6 @@ htab_hash_searches:
* We assume that there is a hash table in use (Hash != 0). * We assume that there is a hash table in use (Hash != 0).
*/ */
_GLOBAL(flush_hash_pages) _GLOBAL(flush_hash_pages)
tophys(r7,0)
/* /*
* We disable interrupts here, even on UP, because we want * We disable interrupts here, even on UP, because we want
* the _PAGE_HASHPTE bit to be a reliable indication of * the _PAGE_HASHPTE bit to be a reliable indication of
...@@ -546,10 +538,10 @@ _GLOBAL(flush_hash_pages) ...@@ -546,10 +538,10 @@ _GLOBAL(flush_hash_pages)
SET_V(r11) /* set V (valid) bit */ SET_V(r11) /* set V (valid) bit */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
addis r9,r7,mmu_hash_lock@ha lis r9, (mmu_hash_lock - PAGE_OFFSET)@ha
addi r9,r9,mmu_hash_lock@l addi r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
CURRENT_THREAD_INFO(r8, r1) CURRENT_THREAD_INFO(r8, r1)
add r8,r8,r7 tophys(r8, r8)
lwz r8,TI_CPU(r8) lwz r8,TI_CPU(r8)
oris r8,r8,9 oris r8,r8,9
10: lwarx r0,0,r9 10: lwarx r0,0,r9
...@@ -583,7 +575,7 @@ _GLOBAL(flush_hash_pages) ...@@ -583,7 +575,7 @@ _GLOBAL(flush_hash_pages)
patch_site 1f, patch__flush_hash_A1 patch_site 1f, patch__flush_hash_A1
patch_site 2f, patch__flush_hash_A2 patch_site 2f, patch__flush_hash_A2
/* Get the address of the primary PTE group in the hash table (r3) */ /* Get the address of the primary PTE group in the hash table (r3) */
0: addis r8,r7,Hash_base@h /* base address of hash table */ 0: lis r8, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
1: rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 1: rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
2: rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ 2: rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r8,r0,r8 /* make primary hash */ xor r8,r0,r8 /* make primary hash */
......
...@@ -231,7 +231,8 @@ void __init MMU_init_hw(void) ...@@ -231,7 +231,8 @@ void __init MMU_init_hw(void)
if (lg_n_hpteg > 16) if (lg_n_hpteg > 16)
mb2 = 16 - LG_HPTEG_SIZE; mb2 = 16 - LG_HPTEG_SIZE;
modify_instruction_site(&patch__hash_page_A0, 0xffff, (unsigned int)Hash >> 16); modify_instruction_site(&patch__hash_page_A0, 0xffff,
((unsigned int)Hash - PAGE_OFFSET) >> 16);
modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6); modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6);
modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6); modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6);
modify_instruction_site(&patch__hash_page_B, 0xffff, hmask); modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
...@@ -240,7 +241,8 @@ void __init MMU_init_hw(void) ...@@ -240,7 +241,8 @@ void __init MMU_init_hw(void)
/* /*
* Patch up the instructions in hashtable.S:flush_hash_page * Patch up the instructions in hashtable.S:flush_hash_page
*/ */
modify_instruction_site(&patch__flush_hash_A0, 0xffff, (unsigned int)Hash >> 16); modify_instruction_site(&patch__flush_hash_A0, 0xffff,
((unsigned int)Hash - PAGE_OFFSET) >> 16);
modify_instruction_site(&patch__flush_hash_A1, 0x7c0, mb << 6); modify_instruction_site(&patch__flush_hash_A1, 0x7c0, mb << 6);
modify_instruction_site(&patch__flush_hash_A2, 0x7c0, mb2 << 6); modify_instruction_site(&patch__flush_hash_A2, 0x7c0, mb2 << 6);
modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask); modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册