/*
 * ppc64 MMU hashtable management routines
 *
 * (c) Copyright IBM Corp. 2003, 2005
 *
 * Maintained by: Benjamin Herrenschmidt
 *                <benh@kernel.crashing.org>
 *
 * This file is covered by the GNU Public Licence v2 as
 * described in the kernel's COPYING file.
 */

#include <asm/reg.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

	.text

/*
 * Stackframe:
 *
 *         +-> Back chain (SP + 256)
 *         |   General register save area (SP + 112)
 *         |   Parameter save area (SP + 48)
 *         |   TOC save area (SP + 40)
 *         |   link editor doubleword (SP + 32)
 *         |   compiler doubleword (SP + 24)
 *         |   LR save area (SP + 16)
 *         |   CR save area (SP + 8)
 * SP ---> +-- Back chain (SP + 0)
 */

#ifndef CONFIG_PPC_64K_PAGES

/*****************************************************************************
 *                                                                           *
 *                   4K SW & 4K HW pages implementation                     *
 *                                                                           *
 *****************************************************************************/

/*
 * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 *		 pte_t *ptep, unsigned long trap, unsigned long flags,
 *		 int ssize)
 *
 * Adds a 4K page to the hash table in a segment of 4K pages only
 */
_GLOBAL(__hash_page_4K)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARAM(R6)(r1)
	std	r8,STK_PARAM(R8)(r1)
	std	r9,STK_PARAM(R9)(r1)

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r30 is "new PTE"
	 * r29 is vpn
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamically patched instead?)
	 */
	std	r27,STK_REG(R27)(r1)
	std	r28,STK_REG(R28)(r1)
	std	r29,STK_REG(R29)(r1)
	std	r30,STK_REG(R30)(r1)
	std	r31,STK_REG(R31)(r1)

	/* Step 1:
	 *
	 * Check permissions, atomically mark the linux PTE busy
	 * and hashed.
	 */
1:	ldarx	r31,0,r6
	/* Check access rights (access & ~(pte_val(*ptep))) */
	andc.	r0,r4,r31
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	bne-	htab_bail_ok

	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, HASHPTE and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	or	r30,r30,r31
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	/* Write the linux PTE atomically (setting busy) */
	stdcx.	r30,0,r6
	bne-	1b
	isync

	/* Step 2:
	 *
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is re-usable, we use it for the new HPTE flags
	 */

BEGIN_FTR_SECTION
	cmpdi	r9,0			/* check segment size */
	bne	3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
	or	r29,r28,r29

	/*
	 * Calculate hash value for primary slot and store it in r28
	 * r3 = va, r5 = vsid
	 * r0 = (va >> 12) & ((1ul << (28 - 12)) - 1)
	 */
	rldicl	r0,r3,64-12,48
	xor	r28,r5,r0		/* hash */
	b	4f

3:	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
	or	r29,r28,r29

	/*
	 * Calculate hash value for primary slot and
	 * store it in r28 for 1T segment
	 * r3 = va, r5 = vsid
	 */
	sldi	r28,r5,25		/* vsid << 25 */
	/* r0 = (va >> 12) & ((1ul << (40 - 12)) - 1) */
	rldicl	r0,r3,64-12,36
	xor	r28,r28,r5		/* vsid ^ (vsid << 25) */
	xor	r28,r28,r0		/* hash */
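	/*
	 * Editorial note, not part of the original source: as a rough
	 * C-like sketch (names and masks approximate, 4K base pages
	 * assumed), the two paths above compute
	 *
	 *	256M segment: hash = vsid ^ ((ea >> 12) & 0xffff);
	 *	1T segment:   hash = vsid ^ (vsid << 25)
	 *				   ^ ((ea >> 12) & 0xfffffff);
	 *
	 * i.e. the page index within the segment is XORed into the VSID,
	 * mirroring what the C-side hpt_hash() helper computes.
	 */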
	/* Convert linux PTE bits into HW equivalents */
4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */

	/*
	 * Always add "C" bit for perf. Memory coherence is always enabled
	 */
	ori	r3,r3,HPTE_R_C | HPTE_R_M

	/* We eventually do the icache sync here (maybe inline that
	 * code rather than call a C function...)
	 */
BEGIN_FTR_SECTION
	mr	r4,r30
	mr	r5,r7
	bl	hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARAM(R4)(r1)

	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */

	/* Check if we may already be in the hashtable, in this case, we
	 * go to out-of-line code to try to modify the HPTE
	 */
	andi.	r0,r31,_PAGE_HASHPTE
	bne	htab_modify_pte

htab_insert_pte:
	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HASHPTE
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	andc	r30,r30,r0
	ori	r30,r30,_PAGE_HASHPTE

	/* physical address r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate primary group hash */
	and	r0,r28,r27
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve vpn */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	li	r9,MMU_PAGE_4K		/* actual page size */
	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
.globl htab_call_hpte_insert1
htab_call_hpte_insert1:
	bl	.			/* Patched by htab_finish_init() */
	cmpdi	0,r3,0
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Now try secondary slot */

	/* physical address r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	andc	r0,r27,r28
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve vpn */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	li	r9,MMU_PAGE_4K		/* actual page size */
	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
.globl htab_call_hpte_insert2
htab_call_hpte_insert2:
	bl	.			/* Patched by htab_finish_init() */
	cmpdi	0,r3,0
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Both are full, we need to evict something */
	mftb	r0
	/* Pick a random group based on TB */
	andi.	r0,r0,1
	mr	r5,r28
	bne	2f
	not	r5,r5
2:	and	r0,r5,r27
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
.globl htab_call_hpte_remove
htab_call_hpte_remove:
	bl	.			/* Patched by htab_finish_init() */

	/* Try all again */
	b	htab_insert_pte

htab_bail_ok:
	li	r3,0
	b	htab_bail

htab_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15
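	/*
	 * Editorial note, not part of the original source: the value
	 * returned by hpte_insert carries the slot within the group in
	 * its low 3 bits plus a "secondary group" flag; the rldimi above
	 * folds those 4 bits into the Linux PTE (the _PAGE_F_GIX /
	 * _PAGE_F_SECOND fields) so that a later update or invalidate
	 * can find the HPTE again without searching the group.
	 */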
	/* Write out the PTE with a normal write
	 * (maybe adding an eieio would still be good?)
	 */
htab_write_out_pte:
	ld	r6,STK_PARAM(R6)(r1)
	std	r30,0(r6)
	li	r3,0
htab_bail:
	ld	r27,STK_REG(R27)(r1)
	ld	r28,STK_REG(R28)(r1)
	ld	r29,STK_REG(R29)(r1)
	ld	r30,STK_REG(R30)(r1)
	ld	r31,STK_REG(R31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

htab_modify_pte:
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	mr	r4,r3
	rlwinm	r3,r31,32-12,29,31

	/* Secondary group? If yes, get an inverted hash value */
	mr	r5,r28
	andi.	r0,r31,_PAGE_F_SECOND
	beq	1f
	not	r5,r5
1:
	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	and	r0,r5,r27
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */

	/* Call ppc_md.hpte_updatepp */
	mr	r5,r29			/* vpn */
	li	r6,MMU_PAGE_4K		/* base page size */
	li	r7,MMU_PAGE_4K		/* actual page size */
	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
	ld	r9,STK_PARAM(R8)(r1)	/* get "flags" param */
.globl htab_call_hpte_updatepp
htab_call_hpte_updatepp:
	bl	.			/* Patched by htab_finish_init() */

	/* If we failed, it is typically because the HPTE wasn't really
	 * there, so we try an insertion.
	 */
	cmpdi	0,r3,-1
	beq-	htab_insert_pte

	/* Clear the BUSY bit and Write out the PTE */
	li	r0,_PAGE_BUSY
	andc	r30,r30,r0
	b	htab_write_out_pte

htab_wrong_access:
	/* Bail out clearing reservation */
	stdcx.	r31,0,r6
	li	r3,1
	b	htab_bail

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARAM(R6)(r1)
	std	r31,0(r6)
	li	r3,-1
	b	htab_bail

#endif /* CONFIG_PPC_64K_PAGES */