Commit 106713a1 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Remove the dependency on pte bit position in asm code

We should not depend on pte bit positions in asm code. Fix this
by moving that part of the logic to C.
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent 91f1da99
@@ -1556,29 +1556,19 @@ do_hash_page:
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
 	bne	77f			/* then don't call hash_page now */
-	/*
-	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-	 * accessing a userspace segment (even from the kernel). We assume
-	 * kernel addresses always have the high bit set.
-	 */
-	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
-	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
-	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
-	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
-	ori	r4,r4,1			/* add _PAGE_PRESENT */
-	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

 	/*
 	 * r3 contains the faulting address
-	 * r4 contains the required access permissions
+	 * r4 msr
 	 * r5 contains the trap number
 	 * r6 contains dsisr
 	 *
 	 * at return r3 = 0 for success, 1 for page fault, negative for error
 	 */
+	mr	r4,r12
 	ld	r6,_DSISR(r1)
-	bl	hash_page		/* build HPTE if possible */
-	cmpdi	r3,0			/* see if hash_page succeeded */
+	bl	__hash_page		/* build HPTE if possible */
+	cmpdi	r3,0			/* see if __hash_page succeeded */

 	/* Success */
 	beq	fast_exc_return_irq	/* Return from exception on success */
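A note on the deleted block: every rotate-and-insert there lands a flag in a fixed bit of the access mask, which is exactly the PTE-bit-position dependency the commit removes. The following standalone C sketch shows what that asm computed; the constant values mirror the hash64 PTE/MSR layout the asm assumed, and old_asm_access() is a name chosen here for illustration, not code from the patch:

	/* Illustration only: these values are what the deleted asm hard-coded. */
	#define _PAGE_PRESENT	0x0001UL	/* ori    r4,r4,1 */
	#define _PAGE_USER	0x0002UL	/* rlwimi r4,r0,32-13,30,30 */
	#define _PAGE_EXEC	0x0004UL	/* rlwimi r4,r5,22+2,31-2,31-2 */
	#define _PAGE_RW	0x0200UL	/* rlwinm r4,r4,32-25+9,31-9,31-9 */
	#define MSR_PR		0x4000UL	/* problem-state (user mode) bit */
	#define DSISR_STORE	0x02000000UL	/* "access was a store" bit of DSISR */

	unsigned long old_asm_access(unsigned long ea, unsigned long msr,
				     unsigned long trap, unsigned long dsisr)
	{
		unsigned long access = _PAGE_PRESENT;

		if (dsisr & DSISR_STORE)		/* DSISR_STORE -> _PAGE_RW */
			access |= _PAGE_RW;
		if ((msr & MSR_PR) || !(ea >> 63))	/* MSR_PR | ~high_bit */
			access |= _PAGE_USER;
		if (trap == 0x400)			/* 0x400 (ISI) -> _PAGE_EXEC */
			access |= _PAGE_EXEC;
		return access;
	}

If any of those four PTE flag values moved, this asm would silently break; the C version below uses the flag macros directly, so it follows the definitions.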
@@ -1206,6 +1206,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 }
 EXPORT_SYMBOL_GPL(hash_page);

+int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
+		unsigned long dsisr)
+{
+	unsigned long access = _PAGE_PRESENT;
+	unsigned long flags = 0;
+	struct mm_struct *mm = current->mm;
+
+	if (REGION_ID(ea) == VMALLOC_REGION_ID)
+		mm = &init_mm;
+
+	if (dsisr & DSISR_NOHPTE)
+		flags |= HPTE_NOHPTE_UPDATE;
+
+	if (dsisr & DSISR_ISSTORE)
+		access |= _PAGE_RW;
+	/*
+	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+	 * accessing a userspace segment (even from the kernel). We assume
+	 * kernel addresses always have the high bit set.
+	 */
+	if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
+		access |= _PAGE_USER;
+
+	if (trap == 0x400)
+		access |= _PAGE_EXEC;
+
+	return hash_page_mm(mm, ea, access, trap, flags);
+}
+
 void hash_preload(struct mm_struct *mm, unsigned long ea,
 		  unsigned long access, unsigned long trap)
 {
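With this change the exception path computes no access flags at all: r12 already holds the saved MSR at that point, so the asm only needs the added "mr r4,r12" before the call, and __hash_page() derives _PAGE_RW/_PAGE_USER/_PAGE_EXEC from the raw MSR, DSISR and trap values in C, next to the PTE flag definitions. For C callers and static checkers, a declaration along these lines would accompany the change, presumably beside hash_page()'s prototype; the hunk adding it is not shown on this page:

	/* Assumed companion declaration; not part of the hunks shown above. */
	extern int __hash_page(unsigned long ea, unsigned long msr,
			       unsigned long trap, unsigned long dsisr);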