提交 c6a3c495 编写于 作者: A Aneesh Kumar K.V 提交者: Michael Ellerman

powerpc/mm: Add helper for converting pte bit to hpte bits

Instead of open coding it in multiple code paths, export the helper
and add more documentation. Also make sure we don't make assumptions
regarding pte bit position.
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
上级 a43c0eb8
......@@ -236,6 +236,7 @@ extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
pmd_t *pmdp,
unsigned long clr,
unsigned long set);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
......
......@@ -53,18 +53,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* PP bits. _PAGE_USER is already PP bit 0x2, so we only
* need to add in 0x1 if it's a read-only user page
*/
rflags = new_pte & _PAGE_USER;
if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
(new_pte & _PAGE_DIRTY)))
rflags |= 0x1;
/*
* _PAGE_EXEC -> HW_NO_EXEC since it's inverted
*/
rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
/*
* Always add C and Memory coherence bit
*/
rflags |= HPTE_R_C | HPTE_R_M;
rflags = htab_convert_pte_flags(new_pte);
/*
* Add in WIMG bits
*/
......
......@@ -85,22 +85,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* Handle the subpage protection bits
*/
subpg_pte = new_pte & ~subpg_prot;
/*
* PP bits. _PAGE_USER is already PP bit 0x2, so we only
* need to add in 0x1 if it's a read-only user page
*/
rflags = subpg_pte & _PAGE_USER;
if ((subpg_pte & _PAGE_USER) && !((subpg_pte & _PAGE_RW) &&
(subpg_pte & _PAGE_DIRTY)))
rflags |= 0x1;
/*
* _PAGE_EXEC -> HW_NO_EXEC since it's inverted
*/
rflags |= ((subpg_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
/*
* Always add C and Memory coherence bit
*/
rflags |= HPTE_R_C | HPTE_R_M;
rflags = htab_convert_pte_flags(subpg_pte);
/*
* Add in WIMG bits
*/
......@@ -271,22 +256,8 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
new_pte |= _PAGE_DIRTY;
} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
old_pte, new_pte));
/*
* PP bits. _PAGE_USER is already PP bit 0x2, so we only
* need to add in 0x1 if it's a read-only user page
*/
rflags = new_pte & _PAGE_USER;
if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
(new_pte & _PAGE_DIRTY)))
rflags |= 0x1;
/*
* _PAGE_EXEC -> HW_NO_EXEC since it's inverted
*/
rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
/*
* Always add C and Memory coherence bit
*/
rflags |= HPTE_R_C | HPTE_R_M;
rflags = htab_convert_pte_flags(new_pte);
/*
* Add in WIMG bits
*/
......
......@@ -159,20 +159,26 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
},
};
static unsigned long htab_convert_pte_flags(unsigned long pteflags)
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
unsigned long rflags = pteflags & 0x1fa;
unsigned long rflags = 0;
/* _PAGE_EXEC -> NOEXEC */
if ((pteflags & _PAGE_EXEC) == 0)
rflags |= HPTE_R_N;
/* PP bits. PAGE_USER is already PP bit 0x2, so we only
* need to add in 0x1 if it's a read-only user page
/*
* PP bits:
* Linux use slb key 0 for kernel and 1 for user.
* kernel areas are mapped by PP bits 00
* and and there is no kernel RO (_PAGE_KERNEL_RO).
* User area mapped by 0x2 and read only use by
* 0x3.
*/
if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
(pteflags & _PAGE_DIRTY)))
rflags |= 1;
if (pteflags & _PAGE_USER) {
rflags |= 0x2;
if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
rflags |= 0x1;
}
/*
* Always add "C" bit for perf. Memory coherence is always enabled
*/
......
......@@ -54,18 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
new_pmd |= _PAGE_DIRTY;
} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
old_pmd, new_pmd));
/*
* PP bits. _PAGE_USER is already PP bit 0x2, so we only
* need to add in 0x1 if it's a read-only user page
*/
rflags = new_pmd & _PAGE_USER;
if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
(new_pmd & _PAGE_DIRTY)))
rflags |= 0x1;
/*
* _PAGE_EXEC -> HW_NO_EXEC since it's inverted
*/
rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
rflags = htab_convert_pte_flags(new_pmd);
#if 0
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
......
......@@ -59,10 +59,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
new_pte |= _PAGE_DIRTY;
} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
old_pte, new_pte));
rflags = htab_convert_pte_flags(new_pte);
rflags = 0x2 | (!(new_pte & _PAGE_RW));
/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
sz = ((1UL) << shift);
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
/* No CPU has hugepages but lacks no execute, so we
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册