Commit 4f9c53c8 authored by Michael Ellerman

powerpc: Fix compile errors with STRICT_MM_TYPECHECKS enabled

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[mpe: Fix the 32-bit code also]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent 5dd4e4f6
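A note on the mechanism being fixed: with CONFIG_STRICT_MM_TYPECHECKS enabled, the page-table types (pte_t, pmd_t, pgd_t, pgprot_t) become single-member structs rather than bare unsigned longs, so direct arithmetic or bitwise logic on them no longer compiles, and every raw access has to go through the pte_val()/__pte() style accessors. A simplified sketch of the two flavours of pte_t (the accessor names are the real ones; the kernel's actual definitions carry a few extra details):

```c
#ifdef CONFIG_STRICT_MM_TYPECHECKS
/* Struct flavour: the compiler rejects expressions such as
 * "old_pte & _PAGE_BUSY", which is exactly what this patch fixes up. */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#else
/* Integer flavour: the accessors are identity operations, so direct
 * use of a pte_t as a number happens to compile, which is how these
 * errors crept in unnoticed. */
typedef unsigned long pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)
#endif
```

The hunks below convert each offending expression so the code builds in both configurations.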
@@ -290,11 +290,11 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
 	pte_t old_pte, new_pte = __pte(0);

 	while (1) {
-		old_pte = pte_val(*ptep);
+		old_pte = *ptep;
 		/*
 		 * wait until _PAGE_BUSY is clear then set it atomically
 		 */
-		if (unlikely(old_pte & _PAGE_BUSY)) {
+		if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
 			cpu_relax();
 			continue;
 		}
@@ -305,16 +305,18 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
 			return __pte(0);
 #endif
 		/* If pte is not present return None */
-		if (unlikely(!(old_pte & _PAGE_PRESENT)))
+		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
 			return __pte(0);

 		new_pte = pte_mkyoung(old_pte);
 		if (writing && pte_write(old_pte))
 			new_pte = pte_mkdirty(new_pte);

-		if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
-					     new_pte))
+		if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
+						      pte_val(old_pte),
+						      pte_val(new_pte))) {
 			break;
+		}
 	}
 	return new_pte;
 }
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
 		do {
 			SetPageReserved(page);
 			map_page(vaddr, page_to_phys(page),
-				 pgprot_noncached(PAGE_KERNEL));
+				 pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 			page++;
 			vaddr += PAGE_SIZE;
 		} while (size -= PAGE_SIZE);
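The PAGE_KERNEL* conversions in the hunk above, and in the __ioremap_caller() and __mapin_ram_chunk() hunks further down, are the pgprot side of the same fix: pgprot_t is also a typechecked struct, so callees that take a raw unsigned long flags word (map_page(), settlbcam(), the ioremap flags) need an explicit pgprot_val(). A sketch mirroring the pte_t definitions above (again simplified):

```c
#ifdef CONFIG_STRICT_MM_TYPECHECKS
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#else
typedef unsigned long pgprot_t;
#define pgprot_val(x)	(x)
#define __pgprot(x)	(x)
#endif
```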
@@ -181,7 +181,7 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
 		unsigned long cam_sz;

 		cam_sz = calc_cam_sz(ram, virt, phys);
-		settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);
+		settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0);

 		ram -= cam_sz;
 		amount_mapped += cam_sz;
@@ -33,7 +33,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * atomically mark the linux large page PMD busy and dirty
 	 */
 	do {
-		pmd_t pmd = ACCESS_ONCE(*pmdp);
+		pmd_t pmd = READ_ONCE(*pmdp);

 		old_pmd = pmd_val(pmd);
 		/* If PMD busy, retry the access */
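The ACCESS_ONCE() -> READ_ONCE() conversions in __hash_page_thp() above and in the find_linux_pte_or_hugepte()/gup_hugepte() hunks below are needed for the same reason: in kernels of this era ACCESS_ONCE() is deliberately restricted to scalar types, so it stops compiling once pmd_t and pgd_t become structs, whereas READ_ONCE() copies through a union and therefore also handles aggregates. A rough, self-contained approximation of the difference (not the kernel's exact definitions, which dispatch on sizeof() to volatile loads):

```c
#include <string.h>

/* ACCESS_ONCE(): a volatile re-read. Kernels of this period add a
 * dummy scalar initialisation so that non-scalar types fail to
 * compile, which is what broke once pmd_t became a struct. */
#define ACCESS_ONCE(x)						\
({								\
	__attribute__((unused)) typeof(x) __var = (typeof(x))0;\
	*(volatile typeof(x) *)&(x);				\
})

/* READ_ONCE(), simplified: copy the object out byte-wise through a
 * union, which is legal for small structs as well as scalars. */
#define READ_ONCE(x)						\
({								\
	union { typeof(x) __val; char __c[sizeof(x)]; } __u;	\
	memcpy(__u.__c, (const void *)&(x), sizeof(x));		\
	__u.__val;						\
})

typedef struct { unsigned long pmd; } pmd_t;	/* typechecked flavour */
#define pmd_val(x)	((x).pmd)

unsigned long read_pmd(pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);	/* compiles: plain struct copy */
	/* pmd_t bad = ACCESS_ONCE(*pmdp); -- rejected: cast of 0 to struct */
	return pmd_val(pmd);
}
```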
@@ -964,7 +964,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
 		*shift = 0;

 	pgdp = pgdir + pgd_index(ea);
-	pgd = ACCESS_ONCE(*pgdp);
+	pgd = READ_ONCE(*pgdp);
 	/*
 	 * Always operate on the local stack value. This make sure the
 	 * value don't get updated by a parallel THP split/collapse,
@@ -1045,7 +1045,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 	if (pte_end < end)
 		end = pte_end;

-	pte = ACCESS_ONCE(*ptep);
+	pte = READ_ONCE(*ptep);
 	mask = _PAGE_PRESENT | _PAGE_USER;
 	if (write)
 		mask |= _PAGE_RW;
@@ -189,7 +189,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	/* Make sure we have the base flags */
 	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= PAGE_KERNEL;
+		flags |= pgprot_val(PAGE_KERNEL);

 	/* Non-cacheable page cannot be coherent */
 	if (flags & _PAGE_NO_CACHE)
@@ -324,7 +324,7 @@ void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 	p = memstart_addr + s;
 	for (; s < top; s += PAGE_SIZE) {
 		ktext = ((char *) v >= _stext && (char *) v < etext);
-		f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
+		f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
 		map_page(v, p, f);
 #ifdef CONFIG_PPC_STD_MMU_32
 		if (ktext)
@@ -723,7 +723,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif
-	trace_hugepage_set_pmd(addr, pmd);
+	trace_hugepage_set_pmd(addr, pmd_val(pmd));
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 			continue;
 		pte = pte_val(*ptep);
 		if (hugepage_shift)
-			trace_hugepage_invalidate(start, pte_val(pte));
+			trace_hugepage_invalidate(start, pte);
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
 		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))