Commit 12bc9f6f authored by Aneesh Kumar K.V, committed by Benjamin Herrenschmidt

powerpc: Replace find_linux_pte with find_linux_pte_or_hugepte

Replace find_linux_pte with find_linux_pte_or_hugepte and explicitly
document why we don't need to handle transparent hugepages at callsites.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Parent ac52ae47
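For context before the diff: the new walker returns the pte pointer and, for hugepage mappings, reports the page-size shift through an out-parameter (callers that know no hugepages can appear may pass NULL, as real_vmalloc_addr() does below). The following is a minimal standalone sketch of that calling convention, with mock types and a hypothetical mock walker; nothing here is the kernel's actual implementation.

#include <stdio.h>

typedef unsigned long pte_t;

/* Two fake mappings: a normal 4K page and a 16M hugepage. */
static pte_t normal_pte = 0x1000;
static pte_t huge_pte = 0x2000;

/*
 * Mock of find_linux_pte_or_hugepte()'s contract: returns NULL when
 * nothing is mapped; otherwise *shift is 0 for a normal pte and the
 * log2 of the page size for a hugepage.  shift may be NULL when the
 * caller knows hugepages cannot appear.
 */
static pte_t *mock_find_pte_or_hugepte(unsigned long ea, unsigned *shift)
{
	if (shift)
		*shift = 0;
	if (ea == 0xdead0000UL) {	/* pretend this ea is hugepage-backed */
		if (shift)
			*shift = 24;	/* 1UL << 24 == 16M */
		return &huge_pte;
	}
	if (ea == 0xbeef0000UL)		/* pretend this ea is a normal page */
		return &normal_pte;
	return NULL;
}

int main(void)
{
	unsigned shift;
	pte_t *ptep = mock_find_pte_or_hugepte(0xdead0000UL, &shift);

	if (!ptep)
		return 1;
	if (shift)	/* hugepage: the offset mask spans the whole huge page */
		printf("hugepage, offset mask 0x%lx\n", (1UL << shift) - 1);
	else
		printf("normal 4K page\n");
	return 0;
}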
@@ -344,30 +344,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
 
-/*
- * find_linux_pte returns the address of a linux pte for a given
- * effective address and directory.  If not found, it returns zero.
- */
-static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
-{
-	pgd_t *pg;
-	pud_t *pu;
-	pmd_t *pm;
-	pte_t *pt = NULL;
-
-	pg = pgdir + pgd_index(ea);
-	if (!pgd_none(*pg)) {
-		pu = pud_offset(pg, ea);
-		if (!pud_none(*pu)) {
-			pm = pmd_offset(pu, ea);
-			if (pmd_present(*pm))
-				pt = pte_offset_kernel(pm, ea);
-		}
-	}
-	return pt;
-}
-
 #endif /* __ASSEMBLY__ */
 
 /*
...
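Why the open-coded walker above had to go: it descends pgd to pud to pmd and then unconditionally treats a present pmd as a pointer to a page of ptes. With huge pages, the pmd slot can instead be a leaf entry that maps the page directly, so that final pte_offset_kernel() step would misinterpret it. The hugepage-aware walker returns the leaf entry itself and reports its size. A standalone sketch of that distinction follows, using a hypothetical leaf bit and mock types (illustrative only, not the kernel's pmd encoding).

#include <stdio.h>

typedef unsigned long pmd_t;
typedef unsigned long pte_t;

#define PMD_LEAF_BIT 0x1UL	/* hypothetical "maps a hugepage" flag */

/*
 * A hugepage-aware pmd step: a leaf pmd *is* the translation, so it is
 * returned as the pte and its size reported; only a non-leaf pmd may be
 * dereferenced as a table of ptes.
 */
static pte_t *pmd_step(pmd_t *pmdp, pte_t *pte_table, unsigned *shift)
{
	if (*pmdp & PMD_LEAF_BIT) {
		*shift = 24;		/* e.g. a 16M hugepage */
		return (pte_t *)pmdp;	/* the pmd entry itself is the pte */
	}
	*shift = 0;
	return &pte_table[0];		/* normal case: descend one level */
}

int main(void)
{
	pte_t table[1] = { 0x1000 };
	pmd_t leaf = 0x2000 | PMD_LEAF_BIT;
	unsigned shift;

	pte_t *ptep = pmd_step(&leaf, table, &shift);
	printf("shift=%u pte=0x%lx\n", shift, *ptep);	/* shift=24 */
	return 0;
}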
@@ -260,10 +260,15 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
 	pte_t *ptep;
 	unsigned long pa;
+	int hugepage_shift;
 
-	ptep = find_linux_pte(init_mm.pgd, token);
+	/*
+	 * We won't find hugepages here, iomem
+	 */
+	ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
 	if (!ptep)
 		return token;
+	WARN_ON(hugepage_shift);
 	pa = pte_pfn(*ptep) << PAGE_SHIFT;
 
 	return pa | (token & (PAGE_SIZE-1));
...
@@ -55,6 +55,7 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
+	unsigned hugepage_shift;
 	struct iowa_bus *bus;
 	int token;
@@ -70,11 +71,17 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 		if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
 			return NULL;
 
-		ptep = find_linux_pte(init_mm.pgd, vaddr);
+		ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
+						 &hugepage_shift);
 		if (ptep == NULL)
 			paddr = 0;
-		else
+		else {
+			/*
+			 * we don't have hugepages backing iomem
+			 */
+			WARN_ON(hugepage_shift);
 			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+		}
 
 		bus = iowa_pci_find(vaddr, paddr);
 		if (bus == NULL)
...
@@ -27,7 +27,7 @@ static void *real_vmalloc_addr(void *x)
 	unsigned long addr = (unsigned long) x;
 	pte_t *p;
 
-	p = find_linux_pte(swapper_pg_dir, addr);
+	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
 	if (!p || !pte_present(*p))
 		return NULL;
 	/* assume we don't have huge pages in vmalloc space... */
...
@@ -1145,6 +1145,7 @@ EXPORT_SYMBOL_GPL(hash_page);
 void hash_preload(struct mm_struct *mm, unsigned long ea,
 		  unsigned long access, unsigned long trap)
 {
+	int hugepage_shift;
 	unsigned long vsid;
 	pgd_t *pgdir;
 	pte_t *ptep;
@@ -1166,10 +1167,15 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
 		return;
-	ptep = find_linux_pte(pgdir, ea);
+	/*
+	 * THP pages use update_mmu_cache_pmd. We don't do
+	 * hash preload there. Hence can ignore THP here
+	 */
+	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
 	if (!ptep)
 		return;
+	WARN_ON(hugepage_shift);
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
 	 * a 64K kernel), then we don't preload, hash_page() will take
...
@@ -105,6 +105,7 @@ int pgd_huge(pgd_t pgd)
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
+	/* Only called for hugetlbfs pages, hence can ignore THP */
 	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }
@@ -673,11 +674,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 	struct page *page;
 	unsigned shift;
 	unsigned long mask;
-
+	/*
+	 * Transparent hugepages are handled by generic code. We can skip them
+	 * here.
+	 */
 	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
 
 	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift)
+	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
 		return ERR_PTR(-EINVAL);
 
 	mask = (1UL << shift) - 1;
...
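A quick worked example of the shift/mask arithmetic follow_huge_addr() uses above: for a 16M hugepage the walker reports shift = 24, so the low 24 bits of the address are the offset within that page. The following is a standalone arithmetic demo with illustrative values.

#include <stdio.h>

int main(void)
{
	unsigned shift = 24;				/* 16M hugepage */
	unsigned long address = 0x12345678UL;
	unsigned long mask = (1UL << shift) - 1;	/* 0x00ffffff */

	printf("page base:   0x%lx\n", address & ~mask);	/* 0x12000000 */
	printf("page offset: 0x%lx\n", address & mask);		/* 0x00345678 */
	return 0;
}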
@@ -189,6 +189,7 @@ void tlb_flush(struct mmu_gather *tlb)
 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long end)
 {
+	int hugepage_shift;
 	unsigned long flags;
 
 	start = _ALIGN_DOWN(start, PAGE_SIZE);
@@ -206,7 +207,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	local_irq_save(flags);
 	arch_enter_lazy_mmu_mode();
 	for (; start < end; start += PAGE_SIZE) {
-		pte_t *ptep = find_linux_pte(mm->pgd, start);
+		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
+							&hugepage_shift);
 		unsigned long pte;
 
 		if (ptep == NULL)
@@ -214,7 +216,10 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 		pte = pte_val(*ptep);
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
-		hpte_need_flush(mm, start, ptep, pte, 0);
+		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+			hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
+		else
+			hpte_need_flush(mm, start, ptep, pte, 0);
 	}
 	arch_leave_lazy_mmu_mode();
 	local_irq_restore(flags);
...