提交 56eecdb9 编写于 作者: A Aneesh Kumar K.V 提交者: Benjamin Herrenschmidt

mm: Use ptep/pmdp_set_numa() for updating _PAGE_NUMA bit

Archs like ppc64 don't do a TLB flush in set_pte/pmd functions when using
a hash table MMU for various reasons (the flush is handled as part of
the PTE modification when necessary).

ppc64 thus doesn't implement flush_tlb_range for hash based MMUs.

Additionally, ppc64 requires the TLB flushing to be batched within ptl locks.

The reason to do that is to ensure that the hash page table is in sync with
linux page table.

We track the hpte index in linux pte and if we clear them without flushing
hash and drop the ptl lock, we can have another cpu update the pte and can
end up with duplicate entry in the hash table, which is fatal.

We also want to keep set_pte_at simpler by not requiring them to do hash
flush for performance reason. We do that by assuming that set_pte_at() is
never *ever* called on a PTE that is already valid.

This was the case until the NUMA code went in which broke that assumption.

Fix that by introducing a new pair of helpers to set _PAGE_NUMA in a
way similar to ptep/pmdp_set_wrprotect(), with a generic implementation
using set_pte_at() and a powerpc specific one using the appropriate
mechanism needed to keep the hash table in sync.
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
上级 9d85d586
...@@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte) ...@@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte; return pte;
} }
#define ptep_set_numa ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
VM_BUG_ON(1);
pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
return;
}
#define pmd_numa pmd_numa #define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd) static inline int pmd_numa(pmd_t pmd)
{ {
return pte_numa(pmd_pte(pmd)); return pte_numa(pmd_pte(pmd));
} }
#define pmdp_set_numa pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
VM_BUG_ON(1);
pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
return;
}
#define pmd_mknonnuma pmd_mknonnuma #define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd) static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{ {
......
...@@ -701,6 +701,18 @@ static inline pte_t pte_mknuma(pte_t pte) ...@@ -701,6 +701,18 @@ static inline pte_t pte_mknuma(pte_t pte)
} }
#endif #endif
#ifndef ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t ptent = *ptep;
ptent = pte_mknuma(ptent);
set_pte_at(mm, addr, ptep, ptent);
return;
}
#endif
#ifndef pmd_mknuma #ifndef pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd) static inline pmd_t pmd_mknuma(pmd_t pmd)
{ {
...@@ -708,6 +720,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd) ...@@ -708,6 +720,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
return pmd_clear_flags(pmd, _PAGE_PRESENT); return pmd_clear_flags(pmd, _PAGE_PRESENT);
} }
#endif #endif
#ifndef pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
pmd = pmd_mknuma(pmd);
set_pmd_at(mm, addr, pmdp, pmd);
return;
}
#endif
#else #else
extern int pte_numa(pte_t pte); extern int pte_numa(pte_t pte);
extern int pmd_numa(pmd_t pmd); extern int pmd_numa(pmd_t pmd);
...@@ -715,6 +739,8 @@ extern pte_t pte_mknonnuma(pte_t pte); ...@@ -715,6 +739,8 @@ extern pte_t pte_mknonnuma(pte_t pte);
extern pmd_t pmd_mknonnuma(pmd_t pmd); extern pmd_t pmd_mknonnuma(pmd_t pmd);
extern pte_t pte_mknuma(pte_t pte); extern pte_t pte_mknuma(pte_t pte);
extern pmd_t pmd_mknuma(pmd_t pmd); extern pmd_t pmd_mknuma(pmd_t pmd);
extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
#else #else
static inline int pmd_numa(pmd_t pmd) static inline int pmd_numa(pmd_t pmd)
...@@ -742,10 +768,23 @@ static inline pte_t pte_mknuma(pte_t pte) ...@@ -742,10 +768,23 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte; return pte;
} }
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
return;
}
static inline pmd_t pmd_mknuma(pmd_t pmd) static inline pmd_t pmd_mknuma(pmd_t pmd)
{ {
return pmd; return pmd;
} }
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
return ;
}
#endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
......
...@@ -1545,6 +1545,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1545,6 +1545,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
entry = pmd_mknonnuma(entry); entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot); entry = pmd_modify(entry, newprot);
ret = HPAGE_PMD_NR; ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
BUG_ON(pmd_write(entry)); BUG_ON(pmd_write(entry));
} else { } else {
struct page *page = pmd_page(*pmd); struct page *page = pmd_page(*pmd);
...@@ -1557,16 +1558,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1557,16 +1558,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
*/ */
if (!is_huge_zero_page(page) && if (!is_huge_zero_page(page) &&
!pmd_numa(*pmd)) { !pmd_numa(*pmd)) {
entry = *pmd; pmdp_set_numa(mm, addr, pmd);
entry = pmd_mknuma(entry);
ret = HPAGE_PMD_NR; ret = HPAGE_PMD_NR;
} }
} }
/* Set PMD if cleared earlier */
if (ret == HPAGE_PMD_NR)
set_pmd_at(mm, addr, pmd, entry);
spin_unlock(ptl); spin_unlock(ptl);
} }
......
...@@ -69,12 +69,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -69,12 +69,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
} else { } else {
struct page *page; struct page *page;
ptent = *pte;
page = vm_normal_page(vma, addr, oldpte); page = vm_normal_page(vma, addr, oldpte);
if (page && !PageKsm(page)) { if (page && !PageKsm(page)) {
if (!pte_numa(oldpte)) { if (!pte_numa(oldpte)) {
ptent = pte_mknuma(ptent); ptep_set_numa(mm, addr, pte);
set_pte_at(mm, addr, pte, ptent);
updated = true; updated = true;
} }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册