Commit 719ea79e authored by Chris Metcalf

arch/tile: fix up locking in pgtable.c slightly

We should be holding the init_mm.page_table_lock in shatter_huge_page()
since we are modifying the kernel page tables.  Then, only if we are
walking the other root page tables to update them, do we want to take
the pgd_lock.

Also add a comment noting that we always take the pgd_lock with
interrupts disabled, and that we are therefore not at risk from the
tlbflush IPI deadlock seen on x86.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Parent 5f220704
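
The resulting lock nesting can be summarized in a short C sketch. This is
not the patched function itself: the helper name is illustrative, the
page-table manipulation is elided, and only the locking order shown in
the diff below is reproduced.

#include <linux/mm.h>		/* init_mm, struct mm_struct */
#include <linux/spinlock.h>

extern spinlock_t pgd_lock;	/* defined in arch/tile/mm/pgtable.c */

/* Illustrative-only sketch of the lock nesting in shatter_huge_page(). */
static void shatter_huge_page_locking_sketch(void)
{
	unsigned long flags;

	/* Outer lock: we are modifying the kernel (init_mm) page tables. */
	spin_lock_irqsave(&init_mm.page_table_lock, flags);

	/* ... convert the huge kernel mapping to small pages ... */

#ifdef __PAGETABLE_PMD_FOLDED
	/*
	 * Inner lock: taken only while walking pgd_list to propagate the
	 * update into the other root page tables.  Interrupts are already
	 * disabled, so a plain spin_lock() suffices.
	 */
	spin_lock(&pgd_lock);
	/* ... copy the new pmd into every pgd on pgd_list ... */
	spin_unlock(&pgd_lock);
#endif

	/* Held across the remote TLB flush to avoid races. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}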
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
 	if (!pmd_huge_page(*pmd))
 		return;
 
-	/*
-	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
-	 * and since we need some kind of lock here to avoid races.
-	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock_irqsave(&init_mm.page_table_lock, flags);
 	if (!pmd_huge_page(*pmd)) {
 		/* Lost the race to convert the huge page. */
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 		return;
 	}
 
@@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
 
 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
+	spin_lock(&pgd_lock);
 	list_for_each(pos, &pgd_list) {
 		pmd_t *copy_pmd;
 		pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
 		copy_pmd = pmd_offset(pud, addr);
 		__set_pmd(copy_pmd, *pmd);
 	}
+	spin_unlock(&pgd_lock);
 #endif
 
 	/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
 			     cpu_possible_mask, NULL, 0);
 
 	/* Hold the lock until the TLB flush is finished to avoid races. */
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 }
 
 /*
@@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
  */
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);