Commit b8bcfe99 authored by Jeremy Fitzhardinge

x86/paravirt: remove lazy mode in interrupts

Impact: simplification, robustness

Make paravirt_get_lazy_mode() always return PARAVIRT_LAZY_NONE
when in an interrupt.  This prevents interrupt code from
accidentally inheriting an outer lazy state: updates issued from
interrupt context are performed synchronously, while the
interrupted context's batched operations remain deferred.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Parent a8a93f3f
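
The change is easiest to see in miniature.  Below is a minimal
userspace sketch of the new semantics; the mock_* names and the
in_interrupt_flag variable are stand-ins invented for this
illustration, not kernel APIs.  An update issued from "interrupt"
context is applied immediately, while updates inside the outer lazy
section remain batched:

#include <stdio.h>
#include <stdbool.h>

enum paravirt_lazy_mode { PARAVIRT_LAZY_NONE, PARAVIRT_LAZY_MMU };

/* Pretend the interrupted task is inside an outer lazy-MMU section. */
static enum paravirt_lazy_mode lazy_mode = PARAVIRT_LAZY_MMU;
static bool in_interrupt_flag;	/* stands in for in_interrupt() */

static enum paravirt_lazy_mode mock_get_lazy_mode(void)
{
	/*
	 * Interrupt context always reports LAZY_NONE, so its updates
	 * go out synchronously; the interrupted context's batched
	 * updates stay deferred.
	 */
	if (in_interrupt_flag)
		return PARAVIRT_LAZY_NONE;
	return lazy_mode;
}

static void mock_set_pte(int pte)
{
	if (mock_get_lazy_mode() == PARAVIRT_LAZY_MMU)
		printf("pte %d: queued, flushed when lazy mode ends\n", pte);
	else
		printf("pte %d: written immediately\n", pte);
}

int main(void)
{
	mock_set_pte(1);		/* in the lazy section: batched */
	in_interrupt_flag = true;
	mock_set_pte(2);		/* from an interrupt: synchronous */
	in_interrupt_flag = false;
	mock_set_pte(3);		/* lazy section resumes: batched */
	return 0;
}

With the mode forced to PARAVIRT_LAZY_NONE in interrupts, the explicit
arch_flush_lazy_mmu_mode() calls that the hunks below delete are no
longer needed: interrupt-time atomic mappings such as kmap_atomic()
never see stale batched state in the first place.
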
@@ -282,6 +282,9 @@ void paravirt_leave_lazy_cpu(void)
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 {
+	if (in_interrupt())
+		return PARAVIRT_LAZY_NONE;
+
 	return __get_cpu_var(paravirt_lazy_mode);
 }
@@ -225,12 +225,10 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 	if (!pmd_present(*pmd_k))
 		return NULL;
 
-	if (!pmd_present(*pmd)) {
+	if (!pmd_present(*pmd))
 		set_pmd(pmd, *pmd_k);
-		arch_flush_lazy_mmu_mode();
-	} else {
+	else
 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
-	}
 
 	return pmd_k;
 }
@@ -87,7 +87,6 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 	set_pte(kmap_pte-idx, mk_pte(page, prot));
-	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
 }
@@ -117,7 +116,6 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 	}
 
-	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
@@ -74,7 +74,6 @@ iounmap_atomic(void *kvaddr, enum km_type type)
 	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 
-	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
@@ -824,13 +824,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	vm_unmap_aliases();
 
-	/*
-	 * If we're called with lazy mmu updates enabled, the
-	 * in-memory pte state may be stale.  Flush pending updates to
-	 * bring them up to date.
-	 */
-	arch_flush_lazy_mmu_mode();
-
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -873,13 +866,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
-	/*
-	 * If we've been called with lazy mmu updates enabled, then
-	 * make sure that everything gets flushed out before we
-	 * return.
-	 */
-	arch_flush_lazy_mmu_mode();
-
 out:
 	return ret;
 }