Commit bc7fe1d9 authored by Jeremy Fitzhardinge

xen/mmu: tune pgtable alloc/release

Make sure the fastpath code is inlined.  Batch the page permission change
and the pin/unpin, and make sure that it can be batched with any
adjacent set_pte/pmd/etc operations.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Parent dcf7435c
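
The change replaces per-operation hypercalls with Xen multicalls: the page-permission flip and the pin/unpin are queued between xen_mc_batch() and xen_mc_issue(PARAVIRT_LAZY_MMU) and flushed to the hypervisor together. A minimal userspace sketch of that batching pattern, using hypothetical stand-ins (mc_batch/mc_entry/mc_issue and a plain array queue), not the kernel's real multicall machinery:

#include <stdio.h>

/* Hypothetical stand-ins for the Xen multicall queue (cf. __xen_mc_entry(),
 * xen_mc_batch() and xen_mc_issue() in the diff below); not the kernel's code. */
enum mc_cmd { MC_SET_PROT, MC_PIN, MC_UNPIN };

struct mc_entry {
	enum mc_cmd cmd;
	unsigned long pfn;
};

#define MC_BATCH 32
static struct mc_entry mc_queue[MC_BATCH];
static int mc_count;
static int hypercalls;		/* simulated traps into the hypervisor */

static void mc_batch(void)	/* open a batch, cf. xen_mc_batch() */
{
	mc_count = 0;
}

static void mc_entry(enum mc_cmd cmd, unsigned long pfn)
{
	/* queue one operation instead of trapping immediately */
	mc_queue[mc_count].cmd = cmd;
	mc_queue[mc_count].pfn = pfn;
	mc_count++;
}

static void mc_issue(void)	/* flush the batch, cf. xen_mc_issue() */
{
	if (mc_count > 0)
		hypercalls++;	/* all queued ops travel in one multicall */
	mc_count = 0;
}

int main(void)
{
	/* What xen_alloc_ptpage() now does for a pinned lowmem pte page:
	 * remap read-only and pin it with a single combined hypercall. */
	mc_batch();
	mc_entry(MC_SET_PROT, 0x1234);	/* __set_pfn_prot(pfn, PAGE_KERNEL_RO) */
	mc_entry(MC_PIN, 0x1234);	/* __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn) */
	mc_issue();

	printf("2 ops, %d hypercall(s)\n", hypercalls);	/* prints: 2 ops, 1 hypercall(s) */
	return 0;
}
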
@@ -1497,22 +1497,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+	struct multicall_space mcs;
+	struct mmuext_op *op;
+
+	mcs = __xen_mc_entry(sizeof(*op));
+	op = mcs.args;
+	op->cmd = cmd;
+	op->arg1.mfn = pfn_to_mfn(pfn);
+
+	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	struct multicall_space mcs;
+	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+	mcs = __xen_mc_entry(0);
+	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
+				pfn_pte(pfn, prot), 0);
+}
+
 /* This needs to make sure the new pte page is pinned iff its being
    attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+				    unsigned level)
 {
-	struct page *page = pfn_to_page(pfn);
-	int pinned = PagePinned(virt_to_page(mm->pgd));
+	bool pinned = PagePinned(virt_to_page(mm->pgd));
 
 	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
 
 	if (pinned) {
+		struct page *page = pfn_to_page(pfn);
+
 		SetPagePinned(page);
 
 		if (!PageHighMem(page)) {
-			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+			xen_mc_batch();
+
+			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
 		} else {
 			/* make sure there are no stray mappings of
 			   this page */
@@ -1532,7 +1562,7 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 }
 
 /* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 {
 	struct page *page = pfn_to_page(pfn);
 	bool pinned = PagePinned(page);
@@ -1541,9 +1571,14 @@ static void xen_release_ptpage(unsigned long pfn, unsigned level)
 	if (pinned) {
 		if (!PageHighMem(page)) {
+			xen_mc_batch();
+
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+			__set_pfn_prot(pfn, PAGE_KERNEL);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
 		}
 		ClearPagePinned(page);
 	}
 }
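
Issuing with PARAVIRT_LAZY_MMU is what lets these batches merge with adjacent set_pte/pmd/etc operations: inside a lazy-MMU region the issue does not flush, so neighbouring pte updates join the same pending multicall. A simplified sketch of that flush policy, with illustrative stand-ins rather than the kernel's exact logic:

#include <stdio.h>

/* Illustrative stand-ins for the paravirt lazy-mode plumbing. */
#define LAZY_NONE 0
#define LAZY_MMU  1

static int lazy_mode = LAZY_NONE;	/* cf. paravirt_get_lazy_mode() */
static int pending_ops;			/* multicalls queued so far */

static void mc_flush(void)		/* one trap for all pending ops */
{
	printf("flushing %d op(s) in one multicall\n", pending_ops);
	pending_ops = 0;
}

/* cf. xen_mc_issue(mode): flush now unless we are inside a matching
 * lazy region, in which case adjacent pte updates join the batch. */
static void mc_issue(int mode)
{
	if ((lazy_mode & mode) == 0)
		mc_flush();
}

int main(void)
{
	/* Outside a lazy region: the batch is flushed immediately. */
	pending_ops = 2;		/* e.g. set-prot + pin */
	mc_issue(LAZY_MMU);

	/* Inside a lazy region: the ops stay queued and are flushed
	 * together with later updates when the region ends. */
	lazy_mode = LAZY_MMU;
	pending_ops = 2;
	mc_issue(LAZY_MMU);		/* no trap yet */
	pending_ops += 3;		/* adjacent set_pte()s join the batch */
	lazy_mode = LAZY_NONE;		/* leaving the lazy region... */
	mc_flush();			/* ...flushes all 5 ops at once */
	return 0;
}
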