Commit 90603d15 authored by Matias Zabaljauregui, committed by Rusty Russell

lguest: use native_set_* macros, which properly handle 64-bit entries when PAE is activated

Some cleanups, and replace direct assignment with the native_set_* macros, which properly handle 64-bit entries when PAE is activated.
Signed-off-by: Matias Zabaljauregui <zabaljauregui@gmail.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Parent ed1dc778
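Background on the fix (a sketch, not part of this commit): with CONFIG_X86_PAE a page-table entry is 64 bits wide, but a 32-bit CPU stores it as two separate 32-bit writes, so a plain "*ptep = pteval" can be observed half-updated by the hardware page walker. The native_set_* macros order the two halves safely; the 3-level implementation in arch/x86/include/asm/pgtable-3level.h is essentially:

	static inline void native_set_pte(pte_t *ptep, pte_t pte)
	{
		/* Store the high word first: the entry only becomes
		 * present once the low word (which holds _PAGE_PRESENT)
		 * lands, so no mismatched entry is ever visible. */
		ptep->pte_high = pte.pte_high;
		smp_wmb();
		ptep->pte_low = pte.pte_low;
	}

Without PAE, pte_t is a single 32-bit word and native_set_pte() reduces to the old direct assignment, so non-PAE configurations are unaffected.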
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -525,7 +525,7 @@ static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
 static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pteval)
 {
-	*ptep = pteval;
+	native_set_pte(ptep, pteval);
 	lguest_pte_update(mm, addr, ptep);
 }
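For orientation: the lguest_set_* functions in this file are the Guest's paravirt hooks, so every page-table write the Guest kernel makes funnels through them. Assuming the usual lguest_init() wiring of this era (a rough sketch, not part of this diff), they are installed as:

	pv_mmu_ops.set_pte = lguest_set_pte;
	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
	pv_mmu_ops.set_pmd = lguest_set_pmd;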
@@ -534,9 +534,9 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
  * changed. */
 static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
-	*pmdp = pmdval;
+	native_set_pmd(pmdp, pmdval);
 	lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
-		   (__pa(pmdp) & (PAGE_SIZE - 1)) / 4);
+		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
 }
 
 /* There are a couple of legacy places where the kernel sets a PTE, but we
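The other fix in this hunk is the same PAE issue in index form: the hypercall passes the index of the PMD entry within its page, and the hardcoded 4 assumed 4-byte entries. Under PAE a pmd_t is 8 bytes, so for an entry at byte offset 0x18 the old code would pass 0x18 / 4 = 6 where the correct index is 0x18 / 8 = 3; dividing by sizeof(pmd_t) is right in both configurations.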
@@ -550,7 +550,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
  * which brings boot back to 0.25 seconds. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
-	*ptep = pteval;
+	native_set_pte(ptep, pteval);
 	if (cr3_changed)
 		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
 }
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -90,7 +90,7 @@ static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
 	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
 	/* You should never call this if the PGD entry wasn't valid */
 	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
-	return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
+	return &page[pte_index(vaddr)];
 }
 
 /* These two functions just like the above two, except they access the Guest
@@ -105,7 +105,7 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
 {
 	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
 	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
-	return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
+	return gpage + pte_index(vaddr) * sizeof(pte_t);
 }
 /*:*/
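pte_index() replaces the open-coded modulo in both helpers. PTRS_PER_PTE is a power of two (1024 with 2-level paging, 512 under PAE), and the kernel defines the helper as the same computation expressed with a mask, approximately:

	#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

so the index automatically tracks the active paging mode instead of baking in one table size.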
@@ -171,7 +171,7 @@ static void release_pte(pte_t pte)
 	/* Remember that get_user_pages_fast() took a reference to the page, in
 	 * get_pfn()?  We have to put it back now. */
 	if (pte_flags(pte) & _PAGE_PRESENT)
-		put_page(pfn_to_page(pte_pfn(pte)));
+		put_page(pte_page(pte));
 }
 /*:*/
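pte_page() is the canonical spelling of the same two-step lookup; on x86 it expands to roughly:

	#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

making this hunk a pure cleanup with no behavioural change.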
@@ -273,7 +273,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		 * table entry, even if the Guest says it's writable.  That way
 		 * we will come back here when a write does actually occur, so
 		 * we can update the Guest's _PAGE_DIRTY flag. */
-		*spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);
+		native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
 
 	/* Finally, we write the Guest PTE entry back: we've set the
 	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
@@ -323,7 +323,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 }
 
 /*H:450 If we chase down the release_pgd() code, it looks like this: */
-static void release_pgd(struct lguest *lg, pgd_t *spgd)
+static void release_pgd(pgd_t *spgd)
 {
 	/* If the entry's not present, there's nothing to release. */
 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -350,7 +350,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
 	unsigned int i;
 	/* Release every pgd entry up to the kernel's address. */
 	for (i = 0; i < pgd_index(lg->kernel_address); i++)
-		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
+		release_pgd(lg->pgdirs[idx].pgdir + i);
 }
 
 /*H:440 (v) Flushing (throwing away) page tables,
@@ -431,7 +431,7 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
 /*H:430 (iv) Switching page tables
  *
- * Now we've seen all the page table setting and manipulation, let's see what
+ * Now we've seen all the page table setting and manipulation, let's see
  * what happens when the Guest changes page tables (ie. changes the top-level
  * pgdir).  This occurs on almost every context switch. */
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
@@ -463,7 +463,7 @@ static void release_all_pagetables(struct lguest *lg)
 		if (lg->pgdirs[i].pgdir)
 			/* Every PGD entry except the Switcher at the top */
 			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
-				release_pgd(lg, lg->pgdirs[i].pgdir + j);
+				release_pgd(lg->pgdirs[i].pgdir + j);
 }
 
 /* We also throw away everything when a Guest tells us it's changed a kernel
@@ -581,7 +581,7 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
 	pgdir = find_pgdir(lg, gpgdir);
 	if (pgdir < ARRAY_SIZE(lg->pgdirs))
 		/* ... throw it away. */
-		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
+		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
 }
 
 /* Once we know how much memory we have we can construct simple identity
@@ -726,8 +726,9 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 	 * page is already mapped there, we don't have to copy them out
 	 * again. */
 	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
-	regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
-	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
+	native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
+	native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
+		       regs_pte);
 }
 /*:*/
@@ -752,21 +753,21 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
 
 	/* The first entries are easy: they map the Switcher code. */
 	for (i = 0; i < pages; i++) {
-		pte[i] = mk_pte(switcher_page[i],
-				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
+		native_set_pte(&pte[i], mk_pte(switcher_page[i],
+				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
 	}
 
 	/* The only other thing we map is this CPU's pair of pages. */
 	i = pages + cpu*2;
 
 	/* First page (Guest registers) is writable from the Guest */
-	pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
-			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));
+	native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
+			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
 
 	/* The second page contains the "struct lguest_ro_state", and is
 	 * read-only. */
-	pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
-			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
+	native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
+			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
 }
 
 /* We've made it through the page table code.  Perhaps our tired brains are