Commit d40a1ee4 — Author: Sheng Yang; Committer: Avi Kivity

KVM: MMU: Modify kvm_shadow_walk.entry to accept u64 addr

EPT is 4-level by default in 32-bit PAE mode (48-bit addresses), but the
addr parameter of kvm_shadow_walk->entry() only accepts an unsigned long
as the virtual address, which is 32 bits wide in 32-bit PAE. This results
in SHADOW_PT_INDEX() overflowing when trying to fetch the level-4 index.

Fix it by extending kvm_shadow_walk->entry() to accept a 64-bit addr
parameter.
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 8c4b537d
@@ -144,7 +144,7 @@ struct kvm_rmap_desc {

 struct kvm_shadow_walk {
 	int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
-		     gva_t addr, u64 *spte, int level);
+		     u64 addr, u64 *spte, int level);
 };

 static struct kmem_cache *pte_chain_cache;
@@ -941,7 +941,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 }

 static int walk_shadow(struct kvm_shadow_walk *walker,
-		       struct kvm_vcpu *vcpu, gva_t addr)
+		       struct kvm_vcpu *vcpu, u64 addr)
 {
 	hpa_t shadow_addr;
 	int level;
@@ -1270,7 +1270,7 @@ struct direct_shadow_walk {

 static int direct_map_entry(struct kvm_shadow_walk *_walk,
 			    struct kvm_vcpu *vcpu,
-			    gva_t addr, u64 *sptep, int level)
+			    u64 addr, u64 *sptep, int level)
 {
 	struct direct_shadow_walk *walk =
 		container_of(_walk, struct direct_shadow_walk, walker);
@@ -1289,7 +1289,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk,
 	if (*sptep == shadow_trap_nonpresent_pte) {
 		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
-		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, addr, level - 1,
+		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
 				      1, ACC_ALL, sptep);
 		if (!sp) {
 			pgprintk("nonpaging_map: ENOMEM\n");
@@ -1317,7 +1317,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		.pt_write = 0,
 	};

-	r = walk_shadow(&walker.walker, vcpu, (gva_t)gfn << PAGE_SHIFT);
+	r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
 	if (r < 0)
 		return r;
 	return walker.pt_write;
......
@@ -286,7 +286,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
 static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
-				    struct kvm_vcpu *vcpu, gva_t addr,
+				    struct kvm_vcpu *vcpu, u64 addr,
 				    u64 *sptep, int level)
 {
 	struct shadow_walker *sw =
@@ -326,7 +326,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
 		metaphysical = 0;
 		table_gfn = gw->table_gfn[level - 2];
 	}
-	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
+	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
 				      metaphysical, access, sptep);
 	if (!metaphysical) {
 		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.