openeuler / Kernel

Commit abb9e0b8

Authored Aug 22, 2008 by Avi Kivity

KVM: MMU: Convert the paging mode shadow walk to use the generic walker

Signed-off-by: Avi Kivity <avi@qumranet.com>

Parent: 140754bc
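This change replaces the open-coded page-table descent in FNAME(fetch) with the generic shadow walker added by the parent commit. The per-level work moves into a new callback, FNAME(shadow_walk_entry), which walk_shadow() invokes for each level of the shadow hierarchy it visits; the state the callback needs (the guest walker, fault flags, the target pfn, and a slot for the resulting sptep) travels in a new struct shadow_walker that embeds the generic struct kvm_shadow_walk.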
Showing 1 changed file with 86 additions and 72 deletions (+86 −72)
arch/x86/kvm/paging_tmpl.h  +86 −72
@@ -25,6 +25,7 @@
 #if PTTYPE == 64
 #define pt_element_t u64
 #define guest_walker guest_walker64
+#define shadow_walker shadow_walker64
 #define FNAME(name) paging##64_##name
 #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
 #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
@@ -41,6 +42,7 @@
 #elif PTTYPE == 32
 #define pt_element_t u32
 #define guest_walker guest_walker32
+#define shadow_walker shadow_walker32
 #define FNAME(name) paging##32_##name
 #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
 #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
@@ -71,6 +73,17 @@ struct guest_walker {
 	u32 error_code;
 };
 
+struct shadow_walker {
+	struct kvm_shadow_walk walker;
+	struct guest_walker *guest_walker;
+	int user_fault;
+	int write_fault;
+	int largepage;
+	int *ptwrite;
+	pfn_t pfn;
+	u64 *sptep;
+};
+
 static gfn_t gpte_to_gfn(pt_element_t gpte)
 {
 	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
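The struct shadow_walker added above uses the standard Linux embedding idiom: the generic struct kvm_shadow_walk is placed inside a caller-specific context struct, and the callback recovers its context with container_of(). As a minimal standalone sketch of the pattern (the walker, callback, and field names below are invented for illustration and are not part of the KVM API):

#include <stdio.h>
#include <stddef.h>

/* container_of(), as the kernel defines it: recover the outer struct
 * from a pointer to one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic walker: knows only how to visit levels and call back. */
struct walk {
	int (*entry)(struct walk *w, int level);
};

/* Caller-specific context embedding the generic walker. */
struct my_walker {
	struct walk walker;	/* must be embedded, not pointed to */
	int visited;		/* extra state the callback needs */
};

static int my_entry(struct walk *_w, int level)
{
	/* Recover the outer context from the embedded member. */
	struct my_walker *mw = container_of(_w, struct my_walker, walker);

	mw->visited++;
	printf("visiting level %d\n", level);
	return level == 1;	/* nonzero terminates the walk */
}

/* Stand-in for walk_shadow(): descend levels until entry says stop. */
static void do_walk(struct walk *w, int top_level)
{
	int level;

	for (level = top_level; level > 0; level--)
		if (w->entry(w, level))
			break;
}

int main(void)
{
	struct my_walker mw = {
		.walker = { .entry = my_entry, },
		.visited = 0,
	};

	do_walk(&mw.walker, 4);
	printf("levels visited: %d\n", mw.visited);
	return 0;
}

FNAME(shadow_walk_entry) in the diff below follows the same shape: it recovers its struct shadow_walker with container_of(), does the per-level work, and returns 1 to stop the descent.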
@@ -272,86 +285,86 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *walker,
-			 int user_fault, int write_fault, int largepage,
-			 int *ptwrite, pfn_t pfn)
+static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
+				    struct kvm_vcpu *vcpu, gva_t addr,
+				    u64 *sptep, int level)
 {
-	hpa_t shadow_addr;
-	int level;
-	u64 *shadow_ent;
-	unsigned access = walker->pt_access;
-
-	if (!is_present_pte(walker->ptes[walker->level - 1]))
-		return NULL;
-
-	shadow_addr = vcpu->arch.mmu.root_hpa;
-	level = vcpu->arch.mmu.shadow_root_level;
-	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
-		shadow_addr &= PT64_BASE_ADDR_MASK;
-		--level;
+	struct shadow_walker *sw =
+		container_of(_sw, struct shadow_walker, walker);
+	struct guest_walker *gw = sw->guest_walker;
+	unsigned access = gw->pt_access;
+	struct kvm_mmu_page *shadow_page;
+	u64 spte;
+	int metaphysical;
+	gfn_t table_gfn;
+	int r;
+	pt_element_t curr_pte;
+
+	if (level == PT_PAGE_TABLE_LEVEL
+	    || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
+		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
+			     sw->user_fault, sw->write_fault,
+			     gw->ptes[gw->level - 1] & PT_DIRTY_MASK,
+			     sw->ptwrite, sw->largepage, gw->gfn, sw->pfn,
+			     false);
+		sw->sptep = sptep;
+		return 1;
 	}
 
-	for (; ; level--) {
-		u32 index = SHADOW_PT_INDEX(addr, level);
-		struct kvm_mmu_page *shadow_page;
-		u64 shadow_pte;
-		int metaphysical;
-		gfn_t table_gfn;
-
-		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
-		if (level == PT_PAGE_TABLE_LEVEL)
-			break;
-
-		if (largepage && level == PT_DIRECTORY_LEVEL)
-			break;
+	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+		return 0;
 
-		if (is_shadow_present_pte(*shadow_ent)
-		    && !is_large_pte(*shadow_ent)) {
-			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
-			continue;
-		}
+	if (is_large_pte(*sptep))
+		rmap_remove(vcpu->kvm, sptep);
 
-		if (is_large_pte(*shadow_ent))
-			rmap_remove(vcpu->kvm, shadow_ent);
-
-		if (level - 1 == PT_PAGE_TABLE_LEVEL
-		    && walker->level == PT_DIRECTORY_LEVEL) {
-			metaphysical = 1;
-			if (!is_dirty_pte(walker->ptes[level - 1]))
-				access &= ~ACC_WRITE_MASK;
-			table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
-		} else {
-			metaphysical = 0;
-			table_gfn = walker->table_gfn[level - 2];
-		}
-		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level - 1,
-					       metaphysical, access,
-					       shadow_ent);
-		if (!metaphysical) {
-			int r;
-			pt_element_t curr_pte;
-			r = kvm_read_guest_atomic(vcpu->kvm,
-						  walker->pte_gpa[level - 2],
-						  &curr_pte, sizeof(curr_pte));
-			if (r || curr_pte != walker->ptes[level - 2]) {
-				kvm_release_pfn_clean(pfn);
-				return NULL;
-			}
+	if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
+		metaphysical = 1;
+		if (!is_dirty_pte(gw->ptes[level - 1]))
+			access &= ~ACC_WRITE_MASK;
+		table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+	} else {
+		metaphysical = 0;
+		table_gfn = gw->table_gfn[level - 2];
+	}
+	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level - 1,
+				       metaphysical, access, sptep);
+	if (!metaphysical) {
+		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
+					  &curr_pte, sizeof(curr_pte));
+		if (r || curr_pte != gw->ptes[level - 2]) {
+			kvm_release_pfn_clean(sw->pfn);
+			sw->sptep = NULL;
+			return 1;
 		}
-		shadow_addr = __pa(shadow_page->spt);
-		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
-			| PT_WRITABLE_MASK | PT_USER_MASK;
-		set_shadow_pte(shadow_ent, shadow_pte);
 	}
 
-	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
-		     user_fault, write_fault,
-		     walker->ptes[walker->level - 1] & PT_DIRTY_MASK,
-		     ptwrite, largepage, walker->gfn, pfn, false);
+	spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
+		| PT_WRITABLE_MASK | PT_USER_MASK;
+	*sptep = spte;
+	return 0;
+}
+
+static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+			 struct guest_walker *guest_walker,
+			 int user_fault, int write_fault, int largepage,
+			 int *ptwrite, pfn_t pfn)
+{
+	struct shadow_walker walker = {
+		.walker = { .entry = FNAME(shadow_walk_entry), },
+		.guest_walker = guest_walker,
+		.user_fault = user_fault,
+		.write_fault = write_fault,
+		.largepage = largepage,
+		.ptwrite = ptwrite,
+		.pfn = pfn,
+	};
+
+	if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
+		return NULL;
 
-	return shadow_ent;
+	walk_shadow(&walker.walker, vcpu, addr);
+
+	return walker.sptep;
 }
 
 /*
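Note the return-value protocol in the new code: FNAME(shadow_walk_entry) returns 0 to let walk_shadow() keep descending, and 1 to terminate the walk, either at the leaf (after installing the final spte and stashing its address in sw->sptep) or when kvm_read_guest_atomic() shows the guest PTE changed underneath us, in which case sw->sptep is left NULL so FNAME(fetch) returns NULL and the fault path can bail out and be retried.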
@@ -499,6 +512,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 #undef pt_element_t
 #undef guest_walker
+#undef shadow_walker
 #undef FNAME
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX