openeuler / raspberrypi-kernel
Commit c1a5d4f9, authored Nov 25, 2007 by Avi Kivity
KVM: Replace #GP injection by the generalized exception queue
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent: c3c91fee
Showing 5 changed files with 30 additions and 59 deletions (+30 -59)
drivers/kvm/svm.c           +2  -15
drivers/kvm/vmx.c           +2  -16
drivers/kvm/x86.c           +19 -24
drivers/kvm/x86.h           +5  -2
drivers/kvm/x86_emulate.c   +2  -2
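In short: rather than having each vendor module write a #GP directly into its hardware injection fields (SVM's event_inj, VMX's VM_ENTRY_INTR_INFO_FIELD) at every call site, common code now records the fault on the generic exception queue through a single kvm_inject_gp() helper, and the vendor side delivers whatever is pending at the next guest entry. Below is a minimal, self-contained C model of that pattern, reconstructed from the hunks that follow; the struct layout and the delivery step are simplified stand-ins rather than the kernel's actual definitions, and only kvm_inject_gp() itself mirrors the helper added in drivers/kvm/x86.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GP_VECTOR 13	/* #GP vector number, as in the kernel headers */

/* Simplified stand-in for the vcpu and its exception queue (illustrative only). */
struct kvm_vcpu {
	struct {
		bool     pending;
		bool     has_error_code;
		unsigned nr;
		uint32_t error_code;
	} exception;
};

/* Models kvm_queue_exception_e(): record a pending exception that carries
 * an error code instead of writing vendor injection fields immediately. */
static void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr,
				  uint32_t error_code)
{
	vcpu->exception.pending = true;
	vcpu->exception.has_error_code = true;
	vcpu->exception.nr = nr;
	vcpu->exception.error_code = error_code;
}

/* The helper this commit adds to drivers/kvm/x86.h, minus the kernel types. */
static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, uint32_t error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	/* Common code merely flags the fault... */
	kvm_inject_gp(&vcpu, 0);

	/* ...and vendor code (VMX: VM_ENTRY_INTR_INFO_FIELD, SVM: event_inj)
	 * would program the pending entry at the next guest entry. */
	if (vcpu.exception.pending)
		printf("deliver vector %u, error code %u\n",
		       vcpu.exception.nr, (unsigned)vcpu.exception.error_code);
	return 0;
}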
drivers/kvm/svm.c
@@ -207,17 +207,6 @@ static bool svm_exception_injected(struct kvm_vcpu *vcpu)
 	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
 }
 
-static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-				       SVM_EVTINJ_VALID_ERR |
-				       SVM_EVTINJ_TYPE_EXEPT |
-				       GP_VECTOR;
-	svm->vmcb->control.event_inj_err = error_code;
-}
-
 static void inject_ud(struct kvm_vcpu *vcpu)
 {
 	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
@@ -1115,7 +1104,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u64 data;
 
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
 		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
@@ -1176,7 +1165,7 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
 	else
 		skip_emulated_instruction(&svm->vcpu);
 	return 1;
@@ -1688,8 +1677,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.tlb_flush = svm_flush_tlb,
 
-	.inject_gp = svm_inject_gp,
-
 	.run = svm_vcpu_run,
 	.handle_exit = handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
drivers/kvm/vmx.c
@@ -613,18 +613,6 @@ static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
 	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
 }
 
-static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
-	       vmcs_readl(GUEST_RIP));
-	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-		     GP_VECTOR |
-		     INTR_TYPE_EXCEPTION |
-		     INTR_INFO_DELIEVER_CODE_MASK |
-		     INTR_INFO_VALID_MASK);
-}
-
 static void vmx_inject_ud(struct kvm_vcpu *vcpu)
 {
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2083,7 +2071,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u64 data;
 
 	if (vmx_get_msr(vcpu, ecx, &data)) {
-		vmx_inject_gp(vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
@@ -2101,7 +2089,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
 
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
-		vmx_inject_gp(vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
@@ -2619,8 +2607,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.tlb_flush = vmx_flush_tlb,
 
-	.inject_gp = vmx_inject_gp,
-
 	.run = vmx_vcpu_run,
 	.handle_exit = kvm_handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
drivers/kvm/x86.c
@@ -128,11 +128,6 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-static void inject_gp(struct kvm_vcpu *vcpu)
-{
-	kvm_x86_ops->inject_gp(vcpu, 0);
-}
-
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
 	WARN_ON(vcpu->exception.pending);
@@ -232,20 +227,20 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
 		       cr0, vcpu->cr0);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
 		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
 		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
 		       "and a clear PE flag\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -257,14 +252,14 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			if (!is_pae(vcpu)) {
 				printk(KERN_DEBUG "set_cr0: #GP, start paging "
 				       "in long mode while PAE is disabled\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 			if (cs_l) {
 				printk(KERN_DEBUG "set_cr0: #GP, start paging "
 				       "in long mode while CS.L == 1\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
@@ -273,7 +268,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
 			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
 			       "reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
@@ -299,7 +294,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -307,19 +302,19 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		if (!(cr4 & X86_CR4_PAE)) {
 			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
 			       "in long mode\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
 		   && !load_pdptrs(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
 		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
@@ -340,7 +335,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (is_long_mode(vcpu)) {
 		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
 			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else {
@@ -348,13 +343,13 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 			if (cr3 & CR3_PAE_RESERVED_BITS) {
 				printk(KERN_DEBUG
 				       "set_cr3: #GP, reserved bits\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
 				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
 				       "reserved bits\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 		}
@@ -375,7 +370,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	 * to debug) behavior on the guest side.
 	 */
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 	else {
 		vcpu->cr3 = cr3;
 		vcpu->mmu.new_cr3(vcpu);
@@ -388,7 +383,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 	if (irqchip_in_kernel(vcpu->kvm))
@@ -436,14 +431,14 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (efer & EFER_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
 		       efer);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_paging(vcpu)
 	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
 		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -2047,7 +2042,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
 		 */
 		pr_unimpl(vcpu, "guest string pio down\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 	vcpu->run->io.count = now;
@@ -2062,7 +2057,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		vcpu->pio.guest_pages[i] = page;
 		mutex_unlock(&vcpu->kvm->lock);
 		if (!page) {
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			free_pio_guest_pages(vcpu);
 			return 1;
 		}
drivers/kvm/x86.h
@@ -220,8 +220,6 @@ struct kvm_x86_ops {
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
 
-	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
-
 	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
 	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
@@ -467,6 +465,11 @@ static inline u32 get_rdx_init_val(void)
 	return 0x600; /* P6 family */
 }
 
+static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+{
+	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+}
+
 #define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
 #define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
 #define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
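A note on the design visible in this hunk: kvm_inject_gp() is deliberately a one-line inline over kvm_queue_exception_e(), which is what allows the per-vendor inject_gp callback to be deleted from kvm_x86_ops outright, along with its svm.c and vmx.c implementations above. Callers no longer need to know whether the fault ends up in the VMCB's event_inj field or the VMCS's VM_ENTRY_INTR_INFO_FIELD; they simply record that a #GP with a given error code is pending.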
drivers/kvm/x86_emulate.c
@@ -1779,7 +1779,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
 		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_inject_gp(ctxt->vcpu, 0);
 			c->eip = ctxt->vcpu->rip;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -1789,7 +1789,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		/* rdmsr */
 		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_inject_gp(ctxt->vcpu, 0);
 			c->eip = ctxt->vcpu->rip;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;