Commit a875dafc
Authored Sep 18, 2014 by Christoffer Dall
Merge remote-tracking branch 'kvm/next' into queue
Conflicts:
	arch/arm64/include/asm/kvm_host.h
	virt/kvm/arm/vgic.c
Parents: 0ba09511, f51770ed
Showing 46 changed files with 1,293 additions and 1,179 deletions
Documentation/virtual/kvm/api.txt      +117 -69
Documentation/virtual/kvm/mmu.txt      +14  -0
arch/arm/include/asm/kvm_host.h        +8   -5
arch/arm/kvm/arm.c                     +1   -20
arch/arm64/include/asm/kvm_host.h      +8   -4
arch/ia64/include/asm/kvm_host.h       +12  -3
arch/ia64/kvm/kvm-ia64.c               +2   -32
arch/mips/include/asm/kvm_host.h       +11  -5
arch/mips/kvm/mips.c                   +1   -43
arch/powerpc/include/asm/kvm_host.h    +8   -5
arch/powerpc/kvm/powerpc.c             +1   -30
arch/s390/include/asm/kvm_host.h       +30  -3
arch/s390/include/asm/pgalloc.h        +4   -4
arch/s390/include/asm/pgtable.h        +29  -43
arch/s390/include/asm/tlb.h            +1   -1
arch/s390/include/uapi/asm/kvm.h       +10  -0
arch/s390/kvm/diag.c                   +19  -9
arch/s390/kvm/gaccess.c                +1   -2
arch/s390/kvm/interrupt.c              +46  -105
arch/s390/kvm/kvm-s390.c               +99  -94
arch/s390/kvm/kvm-s390.h               +3   -3
arch/s390/kvm/priv.c                   +1   -10
arch/s390/mm/fault.c                   +18  -7
arch/s390/mm/pgtable.c                 +331 -374
arch/s390/mm/vmem.c                    +1   -1
arch/x86/include/asm/kvm_host.h        +9   -11
arch/x86/kvm/cpuid.h                   +8   -0
arch/x86/kvm/emulate.c                 +17  -10
arch/x86/kvm/lapic.c                   +2   -2
arch/x86/kvm/mmu.c                     +26  -23
arch/x86/kvm/mmu.h                     +5   -0
arch/x86/kvm/paging_tmpl.h             +16  -3
arch/x86/kvm/svm.c                     +25  -8
arch/x86/kvm/trace.h                   +6   -9
arch/x86/kvm/vmx.c                     +108 -52
arch/x86/kvm/x86.c                     +31  -29
arch/x86/kvm/x86.h                     +15  -5
include/linux/kvm_host.h               +3   -11
include/linux/kvm_types.h              +14  -0
include/trace/events/kvm.h             +20  -0
include/uapi/linux/kvm.h               +17  -11
virt/kvm/arm/vgic.c                    +79  -78
virt/kvm/ioapic.c                      +44  -2
virt/kvm/ioapic.h                      +2   -0
virt/kvm/kvm_main.c                    +55  -46
virt/kvm/vfio.c                        +15  -7
Documentation/virtual/kvm/api.txt
@@ -2565,6 +2565,120 @@ associated with the service will be forgotten, and subsequent RTAS
 calls by the guest for that service will be passed to userspace to be
 handled.
 
+4.87 KVM_SET_GUEST_DEBUG
+
+Capability: KVM_CAP_SET_GUEST_DEBUG
+Architectures: x86, s390, ppc
+Type: vcpu ioctl
+Parameters: struct kvm_guest_debug (in)
+Returns: 0 on success; -1 on error
+
+struct kvm_guest_debug {
+	__u32 control;
+	__u32 pad;
+	struct kvm_guest_debug_arch arch;
+};
+
+Set up the processor specific debug registers and configure vcpu for
+handling guest debug events. There are two parts to the structure, the
+first a control bitfield indicates the type of debug events to handle
+when running. Common control bits are:
+
+  - KVM_GUESTDBG_ENABLE:        guest debugging is enabled
+  - KVM_GUESTDBG_SINGLESTEP:    the next run should single-step
+
+The top 16 bits of the control field are architecture specific control
+flags which can include the following:
+
+  - KVM_GUESTDBG_USE_SW_BP:     using software breakpoints [x86]
+  - KVM_GUESTDBG_USE_HW_BP:     using hardware breakpoints [x86, s390]
+  - KVM_GUESTDBG_INJECT_DB:     inject DB type exception [x86]
+  - KVM_GUESTDBG_INJECT_BP:     inject BP type exception [x86]
+  - KVM_GUESTDBG_EXIT_PENDING:  trigger an immediate guest exit [s390]
+
+For example KVM_GUESTDBG_USE_SW_BP indicates that software breakpoints
+are enabled in memory so we need to ensure breakpoint exceptions are
+correctly trapped and the KVM run loop exits at the breakpoint and not
+running off into the normal guest vector. For KVM_GUESTDBG_USE_HW_BP
+we need to ensure the guest vCPUs architecture specific registers are
+updated to the correct (supplied) values.
+
+The second part of the structure is architecture specific and
+typically contains a set of debug registers.
+
+When debug events exit the main run loop with the reason
+KVM_EXIT_DEBUG with the kvm_debug_exit_arch part of the kvm_run
+structure containing architecture specific debug information.
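
As a rough illustration of how userspace drives this ioctl, a minimal sketch follows (not taken from the patch; the vcpu file descriptor is assumed to come from KVM_CREATE_VCPU and error handling is elided):

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Ask KVM to exit to userspace after every guest instruction. */
	static int enable_singlestep(int vcpu_fd)
	{
		struct kvm_guest_debug dbg;

		memset(&dbg, 0, sizeof(dbg));
		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

		/* Subsequent KVM_RUN calls now exit with KVM_EXIT_DEBUG. */
		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	}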
+4.88 KVM_GET_EMULATED_CPUID
+
+Capability: KVM_CAP_EXT_EMUL_CPUID
+Architectures: x86
+Type: system ioctl
+Parameters: struct kvm_cpuid2 (in/out)
+Returns: 0 on success, -1 on error
+
+struct kvm_cpuid2 {
+	__u32 nent;
+	__u32 flags;
+	struct kvm_cpuid_entry2 entries[0];
+};
+
+The member 'flags' is used for passing flags from userspace.
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC		BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT		BIT(2)
+
+struct kvm_cpuid_entry2 {
+	__u32 function;
+	__u32 index;
+	__u32 flags;
+	__u32 eax;
+	__u32 ebx;
+	__u32 ecx;
+	__u32 edx;
+	__u32 padding[3];
+};
+
+This ioctl returns x86 cpuid features which are emulated by
+kvm.Userspace can use the information returned by this ioctl to query
+which features are emulated by kvm instead of being present natively.
+
+Userspace invokes KVM_GET_EMULATED_CPUID by passing a kvm_cpuid2
+structure with the 'nent' field indicating the number of entries in
+the variable-size array 'entries'. If the number of entries is too low
+to describe the cpu capabilities, an error (E2BIG) is returned. If the
+number is too high, the 'nent' field is adjusted and an error (ENOMEM)
+is returned. If the number is just right, the 'nent' field is adjusted
+to the number of valid entries in the 'entries' array, which is then
+filled.
+
+The entries returned are the set CPUID bits of the respective features
+which kvm emulates, as returned by the CPUID instruction, with unknown
+or unsupported feature bits cleared.
+
+Features like x2apic, for example, may not be present in the host cpu
+but are exposed by kvm in KVM_GET_SUPPORTED_CPUID because they can be
+emulated efficiently and thus not included here.
+
+The fields in each entry are defined as follows:
+
+  function: the eax value used to obtain the entry
+  index: the ecx value used to obtain the entry (for entries that are
+         affected by ecx)
+  flags: an OR of zero or more of the following:
+        KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
+           if the index field is valid
+        KVM_CPUID_FLAG_STATEFUL_FUNC:
+           if cpuid for this function returns different values for successive
+           invocations; there will be several entries with the same function,
+           all with this flag set
+        KVM_CPUID_FLAG_STATE_READ_NEXT:
+           for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
+           the first entry to be read by a cpu
+  eax, ebx, ecx, edx: the values returned by the cpuid instruction for
+         this function/index combination
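
For a concrete sense of the 'nent' sizing dance described above, here is a minimal userspace sketch (not part of the patch; kvm_fd is assumed to be an open /dev/kvm descriptor, and only the E2BIG retry is handled):

	#include <errno.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Returns a heap-allocated kvm_cpuid2 describing the leaves KVM emulates
	 * in software, or NULL on failure.  On success cpuid->nent holds the
	 * number of valid entries. */
	static struct kvm_cpuid2 *get_emulated_cpuid(int kvm_fd)
	{
		int nent = 8;

		for (;;) {
			struct kvm_cpuid2 *cpuid;

			cpuid = calloc(1, sizeof(*cpuid) +
					  nent * sizeof(struct kvm_cpuid_entry2));
			if (!cpuid)
				return NULL;
			cpuid->nent = nent;

			if (ioctl(kvm_fd, KVM_GET_EMULATED_CPUID, cpuid) == 0)
				return cpuid;

			free(cpuid);
			if (errno != E2BIG)	/* ENOMEM, EFAULT, ... */
				return NULL;
			nent *= 2;		/* too small: grow and retry */
		}
	}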
 
 5. The kvm_run structure
 ------------------------
@@ -2861,78 +2975,12 @@ kvm_valid_regs for specific bits. These bits are architecture specific
 and usually define the validity of a groups of registers. (e.g. one bit
  for general purpose registers)
 
 };
 
+Please note that the kernel is allowed to use the kvm_run structure as the
+primary storage for certain register types. Therefore, the kernel may use the
+values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.
 
-4.81 KVM_GET_EMULATED_CPUID
-
-Capability: KVM_CAP_EXT_EMUL_CPUID
-Architectures: x86
-Type: system ioctl
-Parameters: struct kvm_cpuid2 (in/out)
-Returns: 0 on success, -1 on error
-
-struct kvm_cpuid2 {
-	__u32 nent;
-	__u32 flags;
-	struct kvm_cpuid_entry2 entries[0];
-};
-
-The member 'flags' is used for passing flags from userspace.
-
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		BIT(0)
-#define KVM_CPUID_FLAG_STATEFUL_FUNC		BIT(1)
-#define KVM_CPUID_FLAG_STATE_READ_NEXT		BIT(2)
-
-struct kvm_cpuid_entry2 {
-	__u32 function;
-	__u32 index;
-	__u32 flags;
-	__u32 eax;
-	__u32 ebx;
-	__u32 ecx;
-	__u32 edx;
-	__u32 padding[3];
-};
-
-This ioctl returns x86 cpuid features which are emulated by
-kvm.Userspace can use the information returned by this ioctl to query
-which features are emulated by kvm instead of being present natively.
-
-Userspace invokes KVM_GET_EMULATED_CPUID by passing a kvm_cpuid2
-structure with the 'nent' field indicating the number of entries in
-the variable-size array 'entries'. If the number of entries is too low
-to describe the cpu capabilities, an error (E2BIG) is returned. If the
-number is too high, the 'nent' field is adjusted and an error (ENOMEM)
-is returned. If the number is just right, the 'nent' field is adjusted
-to the number of valid entries in the 'entries' array, which is then
-filled.
-
-The entries returned are the set CPUID bits of the respective features
-which kvm emulates, as returned by the CPUID instruction, with unknown
-or unsupported feature bits cleared.
-
-Features like x2apic, for example, may not be present in the host cpu
-but are exposed by kvm in KVM_GET_SUPPORTED_CPUID because they can be
-emulated efficiently and thus not included here.
-
-The fields in each entry are defined as follows:
-
-  function: the eax value used to obtain the entry
-  index: the ecx value used to obtain the entry (for entries that are
-         affected by ecx)
-  flags: an OR of zero or more of the following:
-        KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
-           if the index field is valid
-        KVM_CPUID_FLAG_STATEFUL_FUNC:
-           if cpuid for this function returns different values for successive
-           invocations; there will be several entries with the same function,
-           all with this flag set
-        KVM_CPUID_FLAG_STATE_READ_NEXT:
-           for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
-           the first entry to be read by a cpu
-  eax, ebx, ecx, edx: the values returned by the cpuid instruction for
-         this function/index combination
-
 6. Capabilities that can be enabled on vCPUs
Documentation/virtual/kvm/mmu.txt
@@ -425,6 +425,20 @@ fault through the slow path.
 Since only 19 bits are used to store generation-number on mmio spte, all
 pages are zapped when there is an overflow.
 
+Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
+times, the last one happening when the generation number is retrieved and
+stored into the MMIO spte.  Thus, the MMIO spte might be created based on
+out-of-date information, but with an up-to-date generation number.
+
+To avoid this, the generation number is incremented again after synchronize_srcu
+returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
+memslot update, while some SRCU readers might be using the old copy.  We do not
+want to use an MMIO sptes created with an odd generation number, and we can do
+this without losing a bit in the MMIO spte.  The low bit of the generation
+is not stored in MMIO spte, and presumed zero when it is extracted out of the
+spte.  If KVM is unlucky and creates an MMIO spte while the low bit is 1,
+the next access to the spte will always be a cache miss.
+
 Further reading
 ===============
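
To make the added mmu.txt text concrete, here is an illustrative sketch of the generation check it describes (the 19-bit width comes from the text; the function and macro names are invented for illustration and this is not the kernel's actual code):

	#include <stdbool.h>
	#include <stdint.h>

	#define MMIO_GEN_BITS	19
	#define MMIO_GEN_MASK	((1u << MMIO_GEN_BITS) - 1)

	/* The low bit of kvm_memslots(kvm)->generation is dropped before the
	 * generation is packed into an MMIO spte. */
	static uint32_t pack_mmio_gen(uint64_t memslots_generation)
	{
		return (memslots_generation >> 1) & MMIO_GEN_MASK;
	}

	/* A cached MMIO spte is usable only if its packed generation matches
	 * the current one.  The generation goes odd when a memslot update
	 * starts and is bumped once more after synchronize_srcu() returns, so
	 * an spte packed during the odd window records the same value as the
	 * pre-update generation; once the post-update generation is visible
	 * it no longer matches and the access takes the slow path. */
	static bool mmio_spte_is_valid(uint32_t spte_gen, uint64_t memslots_generation)
	{
		return spte_gen == pack_mmio_gen(memslots_generation);
	}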
arch/arm/include/asm/kvm_host.h
@@ -19,6 +19,8 @@
 #ifndef __ARM_KVM_HOST_H__
 #define __ARM_KVM_HOST_H__
 
+#include <linux/types.h>
+#include <linux/kvm_types.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -40,7 +42,6 @@
 #include <kvm/arm_vgic.h>
 
-struct kvm_vcpu;
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -149,20 +150,17 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };
 
-struct kvm_vcpu_init;
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			const struct kvm_vcpu_init *init);
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-struct kvm_one_reg;
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 u64 kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-struct kvm;
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
@@ -187,7 +185,6 @@ struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
@@ -233,4 +230,10 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
 #endif /* __ARM_KVM_HOST_H__ */
arch/arm/kvm/arm.c
@@ -87,7 +87,7 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
 	return &kvm_arm_running_vcpu;
 }
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
 	return 0;
 }
@@ -97,27 +97,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
 	return 0;
 }
 
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
 void kvm_arch_check_processor_compat(void *rtn)
 {
 	*(int *)rtn = 0;
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
 /**
  * kvm_arch_init_vm - initializes a VM data structure
@@ -285,14 +274,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
-
-void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	vcpu->cpu = cpu;
arch/arm64/include/asm/kvm_host.h
@@ -22,6 +22,8 @@
 #ifndef __ARM64_KVM_HOST_H__
 #define __ARM64_KVM_HOST_H__
 
+#include <linux/types.h>
+#include <linux/kvm_types.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -41,7 +43,6 @@
 #define KVM_VCPU_MAX_FEATURES 3
 
-struct kvm_vcpu;
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -164,18 +165,15 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };
 
-struct kvm_vcpu_init;
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			const struct kvm_vcpu_init *init);
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-struct kvm_one_reg;
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-struct kvm;
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
@@ -244,4 +242,10 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
 	}
 }
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
 #endif /* __ARM64_KVM_HOST_H__ */
arch/ia64/include/asm/kvm_host.h
@@ -234,9 +234,6 @@ struct kvm_vm_data {
 #define KVM_REQ_PTC_G		32
 #define KVM_REQ_RESUME		33
 
-struct kvm;
-struct kvm_vcpu;
-
 struct kvm_mmio_req {
 	uint64_t addr;		/*  physical address */
 	uint64_t size;		/*  size in bytes */
@@ -595,6 +592,18 @@ void kvm_sal_emul(struct kvm_vcpu *vcpu);
 struct kvm *kvm_arch_alloc_vm(void);
 void kvm_arch_free_vm(struct kvm *kvm);
 
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
+		struct kvm_userspace_memory_region *mem,
+		const struct kvm_memory_slot *old,
+		enum kvm_mr_change change) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+
 #endif /* __ASSEMBLY__*/
 #endif
arch/ia64/kvm/kvm-ia64.c
@@ -125,7 +125,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
 
 static  DEFINE_SPINLOCK(vp_lock);
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
 	long  status;
 	long  tmp_base;
@@ -160,7 +160,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
+void kvm_arch_hardware_disable(void)
 {
 	long status;
@@ -1364,10 +1364,6 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 	}
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_iommu_unmap_guest(kvm);
@@ -1376,10 +1372,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_release_vm_pages(kvm);
 }
 
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	if (cpu != vcpu->cpu) {
@@ -1468,7 +1460,6 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	kfree(vcpu->arch.apic);
 }
 
-
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
@@ -1551,21 +1542,12 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-			   struct kvm_memory_slot *dont)
-{
-}
-
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages)
 {
 	return 0;
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		struct kvm_memory_slot *memslot,
 		struct kvm_userspace_memory_region *mem,
@@ -1597,14 +1579,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	return 0;
 }
 
-void kvm_arch_commit_memory_region(struct kvm *kvm,
-		struct kvm_userspace_memory_region *mem,
-		const struct kvm_memory_slot *old,
-		enum kvm_mr_change change)
-{
-	return;
-}
-
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	kvm_flush_remote_tlbs(kvm);
@@ -1853,10 +1827,6 @@ int kvm_arch_hardware_setup(void)
 	return 0;
 }
 
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
 	return __apic_accept_irq(vcpu, irq->vector);
arch/mips/include/asm/kvm_host.h
@@ -96,11 +96,6 @@
 #define CAUSEB_DC			27
 #define CAUSEF_DC			(_ULCAST_(1) << 27)
 
-struct kvm;
-struct kvm_run;
-struct kvm_vcpu;
-struct kvm_interrupt;
-
 extern atomic_t kvm_mips_instance;
 extern pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
 extern void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
@@ -767,5 +762,16 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
 extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *slot) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
 #endif /* __MIPS_KVM_HOST_H__ */
arch/mips/kvm/mips.c
@@ -77,24 +77,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
 	return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
 	return 0;
 }
 
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
 void kvm_arch_check_processor_compat(void *rtn)
 {
 	*(int *)rtn = 0;
@@ -163,10 +155,6 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
 	mutex_unlock(&kvm->lock);
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
 static void kvm_mips_uninit_tlbs(void *arg)
 {
 	/* Restore wired count */
@@ -194,21 +182,12 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
 	return -ENOIOCTLCMD;
 }
 
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-			   struct kvm_memory_slot *dont)
-{
-}
-
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages)
 {
 	return 0;
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
@@ -254,19 +233,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	}
 }
 
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-				   struct kvm_memory_slot *slot)
-{
-}
-
-void kvm_arch_flush_shadow(struct kvm *kvm)
-{
-}
-
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	int err, size, offset;
@@ -998,14 +964,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
-
-void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 				  struct kvm_translation *tr)
 {
arch/powerpc/include/asm/kvm_host.h
@@ -53,7 +53,6 @@
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
-struct kvm;
 extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_unmap_hva_range(struct kvm *kvm,
 			       unsigned long start, unsigned long end);
@@ -76,10 +75,6 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM			0x0fffffffffffffffULL
 
-struct kvm;
-struct kvm_run;
-struct kvm_vcpu;
-
 struct lppaca;
 struct slb_shadow;
 struct dtl_entry;
@@ -687,4 +682,12 @@ struct kvm_vcpu_arch {
 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_exit(void) {}
+
 #endif /* __POWERPC_KVM_HOST_H__ */
arch/powerpc/kvm/powerpc.c
@@ -384,24 +384,16 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 }
 EXPORT_SYMBOL_GPL(kvmppc_ld);
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
 	return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
 	return 0;
 }
 
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
 void kvm_arch_check_processor_compat(void *rtn)
 {
 	*(int *)rtn = kvmppc_core_check_processor_compat();
@@ -462,10 +454,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	module_put(kvm->arch.kvm_ops->owner);
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
 	int r;
@@ -608,10 +596,6 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 	return kvmppc_core_create_memslot(kvm, slot, npages);
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
@@ -628,10 +612,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	kvmppc_core_commit_memory_region(kvm, mem, old);
 }
 
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
@@ -720,10 +700,6 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
-void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_BOOKE
@@ -1347,9 +1323,4 @@ int kvm_arch_init(void *opaque)
 	return 0;
 }
 
-void kvm_arch_exit(void)
-{
-}
-
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
arch/s390/include/asm/kvm_host.h
@@ -13,8 +13,11 @@
 #ifndef ASM_KVM_HOST_H
 #define ASM_KVM_HOST_H
+
+#include <linux/types.h>
 #include <linux/hrtimer.h>
 #include <linux/interrupt.h>
+#include <linux/kvm_types.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
 #include <asm/debug.h>
@@ -154,7 +157,9 @@ struct kvm_s390_sie_block {
 	__u8	armid;			/* 0x00e3 */
 	__u8	reservede4[4];		/* 0x00e4 */
 	__u64	tecmc;			/* 0x00e8 */
-	__u8	reservedf0[16];		/* 0x00f0 */
+	__u8	reservedf0[12];		/* 0x00f0 */
+#define CRYCB_FORMAT1 0x00000001
+	__u32	crycbd;			/* 0x00fc */
 	__u64	gcr[16];		/* 0x0100 */
 	__u64	gbea;			/* 0x0180 */
 	__u8	reserved188[24];	/* 0x0188 */
@@ -407,6 +412,15 @@ struct s390_io_adapter {
 #define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
 #define MAX_S390_ADAPTER_MAPS 256
 
+struct kvm_s390_crypto {
+	struct kvm_s390_crypto_cb *crycb;
+	__u32 crycbd;
+};
+
+struct kvm_s390_crypto_cb {
+	__u8    reserved00[128];       /* 0x0000 */
+};
+
 struct kvm_arch{
 	struct sca_block *sca;
 	debug_info_t *dbf;
@@ -420,6 +434,7 @@ struct kvm_arch{
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
 	wait_queue_head_t ipte_wq;
 	spinlock_t start_stop_lock;
+	struct kvm_s390_crypto crypto;
 };
 
 #define KVM_HVA_ERR_BAD		(-1UL)
@@ -431,8 +446,6 @@ static inline bool kvm_is_error_hva(unsigned long addr)
 }
 
 #define ASYNC_PF_PER_VCPU	64
-struct kvm_vcpu;
-struct kvm_async_pf;
 struct kvm_arch_async_pf {
 	unsigned long pfault_token;
 };
@@ -450,4 +463,18 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_check_processor_compat(void *rtn) {}
+static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *slot) {}
+
 #endif
arch/s390/include/asm/pgalloc.h
@@ -18,9 +18,9 @@
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 
-unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
+unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mmu_gather *, unsigned long *);
+void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 
 void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
 			    bool init_skey);
@@ -145,8 +145,8 @@ static inline void pmd_populate(struct mm_struct *mm,
 /*
  * page table entry allocation/free routines.
  */
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
 
 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
arch/s390/include/asm/pgtable.h
@@ -30,6 +30,7 @@
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
+#include <linux/radix-tree.h>
 #include <asm/bug.h>
 #include <asm/page.h>
@@ -789,82 +790,67 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 /**
  * struct gmap_struct - guest address space
+ * @crst_list: list of all crst tables used in the guest address space
  * @mm: pointer to the parent mm_struct
+ * @guest_to_host: radix tree with guest to host address translation
+ * @host_to_guest: radix tree with pointer to segment table entries
+ * @guest_table_lock: spinlock to protect all entries in the guest page table
  * @table: pointer to the page directory
  * @asce: address space control element for gmap page table
- * @crst_list: list of all crst tables used in the guest address space
  * @pfault_enabled: defines if pfaults are applicable for the guest
  */
 struct gmap {
 	struct list_head list;
+	struct list_head crst_list;
 	struct mm_struct *mm;
+	struct radix_tree_root guest_to_host;
+	struct radix_tree_root host_to_guest;
+	spinlock_t guest_table_lock;
 	unsigned long *table;
 	unsigned long asce;
+	unsigned long asce_end;
 	void *private;
-	struct list_head crst_list;
 	bool pfault_enabled;
 };
 
-/**
- * struct gmap_rmap - reverse mapping for segment table entries
- * @gmap: pointer to the gmap_struct
- * @entry: pointer to a segment table entry
- * @vmaddr: virtual address in the guest address space
- */
-struct gmap_rmap {
-	struct list_head list;
-	struct gmap *gmap;
-	unsigned long *entry;
-	unsigned long vmaddr;
-};
-
-/**
- * struct gmap_pgtable - gmap information attached to a page table
- * @vmaddr: address of the 1MB segment in the process virtual memory
- * @mapper: list of segment table entries mapping a page table
- */
-struct gmap_pgtable {
-	unsigned long vmaddr;
-	struct list_head mapper;
-};
-
 /**
  * struct gmap_notifier - notify function block for page invalidation
  * @notifier_call: address of callback function
  */
 struct gmap_notifier {
 	struct list_head list;
-	void (*notifier_call)(struct gmap *gmap, unsigned long address);
+	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
 };
 
-struct gmap *gmap_alloc(struct mm_struct *mm);
+struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
 void gmap_free(struct gmap *gmap);
 void gmap_enable(struct gmap *gmap);
 void gmap_disable(struct gmap *gmap);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		     unsigned long to, unsigned long len);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
-unsigned long __gmap_translate(unsigned long address, struct gmap *);
+unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
-unsigned long gmap_translate(unsigned long address, struct gmap *);
+unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
-unsigned long __gmap_fault(unsigned long address, struct gmap *);
+int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
-unsigned long gmap_fault(unsigned long address, struct gmap *);
+int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
-void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
+void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
-void __gmap_zap(unsigned long address, struct gmap *);
+void __gmap_zap(struct gmap *, unsigned long gaddr);
 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
 
 void gmap_register_ipte_notifier(struct gmap_notifier *);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
 int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
-void gmap_do_ipte_notify(struct mm_struct *, pte_t *);
+void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
 
 static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
+					unsigned long addr,
 					pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
 	if (pgste_val(pgste) & PGSTE_IN_BIT) {
 		pgste_val(pgste) &= ~PGSTE_IN_BIT;
-		gmap_do_ipte_notify(mm, ptep);
+		gmap_do_ipte_notify(mm, addr, ptep);
 	}
 #endif
 	return pgste;
@@ -1110,7 +1096,7 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
 	pgste_val(pgste) &= ~PGSTE_UC_BIT;
 	pte = *ptep;
 	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
-		pgste = pgste_ipte_notify(mm, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
 		__ptep_ipte(addr, ptep);
 		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
 			pte_val(pte) |= _PAGE_PROTECT;
@@ -1132,7 +1118,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
 	}
 	pte = *ptep;
@@ -1178,7 +1164,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 	pte = *ptep;
@@ -1202,7 +1188,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_ipte_notify(mm, ptep, pgste);
+		pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 	pte = *ptep;
@@ -1239,7 +1225,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
 	}
 	pte = *ptep;
@@ -1273,7 +1259,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	if (!full && mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 	pte = *ptep;
@@ -1298,7 +1284,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	if (pte_write(pte)) {
 		if (mm_has_pgste(mm)) {
 			pgste = pgste_get_lock(ptep);
-			pgste = pgste_ipte_notify(mm, ptep, pgste);
+			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 		}
 		ptep_flush_lazy(mm, address, ptep);
@@ -1324,7 +1310,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 		return 0;
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
 	}
 	ptep_flush_direct(vma->vm_mm, address, ptep);
arch/s390/include/asm/tlb.h
@@ -105,7 +105,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-	page_table_free_rcu(tlb, (unsigned long *) pte);
+	page_table_free_rcu(tlb, (unsigned long *) pte, address);
 }
 
 /*
arch/s390/include/uapi/asm/kvm.h
@@ -111,12 +111,22 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_GPRS   (1UL << 1)
 #define KVM_SYNC_ACRS   (1UL << 2)
 #define KVM_SYNC_CRS    (1UL << 3)
+#define KVM_SYNC_ARCH0  (1UL << 4)
+#define KVM_SYNC_PFAULT (1UL << 5)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
 	__u64 gprs[16];	/* general purpose registers */
 	__u32 acrs[16];	/* access registers */
 	__u64 crs[16];	/* control registers */
+	__u64 todpr;	/* tod programmable register [ARCH0] */
+	__u64 cputm;	/* cpu timer [ARCH0] */
+	__u64 ckc;	/* clock comparator [ARCH0] */
+	__u64 pp;	/* program parameter [ARCH0] */
+	__u64 gbea;	/* guest breaking-event address [ARCH0] */
+	__u64 pft;	/* pfault token [PFAULT] */
+	__u64 pfs;	/* pfault select [PFAULT] */
+	__u64 pfc;	/* pfault compare [PFAULT] */
 };
 
 #define KVM_REG_S390_TODPR	(KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
arch/s390/kvm/diag.c
@@ -28,22 +28,32 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
 	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
 
-	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end
+	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
 	    || start < 2 * PAGE_SIZE)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
 	vcpu->stat.diagnose_10++;
 
-	/* we checked for start > end above */
-	if (end < prefix || start >= prefix + 2 * PAGE_SIZE) {
-		gmap_discard(start, end, vcpu->arch.gmap);
+	/*
+	 * We checked for start >= end above, so lets check for the
+	 * fast path (no prefix swap page involved)
+	 */
+	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
+		gmap_discard(vcpu->arch.gmap, start, end);
 	} else {
-		if (start < prefix)
-			gmap_discard(start, prefix, vcpu->arch.gmap);
-		if (end >= prefix)
-			gmap_discard(prefix + 2 * PAGE_SIZE,
-				     end, vcpu->arch.gmap);
+		/*
+		 * This is slow path.  gmap_discard will check for start
+		 * so lets split this into before prefix, prefix, after
+		 * prefix and let gmap_discard make some of these calls
+		 * NOPs.
+		 */
+		gmap_discard(vcpu->arch.gmap, start, prefix);
+		if (start <= prefix)
+			gmap_discard(vcpu->arch.gmap, 0, 4096);
+		if (end > prefix + 4096)
+			gmap_discard(vcpu->arch.gmap, 4096, 8192);
+		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
 	}
 	return 0;
 }
arch/s390/kvm/gaccess.c
@@ -254,8 +254,7 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
 		new = old = ACCESS_ONCE(*ic);
 		new.k = 0;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
-	if (!ipte_lock_count)
-		wake_up(&vcpu->kvm->arch.ipte_wq);
+	wake_up(&vcpu->kvm->arch.ipte_wq);
 out:
 	mutex_unlock(&ipte_mutex);
 }
arch/s390/kvm/interrupt.c
@@ -26,8 +26,9 @@
 #define IOINT_SSID_MASK 0x00030000
 #define IOINT_CSSID_MASK 0x03fc0000
 #define IOINT_AI_MASK 0x04000000
+#define PFAULT_INIT 0x0600
 
-static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
+static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
 
 static int is_ioint(u64 type)
 {
@@ -76,7 +77,7 @@ static u64 int_word_to_isc_bits(u32 int_word)
 	return (0x80 >> isc) << 24;
 }
 
-static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 				      struct kvm_s390_interrupt_info *inti)
 {
 	switch (inti->type) {
@@ -85,6 +86,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 			return 0;
 		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
 			return 1;
+		return 0;
 	case KVM_S390_INT_EMERGENCY:
 		if (psw_extint_disabled(vcpu))
 			return 0;
@@ -205,11 +207,30 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 	}
 }
 
-static int __deliver_prog_irq(struct kvm_vcpu *vcpu,
-			      struct kvm_s390_pgm_info *pgm_info)
+static u16 get_ilc(struct kvm_vcpu *vcpu)
 {
 	const unsigned short table[] = { 2, 4, 4, 6 };
 
+	switch (vcpu->arch.sie_block->icptcode) {
+	case ICPT_INST:
+	case ICPT_INSTPROGI:
+	case ICPT_OPEREXC:
+	case ICPT_PARTEXEC:
+	case ICPT_IOINST:
+		/* last instruction only stored for these icptcodes */
+		return table[vcpu->arch.sie_block->ipa >> 14];
+	case ICPT_PROGI:
+		return vcpu->arch.sie_block->pgmilc;
+	default:
+		return 0;
+	}
+}
+
+static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
+					   struct kvm_s390_pgm_info *pgm_info)
+{
 	int rc = 0;
+	u16 ilc = get_ilc(vcpu);
 
 	switch (pgm_info->code & ~PGM_PER) {
 	case PGM_AFX_TRANSLATION:
@@ -276,25 +297,7 @@ static int __deliver_prog_irq(struct kvm_vcpu *vcpu,
 			   (u8 *) __LC_PER_ACCESS_ID);
 	}
 
-	switch (vcpu->arch.sie_block->icptcode) {
-	case ICPT_INST:
-	case ICPT_INSTPROGI:
-	case ICPT_OPEREXC:
-	case ICPT_PARTEXEC:
-	case ICPT_IOINST:
-		/* last instruction only stored for these icptcodes */
-		rc |= put_guest_lc(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
-				   (u16 *) __LC_PGM_ILC);
-		break;
-	case ICPT_PROGI:
-		rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->pgmilc,
-				   (u16 *) __LC_PGM_ILC);
-		break;
-	default:
-		rc |= put_guest_lc(vcpu, 0, (u16 *) __LC_PGM_ILC);
-	}
-
+	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
 	rc |= put_guest_lc(vcpu, pgm_info->code,
 			   (u16 *) __LC_PGM_INT_CODE);
 	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
@@ -305,7 +308,7 @@ static int __deliver_prog_irq(struct kvm_vcpu *vcpu,
 	return rc;
 }
 
-static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
-				   struct kvm_s390_interrupt_info *inti)
+static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
+					       struct kvm_s390_interrupt_info *inti)
 {
 	const unsigned short table[] = { 2, 4, 4, 6 };
...
@@ -343,7 +346,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
case
KVM_S390_INT_CLOCK_COMP
:
case
KVM_S390_INT_CLOCK_COMP
:
trace_kvm_s390_deliver_interrupt
(
vcpu
->
vcpu_id
,
inti
->
type
,
trace_kvm_s390_deliver_interrupt
(
vcpu
->
vcpu_id
,
inti
->
type
,
inti
->
ext
.
ext_params
,
0
);
inti
->
ext
.
ext_params
,
0
);
deliver_ckc_interrupt
(
vcpu
);
rc
=
deliver_ckc_interrupt
(
vcpu
);
break
;
break
;
case
KVM_S390_INT_CPU_TIMER
:
case
KVM_S390_INT_CPU_TIMER
:
trace_kvm_s390_deliver_interrupt
(
vcpu
->
vcpu_id
,
inti
->
type
,
trace_kvm_s390_deliver_interrupt
(
vcpu
->
vcpu_id
,
inti
->
type
,
...
@@ -376,8 +379,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
...
@@ -376,8 +379,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
case
KVM_S390_INT_PFAULT_INIT
:
case
KVM_S390_INT_PFAULT_INIT
:
trace_kvm_s390_deliver_interrupt
(
vcpu
->
vcpu_id
,
inti
->
type
,
0
,
trace_kvm_s390_deliver_interrupt
(
vcpu
->
vcpu_id
,
inti
->
type
,
0
,
inti
->
ext
.
ext_params2
);
inti
->
ext
.
ext_params2
);
rc
=
put_guest_lc
(
vcpu
,
0x2603
,
(
u16
*
)
__LC_EXT_INT_CODE
);
rc
=
put_guest_lc
(
vcpu
,
EXT_IRQ_CP_SERVICE
,
rc
|=
put_guest_lc
(
vcpu
,
0x0600
,
(
u16
*
)
__LC_EXT_CPU_ADDR
);
(
u16
*
)
__LC_EXT_INT_CODE
);
rc
|=
put_guest_lc
(
vcpu
,
PFAULT_INIT
,
(
u16
*
)
__LC_EXT_CPU_ADDR
);
rc
|=
write_guest_lc
(
vcpu
,
__LC_EXT_OLD_PSW
,
rc
|=
write_guest_lc
(
vcpu
,
__LC_EXT_OLD_PSW
,
&
vcpu
->
arch
.
sie_block
->
gpsw
,
sizeof
(
psw_t
));
&
vcpu
->
arch
.
sie_block
->
gpsw
,
sizeof
(
psw_t
));
rc
|=
read_guest_lc
(
vcpu
,
__LC_EXT_NEW_PSW
,
rc
|=
read_guest_lc
(
vcpu
,
__LC_EXT_NEW_PSW
,
...
@@ -501,14 +505,11 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
...
@@ -501,14 +505,11 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
default:
default:
BUG
();
BUG
();
}
}
if
(
rc
)
{
printk
(
"kvm: The guest lowcore is not mapped during interrupt "
return
rc
;
"delivery, killing userspace
\n
"
);
do_exit
(
SIGKILL
);
}
}
}
static
void
deliver_ckc_interrupt
(
struct
kvm_vcpu
*
vcpu
)
static
int
__must_check
deliver_ckc_interrupt
(
struct
kvm_vcpu
*
vcpu
)
{
{
int
rc
;
int
rc
;
...
@@ -518,11 +519,7 @@ static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
...
@@ -518,11 +519,7 @@ static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
rc
|=
read_guest_lc
(
vcpu
,
__LC_EXT_NEW_PSW
,
rc
|=
read_guest_lc
(
vcpu
,
__LC_EXT_NEW_PSW
,
&
vcpu
->
arch
.
sie_block
->
gpsw
,
&
vcpu
->
arch
.
sie_block
->
gpsw
,
sizeof
(
psw_t
));
sizeof
(
psw_t
));
if
(
rc
)
{
return
rc
;
printk
(
"kvm: The guest lowcore is not mapped during interrupt "
"delivery, killing userspace
\n
"
);
do_exit
(
SIGKILL
);
}
}
}
/* Check whether SIGP interpretation facility has an external call pending */
/* Check whether SIGP interpretation facility has an external call pending */
...
@@ -661,12 +658,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
...
@@ -661,12 +658,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
&
vcpu
->
kvm
->
arch
.
sca
->
cpu
[
vcpu
->
vcpu_id
].
ctrl
);
&
vcpu
->
kvm
->
arch
.
sca
->
cpu
[
vcpu
->
vcpu_id
].
ctrl
);
}
}
void
kvm_s390_deliver_pending_interrupts
(
struct
kvm_vcpu
*
vcpu
)
int
__must_check
kvm_s390_deliver_pending_interrupts
(
struct
kvm_vcpu
*
vcpu
)
{
{
struct
kvm_s390_local_interrupt
*
li
=
&
vcpu
->
arch
.
local_int
;
struct
kvm_s390_local_interrupt
*
li
=
&
vcpu
->
arch
.
local_int
;
struct
kvm_s390_float_interrupt
*
fi
=
vcpu
->
arch
.
local_int
.
float_int
;
struct
kvm_s390_float_interrupt
*
fi
=
vcpu
->
arch
.
local_int
.
float_int
;
struct
kvm_s390_interrupt_info
*
n
,
*
inti
=
NULL
;
struct
kvm_s390_interrupt_info
*
n
,
*
inti
=
NULL
;
int
deliver
;
int
deliver
;
int
rc
=
0
;
__reset_intercept_indicators
(
vcpu
);
__reset_intercept_indicators
(
vcpu
);
if
(
atomic_read
(
&
li
->
active
))
{
if
(
atomic_read
(
&
li
->
active
))
{
...
@@ -685,16 +683,16 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
...
@@ -685,16 +683,16 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
atomic_set
(
&
li
->
active
,
0
);
atomic_set
(
&
li
->
active
,
0
);
spin_unlock
(
&
li
->
lock
);
spin_unlock
(
&
li
->
lock
);
if
(
deliver
)
{
if
(
deliver
)
{
__do_deliver_interrupt
(
vcpu
,
inti
);
rc
=
__do_deliver_interrupt
(
vcpu
,
inti
);
kfree
(
inti
);
kfree
(
inti
);
}
}
}
while
(
deliver
);
}
while
(
!
rc
&&
deliver
);
}
}
if
(
kvm_cpu_has_pending_timer
(
vcpu
))
if
(
!
rc
&&
kvm_cpu_has_pending_timer
(
vcpu
))
deliver_ckc_interrupt
(
vcpu
);
rc
=
deliver_ckc_interrupt
(
vcpu
);
if
(
atomic_read
(
&
fi
->
active
))
{
if
(
!
rc
&&
atomic_read
(
&
fi
->
active
))
{
do
{
do
{
deliver
=
0
;
deliver
=
0
;
spin_lock
(
&
fi
->
lock
);
spin_lock
(
&
fi
->
lock
);
...
@@ -711,67 +709,13 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
...
@@ -711,67 +709,13 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
atomic_set
(
&
fi
->
active
,
0
);
atomic_set
(
&
fi
->
active
,
0
);
spin_unlock
(
&
fi
->
lock
);
spin_unlock
(
&
fi
->
lock
);
if
(
deliver
)
{
if
(
deliver
)
{
__do_deliver_interrupt
(
vcpu
,
inti
);
rc
=
__do_deliver_interrupt
(
vcpu
,
inti
);
kfree
(
inti
);
}
}
while
(
deliver
);
}
}
void
kvm_s390_deliver_pending_machine_checks
(
struct
kvm_vcpu
*
vcpu
)
{
struct
kvm_s390_local_interrupt
*
li
=
&
vcpu
->
arch
.
local_int
;
struct
kvm_s390_float_interrupt
*
fi
=
vcpu
->
arch
.
local_int
.
float_int
;
struct
kvm_s390_interrupt_info
*
n
,
*
inti
=
NULL
;
int
deliver
;
__reset_intercept_indicators
(
vcpu
);
if
(
atomic_read
(
&
li
->
active
))
{
do
{
deliver
=
0
;
spin_lock
(
&
li
->
lock
);
list_for_each_entry_safe
(
inti
,
n
,
&
li
->
list
,
list
)
{
if
((
inti
->
type
==
KVM_S390_MCHK
)
&&
__interrupt_is_deliverable
(
vcpu
,
inti
))
{
list_del
(
&
inti
->
list
);
deliver
=
1
;
break
;
}
__set_intercept_indicator
(
vcpu
,
inti
);
}
if
(
list_empty
(
&
li
->
list
))
atomic_set
(
&
li
->
active
,
0
);
spin_unlock
(
&
li
->
lock
);
if
(
deliver
)
{
__do_deliver_interrupt
(
vcpu
,
inti
);
kfree
(
inti
);
kfree
(
inti
);
}
}
}
while
(
deliver
);
}
while
(
!
rc
&&
deliver
);
}
}
if
(
atomic_read
(
&
fi
->
active
))
{
return
rc
;
do
{
deliver
=
0
;
spin_lock
(
&
fi
->
lock
);
list_for_each_entry_safe
(
inti
,
n
,
&
fi
->
list
,
list
)
{
if
((
inti
->
type
==
KVM_S390_MCHK
)
&&
__interrupt_is_deliverable
(
vcpu
,
inti
))
{
list_del
(
&
inti
->
list
);
fi
->
irq_count
--
;
deliver
=
1
;
break
;
}
__set_intercept_indicator
(
vcpu
,
inti
);
}
if
(
list_empty
(
&
fi
->
list
))
atomic_set
(
&
fi
->
active
,
0
);
spin_unlock
(
&
fi
->
lock
);
if
(
deliver
)
{
__do_deliver_interrupt
(
vcpu
,
inti
);
kfree
(
inti
);
}
}
while
(
deliver
);
}
}
}
int
kvm_s390_inject_program_int
(
struct
kvm_vcpu
*
vcpu
,
u16
code
)
int
kvm_s390_inject_program_int
(
struct
kvm_vcpu
*
vcpu
,
u16
code
)
...
@@ -1048,7 +992,6 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
...
@@ -1048,7 +992,6 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
trace_kvm_s390_inject_vcpu
(
vcpu
->
vcpu_id
,
s390int
->
type
,
s390int
->
parm
,
trace_kvm_s390_inject_vcpu
(
vcpu
->
vcpu_id
,
s390int
->
type
,
s390int
->
parm
,
s390int
->
parm64
,
2
);
s390int
->
parm64
,
2
);
mutex_lock
(
&
vcpu
->
kvm
->
lock
);
li
=
&
vcpu
->
arch
.
local_int
;
li
=
&
vcpu
->
arch
.
local_int
;
spin_lock
(
&
li
->
lock
);
spin_lock
(
&
li
->
lock
);
if
(
inti
->
type
==
KVM_S390_PROGRAM_INT
)
if
(
inti
->
type
==
KVM_S390_PROGRAM_INT
)
...
@@ -1060,7 +1003,6 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
...
@@ -1060,7 +1003,6 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
li
->
action_bits
|=
ACTION_STOP_ON_STOP
;
li
->
action_bits
|=
ACTION_STOP_ON_STOP
;
atomic_set_mask
(
CPUSTAT_EXT_INT
,
li
->
cpuflags
);
atomic_set_mask
(
CPUSTAT_EXT_INT
,
li
->
cpuflags
);
spin_unlock
(
&
li
->
lock
);
spin_unlock
(
&
li
->
lock
);
mutex_unlock
(
&
vcpu
->
kvm
->
lock
);
kvm_s390_vcpu_wakeup
(
vcpu
);
kvm_s390_vcpu_wakeup
(
vcpu
);
return
0
;
return
0
;
}
}
...
@@ -1300,7 +1242,7 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
...
@@ -1300,7 +1242,7 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
}
}
INIT_LIST_HEAD
(
&
map
->
list
);
INIT_LIST_HEAD
(
&
map
->
list
);
map
->
guest_addr
=
addr
;
map
->
guest_addr
=
addr
;
map
->
addr
=
gmap_translate
(
addr
,
kvm
->
arch
.
gmap
);
map
->
addr
=
gmap_translate
(
kvm
->
arch
.
gmap
,
addr
);
if
(
map
->
addr
==
-
EFAULT
)
{
if
(
map
->
addr
==
-
EFAULT
)
{
ret
=
-
EFAULT
;
ret
=
-
EFAULT
;
goto
out
;
goto
out
;
...
@@ -1410,7 +1352,6 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
...
@@ -1410,7 +1352,6 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
r
=
enqueue_floating_irq
(
dev
,
attr
);
r
=
enqueue_floating_irq
(
dev
,
attr
);
break
;
break
;
case
KVM_DEV_FLIC_CLEAR_IRQS
:
case
KVM_DEV_FLIC_CLEAR_IRQS
:
r
=
0
;
kvm_s390_clear_float_irqs
(
dev
->
kvm
);
kvm_s390_clear_float_irqs
(
dev
->
kvm
);
break
;
break
;
case
KVM_DEV_FLIC_APF_ENABLE
:
case
KVM_DEV_FLIC_APF_ENABLE
:
...
...
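Editorial note: the common thread in this file is that the delivery helpers now return an int marked __must_check, so a failed lowcore access propagates an error to the caller instead of do_exit(SIGKILL), and the delivery loops stop at the first failure. A minimal userspace sketch of that error-propagation pattern follows; deliver_one() and the queue are invented for illustration and stand in for the real helpers.

/*
 * Sketch: accumulate an error code and stop the loop on first failure,
 * mirroring the "while (!rc && deliver)" shape introduced above.
 */
#include <stdio.h>

static int deliver_one(int irq)
{
    /* pretend delivery of irq 3 hits an unmapped lowcore */
    return irq == 3 ? -14 /* -EFAULT */ : 0;
}

static int deliver_pending(const int *queue, int n)
{
    int rc = 0;
    int i = 0;

    while (!rc && i < n)
        rc = deliver_one(queue[i++]);
    return rc;          /* caller must check this */
}

int main(void)
{
    int queue[] = { 1, 2, 3, 4 };

    printf("rc = %d\n", deliver_pending(queue, 4));
    return 0;
}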
arch/s390/kvm/kvm-s390.c
@@ -100,16 +100,12 @@ int test_vfacility(unsigned long nr)
 }
 
 /* Section: not file related */
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
 	/* every s390 is virtualization enabled ;-) */
 	return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
 
 int kvm_arch_hardware_setup(void)
...
@@ -124,17 +120,10 @@ void kvm_arch_hardware_unsetup(void)
 	gmap_unregister_ipte_notifier(&gmap_notifier);
 }
 
-void kvm_arch_check_processor_compat(void *rtn)
-{
-}
-
 int kvm_arch_init(void *opaque)
 {
-	return 0;
-}
-
-void kvm_arch_exit(void)
-{
+	/* Register floating interrupt controller interface. */
+	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
 }
 
 /* Section: device related */
...
@@ -404,6 +393,22 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	return r;
 }
 
+static int kvm_s390_crypto_init(struct kvm *kvm)
+{
+	if (!test_vfacility(76))
+		return 0;
+
+	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
+					 GFP_KERNEL | GFP_DMA);
+	if (!kvm->arch.crypto.crycb)
+		return -ENOMEM;
+
+	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
+				  CRYCB_FORMAT1;
+
+	return 0;
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	int rc;
...
@@ -441,6 +446,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (!kvm->arch.dbf)
 		goto out_nodbf;
 
+	if (kvm_s390_crypto_init(kvm) < 0)
+		goto out_crypto;
+
 	spin_lock_init(&kvm->arch.float_int.lock);
 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
 	init_waitqueue_head(&kvm->arch.ipte_wq);
...
@@ -451,7 +459,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (type & KVM_VM_S390_UCONTROL) {
 		kvm->arch.gmap = NULL;
 	} else {
-		kvm->arch.gmap = gmap_alloc(current->mm);
+		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
 		if (!kvm->arch.gmap)
 			goto out_nogmap;
 		kvm->arch.gmap->private = kvm;
...
@@ -465,6 +473,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	return 0;
 out_nogmap:
+	kfree(kvm->arch.crypto.crycb);
+out_crypto:
 	debug_unregister(kvm->arch.dbf);
 out_nodbf:
 	free_page((unsigned long)(kvm->arch.sca));
...
@@ -514,15 +524,12 @@ static void kvm_free_vcpus(struct kvm *kvm)
 	mutex_unlock(&kvm->lock);
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_free_vcpus(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
+	kfree(kvm->arch.crypto.crycb);
 	if (!kvm_is_ucontrol(kvm))
 		gmap_free(kvm->arch.gmap);
 	kvm_s390_destroy_adapters(kvm);
...
@@ -535,7 +542,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
 	kvm_clear_async_pf_completion_queue(vcpu);
 	if (kvm_is_ucontrol(vcpu->kvm)) {
-		vcpu->arch.gmap = gmap_alloc(current->mm);
+		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
 		if (!vcpu->arch.gmap)
 			return -ENOMEM;
 		vcpu->arch.gmap->private = vcpu->kvm;
...
@@ -546,19 +553,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
 				    KVM_SYNC_GPRS |
 				    KVM_SYNC_ACRS |
-				    KVM_SYNC_CRS;
+				    KVM_SYNC_CRS |
+				    KVM_SYNC_ARCH0 |
+				    KVM_SYNC_PFAULT;
 	return 0;
 }
 
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-	/* Nothing todo */
-}
-
-void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
...
@@ -611,6 +611,14 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
+{
+	if (!test_vfacility(76))
+		return;
+
+	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
+}
+
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
 {
 	free_page(vcpu->arch.sie_block->cbrlo);
...
@@ -657,6 +665,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 	get_cpu_id(&vcpu->arch.cpu_id);
 	vcpu->arch.cpu_id.version = 0xff;
+
+	kvm_s390_vcpu_crypto_setup(vcpu);
+
 	return rc;
 }
...
@@ -1053,6 +1064,11 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 		goto retry;
 	}
 
+	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+		vcpu->arch.sie_block->ihcpu = 0xffff;
+		goto retry;
+	}
+
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
...
@@ -1089,18 +1105,8 @@
  */
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
 {
-	struct mm_struct *mm = current->mm;
-	hva_t hva;
-	long rc;
-
-	hva = gmap_fault(gpa, vcpu->arch.gmap);
-	if (IS_ERR_VALUE(hva))
-		return (long)hva;
-	down_read(&mm->mmap_sem);
-	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
-	up_read(&mm->mmap_sem);
-
-	return rc < 0 ? rc : 0;
+	return gmap_fault(vcpu->arch.gmap, gpa,
+			  writable ? FAULT_FLAG_WRITE : 0);
 }
 
 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
...
@@ -1195,8 +1201,11 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 	if (test_cpu_flag(CIF_MCCK_PENDING))
 		s390_handle_mcck();
 
-	if (!kvm_is_ucontrol(vcpu->kvm))
-		kvm_s390_deliver_pending_interrupts(vcpu);
+	if (!kvm_is_ucontrol(vcpu->kvm)) {
+		rc = kvm_s390_deliver_pending_interrupts(vcpu);
+		if (rc)
+			return rc;
+	}
 
 	rc = kvm_s390_handle_requests(vcpu);
 	if (rc)
...
@@ -1300,6 +1309,48 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	return rc;
 }
 
+static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
+	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
+		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
+		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
+		/* some control register changes require a tlb flush */
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	}
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
+		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
+		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
+		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
+		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
+		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
+	}
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
+		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
+		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
+		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+	}
+	kvm_run->kvm_dirty_regs = 0;
+}
+
+static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
+	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
+	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
+	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
+	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
+	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
+	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
+	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
+	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
+	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
+	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
+	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int rc;
...
@@ -1321,30 +1372,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return -EINVAL;
 	}
 
-	switch (kvm_run->exit_reason) {
-	case KVM_EXIT_S390_SIEIC:
-	case KVM_EXIT_UNKNOWN:
-	case KVM_EXIT_INTR:
-	case KVM_EXIT_S390_RESET:
-	case KVM_EXIT_S390_UCONTROL:
-	case KVM_EXIT_S390_TSCH:
-	case KVM_EXIT_DEBUG:
-		break;
-	default:
-		BUG();
-	}
-
-	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
-	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
-	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
-		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
-		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
-	}
-	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
-		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
-		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
-		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
-	}
+	sync_regs(vcpu, kvm_run);
 
 	might_fault();
 	rc = __vcpu_run(vcpu);
...
@@ -1374,10 +1402,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		rc = 0;
 	}
 
-	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
-	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
-	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
-	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
+	store_regs(vcpu, kvm_run);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
...
@@ -1506,7 +1531,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
 	 */
-	vcpu->arch.sie_block->ihcpu  = 0xffff;
+	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
 	return;
 }
...
@@ -1661,9 +1686,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	}
 #endif
 	case KVM_S390_VCPU_FAULT: {
-		r = gmap_fault(arg, vcpu->arch.gmap);
-		if (!IS_ERR_VALUE(r))
-			r = 0;
+		r = gmap_fault(vcpu->arch.gmap, arg, 0);
 		break;
 	}
 	case KVM_ENABLE_CAP:
...
@@ -1694,21 +1717,12 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-			   struct kvm_memory_slot *dont)
-{
-}
-
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages)
 {
 	return 0;
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
...
@@ -1754,15 +1768,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	return;
 }
 
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-				   struct kvm_memory_slot *slot)
-{
-}
-
 static int __init kvm_s390_init(void)
 {
 	int ret;
...
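Editorial note: the new sync_regs()/store_regs() pair copies register groups between kvm_run and the in-kernel VCPU state, gated by dirty bits in kvm_run->kvm_dirty_regs. A small userspace sketch of that dirty-bitmask idea follows; the structure names and bit constants are simplified stand-ins, not the real kvm_run layout.

/*
 * Sketch: only register groups whose dirty bit is set are copied in,
 * and the mask is cleared once consumed.
 */
#include <stdio.h>

#define SYNC_PREFIX  (1u << 0)
#define SYNC_ARCH0   (1u << 1)

struct run_state  { unsigned int dirty; unsigned int prefix; unsigned long cputm; };
struct vcpu_state { unsigned int prefix; unsigned long cputm; };

static void sync_regs(struct vcpu_state *v, struct run_state *r)
{
    if (r->dirty & SYNC_PREFIX)
        v->prefix = r->prefix;
    if (r->dirty & SYNC_ARCH0)
        v->cputm = r->cputm;
    r->dirty = 0;               /* everything consumed */
}

int main(void)
{
    struct vcpu_state v = { 0, 0 };
    struct run_state r = { .dirty = SYNC_ARCH0, .prefix = 0x2000, .cputm = 42 };

    sync_regs(&v, &r);
    printf("prefix=%#x cputm=%lu\n", v.prefix, v.cputm);  /* prefix left untouched */
    return 0;
}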
arch/s390/kvm/kvm-s390.h
@@ -70,7 +70,7 @@ static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
 static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 {
 	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
-	vcpu->arch.sie_block->ihcpu  = 0xffff;
+	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
...
@@ -138,8 +138,7 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
-void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
-void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
+int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
 void kvm_s390_clear_float_irqs(struct kvm *kvm);
 int __must_check kvm_s390_inject_vm(struct kvm *kvm,
...
@@ -228,6 +227,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int psw_extint_disabled(struct kvm_vcpu *vcpu);
 void kvm_s390_destroy_adapters(struct kvm *kvm);
 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
+extern struct kvm_device_ops kvm_flic_ops;
 
 /* implemented in guestdbg.c */
 void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
...
arch/s390/kvm/priv.c
@@ -352,13 +352,6 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void handle_new_psw(struct kvm_vcpu *vcpu)
-{
-	/* Check whether the new psw is enabled for machine checks. */
-	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
-		kvm_s390_deliver_pending_machine_checks(vcpu);
-}
-
 #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
 #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
 #define PSW_ADDR_24 0x0000000000ffffffUL
...
@@ -405,7 +398,6 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
 	if (!is_valid_psw(gpsw))
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	handle_new_psw(vcpu);
 	return 0;
 }
...
@@ -427,7 +419,6 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gpsw = new_psw;
 	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	handle_new_psw(vcpu);
 	return 0;
 }
...
@@ -738,7 +729,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 			/* invalid entry */
 			break;
 		/* try to free backing */
-		__gmap_zap(cbrle, gmap);
+		__gmap_zap(gmap, cbrle);
 	}
 	up_read(&gmap->mm->mmap_sem);
 	if (i < entries)
...
arch/s390/mm/fault.c
@@ -442,18 +442,15 @@ static inline int do_exception(struct pt_regs *regs, int access)
 	down_read(&mm->mmap_sem);
 
 #ifdef CONFIG_PGSTE
-	gmap = (struct gmap *)
-		((current->flags & PF_VCPU) ? S390_lowcore.gmap : 0);
+	gmap = (current->flags & PF_VCPU) ?
+		(struct gmap *) S390_lowcore.gmap : NULL;
 	if (gmap) {
-		address = __gmap_fault(address, gmap);
+		current->thread.gmap_addr = address;
+		address = __gmap_translate(gmap, address);
 		if (address == -EFAULT) {
 			fault = VM_FAULT_BADMAP;
 			goto out_up;
 		}
-		if (address == -ENOMEM) {
-			fault = VM_FAULT_OOM;
-			goto out_up;
-		}
 		if (gmap->pfault_enabled)
 			flags |= FAULT_FLAG_RETRY_NOWAIT;
 	}
...
@@ -530,6 +527,20 @@ static inline int do_exception(struct pt_regs *regs, int access)
 			goto retry;
 		}
 	}
+#ifdef CONFIG_PGSTE
+	if (gmap) {
+		address = __gmap_link(gmap, current->thread.gmap_addr,
+				      address);
+		if (address == -EFAULT) {
+			fault = VM_FAULT_BADMAP;
+			goto out_up;
+		}
+		if (address == -ENOMEM) {
+			fault = VM_FAULT_OOM;
+			goto out_up;
+		}
+	}
+#endif
 	fault = 0;
 out_up:
 	up_read(&mm->mmap_sem);
...
arch/s390/mm/pgtable.c
(This diff is collapsed in this view and is not shown.)
arch/s390/mm/vmem.c
@@ -65,7 +65,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
 	pte_t *pte;
 
 	if (slab_is_available())
-		pte = (pte_t *) page_table_alloc(&init_mm, address);
+		pte = (pte_t *) page_table_alloc(&init_mm);
 	else
 		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
 					  PTRS_PER_PTE * sizeof(pte_t));
...
arch/x86/include/asm/kvm_host.h
@@ -99,10 +99,6 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define ASYNC_PF_PER_VCPU 64
 
-struct kvm_vcpu;
-struct kvm;
-struct kvm_async_pf;
-
 enum kvm_reg {
 	VCPU_REGS_RAX = 0,
 	VCPU_REGS_RCX = 1,
...
@@ -266,7 +262,8 @@ struct kvm_mmu {
 			  struct x86_exception *fault);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
 			    struct x86_exception *exception);
-	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
+	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			       struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
...
@@ -481,6 +478,7 @@ struct kvm_vcpu_arch {
 	u64 mmio_gva;
 	unsigned access;
 	gfn_t mmio_gfn;
+	u64 mmio_gen;
 
 	struct kvm_pmu pmu;
...
@@ -580,7 +578,6 @@ struct kvm_arch {
 	gpa_t wall_clock;
 
-	struct page *ept_identity_pagetable;
 	bool ept_identity_pagetable_done;
 	gpa_t ept_identity_map_addr;
...
@@ -665,8 +662,8 @@ struct msr_data {
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
-	int (*hardware_enable)(void *dummy);
-	void (*hardware_disable)(void *dummy);
+	int (*hardware_enable)(void);
+	void (*hardware_disable)(void);
 	void (*check_processor_compatibility)(void *rtn);
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
...
@@ -896,7 +893,6 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
-void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 static inline int __kvm_irq_line_state(unsigned long *irq_state,
...
@@ -927,7 +923,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			   struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 			      struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
...
@@ -947,7 +944,8 @@ void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
 void kvm_enable_tdp(void);
 void kvm_disable_tdp(void);
 
-static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+				  struct x86_exception *exception)
 {
 	return gpa;
 }
...
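Editorial note: the signature change to the translate_gpa hook adds an x86_exception out-parameter so a failing nested translation can report why it failed instead of losing that information. The userspace sketch below shows the general out-parameter callback pattern; the types and names are invented for illustration and are not the real kvm_mmu or x86_exception definitions.

/*
 * Sketch: a translation callback that fills an error descriptor when it
 * cannot translate, so the caller can act on the cause.
 */
#include <stdint.h>
#include <stdio.h>

#define UNMAPPED ((uint64_t)-1)

struct fault_info { int vector; uint32_t error_code; };

typedef uint64_t (*translate_fn)(uint64_t gpa, uint32_t access,
                                 struct fault_info *fault);

static uint64_t demo_translate(uint64_t gpa, uint32_t access,
                               struct fault_info *fault)
{
    if (gpa >= 0x10000) {            /* pretend this range is unmapped */
        fault->vector = 14;          /* #PF */
        fault->error_code = access;
        return UNMAPPED;
    }
    return gpa + 0x100000;           /* trivial offset "translation" */
}

int main(void)
{
    struct fault_info fault = { 0, 0 };
    translate_fn translate = demo_translate;

    if (translate(0x20000, 0x3, &fault) == UNMAPPED)
        printf("fault: vector=%d ec=%#x\n", fault.vector, (unsigned)fault.error_code);
    return 0;
}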
arch/x86/kvm/cpuid.h
@@ -88,6 +88,14 @@ static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
 	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
 }
 
+static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0, 0);
+	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
+}
+
 static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
...
arch/x86/kvm/emulate.c
@@ -3139,12 +3139,8 @@ static int em_clts(struct x86_emulate_ctxt *ctxt)
 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
 {
-	int rc;
-
-	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
-		return X86EMUL_UNHANDLEABLE;
-
-	rc = ctxt->ops->fix_hypercall(ctxt);
+	int rc = ctxt->ops->fix_hypercall(ctxt);
+
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
...
@@ -3562,6 +3558,12 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
 		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
 
+static const struct opcode group7_rm0[] = {
+	N,
+	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
+	N, N, N, N, N, N,
+};
+
 static const struct opcode group7_rm1[] = {
 	DI(SrcNone | Priv, monitor),
 	DI(SrcNone | Priv, mwait),
...
@@ -3655,7 +3657,7 @@ static const struct group_dual group7 = { {
 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
 }, {
-	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
+	EXT(0, group7_rm0),
 	EXT(0, group7_rm1),
 	N, EXT(0, group7_rm3),
 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
...
@@ -3686,14 +3688,18 @@ static const struct gprefix pfx_0f_6f_0f_7f = {
 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
 };
 
-static const struct gprefix pfx_vmovntpx = {
-	I(0, em_mov), N, N, N,
+static const struct gprefix pfx_0f_2b = {
+	I(0, em_mov), I(0, em_mov), N, N,
 };
 
 static const struct gprefix pfx_0f_28_0f_29 = {
 	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
 };
 
+static const struct gprefix pfx_0f_e7 = {
+	N, I(Sse, em_mov), N, N,
+};
+
 static const struct escape escape_d9 = { {
 	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
 }, {
...
@@ -3900,7 +3906,7 @@ static const struct opcode twobyte_table[256] = {
 	N, N, N, N, N, N, N, N,
 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
-	N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
+	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
 	N, N, N, N, N, N, N, N,
 	/* 0x30 - 0x3F */
 	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
...
@@ -3964,7 +3970,8 @@ static const struct opcode twobyte_table[256] = {
 	/* 0xD0 - 0xDF */
 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
 	/* 0xE0 - 0xEF */
-	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
+	N, N, N, N, N, N, N, N,
 	/* 0xF0 - 0xFF */
 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
 };
...
arch/x86/kvm/lapic.c
@@ -709,6 +709,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	int result = 0;
 	struct kvm_vcpu *vcpu = apic->vcpu;
 
+	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
+				  trig_mode, vector);
 	switch (delivery_mode) {
 	case APIC_DM_LOWEST:
 		vcpu->arch.apic_arb_prio++;
...
@@ -730,8 +732,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		}
-		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
-					  trig_mode, vector, false);
 		break;
 
 	case APIC_DM_REMRD:
...
arch/x86/kvm/mmu.c
@@ -199,16 +199,20 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
 /*
- * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number,
- * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation
- * number.
+ * the low bit of the generation number is always presumed to be zero.
+ * This disables mmio caching during memslot updates.  The concept is
+ * similar to a seqcount but instead of retrying the access we just punt
+ * and ignore the cache.
+ *
+ * spte bits 3-11 are used as bits 1-9 of the generation number,
+ * the bits 52-61 are used as bits 10-19 of the generation number.
  */
-#define MMIO_SPTE_GEN_LOW_SHIFT		3
+#define MMIO_SPTE_GEN_LOW_SHIFT		2
 #define MMIO_SPTE_GEN_HIGH_SHIFT	52
 
-#define MMIO_GEN_SHIFT			19
-#define MMIO_GEN_LOW_SHIFT		9
-#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 1)
+#define MMIO_GEN_SHIFT			20
+#define MMIO_GEN_LOW_SHIFT		10
+#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 2)
 #define MMIO_GEN_MASK			((1 << MMIO_GEN_SHIFT) - 1)
 #define MMIO_MAX_GEN			((1 << MMIO_GEN_SHIFT) - 1)
...
@@ -236,12 +240,7 @@ static unsigned int get_mmio_spte_generation(u64 spte)
 static unsigned int kvm_current_mmio_generation(struct kvm *kvm)
 {
-	/*
-	 * Init kvm generation close to MMIO_MAX_GEN to easily test the
-	 * code of handling generation number wrap-around.
-	 */
-	return (kvm_memslots(kvm)->generation +
-		MMIO_MAX_GEN - 150) & MMIO_GEN_MASK;
+	return kvm_memslots(kvm)->generation & MMIO_GEN_MASK;
 }
...
@@ -296,11 +295,6 @@ static bool check_mmio_spte(struct kvm *kvm, u64 spte)
 	return likely(kvm_gen == spte_gen);
 }
 
-static inline u64 rsvd_bits(int s, int e)
-{
-	return ((1ULL << (e - s + 1)) - 1) << s;
-}
-
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
...
@@ -3163,7 +3157,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 
-	vcpu_clear_mmio_info(vcpu, ~0ul);
+	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
...
@@ -3206,7 +3200,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 {
 	if (exception)
 		exception->error_code = 0;
-	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
+	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
 }
 
 static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
...
@@ -3518,6 +3512,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 	u64 exb_bit_rsvd = 0;
 	u64 gbpages_bit_rsvd = 0;
+	u64 nonleaf_bit8_rsvd = 0;
 
 	context->bad_mt_xwr = 0;
...
@@ -3525,6 +3520,14 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 		exb_bit_rsvd = rsvd_bits(63, 63);
 	if (!guest_cpuid_has_gbpages(vcpu))
 		gbpages_bit_rsvd = rsvd_bits(7, 7);
+
+	/*
+	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
+	 * leaf entries) on AMD CPUs only.
+	 */
+	if (guest_cpuid_is_amd(vcpu))
+		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
+
 	switch (context->root_level) {
 	case PT32_ROOT_LEVEL:
 		/* no rsvd bits for 2 level 4K page table entries */
...
@@ -3559,9 +3562,9 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 		break;
 	case PT64_ROOT_LEVEL:
 		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
-			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 7);
+			nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51);
 		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
-			gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51);
+			nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51);
 		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
 			rsvd_bits(maxphyaddr, 51);
 		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
...
@@ -4433,7 +4436,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
 	 * The very rare case: if the generation-number is round,
 	 * zap all shadow pages.
 	 */
-	if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
+	if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
 		printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
 		kvm_mmu_invalidate_zap_all_pages(kvm);
 	}
...
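Editorial note: the rewritten comment above describes how the memslot generation is packed into an MMIO spte (bits 1-9 of the generation in spte bits 3-11, bits 10-19 in spte bits 52-61), with bit 0 never stored so that an odd generation, which marks a memslot update in progress, can never match a cached value. The standalone sketch below reproduces that bit layout for a quick round-trip check; the constant names are mine, and the real pack/unpack helpers live elsewhere in mmu.c.

/* Sketch: pack/unpack a 20-bit generation into the spte bit positions. */
#include <stdint.h>
#include <stdio.h>

#define GEN_LOW_SHIFT_IN_SPTE   3
#define GEN_HIGH_SHIFT_IN_SPTE  52
#define GEN_LOW_BITS            9    /* generation bits 1..9   */
#define GEN_HIGH_BITS           10   /* generation bits 10..19 */

static uint64_t pack_generation(unsigned int gen)
{
    uint64_t low  = (gen >> 1) & ((1u << GEN_LOW_BITS) - 1);
    uint64_t high = (gen >> (1 + GEN_LOW_BITS)) & ((1u << GEN_HIGH_BITS) - 1);

    return (low << GEN_LOW_SHIFT_IN_SPTE) | (high << GEN_HIGH_SHIFT_IN_SPTE);
}

static unsigned int unpack_generation(uint64_t spte)
{
    unsigned int low  = (spte >> GEN_LOW_SHIFT_IN_SPTE)  & ((1u << GEN_LOW_BITS) - 1);
    unsigned int high = (spte >> GEN_HIGH_SHIFT_IN_SPTE) & ((1u << GEN_HIGH_BITS) - 1);

    return (low << 1) | (high << (1 + GEN_LOW_BITS));
}

int main(void)
{
    unsigned int even = 0x2468, odd = even | 1;

    printf("even round-trips: %s\n",
           unpack_generation(pack_generation(even)) == even ? "yes" : "no");
    printf("odd  round-trips: %s\n",
           unpack_generation(pack_generation(odd)) == odd ? "yes" : "no");
    return 0;
}

Running it shows the even generation survives the round trip while the odd one does not, which is exactly why a cached MMIO spte created during a memslot update is ignored afterwards.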
arch/x86/kvm/mmu.h
@@ -56,6 +56,11 @@
 #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
 #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
 
+static inline u64 rsvd_bits(int s, int e)
+{
+	return ((1ULL << (e - s + 1)) - 1) << s;
+}
+
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
...
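Editorial note: rsvd_bits(s, e) is moved into the header so other files can build reserved-bit masks; it simply sets bits s through e inclusive. A tiny userspace check of the same expression, with the expected values noted in comments:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rsvd_bits(int s, int e)
{
    return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
    printf("rsvd_bits(7, 7)   = %#llx\n", (unsigned long long)rsvd_bits(7, 7));    /* 0x80  */
    printf("rsvd_bits(8, 8)   = %#llx\n", (unsigned long long)rsvd_bits(8, 8));    /* 0x100 */
    printf("rsvd_bits(40, 51) = %#llx\n", (unsigned long long)rsvd_bits(40, 51));  /* bits 40..51 set */
    return 0;
}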
arch/x86/kvm/paging_tmpl.h
@@ -321,9 +321,22 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		walker->pte_gpa[walker->level - 1] = pte_gpa;
 
 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
-					      PFERR_USER_MASK|PFERR_WRITE_MASK);
+					      PFERR_USER_MASK|PFERR_WRITE_MASK,
+					      &walker->fault);
+
+		/*
+		 * FIXME: This can happen if emulation (for of an INS/OUTS
+		 * instruction) triggers a nested page fault.  The exit
+		 * qualification / exit info field will incorrectly have
+		 * "guest page access" as the nested page fault's cause,
+		 * instead of "guest page structure access".  To fix this,
+		 * the x86_exception struct should be augmented with enough
+		 * information to fix the exit_qualification or exit_info_1
+		 * fields.
+		 */
 		if (unlikely(real_gfn == UNMAPPED_GVA))
-			goto error;
+			return 0;
+
 		real_gfn = gpa_to_gfn(real_gfn);
 
 		host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
...
@@ -364,7 +377,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
 		gfn += pse36_gfn_delta(pte);
 
-	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
+	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
 	if (real_gpa == UNMAPPED_GVA)
 		return 0;
...
arch/x86/kvm/svm.c

@@ -622,7 +622,7 @@ static int has_svm(void)
 	return 1;
 }
 
-static void svm_hardware_disable(void *garbage)
+static void svm_hardware_disable(void)
 {
 	/* Make sure we clean up behind us */
 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))

@@ -633,7 +633,7 @@ static void svm_hardware_disable(void *garbage)
 	amd_pmu_disable_virt();
 }
 
-static int svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void)
 {
 	struct svm_cpu_data *sd;
 

@@ -1257,7 +1257,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->asid_generation = 0;
 	init_vmcb(svm);
 
-	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
+				   MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 

@@ -1974,10 +1975,26 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
-	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = fault->error_code;
-	svm->vmcb->control.exit_info_2 = fault->address;
+	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
+		/*
+		 * TODO: track the cause of the nested page fault, and
+		 * correctly fill in the high bits of exit_info_1.
+		 */
+		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
+		svm->vmcb->control.exit_code_hi = 0;
+		svm->vmcb->control.exit_info_1 = (1ULL << 32);
+		svm->vmcb->control.exit_info_2 = fault->address;
+	}
+
+	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
+	svm->vmcb->control.exit_info_1 |= fault->error_code;
+
+	/*
+	 * The present bit is always zero for page structure faults on real
+	 * hardware.
+	 */
+	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
+		svm->vmcb->control.exit_info_1 &= ~1;
 
 	nested_svm_vmexit(svm);
 }

@@ -3031,7 +3048,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	return 0;
 }
 
-u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
 	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
 
 	return vmcb->control.tsc_offset +
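A standalone sketch (my own illustration, not kernel code) of how the reworked nested_svm_inject_npf_exit() above packs EXITINFO1: the low 32 bits carry the x86 page-fault error code from the emulated walk, the high 32 bits describe what was being accessed, and when the "guest page table" indicator (the 2ULL << 32 bit tested above; the exact meaning of the high bits is my assumption) is set, the present bit is forced clear to match real hardware.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t exit_info_1 = 2ULL << 32;	/* assumed: fault hit a guest page table */
	uint32_t error_code  = 0x3;		/* present|write, from the emulated walk */

	exit_info_1 &= ~0xffffffffULL;		/* keep only the high "cause" bits */
	exit_info_1 |= error_code;		/* low 32 bits: page-fault error code */

	if (exit_info_1 & (2ULL << 32))		/* page-structure fault: P is 0 on real HW */
		exit_info_1 &= ~1ULL;

	printf("EXITINFO1 = %#llx\n", (unsigned long long)exit_info_1);	/* 0x200000002 */
	return 0;
}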
arch/x86/kvm/trace.h

@@ -415,15 +415,14 @@ TRACE_EVENT(kvm_apic_ipi,
 );
 
 TRACE_EVENT(kvm_apic_accept_irq,
-	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
-	    TP_ARGS(apicid, dm, tm, vec, coalesced),
+	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+	    TP_ARGS(apicid, dm, tm, vec),
 
 	TP_STRUCT__entry(
 		__field(	__u32,	apicid		)
 		__field(	__u16,	dm		)
 		__field(	__u8,	tm		)
 		__field(	__u8,	vec		)
-		__field(	bool,	coalesced	)
 	),
 
 	TP_fast_assign(

@@ -431,14 +430,12 @@ TRACE_EVENT(kvm_apic_accept_irq,
 		__entry->dm		= dm;
 		__entry->tm		= tm;
 		__entry->vec		= vec;
-		__entry->coalesced	= coalesced;
 	),
 
-	TP_printk("apicid %x vec %u (%s|%s)%s",
+	TP_printk("apicid %x vec %u (%s|%s)",
 		  __entry->apicid, __entry->vec,
 		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
-		  __entry->tm ? "level" : "edge",
-		  __entry->coalesced ? " (coalesced)" : "")
+		  __entry->tm ? "level" : "edge")
 );
 
 TRACE_EVENT(kvm_eoi,

@@ -848,6 +845,8 @@ TRACE_EVENT(kvm_track_tsc,
 		  __print_symbolic(__entry->host_clock, host_clocks))
 );
 
+#endif /* CONFIG_X86_64 */
+
 TRACE_EVENT(kvm_ple_window,
 	TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
 	TP_ARGS(grow, vcpu_id, new, old),

@@ -878,8 +877,6 @@ TRACE_EVENT(kvm_ple_window,
 #define trace_kvm_ple_window_shrink(vcpu_id, new, old) \
 	trace_kvm_ple_window(false, vcpu_id, new, old)
 
-#endif /* CONFIG_X86_64 */
-
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
arch/x86/kvm/vmx.c

@@ -397,6 +397,7 @@ struct nested_vmx {
 	 * we must keep them pinned while L2 runs.
 	 */
 	struct page *apic_access_page;
+	struct page *virtual_apic_page;
 	u64 msr_ia32_feature_control;
 
 	struct hrtimer preemption_timer;

@@ -555,6 +556,7 @@ static int max_shadow_read_only_fields =
 	ARRAY_SIZE(shadow_read_only_fields);
 
 static unsigned long shadow_read_write_fields[] = {
+	TPR_THRESHOLD,
 	GUEST_RIP,
 	GUEST_RSP,
 	GUEST_CR0,

@@ -765,6 +767,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var);
 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
+static int alloc_identity_pagetable(struct kvm *kvm);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

@@ -2157,7 +2160,7 @@ static u64 guest_read_tsc(void)
  * Like guest_read_tsc, but always returns L1's notion of the timestamp
  * counter, even if a nested guest (L2) is currently running.
  */
-u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
 	u64 tsc_offset;
 

@@ -2352,7 +2355,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
 		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
 		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
 		CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
-		CPU_BASED_PAUSE_EXITING |
+		CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW |
 		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
 	/*
 	 * We can allow some features even when not supported by the

@@ -2726,7 +2729,7 @@ static void kvm_cpu_vmxon(u64 addr)
 			: "memory", "cc");
 }
 
-static int hardware_enable(void *garbage)
+static int hardware_enable(void)
 {
 	int cpu = raw_smp_processor_id();
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));

@@ -2790,7 +2793,7 @@ static void kvm_cpu_vmxoff(void)
 	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
 }
 
-static void hardware_disable(void *garbage)
+static void hardware_disable(void)
 {
 	if (vmm_exclusive) {
 		vmclear_local_loaded_vmcss();

@@ -3960,21 +3963,25 @@ static int init_rmode_tss(struct kvm *kvm)
 
 static int init_rmode_identity_map(struct kvm *kvm)
 {
-	int i, idx, r, ret;
+	int i, idx, r = 0;
 	pfn_t identity_map_pfn;
 	u32 tmp;
 
 	if (!enable_ept)
-		return 1;
-	if (unlikely(!kvm->arch.ept_identity_pagetable)) {
-		printk(KERN_ERR "EPT: identity-mapping pagetable "
-			"haven't been allocated!\n");
 		return 0;
-	}
+
+	/* Protect kvm->arch.ept_identity_pagetable_done. */
+	mutex_lock(&kvm->slots_lock);
+
 	if (likely(kvm->arch.ept_identity_pagetable_done))
-		return 1;
-	ret = 0;
+		goto out2;
+
 	identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
+
+	r = alloc_identity_pagetable(kvm);
+	if (r < 0)
+		goto out2;
+
 	idx = srcu_read_lock(&kvm->srcu);
 	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
 	if (r < 0)

@@ -3989,10 +3996,13 @@ static int init_rmode_identity_map(struct kvm *kvm)
 			goto out;
 	}
 	kvm->arch.ept_identity_pagetable_done = true;
-	ret = 1;
 out:
 	srcu_read_unlock(&kvm->srcu, idx);
-	return ret;
+
+out2:
+	mutex_unlock(&kvm->slots_lock);
+	return r;
 }
 
 static void seg_setup(int seg)

@@ -4021,13 +4031,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
 		goto out;
 	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
 	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
+	kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
 	kvm_userspace_mem.memory_size = PAGE_SIZE;
 	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
 	if (r)
 		goto out;
 
-	page = gfn_to_page(kvm, 0xfee00);
+	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
 	if (is_error_page(page)) {
 		r = -EFAULT;
 		goto out;

@@ -4041,31 +4051,20 @@ static int alloc_apic_access_page(struct kvm *kvm)
 
 static int alloc_identity_pagetable(struct kvm *kvm)
 {
-	struct page *page;
+	/* Called with kvm->slots_lock held. */
+
 	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
-	mutex_lock(&kvm->slots_lock);
-	if (kvm->arch.ept_identity_pagetable)
-		goto out;
+	BUG_ON(kvm->arch.ept_identity_pagetable_done);
+
 	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
 	kvm_userspace_mem.flags = 0;
 	kvm_userspace_mem.guest_phys_addr =
 		kvm->arch.ept_identity_map_addr;
 	kvm_userspace_mem.memory_size = PAGE_SIZE;
 	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
-	if (r)
-		goto out;
 
-	page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
-	if (is_error_page(page)) {
-		r = -EFAULT;
-		goto out;
-	}
-
-	kvm->arch.ept_identity_pagetable = page;
-out:
-	mutex_unlock(&kvm->slots_lock);
 	return r;
 }

@@ -4500,7 +4499,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	kvm_set_cr8(&vmx->vcpu, 0);
-	apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&vmx->vcpu))
 		apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
 	apic_base_msr.host_initiated = true;

@@ -6244,7 +6243,11 @@ static void free_nested(struct vcpu_vmx *vmx)
 	/* Unpin physical memory we referred to in current vmcs02 */
 	if (vmx->nested.apic_access_page) {
 		nested_release_page(vmx->nested.apic_access_page);
-		vmx->nested.apic_access_page = 0;
+		vmx->nested.apic_access_page = NULL;
+	}
+	if (vmx->nested.virtual_apic_page) {
+		nested_release_page(vmx->nested.virtual_apic_page);
+		vmx->nested.virtual_apic_page = NULL;
 	}
 
 	nested_free_all_saved_vmcss(vmx);

@@ -7034,7 +7037,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_MCE_DURING_VMENTRY:
 		return 0;
 	case EXIT_REASON_TPR_BELOW_THRESHOLD:
-		return 1;
+		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
 	case EXIT_REASON_APIC_ACCESS:
 		return nested_cpu_has2(vmcs12,
 			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);

@@ -7155,6 +7158,12 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (is_guest_mode(vcpu) &&
+	    nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
+		return;
+
 	if (irr == -1 || tpr < irr) {
 		vmcs_write32(TPR_THRESHOLD, 0);
 		return;

@@ -7745,10 +7754,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		if (!kvm->arch.ept_identity_map_addr)
 			kvm->arch.ept_identity_map_addr =
 				VMX_EPT_IDENTITY_PAGETABLE_ADDR;
-		err = -ENOMEM;
-		if (alloc_identity_pagetable(kvm) != 0)
-			goto free_vmcs;
-		if (!init_rmode_identity_map(kvm))
+		err = init_rmode_identity_map(kvm);
+		if (err)
 			goto free_vmcs;
 	}
 

@@ -7927,6 +7934,55 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 		kvm_inject_page_fault(vcpu, fault);
 }
 
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
+					struct vmcs12 *vmcs12)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		/* TODO: Also verify bits beyond physical address width are 0 */
+		if (!PAGE_ALIGNED(vmcs12->apic_access_addr))
+			return false;
+
+		/*
+		 * Translate L1 physical address to host physical
+		 * address for vmcs02. Keep the page pinned, so this
+		 * physical address remains valid. We keep a reference
+		 * to it so we can release it later.
+		 */
+		if (vmx->nested.apic_access_page) /* shouldn't happen */
+			nested_release_page(vmx->nested.apic_access_page);
+		vmx->nested.apic_access_page =
+			nested_get_page(vcpu, vmcs12->apic_access_addr);
+	}
+
+	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
+		/* TODO: Also verify bits beyond physical address width are 0 */
+		if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr))
+			return false;
+
+		if (vmx->nested.virtual_apic_page) /* shouldn't happen */
+			nested_release_page(vmx->nested.virtual_apic_page);
+		vmx->nested.virtual_apic_page =
+			nested_get_page(vcpu, vmcs12->virtual_apic_page_addr);
+
+		/*
+		 * Failing the vm entry is _not_ what the processor does
+		 * but it's basically the only possibility we have.
+		 * We could still enter the guest if CR8 load exits are
+		 * enabled, CR8 store exits are enabled, and virtualize APIC
+		 * access is disabled; in this case the processor would never
+		 * use the TPR shadow and we could simply clear the bit from
+		 * the execution control. But such a configuration is useless,
+		 * so let's keep the code simple.
+		 */
+		if (!vmx->nested.virtual_apic_page)
+			return false;
+	}
+
+	return true;
+}
+
 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 {
 	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;

@@ -8072,16 +8128,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 			exec_control |= vmcs12->secondary_vm_exec_control;
 
 		if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
-			/*
-			 * Translate L1 physical address to host physical
-			 * address for vmcs02. Keep the page pinned, so this
-			 * physical address remains valid. We keep a reference
-			 * to it so we can release it later.
-			 */
-			if (vmx->nested.apic_access_page) /* shouldn't happen */
-				nested_release_page(vmx->nested.apic_access_page);
-			vmx->nested.apic_access_page =
-				nested_get_page(vcpu, vmcs12->apic_access_addr);
 			/*
 			 * If translation failed, no matter: This feature asks
 			 * to exit when accessing the given address, and if it

@@ -8127,6 +8173,13 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
 	exec_control &= ~CPU_BASED_TPR_SHADOW;
 	exec_control |= vmcs12->cpu_based_vm_exec_control;
+
+	if (exec_control & CPU_BASED_TPR_SHADOW) {
+		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+				page_to_phys(vmx->nested.virtual_apic_page));
+		vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+	}
+
 	/*
 	 * Merging of IO and MSR bitmaps not currently supported.
 	 * Rather, exit every time.

@@ -8288,8 +8341,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
-	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
-			!PAGE_ALIGNED(vmcs12->apic_access_addr)) {
+	if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
 		/*TODO: Also verify bits beyond physical address width are 0*/
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;

@@ -8893,7 +8945,11 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	/* Unpin physical memory we referred to in vmcs02 */
 	if (vmx->nested.apic_access_page) {
 		nested_release_page(vmx->nested.apic_access_page);
-		vmx->nested.apic_access_page = 0;
+		vmx->nested.apic_access_page = NULL;
+	}
+	if (vmx->nested.virtual_apic_page) {
+		nested_release_page(vmx->nested.virtual_apic_page);
+		vmx->nested.virtual_apic_page = NULL;
 	}
 
 	/*

@@ -8949,7 +9005,7 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 	return X86EMUL_CONTINUE;
 }
 
-void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
+static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
 	if (ple_gap)
 		shrink_ple_window(vcpu);
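The two "TODO: Also verify bits beyond physical address width are 0" comments above refer to a check that is not implemented by this patch. The following is only an illustrative sketch of what such a validation could look like; the function name and the exact rule are assumptions, not the eventual kernel implementation.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: besides page alignment, an L1-supplied address such as
 * apic_access_addr or virtual_apic_page_addr would also have to fit within the
 * guest's MAXPHYADDR for the VM entry to be considered valid. */
static bool nested_addr_valid(uint64_t addr, int guest_maxphyaddr)
{
	if (addr & 0xfffULL)			/* must be page aligned */
		return false;
	if (addr >> guest_maxphyaddr)		/* bits >= MAXPHYADDR must be zero */
		return false;
	return true;
}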
arch/x86/kvm/x86.c

@@ -246,7 +246,7 @@ void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 }
 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
-static void drop_user_return_notifiers(void *ignore)
+static void drop_user_return_notifiers(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

@@ -408,12 +408,14 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 }
 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
 		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+
+	return fault->nested_page_fault;
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)

@@ -457,11 +459,12 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			       gfn_t ngfn, void *data, int offset, int len,
 			       u32 access)
 {
+	struct x86_exception exception;
 	gfn_t real_gfn;
 	gpa_t ngpa;
 
 	ngpa     = gfn_to_gpa(ngfn);
-	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
+	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
 	if (real_gfn == UNMAPPED_GVA)
 		return -EFAULT;
 

@@ -1518,7 +1521,7 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 	pvclock_update_vm_gtod_copy(kvm);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 	/* guest entries allowed */
 	kvm_for_each_vcpu(i, vcpu, kvm)

@@ -1661,7 +1664,7 @@ static void kvmclock_update_fn(struct work_struct *work)
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
 }

@@ -1670,7 +1673,7 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
 {
 	struct kvm *kvm = v->kvm;
 
-	set_bit(KVM_REQ_CLOCK_UPDATE, &v->requests);
+	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
 	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
 					KVMCLOCK_UPDATE_DELAY);
 }

@@ -1726,7 +1729,7 @@ static bool valid_mtrr_type(unsigned t)
 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	int i;
-	u64 mask = 0;
+	u64 mask;
 
 	if (!msr_mtrr_valid(msr))
 		return false;

@@ -1750,8 +1753,7 @@ static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 	/* variable MTRRs */
 	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
 
-	for (i = 63; i > boot_cpu_data.x86_phys_bits; i--)
-		mask |= (1ULL << i);
+	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
 	if ((msr & 1) == 0) {
 		/* MTRR base */
 		if (!valid_mtrr_type(data & 0xff))

@@ -2847,7 +2849,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
 		vcpu->arch.tsc_offset_adjustment = 0;
-		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 	}
 
 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {

@@ -4064,16 +4066,16 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			   struct x86_exception *exception)
 {
 	gpa_t t_gpa;
-	struct x86_exception exception;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
+	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
 
 	return t_gpa;
 }

@@ -4930,16 +4932,18 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 	}
 }
 
-static void inject_emulated_exception(struct kvm_vcpu *vcpu)
+static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	if (ctxt->exception.vector == PF_VECTOR)
-		kvm_propagate_fault(vcpu, &ctxt->exception);
-	else if (ctxt->exception.error_code_valid)
+		return kvm_propagate_fault(vcpu, &ctxt->exception);
+
+	if (ctxt->exception.error_code_valid)
 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
 				      ctxt->exception.error_code);
 	else
 		kvm_queue_exception(vcpu, ctxt->exception.vector);
+	return false;
 }
 
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)

@@ -5301,8 +5305,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 	}
 
 	if (ctxt->have_exception) {
-		inject_emulated_exception(vcpu);
 		r = EMULATE_DONE;
+		if (inject_emulated_exception(vcpu))
+			return r;
 	} else if (vcpu->arch.pio.count) {
 		if (!vcpu->arch.pio.in) {
 			/* FIXME: return into emulator if single-stepping.  */

@@ -5570,7 +5575,7 @@ static void kvm_set_mmio_spte_mask(void)
 	 * entry to generate page fault with PFER.RSV = 1.
 	 */
 	 /* Mask the reserved physical address bits. */
-	mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr;
+	mask = rsvd_bits(maxphyaddr, 51);
 
 	/* Bit 62 is always reserved for 32bit host. */
 	mask |= 0x3ull << 62;

@@ -5601,7 +5606,7 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
-			set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
+			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 	atomic_set(&kvm_guest_has_master_clock, 0);
 	spin_unlock(&kvm_lock);
 }

@@ -6959,7 +6964,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
 	kvm_rip_write(vcpu, 0);
 }
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
 	struct kvm *kvm;
 	struct kvm_vcpu *vcpu;

@@ -6970,7 +6975,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	bool stable, backwards_tsc = false;
 
 	kvm_shared_msr_cpu_online();
-	ret = kvm_x86_ops->hardware_enable(garbage);
+	ret = kvm_x86_ops->hardware_enable();
 	if (ret != 0)
 		return ret;
 

@@ -6979,7 +6984,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (!stable && vcpu->cpu == smp_processor_id())
-				set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
 				backwards_tsc = true;
 				if (vcpu->arch.last_host_tsc > max_tsc)

@@ -7033,8 +7038,7 @@ int kvm_arch_hardware_enable(void *garbage)
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			vcpu->arch.tsc_offset_adjustment += delta_cyc;
 			vcpu->arch.last_host_tsc = local_tsc;
-			set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
-				&vcpu->requests);
+			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 		}
 
 		/*

@@ -7051,10 +7055,10 @@ int kvm_arch_hardware_enable(void *garbage)
 	return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
+void kvm_arch_hardware_disable(void)
 {
-	kvm_x86_ops->hardware_disable(garbage);
-	drop_user_return_notifiers(garbage);
+	kvm_x86_ops->hardware_disable();
+	drop_user_return_notifiers();
 }
 
 int kvm_arch_hardware_setup(void)

@@ -7269,8 +7273,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_free_vcpus(kvm);
 	if (kvm->arch.apic_access_page)
 		put_page(kvm->arch.apic_access_page);
-	if (kvm->arch.ept_identity_pagetable)
-		put_page(kvm->arch.ept_identity_pagetable);
 	kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 }
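A worked example (illustration only, not from the patch) of the simplified reserved-bits mask used by mtrr_valid() above: with a guest MAXPHYADDR of n bits, valid physical addresses occupy bits 0..n-1, so (~0ULL) << n is the set of address bits that must be zero in the variable-range MTRR MSRs. The sample addresses below are arbitrary.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int maxphyaddr = 36;				/* example guest MAXPHYADDR */
	uint64_t reserved = (~0ULL) << maxphyaddr;	/* bits 36..63 */
	uint64_t ok_addr  = 0x0000000fc0000000ULL;	/* fits in 36 bits */
	uint64_t bad_addr = 0x0000001000000000ULL;	/* bit 36 set: reserved */

	/* prints "0 1": the first address passes, the second would be rejected */
	printf("%d %d\n", !!(ok_addr & reserved), !!(bad_addr & reserved));
	return 0;
}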
arch/x86/kvm/x86.h

@@ -88,15 +88,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
 	vcpu->arch.mmio_gva = gva & PAGE_MASK;
 	vcpu->arch.access = access;
 	vcpu->arch.mmio_gfn = gfn;
+	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
+}
+
+static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
 }
 
 /*
- * Clear the mmio cache info for the given gva,
- * specially, if gva is ~0ul, we clear all mmio cache info.
+ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
+ * clear all mmio cache info.
  */
+#define MMIO_GVA_ANY (~(gva_t)0)
+
 static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
 		return;
 
 	vcpu->arch.mmio_gva = 0;

@@ -104,7 +112,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
 static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
 {
-	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
+	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
 		return true;
 
 	return false;

@@ -112,7 +121,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
 static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
+	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
 		return true;
 
 	return false;
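A minimal standalone sketch (my own types, not the kernel's) of the idea behind the new mmio_gen field and vcpu_match_mmio_gen() above: a cached MMIO translation only counts as a hit if it was recorded under the memslot generation that is still current, so any memslot update silently invalidates stale entries.

#include <stdbool.h>
#include <stdint.h>

struct mmio_cache {
	uint64_t gva;			/* guest virtual address, page aligned */
	uint64_t gfn;			/* cached guest frame number */
	unsigned int gen;		/* memslot generation at fill time */
};

static bool mmio_cache_hit(const struct mmio_cache *c,
			   uint64_t gva, unsigned int cur_gen)
{
	if (c->gen != cur_gen)		/* memslots changed since we cached: miss */
		return false;
	return c->gva && c->gva == (gva & ~0xfffULL);
}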
include/linux/kvm_host.h

@@ -140,8 +140,6 @@ static inline bool is_error_page(struct page *page)
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
 
-struct kvm;
-struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
 extern spinlock_t kvm_lock;

@@ -325,8 +323,6 @@ struct kvm_kernel_irq_routing_entry {
 	struct hlist_node link;
 };
 
-struct kvm_irq_routing_table;
-
 #ifndef KVM_PRIVATE_MEM_SLOTS
 #define KVM_PRIVATE_MEM_SLOTS 0
 #endif

@@ -636,8 +632,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 
-int kvm_arch_hardware_enable(void *garbage);
-void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_enable(void);
+void kvm_arch_hardware_disable(void);
 int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);

@@ -1038,8 +1034,6 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 extern bool kvm_rebooting;
 
-struct kvm_device_ops;
-
 struct kvm_device {
 	struct kvm_device_ops *ops;
 	struct kvm *kvm;

@@ -1072,12 +1066,10 @@ struct kvm_device_ops {
 void kvm_device_get(struct kvm_device *dev);
 void kvm_device_put(struct kvm_device *dev);
 struct kvm_device *kvm_device_from_filp(struct file *filp);
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
 
 extern struct kvm_device_ops kvm_mpic_ops;
 extern struct kvm_device_ops kvm_xics_ops;
-extern struct kvm_device_ops kvm_vfio_ops;
-extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
-extern struct kvm_device_ops kvm_flic_ops;
 
 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
include/linux/kvm_types.h

@@ -17,6 +17,20 @@
 #ifndef __KVM_TYPES_H__
 #define __KVM_TYPES_H__
 
+struct kvm;
+struct kvm_async_pf;
+struct kvm_device_ops;
+struct kvm_interrupt;
+struct kvm_irq_routing_table;
+struct kvm_memory_slot;
+struct kvm_one_reg;
+struct kvm_run;
+struct kvm_userspace_memory_region;
+struct kvm_vcpu;
+struct kvm_vcpu_init;
+enum kvm_mr_change;
+
 #include <asm/types.h>
 
 /*
include/trace/events/kvm.h

@@ -95,6 +95,26 @@ TRACE_EVENT(kvm_ioapic_set_irq,
 		  __entry->coalesced ? " (coalesced)" : "")
 );
 
+TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
+	    TP_PROTO(__u64 e),
+	    TP_ARGS(e),
+
+	TP_STRUCT__entry(
+		__field(	__u64,		e		)
+	),
+
+	TP_fast_assign(
+		__entry->e		= e;
+	),
+
+	TP_printk("dst %x vec=%u (%s|%s|%s%s)",
+		  (u8)(__entry->e >> 56), (u8)__entry->e,
+		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
+		  (__entry->e & (1 << 11)) ? "logical" : "physical",
+		  (__entry->e & (1 << 15)) ? "level" : "edge",
+		  (__entry->e & (1 << 16)) ? "|masked" : "")
+);
+
 TRACE_EVENT(kvm_msi_set_irq,
 	    TP_PROTO(__u64 address, __u64 data),
 	    TP_ARGS(address, data),
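A worked decode (standalone illustration) of the fields the new kvm_ioapic_delayed_eoi_inj tracepoint prints for one example IOAPIC redirection entry value; the bit positions follow the standard redirection entry layout used in the TP_printk above.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t e = 0x0300000000008030ULL;	/* example redirection entry */

	/* prints: dst 3 vec=48 mode=0 physical|level */
	printf("dst %x vec=%u mode=%u %s|%s%s\n",
	       (uint8_t)(e >> 56),		/* destination, bits 56-63 */
	       (uint8_t)e,			/* vector, bits 0-7 */
	       (unsigned)(e >> 8) & 0x7,	/* delivery mode, bits 8-10 */
	       (e & (1 << 11)) ? "logical" : "physical",
	       (e & (1 << 15)) ? "level" : "edge",
	       (e & (1 << 16)) ? "|masked" : "");
	return 0;
}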
include/uapi/linux/kvm.h

@@ -654,9 +654,7 @@ struct kvm_ppc_smmu_info {
 #endif
 /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
 #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
-#ifdef __KVM_HAVE_USER_NMI
 #define KVM_CAP_USER_NMI 22
-#endif
 #ifdef __KVM_HAVE_GUEST_DEBUG
 #define KVM_CAP_SET_GUEST_DEBUG 23
 #endif

@@ -738,9 +736,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_GET_SMMU_INFO 78
 #define KVM_CAP_S390_COW 79
 #define KVM_CAP_PPC_ALLOC_HTAB 80
-#ifdef __KVM_HAVE_READONLY_MEM
 #define KVM_CAP_READONLY_MEM 81
-#endif
 #define KVM_CAP_IRQFD_RESAMPLE 82
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83
 #define KVM_CAP_PPC_HTAB_FD 84

@@ -947,15 +943,25 @@ struct kvm_device_attr {
 	__u64	addr;		/* userspace address of attr data */
 };
 
-#define KVM_DEV_TYPE_FSL_MPIC_20	1
-#define KVM_DEV_TYPE_FSL_MPIC_42	2
-#define KVM_DEV_TYPE_XICS		3
-#define KVM_DEV_TYPE_VFIO		4
 #define  KVM_DEV_VFIO_GROUP			1
 #define   KVM_DEV_VFIO_GROUP_ADD			1
 #define   KVM_DEV_VFIO_GROUP_DEL			2
-#define KVM_DEV_TYPE_ARM_VGIC_V2	5
-#define KVM_DEV_TYPE_FLIC		6
+
+enum kvm_device_type {
+	KVM_DEV_TYPE_FSL_MPIC_20	= 1,
+#define KVM_DEV_TYPE_FSL_MPIC_20	KVM_DEV_TYPE_FSL_MPIC_20
+	KVM_DEV_TYPE_FSL_MPIC_42,
+#define KVM_DEV_TYPE_FSL_MPIC_42	KVM_DEV_TYPE_FSL_MPIC_42
+	KVM_DEV_TYPE_XICS,
+#define KVM_DEV_TYPE_XICS		KVM_DEV_TYPE_XICS
+	KVM_DEV_TYPE_VFIO,
+#define KVM_DEV_TYPE_VFIO		KVM_DEV_TYPE_VFIO
+	KVM_DEV_TYPE_ARM_VGIC_V2,
+#define KVM_DEV_TYPE_ARM_VGIC_V2	KVM_DEV_TYPE_ARM_VGIC_V2
+	KVM_DEV_TYPE_FLIC,
+#define KVM_DEV_TYPE_FLIC		KVM_DEV_TYPE_FLIC
+	KVM_DEV_TYPE_MAX,
+};
 
 /*
  * ioctls for VM fds

@@ -1093,7 +1099,7 @@ struct kvm_s390_ucas_mapping {
 #define KVM_S390_INITIAL_RESET    _IO(KVMIO,   0x97)
 #define KVM_GET_MP_STATE          _IOR(KVMIO,  0x98, struct kvm_mp_state)
 #define KVM_SET_MP_STATE          _IOW(KVMIO,  0x99, struct kvm_mp_state)
-/* Available with KVM_CAP_NMI */
+/* Available with KVM_CAP_USER_NMI */
 #define KVM_NMI                   _IO(KVMIO,   0x9a)
 /* Available with KVM_CAP_SET_GUEST_DEBUG */
 #define KVM_SET_GUEST_DEBUG       _IOW(KVMIO,  0x9b, struct kvm_guest_debug)
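A userspace sketch (illustrative only, error handling trimmed) showing how these device type values are consumed: they go into kvm_create_device.type for the KVM_CREATE_DEVICE ioctl on a VM fd. Turning the #defines into an enum keeps the UAPI values unchanged while giving the kernel a KVM_DEV_TYPE_MAX bound for the new kvm_register_device_ops() table.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int sys = open("/dev/kvm", O_RDWR);
	int vm  = ioctl(sys, KVM_CREATE_VM, 0);
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V2,	/* same numeric value as before */
	};

	if (ioctl(vm, KVM_CREATE_DEVICE, &cd) == 0)
		printf("vgic device fd = %d\n", cd.fd);
	else
		perror("KVM_CREATE_DEVICE");
	return 0;
}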
virt/kvm/arm/vgic.c

@@ -1522,83 +1522,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void vgic_init_maintenance_interrupt(void *info)
-{
-	enable_percpu_irq(vgic->maint_irq, 0);
-}
-
-static int vgic_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *cpu)
-{
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		vgic_init_maintenance_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_percpu_irq(vgic->maint_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block vgic_cpu_nb = {
-	.notifier_call = vgic_cpu_notify,
-};
-
-static const struct of_device_id vgic_ids[] = {
-	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
-	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
-	{},
-};
-
-int kvm_vgic_hyp_init(void)
-{
-	const struct of_device_id *matched_id;
-	const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
-				const struct vgic_params **);
-	struct device_node *vgic_node;
-	int ret;
-
-	vgic_node = of_find_matching_node_and_match(NULL,
-						    vgic_ids, &matched_id);
-	if (!vgic_node) {
-		kvm_err("error: no compatible GIC node found\n");
-		return -ENODEV;
-	}
-
-	vgic_probe = matched_id->data;
-	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
-	if (ret)
-		return ret;
-
-	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
-				 "vgic", kvm_get_running_vcpus());
-	if (ret) {
-		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
-		return ret;
-	}
-
-	ret = __register_cpu_notifier(&vgic_cpu_nb);
-	if (ret) {
-		kvm_err("Cannot register vgic CPU notifier\n");
-		goto out_free_irq;
-	}
-
-	/* Callback into for arch code for setup */
-	vgic_arch_setup(vgic);
-
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
-	return 0;
-
-out_free_irq:
-	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
-	return ret;
-}
-
 /**
  * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
  * @kvm: pointer to the kvm struct

@@ -2062,7 +1985,7 @@ static int vgic_create(struct kvm_device *dev, u32 type)
 	return kvm_vgic_create(dev->kvm);
 }
 
-struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
 	.name = "kvm-arm-vgic",
 	.create = vgic_create,
 	.destroy = vgic_destroy,

@@ -2070,3 +1993,81 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = {
 	.get_attr = vgic_get_attr,
 	.has_attr = vgic_has_attr,
 };
+
+static void vgic_init_maintenance_interrupt(void *info)
+{
+	enable_percpu_irq(vgic->maint_irq, 0);
+}
+
+static int vgic_cpu_notify(struct notifier_block *self,
+			   unsigned long action, void *cpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		vgic_init_maintenance_interrupt(NULL);
+		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		disable_percpu_irq(vgic->maint_irq);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block vgic_cpu_nb = {
+	.notifier_call = vgic_cpu_notify,
+};
+
+static const struct of_device_id vgic_ids[] = {
+	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
+	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+	{},
+};
+
+int kvm_vgic_hyp_init(void)
+{
+	const struct of_device_id *matched_id;
+	const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
+				const struct vgic_params **);
+	struct device_node *vgic_node;
+	int ret;
+
+	vgic_node = of_find_matching_node_and_match(NULL,
+						    vgic_ids, &matched_id);
+	if (!vgic_node) {
+		kvm_err("error: no compatible GIC node found\n");
+		return -ENODEV;
+	}
+
+	vgic_probe = matched_id->data;
+	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
+	if (ret)
+		return ret;
+
+	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
+				 "vgic", kvm_get_running_vcpus());
+	if (ret) {
+		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
+		return ret;
+	}
+
+	ret = __register_cpu_notifier(&vgic_cpu_nb);
+	if (ret) {
+		kvm_err("Cannot register vgic CPU notifier\n");
+		goto out_free_irq;
+	}
+
+	/* Callback into for arch code for setup */
+	vgic_arch_setup(vgic);
+
+	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
+	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);
+
+out_free_irq:
+	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
+	return ret;
+}
virt/kvm/ioapic.c

@@ -405,6 +405,26 @@ void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
 	spin_unlock(&ioapic->lock);
 }
 
+static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
+{
+	int i;
+	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
+						 eoi_inject.work);
+	spin_lock(&ioapic->lock);
+	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
+
+		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
+			continue;
+
+		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
+			ioapic_service(ioapic, i, false);
+	}
+	spin_unlock(&ioapic->lock);
+}
+
+#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
+
 static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {

@@ -435,8 +455,26 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
 		ent->fields.remote_irr = 0;
-		if (ioapic->irr & (1 << i))
-			ioapic_service(ioapic, i, false);
+		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
+			++ioapic->irq_eoi[i];
+			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
+				/*
+				 * Real hardware does not deliver the interrupt
+				 * immediately during eoi broadcast, and this
+				 * lets a buggy guest make slow progress
+				 * even if it does not correctly handle a
+				 * level-triggered interrupt. Emulate this
+				 * behavior if we detect an interrupt storm.
+				 */
+				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
+				ioapic->irq_eoi[i] = 0;
+				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
+			} else {
+				ioapic_service(ioapic, i, false);
+			}
+		} else {
+			ioapic->irq_eoi[i] = 0;
+		}
 	}
 }

@@ -565,12 +603,14 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
 {
 	int i;
 
+	cancel_delayed_work_sync(&ioapic->eoi_inject);
 	for (i = 0; i < IOAPIC_NUM_PINS; i++)
 		ioapic->redirtbl[i].fields.mask = 1;
 	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
 	ioapic->ioregsel = 0;
 	ioapic->irr = 0;
 	ioapic->id = 0;
+	memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
 	rtc_irq_eoi_tracking_reset(ioapic);
 	update_handled_vectors(ioapic);
 }

@@ -589,6 +629,7 @@ int kvm_ioapic_init(struct kvm *kvm)
 	if (!ioapic)
 		return -ENOMEM;
 	spin_lock_init(&ioapic->lock);
+	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
 	kvm->arch.vioapic = ioapic;
 	kvm_ioapic_reset(ioapic);
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);

@@ -609,6 +650,7 @@ void kvm_ioapic_destroy(struct kvm *kvm)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
+	cancel_delayed_work_sync(&ioapic->eoi_inject);
 	if (ioapic) {
 		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
 		kvm->arch.vioapic = NULL;
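A standalone sketch (my own simplification, not the kernel code) of the interrupt-storm detector added to __kvm_ioapic_update_eoi() above: a per-pin counter of back-to-back EOIs that, once it reaches IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT, asks the caller to defer re-injection (via the HZ/100, roughly 10 ms, delayed work) instead of servicing the still-pending level-triggered pin immediately.

#include <stdbool.h>

#define SUCCESSIVE_IRQ_MAX_COUNT 10000

/* Returns true when re-injection should be deferred rather than done now. */
static bool eoi_storm_detected(unsigned int *irq_eoi_counter)
{
	if (++(*irq_eoi_counter) == SUCCESSIVE_IRQ_MAX_COUNT) {
		*irq_eoi_counter = 0;
		return true;		/* caller schedules the ~10 ms delayed work */
	}
	return false;			/* service the pin immediately */
}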
virt/kvm/ioapic.h

@@ -59,6 +59,8 @@ struct kvm_ioapic {
 	spinlock_t lock;
 	DECLARE_BITMAP(handled_vectors, 256);
 	struct rtc_status rtc_status;
+	struct delayed_work eoi_inject;
+	u32 irq_eoi[IOAPIC_NUM_PINS];
 };
 
 #ifdef DEBUG
virt/kvm/kvm_main.c
浏览文件 @
a875dafc
...
@@ -95,8 +95,6 @@ static int hardware_enable_all(void);
...
@@ -95,8 +95,6 @@ static int hardware_enable_all(void);
static
void
hardware_disable_all
(
void
);
static
void
hardware_disable_all
(
void
);
static
void
kvm_io_bus_destroy
(
struct
kvm_io_bus
*
bus
);
static
void
kvm_io_bus_destroy
(
struct
kvm_io_bus
*
bus
);
static
void
update_memslots
(
struct
kvm_memslots
*
slots
,
struct
kvm_memory_slot
*
new
,
u64
last_generation
);
static
void
kvm_release_pfn_dirty
(
pfn_t
pfn
);
static
void
kvm_release_pfn_dirty
(
pfn_t
pfn
);
static
void
mark_page_dirty_in_slot
(
struct
kvm
*
kvm
,
static
void
mark_page_dirty_in_slot
(
struct
kvm
*
kvm
,
...
@@ -477,6 +475,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
...
@@ -477,6 +475,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
kvm
->
memslots
=
kzalloc
(
sizeof
(
struct
kvm_memslots
),
GFP_KERNEL
);
kvm
->
memslots
=
kzalloc
(
sizeof
(
struct
kvm_memslots
),
GFP_KERNEL
);
if
(
!
kvm
->
memslots
)
if
(
!
kvm
->
memslots
)
goto
out_err_no_srcu
;
goto
out_err_no_srcu
;
/*
* Init kvm generation close to the maximum to easily test the
* code of handling generation number wrap-around.
*/
kvm
->
memslots
->
generation
=
-
150
;
kvm_init_memslots_id
(
kvm
);
kvm_init_memslots_id
(
kvm
);
if
(
init_srcu_struct
(
&
kvm
->
srcu
))
if
(
init_srcu_struct
(
&
kvm
->
srcu
))
goto
out_err_no_srcu
;
goto
out_err_no_srcu
;
...
@@ -688,8 +693,7 @@ static void sort_memslots(struct kvm_memslots *slots)
 }
 
 static void update_memslots(struct kvm_memslots *slots,
-			    struct kvm_memory_slot *new,
-			    u64 last_generation)
+			    struct kvm_memory_slot *new)
 {
 	if (new) {
 		int id = new->id;
...
@@ -700,15 +704,13 @@ static void update_memslots(struct kvm_memslots *slots,
 		if (new->npages != npages)
 			sort_memslots(slots);
 	}
-
-	slots->generation = last_generation + 1;
 }
 
 static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 {
 	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
 
-#ifdef KVM_CAP_READONLY_MEM
+#ifdef __KVM_HAVE_READONLY_MEM
 	valid_flags |= KVM_MEM_READONLY;
 #endif
...
@@ -723,10 +725,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 {
 	struct kvm_memslots *old_memslots = kvm->memslots;
 
-	update_memslots(slots, new, kvm->memslots->generation);
+	/*
+	 * Set the low bit in the generation, which disables SPTE caching
+	 * until the end of synchronize_srcu_expedited.
+	 */
+	WARN_ON(old_memslots->generation & 1);
+	slots->generation = old_memslots->generation + 1;
+
+	update_memslots(slots, new);
 	rcu_assign_pointer(kvm->memslots, slots);
 	synchronize_srcu_expedited(&kvm->srcu);
 
+	/*
+	 * Increment the new memslot generation a second time. This prevents
+	 * vm exits that race with memslot updates from caching a memslot
+	 * generation that will (potentially) be valid forever.
+	 */
+	slots->generation++;
+
 	kvm_arch_memslots_updated(kvm);
 
 	return old_memslots;
...
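The rewritten install_new_memslots() brackets every update with two generation increments: the count goes odd before the new memslots are published and back to even only after synchronize_srcu_expedited() returns, so any MMIO SPTE cached against the in-flight (odd) generation can never match again. The standalone sketch below models that odd/even protocol with invented names (memslots_update_begin/end, cached_entry_valid); it illustrates the idea, not the actual kvm_main.c or x86 MMU code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t slots_generation;	/* even = stable, odd = update in flight */

/* Begin a memslot update: the generation becomes odd. */
static void memslots_update_begin(void)
{
	assert((slots_generation & 1) == 0);
	slots_generation++;
}

/* Finish a memslot update: the generation becomes even again, strictly
 * larger than anything observed while the update was in flight. */
static void memslots_update_end(void)
{
	assert(slots_generation & 1);
	slots_generation++;
}

/* A cached translation is usable only against the current, even generation. */
static bool cached_entry_valid(uint64_t cached_generation)
{
	return cached_generation == slots_generation &&
	       (cached_generation & 1) == 0;
}

int main(void)
{
	uint64_t stale;

	memslots_update_begin();
	stale = slots_generation;	/* snapshot taken during the update */
	memslots_update_end();

	assert(!cached_entry_valid(stale));		/* odd: never valid */
	assert(cached_entry_valid(slots_generation));	/* current even gen */
	return 0;
}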
@@ -777,7 +793,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
 
-	r = -EINVAL;
 	if (npages > KVM_MEM_MAX_NR_PAGES)
 		goto out;
...
@@ -791,7 +806,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new.npages = npages;
 	new.flags = mem->flags;
 
-	r = -EINVAL;
 	if (npages) {
 		if (!old.npages)
 			change = KVM_MR_CREATE;
...
@@ -847,7 +861,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	}
 
 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
-		r = -ENOMEM;
 		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 				GFP_KERNEL);
 		if (!slots)
...
@@ -1776,8 +1789,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 	bool eligible;
 
 	eligible = !vcpu->spin_loop.in_spin_loop ||
-		    (vcpu->spin_loop.in_spin_loop &&
-		     vcpu->spin_loop.dy_eligible);
+		    vcpu->spin_loop.dy_eligible;
 
 	if (vcpu->spin_loop.in_spin_loop)
 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
...
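The dropped conjunct is redundant by simple boolean algebra: when in_spin_loop is true, in_spin_loop && dy_eligible reduces to dy_eligible, and when it is false the leading !in_spin_loop already decides the expression. A two-variable exhaustive check of the equivalence, as plain C:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	/* Verify !a || (a && b) == !a || b for every input combination. */
	for (int a = 0; a <= 1; a++)
		for (int b = 0; b <= 1; b++) {
			bool old_form = !a || (a && b);
			bool new_form = !a || b;
			assert(old_form == new_form);
		}
	return 0;
}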
@@ -2267,6 +2279,29 @@ struct kvm_device *kvm_device_from_filp(struct file *filp)
 	return filp->private_data;
 }
 
+static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
+#ifdef CONFIG_KVM_MPIC
+	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
+	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
+#endif
+
+#ifdef CONFIG_KVM_XICS
+	[KVM_DEV_TYPE_XICS]		= &kvm_xics_ops,
+#endif
+};
+
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
+{
+	if (type >= ARRAY_SIZE(kvm_device_ops_table))
+		return -ENOSPC;
+
+	if (kvm_device_ops_table[type] != NULL)
+		return -EEXIST;
+
+	kvm_device_ops_table[type] = ops;
+
+	return 0;
+}
+
 static int kvm_ioctl_create_device(struct kvm *kvm,
 				   struct kvm_create_device *cd)
 {
...
@@ -2275,36 +2310,12 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
 	int ret;
 
-	switch (cd->type) {
-#ifdef CONFIG_KVM_MPIC
-	case KVM_DEV_TYPE_FSL_MPIC_20:
-	case KVM_DEV_TYPE_FSL_MPIC_42:
-		ops = &kvm_mpic_ops;
-		break;
-#endif
-#ifdef CONFIG_KVM_XICS
-	case KVM_DEV_TYPE_XICS:
-		ops = &kvm_xics_ops;
-		break;
-#endif
-#ifdef CONFIG_KVM_VFIO
-	case KVM_DEV_TYPE_VFIO:
-		ops = &kvm_vfio_ops;
-		break;
-#endif
-#ifdef CONFIG_KVM_ARM_VGIC
-	case KVM_DEV_TYPE_ARM_VGIC_V2:
-		ops = &kvm_arm_vgic_v2_ops;
-		break;
-#endif
-#ifdef CONFIG_S390
-	case KVM_DEV_TYPE_FLIC:
-		ops = &kvm_flic_ops;
-		break;
-#endif
-	default:
+	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
+		return -ENODEV;
+
+	ops = kvm_device_ops_table[cd->type];
+	if (ops == NULL)
 		return -ENODEV;
-	}
 
 	if (test)
 		return 0;
...
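With the lookup table and kvm_register_device_ops() in place, a device backend no longer needs its own case in kvm_ioctl_create_device(); it registers once at init time, as the vfio change further down does. The fragment below is a hypothetical backend used only to show the shape of such a registration: "my-dev", KVM_DEV_TYPE_MY_DEV and the two callbacks do not exist in the tree.

/* Hypothetical backend registration, for illustration only. */
static int my_dev_create(struct kvm_device *dev, u32 type)
{
	return 0;	/* nothing to set up in this sketch */
}

static void my_dev_destroy(struct kvm_device *dev)
{
	kfree(dev);	/* allocated by kvm_ioctl_create_device() */
}

static struct kvm_device_ops my_dev_ops = {
	.name	 = "my-dev",
	.create	 = my_dev_create,
	.destroy = my_dev_destroy,
};

static int __init my_dev_ops_init(void)
{
	/* Returns -EEXIST if another backend already claimed the slot. */
	return kvm_register_device_ops(&my_dev_ops, KVM_DEV_TYPE_MY_DEV);
}
module_init(my_dev_ops_init);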
@@ -2619,7 +2630,6 @@ static long kvm_dev_ioctl(struct file *filp,
 	switch (ioctl) {
 	case KVM_GET_API_VERSION:
-		r = -EINVAL;
 		if (arg)
 			goto out;
 		r = KVM_API_VERSION;
...
@@ -2631,7 +2641,6 @@ static long kvm_dev_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
 		break;
 	case KVM_GET_VCPU_MMAP_SIZE:
-		r = -EINVAL;
 		if (arg)
 			goto out;
 		r = PAGE_SIZE;     /* struct kvm_run */
...
@@ -2676,7 +2685,7 @@ static void hardware_enable_nolock(void *junk)
 
 	cpumask_set_cpu(cpu, cpus_hardware_enabled);
 
-	r = kvm_arch_hardware_enable(NULL);
+	r = kvm_arch_hardware_enable();
 
 	if (r) {
 		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
...
@@ -2701,7 +2710,7 @@ static void hardware_disable_nolock(void *junk)
 	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
 		return;
 	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
-	kvm_arch_hardware_disable(NULL);
+	kvm_arch_hardware_disable();
 }
 
 static void hardware_disable(void)
...
virt/kvm/vfio.c
...
@@ -246,6 +246,16 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
 }
 
+static int kvm_vfio_create(struct kvm_device *dev, u32 type);
+
+static struct kvm_device_ops kvm_vfio_ops = {
+	.name = "kvm-vfio",
+	.create = kvm_vfio_create,
+	.destroy = kvm_vfio_destroy,
+	.set_attr = kvm_vfio_set_attr,
+	.has_attr = kvm_vfio_has_attr,
+};
+
 static int kvm_vfio_create(struct kvm_device *dev, u32 type)
 {
 	struct kvm_device *tmp;
...
@@ -268,10 +278,8 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
 	return 0;
 }
 
-struct kvm_device_ops kvm_vfio_ops = {
-	.name = "kvm-vfio",
-	.create = kvm_vfio_create,
-	.destroy = kvm_vfio_destroy,
-	.set_attr = kvm_vfio_set_attr,
-	.has_attr = kvm_vfio_has_attr,
-};
+static int __init kvm_vfio_ops_init(void)
+{
+	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
+}
+module_init(kvm_vfio_ops_init);