openanolis / cloud-kernel
Commit 2dfee7b2
Authored April 25, 2013 by Gleb Natapov

    Merge branch 'kvm-arm-cleanup' from
    git://github.com/columbia/linux-kvm-arm.git

Parents: 660696d1, f42798c6

Showing 19 changed files with 585 additions and 387 deletions (+585, -387)
arch/arm/include/asm/kvm_arm.h       +4    -0
arch/arm/include/asm/kvm_asm.h       +1    -1
arch/arm/include/asm/kvm_emulate.h   +100  -7
arch/arm/include/asm/kvm_host.h      +34   -8
arch/arm/include/asm/kvm_mmu.h       +67   -0
arch/arm/include/asm/kvm_vgic.h      +0    -1
arch/arm/include/uapi/asm/kvm.h      +6    -6
arch/arm/kernel/asm-offsets.c        +4    -4
arch/arm/kvm/Makefile                +1    -1
arch/arm/kvm/arm.c                   +4    -190
arch/arm/kvm/coproc.c                +14   -14
arch/arm/kvm/coproc.h                +2    -2
arch/arm/kvm/emulate.c               +52   -23
arch/arm/kvm/guest.c                 +17   -0
arch/arm/kvm/handle_exit.c           +164  -0
arch/arm/kvm/interrupts.S            +8    -5
arch/arm/kvm/mmio.c                  +18   -28
arch/arm/kvm/mmu.c                   +88   -96
arch/arm/kvm/vgic.c                  +1    -1
arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
 #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)

+#define HSR_DABT_S1PTW		(1U << 7)
+#define HSR_DABT_CM		(1U << 8)
+#define HSR_DABT_EA		(1U << 9)
+
 #endif /* __ARM_KVM_ARM_H__ */
arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);

 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);

 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

 #endif
arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>

-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);

-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);

@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 	return 1;
 }

-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+	return &vcpu->arch.regs.usr_regs.ARM_pc;
 }

-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
 }

 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)

@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
 	return reg == 15;
 }

+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+	case 0:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	default:
+		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+		return -EFAULT;
+	}
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
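The accessors added above centralize HSR (Hyp Syndrome Register) bit-field decoding that the rest of this series then stops open-coding. As a standalone illustration only (not kernel code), the sketch below pulls the same data-abort fields out of an example HSR value; the example value is made up, and the HSR_ISV/HSR_WNR/HSR_SRT_* constants are not spelled out in this diff, so their values here are an assumption taken from the ARM ISS layout.

/*
 * Standalone demo of the HSR decoding done by the kvm_vcpu_dabt_* helpers.
 * Assumption: HSR_ISV = bit 24, HSR_WNR = bit 6, SRT field = bits [19:16],
 * SAS field = bits [23:22]; the hsr value itself is fabricated.
 */
#include <stdio.h>
#include <stdint.h>

#define HSR_ISV		(1U << 24)
#define HSR_WNR		(1U << 6)
#define HSR_SRT_SHIFT	16
#define HSR_SRT_MASK	(0xfU << HSR_SRT_SHIFT)

static int dabt_get_as(uint32_t hsr)	/* mirrors kvm_vcpu_dabt_get_as() */
{
	switch ((hsr >> 22) & 0x3) {
	case 0: return 1;
	case 1: return 2;
	case 2: return 4;
	default: return -1;		/* SAS 0b11 is reserved */
	}
}

int main(void)
{
	uint32_t hsr = HSR_ISV | (1U << 22) | (3U << HSR_SRT_SHIFT) | HSR_WNR;

	printf("valid=%d write=%d size=%d rd=r%u\n",
	       !!(hsr & HSR_ISV), !!(hsr & HSR_WNR), dabt_get_as(hsr),
	       (hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT);
	return 0;
}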
arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };

+struct kvm_vcpu_fault_info {
+	u32 hsr;		/* Hyp Syndrome Register */
+	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
+	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_kernel_vfp_t;
+
 struct kvm_vcpu_arch {
 	struct kvm_regs regs;

@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
 	u32 midr;

 	/* Exception Information */
-	u32 hsr;		/* Hyp Syndrome Register */
-	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
-	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	struct kvm_vcpu_fault_info fault;

 	/* Floating point registers (VFP and Advanced SIMD/NEON) */
-	struct vfp_hard_struct vfp_guest;
-	struct vfp_hard_struct *vfp_host;
+	kvm_kernel_vfp_t vfp_guest;
+	kvm_kernel_vfp_t *vfp_host;

 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;

@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
 	/* Interrupt related fields */
 	u32 irq_lines;		/* IRQ and FIQ levels */

-	/* Hyp exception information */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;

@@ -181,4 +185,26 @@ struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
+				       unsigned long hyp_stack_ptr,
+				       unsigned long vector_ptr)
+{
+	unsigned long pgd_low, pgd_high;
+
+	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+	pgd_high = (pgd_ptr >> 32ULL);
+
+	/*
+	 * Call initialization code, and switch to the full blown
+	 * HYP code. The init code doesn't need to preserve these registers as
+	 * r1-r3 and r12 are already callee save according to the AAPCS.
+	 * Note that we slightly misuse the prototype by casing the pgd_low to
+	 * a void *.
+	 */
+	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
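__cpu_init_hyp_mode() above hands the 64-bit LPAE HTTBR value to the HYP init code as two 32-bit halves, since the call goes through a 32-bit register-based ABI. A minimal standalone sketch of that split and its recombination, with a made-up physical address, is shown below.

/*
 * Standalone demo of the pgd_ptr split in __cpu_init_hyp_mode(): a 64-bit
 * value is split into low/high 32-bit words and reassembled unchanged.
 * The example address is arbitrary.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long pgd_ptr = 0x18340aULL << 12;	/* fabricated >4GB PA */
	unsigned long pgd_low  = pgd_ptr & ((1ULL << 32) - 1);
	unsigned long pgd_high = pgd_ptr >> 32ULL;

	/* The callee puts the two halves back together. */
	unsigned long long reassembled =
		((unsigned long long)pgd_high << 32) | pgd_low;

	printf("pgd=%#llx low=%#lx high=%#lx ok=%d\n",
	       pgd_ptr, pgd_low, pgd_high, reassembled == pgd_ptr);
	return 0;
}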
arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__

+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	(~0UL)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);

@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);

+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;

@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 	return true;
 }

+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache.  If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
 #endif /* __ARM_KVM_MMU_H__ */
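In this commit KERN_TO_HYP() is an identity macro (the HYP VA equals the kernel VA), but the mmu.c changes below already route every HYP address through it, so the offset can later become non-trivial without touching the callers. A purely hypothetical sketch of a masked variant is shown below; the PAGE_OFFSET and mask values are invented for the demo and are not part of this commit.

/*
 * Hypothetical, non-identity KERN_TO_HYP() sketch (assumption: a 3G/1G
 * split kernel and a mask that keeps the low 31 bits).  This commit's
 * real definition is simply KERN_TO_HYP(kva) == (kva).
 */
#include <stdio.h>

#define PAGE_OFFSET		0xc0000000UL		/* assumed */
#define HYP_PAGE_OFFSET_MASK	(~0UL >> 1)		/* invented for the demo */
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
#define KERN_TO_HYP(kva)	((unsigned long)(kva) & HYP_PAGE_OFFSET_MASK)

int main(void)
{
	unsigned long kva = PAGE_OFFSET + 0x1234UL;

	printf("HYP_PAGE_OFFSET=%#lx, kernel VA %#lx -> HYP VA %#lx\n",
	       HYP_PAGE_OFFSET, kva, KERN_TO_HYP(kva));
	return 0;
}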
arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@
 #include <linux/kernel.h>
 #include <linux/kvm.h>
-#include <linux/kvm_host.h>
 #include <linux/irqreturn.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
 #define KVM_ARM_FIQ_spsr	fiq_regs[7]

 struct kvm_regs {
 	struct pt_regs usr_regs;	/* R0_usr - R14_usr, PC, CPSR */
-	__u32 svc_regs[3];		/* SP_svc, LR_svc, SPSR_svc */
-	__u32 abt_regs[3];		/* SP_abt, LR_abt, SPSR_abt */
-	__u32 und_regs[3];		/* SP_und, LR_und, SPSR_und */
-	__u32 irq_regs[3];		/* SP_irq, LR_irq, SPSR_irq */
-	__u32 fiq_regs[8];		/* R8_fiq - R14_fiq, SPSR_fiq */
+	unsigned long svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
+	unsigned long abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
+	unsigned long und_regs[3];	/* SP_und, LR_und, SPSR_und */
+	unsigned long irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
+	unsigned long fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
 };

 /* Supported Processor Types */
arch/arm/kernel/asm-offsets.c
@@ -165,10 +165,10 @@ int main(void)
   DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
   DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
   DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
-  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.hsr));
-  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar));
-  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar));
-  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc));
+  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
+  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
+  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
+  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
 #ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
   DEFINE(VGIC_CPU_HCR,		offsetof(struct vgic_cpu, vgic_hcr));
arch/arm/kvm/Makefile
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

 obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o guest.o mmu.o emulate.o reset.o
+obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o mmio.o psci.o
 obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
 obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
arch/arm/kvm/arm.c
@@ -30,11 +30,9 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"

-#include <asm/unified.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/mman.h>
-#include <asm/cputype.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/virt.h>

@@ -44,14 +42,13 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_psci.h>
-#include <asm/opcodes.h>

 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension	virt");
 #endif

 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
 static unsigned long hyp_default_vectors;

 /* Per-CPU variable containing the currently running vcpu. */

@@ -294,22 +291,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return 0;
 }

-int __attribute_const__ kvm_target_cpu(void)
-{
-	unsigned long implementor = read_cpuid_implementor();
-	unsigned long part_number = read_cpuid_part_number();
-
-	if (implementor != ARM_CPU_IMP_ARM)
-		return -EINVAL;
-
-	switch (part_number) {
-	case ARM_CPU_PART_CORTEX_A15:
-		return KVM_ARM_TARGET_CORTEX_A15;
-	default:
-		return -EINVAL;
-	}
-}
-
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	int ret;

@@ -472,163 +453,6 @@ static void update_vttbr(struct kvm *kvm)
 	spin_unlock(&kvm_vmid_lock);
 }

-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* SVC called from Hyp mode should never get here */
-	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-	BUG();
-	return -EINVAL; /* Squash warning */
-}
-
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
-		      vcpu->arch.hsr & HSR_HVC_IMM_MASK);
-
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* The hypervisor should never cause aborts */
-	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* This is either an error in the ws. code or an external abort */
-	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-static exit_handle_fn arm_exit_handlers[] = {
-	[HSR_EC_WFI]		= kvm_handle_wfi,
-	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
-	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
-	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
-	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
-	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
-	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
-	[HSR_EC_HVC]		= handle_hvc,
-	[HSR_EC_SMC]		= handle_smc,
-	[HSR_EC_IABT]		= kvm_handle_guest_abort,
-	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
-	[HSR_EC_DABT]		= kvm_handle_guest_abort,
-	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
-};
-
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed.  So let's re-implement the hardware, in
- * software!
- */
-static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr, cond, insn;
-
-	/*
-	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-	 * catch undefined instructions, and then we won't get past
-	 * the arm_exit_handlers test anyway.
-	 */
-	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
-
-	/* Top two bits non-zero?  Unconditional. */
-	if (vcpu->arch.hsr >> 30)
-		return true;
-
-	cpsr = *vcpu_cpsr(vcpu);
-
-	/* Is condition field valid? */
-	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
-	else {
-		/* This can happen in Thumb mode: examine IT state. */
-		unsigned long it;
-
-		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-		/* it == 0 => unconditional. */
-		if (it == 0)
-			return true;
-
-		/* The cond for this insn works out as the top 4 bits. */
-		cond = (it >> 4);
-	}
-
-	/* Shift makes it look like an ARM-mode instruction */
-	insn = cond << 28;
-	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
- * proper exit to QEMU.
- */
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index)
-{
-	unsigned long hsr_ec;
-
-	switch (exception_index) {
-	case ARM_EXCEPTION_IRQ:
-		return 1;
-	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
-			vcpu->arch.hyp_pc);
-		BUG();
-		panic("KVM: Hypervisor undefined exception!\n");
-	case ARM_EXCEPTION_DATA_ABORT:
-	case ARM_EXCEPTION_PREF_ABORT:
-	case ARM_EXCEPTION_HVC:
-		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
-
-		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
-		    || !arm_exit_handlers[hsr_ec]) {
-			kvm_err("Unkown exception class: %#08lx, "
-				"hsr: %#08x\n", hsr_ec,
-				(unsigned int)vcpu->arch.hsr);
-			BUG();
-		}
-
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			bool is_wide = vcpu->arch.hsr & HSR_IL;
-			kvm_skip_instr(vcpu, is_wide);
-			return 1;
-		}
-
-		return arm_exit_handlers[hsr_ec](vcpu, run);
-	default:
-		kvm_pr_unimpl("Unsupported exception type: %d",
-			      exception_index);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		return 0;
-	}
-}
-
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	if (likely(vcpu->arch.has_run_once))

@@ -964,7 +788,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static void cpu_init_hyp_mode(void *vector)
 {
 	unsigned long long pgd_ptr;
-	unsigned long pgd_low, pgd_high;
 	unsigned long hyp_stack_ptr;
 	unsigned long stack_page;
 	unsigned long vector_ptr;

@@ -973,20 +796,11 @@ static void cpu_init_hyp_mode(void *vector)
 	__hyp_set_vectors((unsigned long)vector);

 	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
-	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
-	pgd_high = (pgd_ptr >> 32ULL);
 	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;

-	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code. The init code doesn't need to preserve these registers as
-	 * r1-r3 and r12 are already callee save according to the AAPCS.
-	 * Note that we slightly misuse the prototype by casing the pgd_low to
-	 * a void *.
-	 */
-	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 }

 /**

@@ -1069,7 +883,7 @@ static int init_hyp_mode(void)
 	/*
 	 * Map the host VFP structures
 	 */
-	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+	kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
 	if (!kvm_host_vfp_state) {
 		err = -ENOMEM;
 		kvm_err("Cannot allocate host VFP state\n");

@@ -1077,7 +891,7 @@ static int init_hyp_mode(void)
 	}

 	for_each_possible_cpu(cpu) {
-		struct vfp_hard_struct *vfp;
+		kvm_kernel_vfp_t *vfp;

 		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
 		err = create_hyp_mappings(vfp, vfp + 1);
arch/arm/kvm/coproc.c
@@ -76,7 +76,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	u32 val;
+	unsigned long val;
 	int cpu;

 	cpu = get_cpu();

@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
-			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 			return 1;
 		}
 		/* If access function fails, it should complain. */
 	} else {
-		kvm_err("Unsupported guest CP15 access at: %08x\n",
+		kvm_err("Unsupported guest CP15 access at: %08lx\n",
 			*vcpu_pc(vcpu));
 		print_cp_instr(params);
 	}

@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;

-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = true;

-	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
 	params.Op2 = 0;
-	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRn = 0;

 	return emulate_cp15(vcpu, &params);

@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;

-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = false;

-	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
-	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
-	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
+	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;

 	return emulate_cp15(vcpu, &params);
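The hunks above keep the same CP15 trap decoding but read the syndrome through kvm_vcpu_get_hsr(). As a standalone illustration, the sketch below unpacks the same bit-fields used by kvm_handle_cp15_32() into a local struct; the struct, helper name, and the example HSR value are invented for the demo.

/*
 * Standalone sketch of the MCR/MRC (32-bit CP15 access) trap decoding:
 * operands come straight out of HSR bit-fields, exactly as in the diff.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct cp_params {		/* demo-local stand-in for coproc_params */
	unsigned long CRn, CRm, Op1, Op2, Rt1;
	bool is_write;
};

static struct cp_params decode_cp15_32(uint32_t hsr)
{
	struct cp_params p;

	p.CRm      = (hsr >> 1) & 0xf;
	p.Rt1      = (hsr >> 5) & 0xf;
	p.is_write = ((hsr & 1) == 0);	/* bit 0 clear => write (MCR) */
	p.CRn      = (hsr >> 10) & 0xf;
	p.Op1      = (hsr >> 14) & 0x7;
	p.Op2      = (hsr >> 17) & 0x7;
	return p;
}

int main(void)
{
	struct cp_params p = decode_cp15_32(0x03b60a46);	/* made-up HSR */

	printf("p15, %lu, r%lu, c%lu, c%lu, %lu (%s)\n",
	       p.Op1, p.Rt1, p.CRn, p.CRm, p.Op2,
	       p.is_write ? "write" : "read");
	return 0;
}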
arch/arm/kvm/coproc.h
@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
 static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 				      const struct coproc_params *params)
 {
-	kvm_debug("CP15 write to read-only register at: %08x\n",
+	kvm_debug("CP15 write to read-only register at: %08lx\n",
 		  *vcpu_pc(vcpu));
 	print_cp_instr(params);
 	return false;

@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
 					const struct coproc_params *params)
 {
-	kvm_debug("CP15 read to write-only register at: %08x\n",
+	kvm_debug("CP15 read to write-only register at: %08lx\n",
 		  *vcpu_pc(vcpu));
 	print_cp_instr(params);
 	return false;
arch/arm/kvm/emulate.c
@@ -20,6 +20,7 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/opcodes.h>
 #include <trace/events/kvm.h>

 #include "trace.h"

@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
  * Return a pointer to the register number valid in the current mode of
  * the virtual CPU.
  */
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	u32 *reg_array = (u32 *)&vcpu->arch.regs;
-	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

 	switch (mode) {
 	case USR_MODE...SVC_MODE:

@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 /*
  * Return the SPSR for the current mode of the virtual CPU.
  */
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

 	switch (mode) {
 	case SVC_MODE:
 		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;

@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
 	}
 }

-/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
- * @vcpu:	the vcpu pointer
- * @run:	the kvm_run structure pointer
- *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed.  So let's re-implement the hardware, in
+ * software!
  */
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+bool kvm_condition_valid(struct kvm_vcpu *vcpu)
 {
-	trace_kvm_wfi(*vcpu_pc(vcpu));
-	kvm_vcpu_block(vcpu);
-	return 1;
+	unsigned long cpsr, cond, insn;
+
+	/*
+	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+	 * catch undefined instructions, and then we won't get past
+	 * the arm_exit_handlers test anyway.
+	 */
+	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
+
+	/* Top two bits non-zero?  Unconditional. */
+	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+		return true;
+
+	cpsr = *vcpu_cpsr(vcpu);
+
+	/* Is condition field valid? */
+	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
+		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
+	else {
+		/* This can happen in Thumb mode: examine IT state. */
+		unsigned long it;
+
+		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+		/* it == 0 => unconditional. */
+		if (it == 0)
+			return true;
+
+		/* The cond for this insn works out as the top 4 bits. */
+		cond = (it >> 4);
+	}
+
+	/* Shift makes it look like an ARM-mode instruction */
+	insn = cond << 28;
+	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
 }

 /**

@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-	u32 new_lr_value;
-	u32 new_spsr_value;
-	u32 cpsr = *vcpu_cpsr(vcpu);
+	unsigned long new_lr_value;
+	unsigned long new_spsr_value;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset = 4;

@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	u32 new_lr_value;
-	u32 new_spsr_value;
-	u32 cpsr = *vcpu_cpsr(vcpu);
+	unsigned long new_lr_value;
+	unsigned long new_spsr_value;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
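kvm_condition_valid() above reassembles the Thumb ITSTATE from its two CPSR fields, takes its top four bits as the condition, and shifts that condition into bits [31:28] so arm_check_condition() can evaluate it as if it were an ARM-encoded instruction. A standalone sketch of just that arithmetic follows; the CPSR value is made up.

/*
 * Standalone demo of the IT-state handling in kvm_condition_valid():
 * ITSTATE = CPSR[15:10] (bits 7:2) | CPSR[26:25] (bits 1:0);
 * cond = ITSTATE[7:4]; insn = cond << 28.
 */
#include <stdio.h>

int main(void)
{
	unsigned long cpsr = (0x2bUL << 10) | (0x1UL << 25);	/* fabricated IT bits */
	unsigned long it, cond, insn;

	it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
	cond = it >> 4;			/* it == 0 would mean "unconditional" */
	insn = cond << 28;		/* what gets fed to arm_check_condition() */

	printf("it=%#lx cond=%#lx insn=%#lx\n", it, cond, insn);
	return 0;
}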
arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>

@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }

+int __attribute_const__ kvm_target_cpu(void)
+{
+	unsigned long implementor = read_cpuid_implementor();
+	unsigned long part_number = read_cpuid_part_number();
+
+	if (implementor != ARM_CPU_IMP_ARM)
+		return -EINVAL;
+
+	switch (part_number) {
+	case ARM_CPU_PART_CORTEX_A15:
+		return KVM_ARM_TARGET_CORTEX_A15;
+	default:
+		return -EINVAL;
+	}
+}
+
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			const struct kvm_vcpu_init *init)
 {
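kvm_target_cpu(), moved into guest.c above, identifies the host CPU from the implementer and part-number fields of the MIDR. As a standalone illustration (not kernel code), the sketch below does the equivalent decode on a fabricated MIDR value; the exact mask used by read_cpuid_part_number() and the ARM_CPU_PART_CORTEX_A15 constant are assumptions here, since the diff does not show them.

/*
 * Standalone sketch of the MIDR check behind kvm_target_cpu():
 * implementer in MIDR[31:24], part number in MIDR[15:4].
 * The MIDR value and the 0xFFF0 part mask are assumptions for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define ARM_CPU_IMP_ARM		0x41		/* 'A' */
#define ARM_CPU_PART_CORTEX_A15	0xC0F0		/* assumed: part kept in bits [15:4] */

int main(void)
{
	uint32_t midr = 0x412FC0F1;		/* fabricated Cortex-A15-style MIDR */
	unsigned long implementor = midr >> 24;
	unsigned long part_number = midr & 0xFFF0;

	printf("implementor=%#lx part=%#lx is_a15=%d\n",
	       implementor, part_number,
	       implementor == ARM_CPU_IMP_ARM &&
	       part_number == ARM_CPU_PART_CORTEX_A15);
	return 0;
}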
arch/arm/kvm/handle_exit.c (new file, 0 → 100644)
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_psci.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* SVC called from Hyp mode should never get here */
+	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
+	BUG();
+	return -EINVAL; /* Squash warning */
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+		      kvm_vcpu_hvc_get_imm(vcpu));
+
+	if (kvm_psci_call(vcpu))
+		return 1;
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	if (kvm_psci_call(vcpu))
+		return 1;
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* The hypervisor should never cause aborts */
+	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+	return -EFAULT;
+}
+
+static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* This is either an error in the ws. code or an external abort */
+	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+	return -EFAULT;
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu:	the vcpu pointer
+ * @run:	the kvm_run structure pointer
+ *
+ * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+ * halt execution of world-switches and schedule other host processes until
+ * there is an incoming IRQ or FIQ to the VM.
+ */
+static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_wfi(*vcpu_pc(vcpu));
+	kvm_vcpu_block(vcpu);
+	return 1;
+}
+
+static exit_handle_fn arm_exit_handlers[] = {
+	[HSR_EC_WFI]		= kvm_handle_wfi,
+	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
+	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
+	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
+	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
+	[HSR_EC_HVC]		= handle_hvc,
+	[HSR_EC_SMC]		= handle_smc,
+	[HSR_EC_IABT]		= kvm_handle_guest_abort,
+	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
+	[HSR_EC_DABT]		= kvm_handle_guest_abort,
+	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
+};
+
+static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+	    !arm_exit_handlers[hsr_ec]) {
+		kvm_err("Unkown exception class: hsr: %#08x\n",
+			(unsigned int)kvm_vcpu_get_hsr(vcpu));
+		BUG();
+	}
+
+	return arm_exit_handlers[hsr_ec];
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ */
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index)
+{
+	exit_handle_fn exit_handler;
+
+	switch (exception_index) {
+	case ARM_EXCEPTION_IRQ:
+		return 1;
+	case ARM_EXCEPTION_UNDEFINED:
+		kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
+			kvm_vcpu_get_hyp_pc(vcpu));
+		BUG();
+		panic("KVM: Hypervisor undefined exception!\n");
+	case ARM_EXCEPTION_DATA_ABORT:
+	case ARM_EXCEPTION_PREF_ABORT:
+	case ARM_EXCEPTION_HVC:
+		/*
+		 * See ARM ARM B1.14.1: "Hyp traps on instructions
+		 * that fail their condition code check"
+		 */
+		if (!kvm_condition_valid(vcpu)) {
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			return 1;
+		}
+
+		exit_handler = kvm_get_exit_handler(vcpu);
+
+		return exit_handler(vcpu, run);
+	default:
+		kvm_pr_unimpl("Unsupported exception type: %d",
+			      exception_index);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		return 0;
+	}
+}
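The new file centralizes exit dispatch in arm_exit_handlers[], an array indexed by the HSR exception class, filled with designated initializers and guarded by a bounds-plus-NULL check before the call. A minimal standalone sketch of that dispatch pattern in isolation follows; all names and values in it are invented for the demo.

/*
 * Minimal standalone sketch of the arm_exit_handlers[] dispatch pattern:
 * class-indexed function-pointer table with a bounds/NULL check.
 */
#include <stdio.h>

typedef int (*exit_fn)(int data);

enum { EC_WFI = 1, EC_HVC = 0x12, EC_MAX = 0x3f };	/* demo values */

static int do_wfi(int data) { printf("wfi %d\n", data); return 1; }
static int do_hvc(int data) { printf("hvc %d\n", data); return 1; }

static exit_fn handlers[EC_MAX + 1] = {
	[EC_WFI] = do_wfi,
	[EC_HVC] = do_hvc,
};

static int dispatch(unsigned int ec, int data)
{
	if (ec >= sizeof(handlers) / sizeof(handlers[0]) || !handlers[ec])
		return -1;		/* unknown exception class */
	return handlers[ec](data);
}

int main(void)
{
	int a = dispatch(EC_WFI, 0);
	int b = dispatch(EC_HVC, 1);
	int c = dispatch(7, 2);		/* no handler installed */

	printf("%d %d %d\n", a, b, c);
	return 0;
}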
arch/arm/kvm/interrupts.S
@@ -35,15 +35,18 @@ __kvm_hyp_code_start:
 /********************************************************************
  * Flush per-VMID TLBs
  *
- * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
  *
  * We rely on the hardware to broadcast the TLB invalidation to all CPUs
  * inside the inner-shareable domain (which is the case for all v7
  * implementations).  If we come across a non-IS SMP implementation, we'll
  * have to use an IPI based mechanism. Until then, we stick to the simple
  * hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
  */
-ENTRY(__kvm_tlb_flush_vmid)
+ENTRY(__kvm_tlb_flush_vmid_ipa)
	push	{r2, r3}

	add	r0, r0, #KVM_VTTBR

@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)
	pop	{r2, r3}
	bx	lr
-ENDPROC(__kvm_tlb_flush_vmid)
+ENDPROC(__kvm_tlb_flush_vmid_ipa)

 /********************************************************************
  * Flush TLBs and instruction caches of all CPUs inside the inner-shareable

@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp)
  * instruction is issued since all traps are disabled when running the host
  * kernel as per the Hyp-mode initialization at boot time.
  *
- * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
+ * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
  * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
+ * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
  * instructions are called from within Hyp-mode.
  *
  * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
arch/arm/kvm/mmio.c
@@ -33,16 +33,16 @@
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	__u32 *dest;
+	unsigned long *dest;
 	unsigned int len;
 	int mask;

 	if (!run->mmio.is_write) {
 		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-		memset(dest, 0, sizeof(int));
+		*dest = 0;

 		len = run->mmio.len;
-		if (len > 4)
+		if (len > sizeof(unsigned long))
 			return -EINVAL;

 		memcpy(dest, run->mmio.data, len);

@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       *((u64 *)run->mmio.data));

-		if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
+		if (vcpu->arch.mmio_decode.sign_extend &&
+		    len < sizeof(unsigned long)) {
 			mask = 1U << ((len * 8) - 1);
 			*dest = (*dest ^ mask) - mask;
 		}

@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long rt, len;
 	bool is_write, sign_extend;

-	if ((vcpu->arch.hsr >> 8) & 1) {
+	if (kvm_vcpu_dabt_isextabt(vcpu)) {
 		/* cache operation on I/O addr, tell guest unsupported */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}

-	if ((vcpu->arch.hsr >> 7) & 1) {
+	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
 		/* page table accesses IO mem: tell guest to fix its TTBR */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}

-	switch ((vcpu->arch.hsr >> 22) & 0x3) {
-	case 0:
-		len = 1;
-		break;
-	case 1:
-		len = 2;
-		break;
-	case 2:
-		len = 4;
-		break;
-	default:
-		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-		return -EFAULT;
-	}
+	len = kvm_vcpu_dabt_get_as(vcpu);
+	if (unlikely(len < 0))
+		return len;

-	is_write = vcpu->arch.hsr & HSR_WNR;
-	sign_extend = vcpu->arch.hsr & HSR_SSE;
-	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+	is_write = kvm_vcpu_dabt_iswrite(vcpu);
+	sign_extend = kvm_vcpu_dabt_issext(vcpu);
+	rt = kvm_vcpu_dabt_get_rd(vcpu);

 	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
 		/* IO memory trying to read/write pc */
-		kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}

@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * The MMIO instruction is emulated and should not be re-executed
 	 * in the guest.
 	 */
-	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 	return 0;
 }

@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	 * space do its magic.
 	 */

-	if (vcpu->arch.hsr & HSR_ISV) {
+	if (kvm_vcpu_dabt_isvalid(vcpu)) {
 		ret = decode_hsr(vcpu, fault_ipa, &mmio);
 		if (ret)
 			return ret;
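The MMIO read-return path above keeps using the `(*dest ^ mask) - mask` idiom to sign-extend a sub-word load into the full guest register. A standalone demonstration on a one-byte value follows; it mirrors the kernel code's types (dest as unsigned long, mask as int) but is otherwise just a toy.

/*
 * Standalone demo of the sign-extension trick in kvm_handle_mmio_return():
 * XOR with the sign-bit mask, then subtract it.
 */
#include <stdio.h>

int main(void)
{
	unsigned long dest = 0x80;		/* 1-byte MMIO read, top bit set */
	unsigned int len = 1;
	int mask = 1U << ((len * 8) - 1);	/* 0x80 for a byte */

	dest = (dest ^ mask) - mask;		/* becomes ...ffffff80 */
	printf("sign-extended: %#lx (%ld)\n", dest, (long)dest);
	return 0;
}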
arch/arm/kvm/mmu.c
浏览文件 @
2dfee7b2
...
@@ -20,7 +20,6 @@
...
@@ -20,7 +20,6 @@
#include <linux/kvm_host.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <trace/events/kvm.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_arm.h>
...
@@ -28,8 +27,6 @@
...
@@ -28,8 +27,6 @@
#include <asm/kvm_mmio.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_emulate.h>
#include <asm/mach/map.h>
#include <trace/events/kvm.h>
#include "trace.h"
#include "trace.h"
...
@@ -37,19 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
...
@@ -37,19 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
static
DEFINE_MUTEX
(
kvm_hyp_pgd_mutex
);
static
DEFINE_MUTEX
(
kvm_hyp_pgd_mutex
);
static
void
kvm_tlb_flush_vmid
(
struct
kvm
*
kvm
)
static
void
kvm_tlb_flush_vmid
_ipa
(
struct
kvm
*
kvm
,
phys_addr_t
ipa
)
{
{
kvm_call_hyp
(
__kvm_tlb_flush_vmid
,
kvm
);
kvm_call_hyp
(
__kvm_tlb_flush_vmid_ipa
,
kvm
,
ipa
);
}
static
void
kvm_set_pte
(
pte_t
*
pte
,
pte_t
new_pte
)
{
pte_val
(
*
pte
)
=
new_pte
;
/*
* flush_pmd_entry just takes a void pointer and cleans the necessary
* cache entries, so we can reuse the function for ptes.
*/
flush_pmd_entry
(
pte
);
}
}
static
int
mmu_topup_memory_cache
(
struct
kvm_mmu_memory_cache
*
cache
,
static
int
mmu_topup_memory_cache
(
struct
kvm_mmu_memory_cache
*
cache
,
...
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)
...
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)
}
}
}
}
static
void
free_hyp_pgd_entry
(
unsigned
long
addr
)
{
pgd_t
*
pgd
;
pud_t
*
pud
;
pmd_t
*
pmd
;
unsigned
long
hyp_addr
=
KERN_TO_HYP
(
addr
);
pgd
=
hyp_pgd
+
pgd_index
(
hyp_addr
);
pud
=
pud_offset
(
pgd
,
hyp_addr
);
if
(
pud_none
(
*
pud
))
return
;
BUG_ON
(
pud_bad
(
*
pud
));
pmd
=
pmd_offset
(
pud
,
hyp_addr
);
free_ptes
(
pmd
,
addr
);
pmd_free
(
NULL
,
pmd
);
pud_clear
(
pud
);
}
/**
/**
* free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
* free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
*
*
* Assumes this is a page table used strictly in Hyp-mode and therefore contains
* Assumes this is a page table used strictly in Hyp-mode and therefore contains
* only mappings in the kernel memory area, which is above PAGE_OFFSET.
* either mappings in the kernel memory area (above PAGE_OFFSET), or
* device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
*/
*/
void
free_hyp_pmds
(
void
)
void
free_hyp_pmds
(
void
)
{
{
pgd_t
*
pgd
;
pud_t
*
pud
;
pmd_t
*
pmd
;
unsigned
long
addr
;
unsigned
long
addr
;
mutex_lock
(
&
kvm_hyp_pgd_mutex
);
mutex_lock
(
&
kvm_hyp_pgd_mutex
);
for
(
addr
=
PAGE_OFFSET
;
addr
!=
0
;
addr
+=
PGDIR_SIZE
)
{
for
(
addr
=
PAGE_OFFSET
;
virt_addr_valid
(
addr
);
addr
+=
PGDIR_SIZE
)
pgd
=
hyp_pgd
+
pgd_index
(
addr
);
free_hyp_pgd_entry
(
addr
);
pud
=
pud_offset
(
pgd
,
addr
);
for
(
addr
=
VMALLOC_START
;
is_vmalloc_addr
((
void
*
)
addr
);
addr
+=
PGDIR_SIZE
)
free_hyp_pgd_entry
(
addr
);
if
(
pud_none
(
*
pud
))
continue
;
BUG_ON
(
pud_bad
(
*
pud
));
pmd
=
pmd_offset
(
pud
,
addr
);
free_ptes
(
pmd
,
addr
);
pmd_free
(
NULL
,
pmd
);
pud_clear
(
pud
);
}
mutex_unlock
(
&
kvm_hyp_pgd_mutex
);
mutex_unlock
(
&
kvm_hyp_pgd_mutex
);
}
}
...
@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
...
@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
struct
page
*
page
;
struct
page
*
page
;
for
(
addr
=
start
&
PAGE_MASK
;
addr
<
end
;
addr
+=
PAGE_SIZE
)
{
for
(
addr
=
start
&
PAGE_MASK
;
addr
<
end
;
addr
+=
PAGE_SIZE
)
{
pte
=
pte_offset_kernel
(
pmd
,
addr
);
unsigned
long
hyp_addr
=
KERN_TO_HYP
(
addr
);
pte
=
pte_offset_kernel
(
pmd
,
hyp_addr
);
BUG_ON
(
!
virt_addr_valid
(
addr
));
BUG_ON
(
!
virt_addr_valid
(
addr
));
page
=
virt_to_page
(
addr
);
page
=
virt_to_page
(
addr
);
kvm_set_pte
(
pte
,
mk_pte
(
page
,
PAGE_HYP
));
kvm_set_pte
(
pte
,
mk_pte
(
page
,
PAGE_HYP
));
...
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
...
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
unsigned
long
addr
;
unsigned
long
addr
;
for
(
addr
=
start
&
PAGE_MASK
;
addr
<
end
;
addr
+=
PAGE_SIZE
)
{
for
(
addr
=
start
&
PAGE_MASK
;
addr
<
end
;
addr
+=
PAGE_SIZE
)
{
pte
=
pte_offset_kernel
(
pmd
,
addr
);
unsigned
long
hyp_addr
=
KERN_TO_HYP
(
addr
);
pte
=
pte_offset_kernel
(
pmd
,
hyp_addr
);
BUG_ON
(
pfn_valid
(
*
pfn_base
));
BUG_ON
(
pfn_valid
(
*
pfn_base
));
kvm_set_pte
(
pte
,
pfn_pte
(
*
pfn_base
,
PAGE_HYP_DEVICE
));
kvm_set_pte
(
pte
,
pfn_pte
(
*
pfn_base
,
PAGE_HYP_DEVICE
));
(
*
pfn_base
)
++
;
(
*
pfn_base
)
++
;
...
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
...
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
unsigned
long
addr
,
next
;
unsigned
long
addr
,
next
;
for
(
addr
=
start
;
addr
<
end
;
addr
=
next
)
{
for
(
addr
=
start
;
addr
<
end
;
addr
=
next
)
{
pmd
=
pmd_offset
(
pud
,
addr
);
unsigned
long
hyp_addr
=
KERN_TO_HYP
(
addr
);
pmd
=
pmd_offset
(
pud
,
hyp_addr
);
BUG_ON
(
pmd_sect
(
*
pmd
));
BUG_ON
(
pmd_sect
(
*
pmd
));
if
(
pmd_none
(
*
pmd
))
{
if
(
pmd_none
(
*
pmd
))
{
pte
=
pte_alloc_one_kernel
(
NULL
,
addr
);
pte
=
pte_alloc_one_kernel
(
NULL
,
hyp_
addr
);
if
(
!
pte
)
{
if
(
!
pte
)
{
kvm_err
(
"Cannot allocate Hyp pte
\n
"
);
kvm_err
(
"Cannot allocate Hyp pte
\n
"
);
return
-
ENOMEM
;
return
-
ENOMEM
;
...
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
...
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
unsigned
long
addr
,
next
;
unsigned
long
addr
,
next
;
int
err
=
0
;
int
err
=
0
;
BUG_ON
(
start
>
end
);
if
(
start
>=
end
)
if
(
start
<
PAGE_OFFSET
)
return
-
EINVAL
;
/* Check for a valid kernel memory mapping */
if
(
!
pfn_base
&&
(
!
virt_addr_valid
(
from
)
||
!
virt_addr_valid
(
to
-
1
)))
return
-
EINVAL
;
/* Check for a valid kernel IO mapping */
if
(
pfn_base
&&
(
!
is_vmalloc_addr
(
from
)
||
!
is_vmalloc_addr
(
to
-
1
)))
return
-
EINVAL
;
return
-
EINVAL
;
mutex_lock
(
&
kvm_hyp_pgd_mutex
);
mutex_lock
(
&
kvm_hyp_pgd_mutex
);
for
(
addr
=
start
;
addr
<
end
;
addr
=
next
)
{
for
(
addr
=
start
;
addr
<
end
;
addr
=
next
)
{
pgd
=
hyp_pgd
+
pgd_index
(
addr
);
unsigned
long
hyp_addr
=
KERN_TO_HYP
(
addr
);
pud
=
pud_offset
(
pgd
,
addr
);
pgd
=
hyp_pgd
+
pgd_index
(
hyp_addr
);
pud
=
pud_offset
(
pgd
,
hyp_addr
);
if
(
pud_none_or_clear_bad
(
pud
))
{
if
(
pud_none_or_clear_bad
(
pud
))
{
pmd
=
pmd_alloc_one
(
NULL
,
addr
);
pmd
=
pmd_alloc_one
(
NULL
,
hyp_
addr
);
if
(
!
pmd
)
{
if
(
!
pmd
)
{
kvm_err
(
"Cannot allocate Hyp pmd
\n
"
);
kvm_err
(
"Cannot allocate Hyp pmd
\n
"
);
err
=
-
ENOMEM
;
err
=
-
ENOMEM
;
...
@@ -236,12 +243,13 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
 }
 
 /**
- * create_hyp_mappings - map a kernel virtual address range in Hyp mode
+ * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:	The virtual kernel start address of the range
  * @to:	The virtual kernel end address of the range (exclusive)
  *
- * The same virtual address as the kernel virtual address is also used in
- * Hyp-mode mapping to the same underlying physical pages.
+ * The same virtual address as the kernel virtual address is also used
+ * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
+ * physical pages.
  *
  * Note: Wrapping around zero in the "to" address is not supported.
  */
...
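The "modulo HYP_PAGE_OFFSET" wording refers to the KERN_TO_HYP() translation used throughout the hunks above. As a minimal sketch of the idea, assuming an offset-based HYP VA window (the authoritative macro lives in arch/arm/include/asm/kvm_mmu.h and may differ in detail):

	/* Hedged sketch: rebase a kernel linear-map VA into the HYP VA range. */
	/* HYP_PAGE_OFFSET is assumed here; the real definition may differ.    */
	#define SKETCH_KERN_TO_HYP(kva) \
		((unsigned long)(kva) - PAGE_OFFSET + HYP_PAGE_OFFSET)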
@@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to)
 }
 
 /**
- * create_hyp_io_mappings - map a physical IO range in Hyp mode
- * @from:	The virtual HYP start address of the range
- * @to:	The virtual HYP end address of the range (exclusive)
+ * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
+ * @from:	The kernel start VA of the range
+ * @to:		The kernel end VA of the range (exclusive)
  * @addr:	The physical start address which gets mapped
+ *
+ * The resulting HYP VA is the same as the kernel VA, modulo
+ * HYP_PAGE_OFFSET.
  */
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
 {
...
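For orientation, here is a caller-side sketch contrasting the two helpers, matching the validity checks added to __create_hyp_mappings() above; the vgic_base/gic_phys names and the SZ_8K size are illustrative, not taken from this patch:

	/* Caller-side sketch; identifiers and sizes are illustrative only. */
	static int example_hyp_mappings(phys_addr_t gic_phys)
	{
		void __iomem *vgic_base;
		int err;

		/* Kernel linear-map range: passes the virt_addr_valid() check. */
		err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
		if (err)
			return err;

		/* ioremap'ed (vmalloc-range) IO window: passes is_vmalloc_addr(). */
		vgic_base = ioremap(gic_phys, SZ_8K);
		if (!vgic_base)
			return -ENOMEM;
		return create_hyp_io_mappings((void __force *)vgic_base,
					      (void __force *)vgic_base + SZ_8K,
					      gic_phys);
	}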
@@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
 
 	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
-	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 
 	return 0;
...
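kvm_clean_pgd() (like the kvm_clean_pte() and kvm_clean_pmd_entry() calls further down) is a thin cache-maintenance wrapper introduced so the same call sites can later back both the 32-bit and 64-bit ports. A hedged sketch of what the 32-bit versions presumably reduce to (the real helpers are in arch/arm/include/asm/kvm_mmu.h):

	/* Sketches only: assumed to wrap the maintenance the old code open-coded. */
	static inline void kvm_clean_pgd(pgd_t *pgd)
	{
		clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
	}

	static inline void kvm_clean_pte(pte_t *pte)
	{
		clean_pte_table(pte);
	}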
@@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pmd = mmu_memory_cache_alloc(cache);
 		pud_populate(NULL, pud, pmd);
-		pmd += pmd_index(addr);
 		get_page(virt_to_page(pud));
-	} else
-		pmd = pmd_offset(pud, addr);
+	}
+
+	pmd = pmd_offset(pud, addr);
 
 	/* Create 2nd stage page table mapping - Level 2 */
 	if (pmd_none(*pmd)) {
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pte = mmu_memory_cache_alloc(cache);
-		clean_pte_table(pte);
+		kvm_clean_pte(pte);
 		pmd_populate_kernel(NULL, pmd, pte);
-		pte += pte_index(addr);
 		get_page(virt_to_page(pmd));
-	} else
-		pte = pte_offset_kernel(pmd, addr);
+	}
+
+	pte = pte_offset_kernel(pmd, addr);
 
 	if (iomap && pte_present(*pte))
 		return -EFAULT;
...
@@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	old_pte = *pte;
 	kvm_set_pte(pte, *new_pte);
 	if (pte_present(old_pte))
-		kvm_tlb_flush_vmid(kvm);
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	else
 		get_page(virt_to_page(pte));
...
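kvm_tlb_flush_vmid_ipa() pairs with the __kvm_tlb_flush_vmid_ipa() HYP entry point declared in kvm_asm.h earlier in this merge, so only the stage-2 entry for the touched IPA is invalidated instead of the whole VMID. A plausible host-side wrapper, sketched under the assumption that it simply forwards to HYP mode via kvm_call_hyp():

	/* Sketch: invalidate the stage-2 TLB entry for one IPA in this VM's VMID. */
	static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
	{
		if (kvm)	/* HYP code needs the kvm pointer to find the VMID */
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
	}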
@@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		kvm_set_s2pte_writable(&pte);
 
 		ret = mmu_topup_memory_cache(&cache, 2, 2);
 		if (ret)
...
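kvm_set_s2pte_writable() replaces the open-coded OR-in of L_PTE_S2_RDWR (compare the user_mem_abort() hunk further down), so the stage-2 attribute encoding can differ between ports. Assuming the ARMv7 version keeps the old behaviour, it is roughly:

	/* Sketch: mark a stage-2 PTE read/write using the ARMv7 LPAE attribute. */
	static inline void kvm_set_s2pte_writable(pte_t *pte)
	{
		pte_val(*pte) |= L_PTE_S2_RDWR;
	}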
@@ -492,29 +504,6 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	return ret;
 }
 
-static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
-{
-	/*
-	 * If we are going to insert an instruction page and the icache is
-	 * either VIPT or PIPT, there is a potential problem where the host
-	 * (or another VM) may have used the same page as this guest, and we
-	 * read incorrect data from the icache. If we're using a PIPT cache,
-	 * we can invalidate just that page, but if we are using a VIPT cache
-	 * we need to invalidate the entire icache - damn shame - as written
-	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
-	 *
-	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
-	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
-	 */
-	if (icache_is_pipt()) {
-		unsigned long hva = gfn_to_hva(kvm, gfn);
-		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
-	} else if (!icache_is_vivt_asid_tagged()) {
-		/* any kind of VIPT cache */
-		__flush_icache_all();
-	}
-}
-
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  gfn_t gfn, struct kvm_memory_slot *memslot,
 			  unsigned long fault_status)
...
@@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long mmu_seq;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 
-	write_fault = kvm_is_write_fault(vcpu->arch.hsr);
+	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
 	if (fault_status == FSC_PERM && !write_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
 		return -EFAULT;
...
@@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	if (writable) {
-		pte_val(new_pte) |= L_PTE_S2_RDWR;
+		kvm_set_s2pte_writable(&new_pte);
 		kvm_set_pfn_dirty(pfn);
 	}
 	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
...
@@ -585,7 +574,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
  */
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	unsigned long hsr_ec;
 	unsigned long fault_status;
 	phys_addr_t fault_ipa;
 	struct kvm_memory_slot *memslot;
...
@@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	gfn_t gfn;
 	int ret, idx;
 
-	hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
-	is_iabt = (hsr_ec == HSR_EC_IABT);
-	fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
+	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 
-	trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
-			      vcpu->arch.hxfar, fault_ipa);
+	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
-	fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
+	fault_status = kvm_vcpu_trap_get_fault(vcpu);
 	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
-		kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n",
-			hsr_ec, fault_status);
+		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
+			kvm_vcpu_trap_get_class(vcpu), fault_status);
 		return -EFAULT;
 	}
...
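The direct vcpu->arch.hsr/hxfar/hpfar reads are replaced by accessors from asm/kvm_emulate.h (touched earlier in this merge). Their exact form lives in that header; a hedged sketch, assuming the fault registers are grouped in one per-vcpu structure, would look like:

	/* Sketches; the authoritative accessors live in asm/kvm_emulate.h. */
	static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.fault.hsr;		/* assumed field layout */
	}

	static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.fault.hxfar;		/* assumed field layout */
	}

	static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
	{
		/* Same computation the removed open-coded line performed. */
		return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
	}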
@@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
 		if (is_iabt) {
 			/* Prefetch Abort on I/O address */
-			kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 			ret = 1;
 			goto out_unlock;
 		}
...
@@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			goto out_unlock;
 		}
 
-		/* Adjust page offset */
-		fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
+		/*
+		 * The IPA is reported as [MAX:12], so we need to
+		 * complement it with the bottom 12 bits from the
+		 * faulting VA. This is always 12 bits, irrespective
+		 * of the page size.
+		 */
+		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
 		ret = io_mem_abort(vcpu, run, fault_ipa);
 		goto out_unlock;
 	}
...
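A quick worked example of the new comment: with an HPFAR-derived IPA page base of 0x8f614000 and a faulting VA (HxFAR) of 0xc0a31234, the MMIO address handed to io_mem_abort() is 0x8f614234; the low 12 bits always come from the VA, whatever the page size.

	/* Worked example with illustrative values: */
	phys_addr_t ipa_page = 0x8f614000;	/* from HPFAR, bits [MAX:12] */
	unsigned long hxfar  = 0xc0a31234;	/* faulting virtual address  */
	phys_addr_t mmio_ipa = ipa_page | (hxfar & ((1 << 12) - 1));	/* 0x8f614234 */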
@@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-	kvm_tlb_flush_vmid(kvm);
+	kvm_tlb_flush_vmid_ipa(kvm, gpa);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
...
@@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void)
 		pmd = pmd_offset(pud, addr);
 
 		pud_clear(pud);
-		clean_pmd_entry(pmd);
+		kvm_clean_pmd_entry(pmd);
 		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
 	} while (pgd++, addr = next, addr < end);
 }
arch/arm/kvm/vgic.c
View file @ 2dfee7b2
...
@@ -1484,7 +1484,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
 	if (addr & ~KVM_PHYS_MASK)
 		return -E2BIG;
-	if (addr & ~PAGE_MASK)
+	if (addr & (SZ_4K - 1))
 		return -EINVAL;
 
 	mutex_lock(&kvm->lock);
...