openeuler / Kernel — Commit 6f2f10ca

Authored June 15, 2017 by Marc Zyngier

    Merge branch 'kvmarm-master/master' into HEAD

Parents: ebb127f2, 33b5c388

Showing 22 changed files with 280 additions and 104 deletions (+280 / -104)
arch/arm/include/asm/kvm_coproc.h      +2   -1
arch/arm/kvm/coproc.c                  +74  -32
arch/arm/kvm/handle_exit.c             +2   -2
arch/arm/kvm/hyp/Makefile              +2   -0
arch/arm/kvm/hyp/switch.c              +3   -1
arch/arm/kvm/init.S                    +2   -3
arch/arm/kvm/trace.h                   +4   -4
arch/arm64/include/asm/sysreg.h        +4   -0
arch/arm64/kvm/hyp-init.S              +7   -4
arch/arm64/kvm/hyp/Makefile            +2   -0
arch/arm64/kvm/vgic-sys-reg-v3.c       +5   -5
include/kvm/arm_vgic.h                 +4   -1
include/linux/irqchip/arm-gic-v3.h     +4   -0
include/linux/irqchip/arm-gic.h        +25  -3
virt/kvm/arm/hyp/vgic-v3-sr.c          +9   -9
virt/kvm/arm/mmu.c                     +24  -12
virt/kvm/arm/vgic/vgic-init.c          +4   -1
virt/kvm/arm/vgic/vgic-mmio-v2.c       +14  -2
virt/kvm/arm/vgic/vgic-mmio-v3.c       +9   -3
virt/kvm/arm/vgic/vgic-v2.c            +32  -3
virt/kvm/arm/vgic/vgic-v3.c            +40  -14
virt/kvm/arm/vgic/vgic.h               +8   -4
arch/arm/include/asm/kvm_coproc.h

@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
arch/arm/kvm/coproc.c

@@ -32,6 +32,7 @@
 #include <asm/vfp.h>
 #include "../vfp/vfpinstr.h"
 
+#define CREATE_TRACE_POINTS
 #include "trace.h"
 #include "coproc.h"

@@ -111,12 +112,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*

@@ -284,7 +279,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
  * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
  * all PM registers, which doesn't crash the guest kernel at least.
  */
-static bool pm_fake(struct kvm_vcpu *vcpu,
-		    const struct coproc_params *p,
-		    const struct coproc_reg *r)
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
+			const struct coproc_params *p,
+			const struct coproc_reg *r)
 {

@@ -294,19 +289,19 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
 	return read_zero(vcpu, p);
 }
 
-#define access_pmcr pm_fake
-#define access_pmcntenset pm_fake
-#define access_pmcntenclr pm_fake
-#define access_pmovsr pm_fake
-#define access_pmselr pm_fake
-#define access_pmceid0 pm_fake
-#define access_pmceid1 pm_fake
-#define access_pmccntr pm_fake
-#define access_pmxevtyper pm_fake
-#define access_pmxevcntr pm_fake
-#define access_pmuserenr pm_fake
-#define access_pmintenset pm_fake
-#define access_pmintenclr pm_fake
+#define access_pmcr trap_raz_wi
+#define access_pmcntenset trap_raz_wi
+#define access_pmcntenclr trap_raz_wi
+#define access_pmovsr trap_raz_wi
+#define access_pmselr trap_raz_wi
+#define access_pmceid0 trap_raz_wi
+#define access_pmceid1 trap_raz_wi
+#define access_pmccntr trap_raz_wi
+#define access_pmxevtyper trap_raz_wi
+#define access_pmxevcntr trap_raz_wi
+#define access_pmuserenr trap_raz_wi
+#define access_pmintenset trap_raz_wi
+#define access_pmintenclr trap_raz_wi
 
 /* Architected CP15 registers.
  * CRn denotes the primary register number, but is copied to the CRm in the

@@ -532,12 +527,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;

@@ -551,9 +541,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRm = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	trap_raz_wi(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 			      const struct coproc_reg *table, size_t num)
 {

@@ -564,12 +583,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 		table[i].reset(vcpu, &table[i]);
 }
 
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;

@@ -583,9 +597,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
+	return emulate_cp15(vcpu, &params);
+}
+
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	trap_raz_wi(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 /******************************************************************************
  * Userspace API
  *****************************************************************************/
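The net effect of the coproc.c changes is that HSR decoding for mrc/mcr and mrrc/mcrr traps moves into decode_32bit_hsr()/decode_64bit_hsr(), so the new CP14 handlers share it with the CP15 ones and simply feed the decoded params to trap_raz_wi(). A minimal user-space sketch of the 32-bit decode, using the two field positions visible in the hunks above; the Rt and direction positions follow the ARMv7 HSR ISS layout and are assumptions here, not shown in the diff:

#include <stdint.h>
#include <stdio.h>

/* Subset of struct coproc_params; names mirror the kernel's, layout assumed. */
struct params_sketch {
        uint32_t Op2;
        uint32_t Rt1;
        int is_write;
};

static struct params_sketch decode_32bit_hsr_sketch(uint32_t hsr)
{
        struct params_sketch p;

        p.Op2      = (hsr >> 17) & 0x7; /* same extraction as params.Op2 above */
        p.Rt1      = (hsr >> 5)  & 0xf; /* assumed: Rt field of the HSR ISS */
        p.is_write = !(hsr & 1);        /* assumed: bit 0 is the direction bit */
        return p;
}

int main(void)
{
        struct params_sketch p = decode_32bit_hsr_sketch(0x000e0021);

        printf("Op2=%u Rt1=%u is_write=%d\n", p.Op2, p.Rt1, p.is_write);
        return 0;
}

Factoring the decode out this way is what lets kvm_handle_cp14_32()/kvm_handle_cp14_64() stay four lines long: decode, RAZ/WI, skip the trapped instruction, done.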
arch/arm/kvm/handle_exit.c

@@ -96,9 +96,9 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_32,
 	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_64,
 	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
 	[HSR_EC_HVC]		= handle_hvc,
arch/arm/kvm/hyp/Makefile

@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
arch/arm/kvm/hyp/switch.c

@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
 	val = read_sysreg(HDCR);
-	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+	val |= HDCR_TPM | HDCR_TPMCR;			/* trap performance monitors */
+	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA;	/* trap debug regs */
+	write_sysreg(val, HDCR);
 }
 
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
arch/arm/kvm/init.S

@@ -104,7 +104,6 @@ __do_hyp_init:
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK

@@ -112,8 +111,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
- ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
- THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ ARM(	ldr	r2, =(HSCTLR_M)					)
+ THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
arch/arm/kvm/trace.h

-#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_H
+#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM_KVM_H
 
 #include <linux/tracepoint.h>

@@ -74,10 +74,10 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
-#endif /* _TRACE_KVM_H */
+#endif /* _TRACE_ARM_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/arm/kvm
+#define TRACE_INCLUDE_PATH .
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
arch/arm64/include/asm/sysreg.h

@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
arch/arm64/kvm/hyp-init.S

@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
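Taken together with the SCTLR_EL2_RES1 addition in sysreg.h above, the new sequence computes the whole SCTLR_EL2 value at build time instead of read-modify-writing the live register. A host-side sketch of that computation; the EE/I/SA/C bit positions are taken from the ARMv8 SCTLR layout and are assumptions here (they are not part of the shown hunks):

#include <stdint.h>
#include <stdio.h>

#define SCTLR_ELx_EE    (1UL << 25)   /* assumption: EE bit position */
#define SCTLR_ELx_I     (1UL << 12)   /* assumption */
#define SCTLR_ELx_SA    (1UL << 3)    /* assumption */
#define SCTLR_ELx_C     (1UL << 2)    /* assumption */
#define SCTLR_ELx_A     (1UL << 1)    /* from the hunk above */
#define SCTLR_ELx_M     1UL           /* from the hunk above */

#define SCTLR_EL2_RES1  ((1UL << 4)  | (1UL << 5)  | (1UL << 11) | (1UL << 16) | \
                         (1UL << 16) | (1UL << 18) | (1UL << 22) | (1UL << 23) | \
                         (1UL << 28) | (1UL << 29))

#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
                         SCTLR_ELx_SA | SCTLR_ELx_I)

int main(void)
{
        /* What "ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))" loads: */
        uint64_t x4 = SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A);

        printf("sctlr_el2 (LE) = 0x%llx\n", (unsigned long long)x4);
        /* On a big-endian kernel, CPU_BE() additionally ORs in the EE bit: */
        printf("sctlr_el2 (BE) = 0x%llx\n", (unsigned long long)(x4 | SCTLR_ELx_EE));
        return 0;
}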
arch/arm64/kvm/hyp/Makefile

@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
arch/arm64/kvm/vgic-sys-reg-v3.c

@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;

@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
 		p->regval = val;
 	}

@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		p->regval = 0;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				    ICC_BPR1_EL1_SHIFT;
include/kvm/arm_vgic.h

@@ -202,7 +202,10 @@ struct vgic_dist {
 		/* either a GICv2 CPU interface */
 		gpa_t		vgic_cpu_base;
 		/* or a number of GICv3 redistributor regions */
-		gpa_t		vgic_redist_base;
+		struct {
+			gpa_t	vgic_redist_base;
+			gpa_t	vgic_redist_free_offset;
+		};
 	};
 
 	/* distributor enabled */
include/linux/irqchip/arm-gic-v3.h

@@ -417,6 +417,10 @@
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
 
+#define ICH_VMCR_ACK_CTL_SHIFT		2
+#define ICH_VMCR_ACK_CTL_MASK		(1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT		3
+#define ICH_VMCR_FIQ_EN_MASK		(1 << ICH_VMCR_FIQ_EN_SHIFT)
 #define ICH_VMCR_CBPR_SHIFT		4
 #define ICH_VMCR_CBPR_MASK		(1 << ICH_VMCR_CBPR_SHIFT)
 #define ICH_VMCR_EOIM_SHIFT		9
include/linux/irqchip/arm-gic.h

@@ -25,7 +25,18 @@
 #define GICC_ENABLE			0x1
 #define GICC_INT_PRI_THRESHOLD		0xf0
 
-#define GIC_CPU_CTRL_EOImodeNS		(1 << 9)
+#define GIC_CPU_CTRL_EnableGrp0_SHIFT	0
+#define GIC_CPU_CTRL_EnableGrp0		(1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
+#define GIC_CPU_CTRL_EnableGrp1_SHIFT	1
+#define GIC_CPU_CTRL_EnableGrp1		(1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
+#define GIC_CPU_CTRL_AckCtl_SHIFT	2
+#define GIC_CPU_CTRL_AckCtl		(1 << GIC_CPU_CTRL_AckCtl_SHIFT)
+#define GIC_CPU_CTRL_FIQEn_SHIFT	3
+#define GIC_CPU_CTRL_FIQEn		(1 << GIC_CPU_CTRL_FIQEn_SHIFT)
+#define GIC_CPU_CTRL_CBPR_SHIFT		4
+#define GIC_CPU_CTRL_CBPR		(1 << GIC_CPU_CTRL_CBPR_SHIFT)
+#define GIC_CPU_CTRL_EOImodeNS_SHIFT	9
+#define GIC_CPU_CTRL_EOImodeNS		(1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
 
 #define GICC_IAR_INT_ID_MASK		0x3ff
 #define GICC_INT_SPURIOUS		1023

@@ -84,8 +95,19 @@
 #define GICH_LR_EOI			(1 << 19)
 #define GICH_LR_HW			(1 << 31)
 
-#define GICH_VMCR_CTRL_SHIFT		0
-#define GICH_VMCR_CTRL_MASK		(0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_ENABLE_GRP0_SHIFT	0
+#define GICH_VMCR_ENABLE_GRP0_MASK	(1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
+#define GICH_VMCR_ENABLE_GRP1_SHIFT	1
+#define GICH_VMCR_ENABLE_GRP1_MASK	(1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
+#define GICH_VMCR_ACK_CTL_SHIFT		2
+#define GICH_VMCR_ACK_CTL_MASK		(1 << GICH_VMCR_ACK_CTL_SHIFT)
+#define GICH_VMCR_FIQ_EN_SHIFT		3
+#define GICH_VMCR_FIQ_EN_MASK		(1 << GICH_VMCR_FIQ_EN_SHIFT)
+#define GICH_VMCR_CBPR_SHIFT		4
+#define GICH_VMCR_CBPR_MASK		(1 << GICH_VMCR_CBPR_SHIFT)
+#define GICH_VMCR_EOI_MODE_SHIFT	9
+#define GICH_VMCR_EOI_MODE_MASK		(1 << GICH_VMCR_EOI_MODE_SHIFT)
+
 #define GICH_VMCR_PRIMASK_SHIFT		27
 #define GICH_VMCR_PRIMASK_MASK		(0x1f << GICH_VMCR_PRIMASK_SHIFT)
 #define GICH_VMCR_BINPOINT_SHIFT	21
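The old composite GICH_VMCR_CTRL_MASK (0x21f at shift 0) covered exactly the six fields now split out individually: bits 0-4 plus bit 9. A quick standalone check of that equivalence, using only the constants from the hunk above:

#include <stdio.h>

#define GICH_VMCR_CTRL_MASK        (0x21f << 0)          /* old composite mask */
#define GICH_VMCR_ENABLE_GRP0_MASK (1 << 0)
#define GICH_VMCR_ENABLE_GRP1_MASK (1 << 1)
#define GICH_VMCR_ACK_CTL_MASK     (1 << 2)
#define GICH_VMCR_FIQ_EN_MASK      (1 << 3)
#define GICH_VMCR_CBPR_MASK        (1 << 4)
#define GICH_VMCR_EOI_MODE_MASK    (1 << 9)

int main(void)
{
        unsigned int split = GICH_VMCR_ENABLE_GRP0_MASK | GICH_VMCR_ENABLE_GRP1_MASK |
                             GICH_VMCR_ACK_CTL_MASK | GICH_VMCR_FIQ_EN_MASK |
                             GICH_VMCR_CBPR_MASK | GICH_VMCR_EOI_MODE_MASK;

        /* 0x21f = 0b10_0001_1111: the six per-field masks OR together to it */
        printf("old=0x%x split=0x%x match=%d\n",
               GICH_VMCR_CTRL_MASK, split, split == GICH_VMCR_CTRL_MASK);
        return 0;
}

Splitting the mask is what allows the vgic code below to move each logical field between struct vgic_vmcr and the hardware register individually instead of treating the low bits as one opaque "ctlr" value.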
virt/kvm/arm/hyp/vgic-v3-sr.c

@@ -22,7 +22,7 @@
 #include <asm/kvm_hyp.h>
 
 #define vtr_to_max_lr_idx(v)		((v) & 0xf)
-#define vtr_to_nr_pri_bits(v)		(((u32)(v) >> 29) + 1)
+#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
 
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {

@@ -135,13 +135,13 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	if (used_lrs) {
 		int i;
-		u32 nr_pri_bits;
+		u32 nr_pre_bits;
 
 		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
 
 		write_gicreg(0, ICH_HCR_EL2);
 		val = read_gicreg(ICH_VTR_EL2);
-		nr_pri_bits = vtr_to_nr_pri_bits(val);
+		nr_pre_bits = vtr_to_nr_pre_bits(val);
 
 		for (i = 0; i < used_lrs; i++) {
 			if (cpu_if->vgic_elrsr & (1 << i))

@@ -152,7 +152,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 				__gic_v3_set_lr(0, i);
 		}
 
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
 			cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);

@@ -162,7 +162,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 			cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
 		}
 
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
 			cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);

@@ -198,7 +198,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	u64 val;
-	u32 nr_pri_bits;
+	u32 nr_pre_bits;
 	int i;
 
 	/*

@@ -217,12 +217,12 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	}
 
 	val = read_gicreg(ICH_VTR_EL2);
-	nr_pri_bits = vtr_to_nr_pri_bits(val);
+	nr_pre_bits = vtr_to_nr_pre_bits(val);
 
 	if (used_lrs) {
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
 			write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);

@@ -232,7 +232,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 			write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
 		}
 
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
 			write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
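The rename is a bug fix, not cosmetics: ICH_VTR_EL2 reports PRIbits in [31:29] but PREbits in [28:26], and the number of active-priority registers (AP0Rn/AP1Rn) to save and restore depends on the preemption bits. A sketch of the two extractions side by side, using the field positions from the macros above:

#include <stdint.h>
#include <stdio.h>

#define vtr_to_nr_pri_bits(v) (((uint32_t)(v) >> 29) + 1)          /* old: wrong field */
#define vtr_to_nr_pre_bits(v) ((((uint32_t)(v) >> 26) & 7) + 1)    /* new: PREbits */

int main(void)
{
        /* hypothetical VTR: PRIbits field = 6 (7 bits), PREbits field = 4 (5 bits) */
        uint32_t vtr = (6u << 29) | (4u << 26);

        printf("pri_bits=%u pre_bits=%u\n",
               vtr_to_nr_pri_bits(vtr), vtr_to_nr_pre_bits(vtr));
        return 0;
}

On hardware where the two fields disagree, the old macro could make the switch statements save or restore the wrong number of AP0R/AP1R registers.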
virt/kvm/arm/mmu.c

@@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
+		/*
+		 * Make sure the page table is still active, as another thread
+		 * could have possibly freed the page table, while we released
+		 * the lock.
+		 */
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);

@@ -829,22 +836,22 @@ void stage2_unmap_vm(struct kvm *kvm)
  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
  * underlying level-2 and level-3 tables before freeing the actual level-1 table
  * and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
-	if (kvm->arch.pgd == NULL)
-		return;
+	void *pgd = NULL;
 
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	if (kvm->arch.pgd) {
+		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+		pgd = READ_ONCE(kvm->arch.pgd);
+		kvm->arch.pgd = NULL;
+	}
 	spin_unlock(&kvm->mmu_lock);
 
 	/* Free the HW pgd, one page at a time */
-	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
-	kvm->arch.pgd = NULL;
+	if (pgd)
+		free_pages_exact(pgd, S2_PGD_SIZE);
 }
 
 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,

@@ -872,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;

@@ -1170,11 +1180,13 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 		 * large. Otherwise, we may see kernel panics with
 		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
 		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
-		 * will also starve other vCPUs.
+		 * will also starve other vCPUs. We have to also make sure
+		 * that the page tables are not freed while we released
+		 * the lock.
 		 */
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-			cond_resched_lock(&kvm->mmu_lock);
+		cond_resched_lock(&kvm->mmu_lock);
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
 		next = stage2_pgd_addr_end(addr, end);
 		if (stage2_pgd_present(*pgd))
 			stage2_wp_puds(pgd, addr, next);
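All of the mmu.c hunks implement one idea: any table walker that may drop mmu_lock must re-check kvm->arch.pgd before touching the tables again, and the PGD itself is detached under the lock and freed exactly once outside it. A user-space analogue of the pattern, with a pthread mutex and an atomic load standing in for mmu_lock and READ_ONCE(); all names here are hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pgd;                       /* stands in for kvm->arch.pgd */

static void walk_range(void)
{
        pthread_mutex_lock(&mmu_lock);
        for (int i = 0; i < 4; i++) {
                /* Re-check at any point where the lock may have been dropped
                 * (cond_resched_lock() in the kernel code above): */
                if (!__atomic_load_n(&pgd, __ATOMIC_RELAXED)) /* READ_ONCE analogue */
                        break;
                printf("walk chunk %d\n", i);
        }
        pthread_mutex_unlock(&mmu_lock);
}

static void free_stage2_pgd(void)
{
        void *p = NULL;

        pthread_mutex_lock(&mmu_lock);
        if (pgd) {                      /* unmap, then detach, under the lock */
                p = pgd;
                pgd = NULL;
        }
        pthread_mutex_unlock(&mmu_lock);
        free(p);                        /* freed outside the lock, at most once */
}

int main(void)
{
        pgd = malloc(64);
        walk_range();
        free_stage2_pgd();
        walk_range();                   /* now bails out immediately */
        return 0;
}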
virt/kvm/arm/vgic/vgic-init.c

@@ -242,8 +242,11 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * If we are creating a VCPU with a GICv3 we must also register the
 	 * KVM io device for the redistributor that belongs to this VCPU.
 	 */
-	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+		mutex_lock(&vcpu->kvm->lock);
 		ret = vgic_register_redist_iodev(vcpu);
+		mutex_unlock(&vcpu->kvm->lock);
+	}
 	return ret;
 }
virt/kvm/arm/vgic/vgic-mmio-v2.c

@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		val = vmcr.ctlr;
+		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
+		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
+		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
+		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
+		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
+		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*

@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		vmcr.ctlr = val;
+		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
+		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
+		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
+		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
+		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
+		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
virt/kvm/arm/vgic/vgic-mmio-v3.c

@@ -590,7 +590,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	if (!vgic_v3_check_base(kvm))
 		return -EINVAL;
 
-	rd_base = vgic->vgic_redist_base + kvm_vcpu_get_idx(vcpu) * SZ_64K * 2;
+	rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset;
 	sgi_base = rd_base + SZ_64K;
 
 	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);

@@ -618,11 +618,15 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	mutex_lock(&kvm->slots_lock);
 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
 				      SZ_64K, &sgi_dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret)
+	if (ret) {
 		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
 					  &rd_dev->dev);
+		goto out;
+	}
 
+	vgic->vgic_redist_free_offset += 2 * SZ_64K;
+out:
+	mutex_unlock(&kvm->slots_lock);
 	return ret;
 }

@@ -648,10 +652,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 	if (ret) {
 		/* The current c failed, so we start with the previous one. */
+		mutex_lock(&kvm->slots_lock);
 		for (c--; c >= 0; c--) {
 			vcpu = kvm_get_vcpu(kvm, c);
 			vgic_unregister_redist_iodev(vcpu);
 		}
+		mutex_unlock(&kvm->slots_lock);
 	}
 
 	return ret;
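With vgic_redist_free_offset, each successfully registered redistributor claims the next 128K (one 64K RD frame plus one 64K SGI frame) after the previous one, rather than deriving its slot from the vcpu index; since the offset only advances on success, a failed registration leaves the slot free for a retry. Illustration of the allocation (the base GPA is hypothetical):

#include <stdio.h>

#define SZ_64K 0x10000UL

int main(void)
{
        unsigned long redist_base = 0x08080000UL;   /* hypothetical region GPA */
        unsigned long free_offset = 0;              /* vgic_redist_free_offset */

        for (int c = 0; c < 3; c++) {
                unsigned long rd_base  = redist_base + free_offset;
                unsigned long sgi_base = rd_base + SZ_64K;

                printf("vcpu%d: rd=0x%lx sgi=0x%lx\n", c, rd_base, sgi_base);
                free_offset += 2 * SZ_64K;          /* advanced only on success */
        }
        return 0;
}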
virt/kvm/arm/vgic/vgic-v2.c

@@ -149,6 +149,13 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	if (irq->hw) {
 		val |= GICH_LR_HW;
 		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
+		/*
+		 * Never set pending+active on a HW interrupt, as the
+		 * pending state is kept at the physical distributor
+		 * level.
+		 */
+		if (irq->active && irq_is_pending(irq))
+			val &= ~GICH_LR_PENDING_BIT;
 	} else {
 		if (irq->config == VGIC_CONFIG_LEVEL)
 			val |= GICH_LR_EOI;

@@ -170,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	u32 vmcr;
 
-	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
+		GICH_VMCR_ENABLE_GRP0_MASK;
+	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
+		GICH_VMCR_ENABLE_GRP1_MASK;
+	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
+		GICH_VMCR_ACK_CTL_MASK;
+	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
+		GICH_VMCR_FIQ_EN_MASK;
+	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
+		GICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
+		GICH_VMCR_EOI_MODE_MASK;
 	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
 		GICH_VMCR_ALIAS_BINPOINT_MASK;
 	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &

@@ -188,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 	vmcr = cpu_if->vgic_vmcr;
 
-	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
-			GICH_VMCR_CTRL_SHIFT;
+	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+			GICH_VMCR_ENABLE_GRP0_SHIFT;
+	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
+			GICH_VMCR_ENABLE_GRP1_SHIFT;
+	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
+			GICH_VMCR_ACK_CTL_SHIFT;
+	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
+			GICH_VMCR_FIQ_EN_SHIFT;
+	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
+			GICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
+			GICH_VMCR_EOI_MODE_SHIFT;
+
 	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
 			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
 	vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
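vgic_v2_set_vmcr()/vgic_v2_get_vmcr() now move each logical field between struct vgic_vmcr and the packed GICH_VMCR word individually. A reduced standalone version covering just the six CTRL-derived fields, showing the pack/unpack round-trip the pair implements:

#include <stdint.h>
#include <stdio.h>

#define GICH_VMCR_ENABLE_GRP0_SHIFT 0
#define GICH_VMCR_ENABLE_GRP0_MASK  (1u << GICH_VMCR_ENABLE_GRP0_SHIFT)
#define GICH_VMCR_ENABLE_GRP1_SHIFT 1
#define GICH_VMCR_ENABLE_GRP1_MASK  (1u << GICH_VMCR_ENABLE_GRP1_SHIFT)
#define GICH_VMCR_ACK_CTL_SHIFT     2
#define GICH_VMCR_ACK_CTL_MASK      (1u << GICH_VMCR_ACK_CTL_SHIFT)
#define GICH_VMCR_FIQ_EN_SHIFT      3
#define GICH_VMCR_FIQ_EN_MASK       (1u << GICH_VMCR_FIQ_EN_SHIFT)
#define GICH_VMCR_CBPR_SHIFT        4
#define GICH_VMCR_CBPR_MASK         (1u << GICH_VMCR_CBPR_SHIFT)
#define GICH_VMCR_EOI_MODE_SHIFT    9
#define GICH_VMCR_EOI_MODE_MASK     (1u << GICH_VMCR_EOI_MODE_SHIFT)

/* Reduced stand-in for struct vgic_vmcr (layout assumed). */
struct vmcr_fields { uint32_t grpen0, grpen1, ackctl, fiqen, cbpr, eoim; };

static uint32_t pack(const struct vmcr_fields *f)   /* like vgic_v2_set_vmcr() */
{
        uint32_t v;

        v  = (f->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) & GICH_VMCR_ENABLE_GRP0_MASK;
        v |= (f->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) & GICH_VMCR_ENABLE_GRP1_MASK;
        v |= (f->ackctl << GICH_VMCR_ACK_CTL_SHIFT)     & GICH_VMCR_ACK_CTL_MASK;
        v |= (f->fiqen  << GICH_VMCR_FIQ_EN_SHIFT)      & GICH_VMCR_FIQ_EN_MASK;
        v |= (f->cbpr   << GICH_VMCR_CBPR_SHIFT)        & GICH_VMCR_CBPR_MASK;
        v |= (f->eoim   << GICH_VMCR_EOI_MODE_SHIFT)    & GICH_VMCR_EOI_MODE_MASK;
        return v;
}

static struct vmcr_fields unpack(uint32_t v)        /* like vgic_v2_get_vmcr() */
{
        struct vmcr_fields f = {
                .grpen0 = (v & GICH_VMCR_ENABLE_GRP0_MASK) >> GICH_VMCR_ENABLE_GRP0_SHIFT,
                .grpen1 = (v & GICH_VMCR_ENABLE_GRP1_MASK) >> GICH_VMCR_ENABLE_GRP1_SHIFT,
                .ackctl = (v & GICH_VMCR_ACK_CTL_MASK)     >> GICH_VMCR_ACK_CTL_SHIFT,
                .fiqen  = (v & GICH_VMCR_FIQ_EN_MASK)      >> GICH_VMCR_FIQ_EN_SHIFT,
                .cbpr   = (v & GICH_VMCR_CBPR_MASK)        >> GICH_VMCR_CBPR_SHIFT,
                .eoim   = (v & GICH_VMCR_EOI_MODE_MASK)    >> GICH_VMCR_EOI_MODE_SHIFT,
        };
        return f;
}

int main(void)
{
        struct vmcr_fields f = { .grpen0 = 1, .fiqen = 1, .eoim = 1 };
        struct vmcr_fields g = unpack(pack(&f));

        printf("round-trip ok: %d\n", g.grpen0 == 1 && g.fiqen == 1 && g.eoim == 1);
        return 0;
}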
virt/kvm/arm/vgic/vgic-v3.c

@@ -127,6 +127,13 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	if (irq->hw) {
 		val |= ICH_LR_HW;
 		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
+		/*
+		 * Never set pending+active on a HW interrupt, as the
+		 * pending state is kept at the physical distributor
+		 * level.
+		 */
+		if (irq->active && irq_is_pending(irq))
+			val &= ~ICH_LR_PENDING_BIT;
 	} else {
 		if (irq->config == VGIC_CONFIG_LEVEL)
 			val |= ICH_LR_EOI;

@@ -152,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
-		ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-	vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+			ICH_VMCR_ACK_CTL_MASK;
+		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+			ICH_VMCR_FIQ_EN_MASK;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcr = ICH_VMCR_FIQ_EN_MASK;
+	}
+
+	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
 	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
 	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
 	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;

@@ -173,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
-			ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-	vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+			ICH_VMCR_ACK_CTL_SHIFT;
+		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+			ICH_VMCR_FIQ_EN_SHIFT;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcrp->fiqen = 1;
+		vmcrp->ackctl = 0;
+	}
+
+	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
 	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
 	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
 	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
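The GICv3 version adds the twist the in-diff comments spell out: when the guest sees a GICv2 (KVM_DEV_TYPE_ARM_VGIC_V2), ackctl and fiqen are real guest-controlled bits, whereas for a native GICv3 guest with SRE=1 the VFIQEn bit is RES1 and VAckCtl is RES0. A compact sketch of just that branch; the enum stands in for the KVM_DEV_TYPE_* constants:

#include <stdint.h>
#include <stdio.h>

#define ICH_VMCR_ACK_CTL_SHIFT 2
#define ICH_VMCR_ACK_CTL_MASK  (1u << ICH_VMCR_ACK_CTL_SHIFT)
#define ICH_VMCR_FIQ_EN_SHIFT  3
#define ICH_VMCR_FIQ_EN_MASK   (1u << ICH_VMCR_FIQ_EN_SHIFT)

enum vgic_model { VGIC_V2_GUEST, VGIC_V3_GUEST }; /* stand-in for KVM_DEV_TYPE_* */

static uint32_t pack_ackctl_fiqen(enum vgic_model model,
                                  uint32_t ackctl, uint32_t fiqen)
{
        uint32_t vmcr;

        if (model == VGIC_V2_GUEST) {
                vmcr  = (ackctl << ICH_VMCR_ACK_CTL_SHIFT) & ICH_VMCR_ACK_CTL_MASK;
                vmcr |= (fiqen << ICH_VMCR_FIQ_EN_SHIFT) & ICH_VMCR_FIQ_EN_MASK;
        } else {
                /* native GICv3 guest: VFIQEn is RES1, VAckCtl is RES0 */
                vmcr = ICH_VMCR_FIQ_EN_MASK;
        }
        return vmcr;
}

int main(void)
{
        printf("v2 guest: 0x%x\n", pack_ackctl_fiqen(VGIC_V2_GUEST, 1, 0));
        printf("v3 guest: 0x%x\n", pack_ackctl_fiqen(VGIC_V3_GUEST, 1, 0));
        return 0;
}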
virt/kvm/arm/vgic/vgic.h

@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
  * registers regardless of the hardware backed GIC used.
  */
 struct vgic_vmcr {
-	u32	ctlr;
+	u32	grpen0;
+	u32	grpen1;
+
+	u32	ackctl;
+	u32	fiqen;
+	u32	cbpr;
+	u32	eoim;
+
 	u32	abpr;
 	u32	bpr;
 	u32	pmr;  /* Priority mask field in the GICC_PMR and
 		       * ICC_PMR_EL1 priority field format */
-	/* Below member variable are valid only for GICv3 */
-	u32	grpen0;
-	u32	grpen1;
 };
 
 struct vgic_reg_attr {