openeuler / Kernel · commit 5fbb0df6
Authored Mar 19, 2018 by Marc Zyngier
Merge tag 'kvm-arm-fixes-for-v4.16-2' into HEAD
Resolve conflicts with current mainline
Parents: 4b472ffd, 27e91ad1
Showing 12 changed files with 181 additions and 72 deletions (+181 -72)
arch/arm/kvm/hyp/Makefile            +5  -0
arch/arm/kvm/hyp/banked-sr.c         +4  -0
include/kvm/arm_vgic.h               +1  -0
include/linux/irqchip/arm-gic-v3.h   +1  -0
include/linux/irqchip/arm-gic.h      +1  -0
virt/kvm/arm/arch_timer.c            +69 -53
virt/kvm/arm/hyp/vgic-v3-sr.c        +4  -2
virt/kvm/arm/vgic/vgic-mmio.c        +3  -0
virt/kvm/arm/vgic/vgic-v2.c          +9  -2
virt/kvm/arm/vgic/vgic-v3.c          +8  -1
virt/kvm/arm/vgic/vgic.c             +73 -14
virt/kvm/arm/vgic/vgic.h             +3  -0
arch/arm/kvm/hyp/Makefile
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
 
+CFLAGS_ARMV7VE		   :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -14,7 +16,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o	   += $(CFLAGS_ARMV7VE)
+
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o		   += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
arch/arm/kvm/hyp/banked-sr.c
@@ -20,6 +20,10 @@
 
 #include <asm/kvm_hyp.h>
 
+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
 __asm__(".arch_extension	virt");
 
 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
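A note on the hunk above: the `.arch_extension virt` directive is passed straight through to the assembler, widening the set of accepted encodings without raising the compiler's -march level. A minimal standalone sketch of the same trick (illustration only, not part of this commit; the function name is made up):

/* Banked-register access (MRS to SP_usr) is a Virtualization Extensions
 * encoding, so an assembler driven with plain -march=armv7-a by a
 * pre-4.9 gcc would reject it.  The directive below widens what the
 * assembler accepts for this translation unit. */
__asm__(".arch_extension	virt");

unsigned int read_banked_sp_usr(void)
{
	unsigned int v;

	/* only assembles once the virt extension is enabled */
	asm volatile("mrs %0, SP_usr" : "=r" (v));
	return v;
}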
include/kvm/arm_vgic.h
@@ -358,6 +358,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
 
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
include/linux/irqchip/arm-gic-v3.h
@@ -503,6 +503,7 @@
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
+#define ICH_HCR_NPIE			(1 << 3)
 #define ICH_HCR_TC			(1 << 10)
 #define ICH_HCR_TALL0			(1 << 11)
 #define ICH_HCR_TALL1			(1 << 12)
include/linux/irqchip/arm-gic.h
@@ -84,6 +84,7 @@
 #define GICH_HCR_EN			(1 << 0)
 #define GICH_HCR_UIE			(1 << 1)
+#define GICH_HCR_NPIE			(1 << 3)
 
 #define GICH_LR_VIRTUALID		(0x3ff << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT	(10)
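The two NPIE defines added above are the GICv2 and GICv3 encodings of the same hypervisor control bit: when set, the GIC raises a maintenance interrupt once no pending interrupt is left in the List Registers. A small self-contained sketch of how the vgic code below drives it (constants copied from the header; the plain variable stands in for cpuif->vgic_hcr):

#include <stdint.h>
#include <stdio.h>

#define GICH_HCR_UIE	(1u << 1)	/* underflow maintenance irq */
#define GICH_HCR_NPIE	(1u << 3)	/* no-pending maintenance irq */

static uint32_t vgic_hcr;		/* stand-in for cpuif->vgic_hcr */

/* flush path: a multi-source SGI could not be fully injected, so ask
 * for a maintenance interrupt once the List Registers drain */
static void set_npie(void)
{
	vgic_hcr |= GICH_HCR_NPIE;
}

/* fold path: the requests are one-shot, clear both on the way out */
static void fold_lr_state(void)
{
	vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
}

int main(void)
{
	set_npie();
	printf("hcr = %#x\n", vgic_hcr);	/* hcr = 0x8 */
	fold_lr_state();
	printf("hcr = %#x\n", vgic_hcr);	/* hcr = 0 */
	return 0;
}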
virt/kvm/arm/arch_timer.c
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
 	.irq	= 30,
 	.level	= 1,
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
 	return timecounter->cc->read(timecounter->cc);
 }
 
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+	return static_branch_unlikely(&userspace_irqchip_in_use) &&
+		unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
 		cancel_work_sync(work);
 }
 
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-	/*
-	 * When using a userspace irqchip with the architected timers, we must
-	 * prevent continuously exiting from the guest, and therefore mask the
-	 * physical interrupt by disabling it on the host interrupt controller
-	 * when the virtual level is high, such that the guest can make
-	 * forward progress.  Once we detect the output level being
-	 * de-asserted, we unmask the interrupt again so that we exit from the
-	 * guest when the timer fires.
-	 */
-	if (vtimer->irq.level)
-		disable_percpu_irq(host_vtimer_irq);
-	else
-		enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	if (kvm_timer_should_fire(vtimer))
 		kvm_timer_update_irq(vcpu, true, vtimer);
 
-	if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-	    unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_vtimer_update_mask_user(vcpu);
+	if (userspace_irqchip(vcpu->kvm) &&
+	    !static_branch_unlikely(&has_gic_active_state))
+		disable_percpu_irq(host_vtimer_irq);
 
 	return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
 				   timer_ctx->irq.level);
 
-	if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-	    likely(irqchip_in_kernel(vcpu->kvm))) {
+	if (!userspace_irqchip(vcpu->kvm)) {
 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 					  timer_ctx->irq.irq,
 					  timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 		phys_timer_emulate(vcpu);
 }
 
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-	timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-	timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 	if (!vtimer->loaded)
 		goto out;
 
-	if (timer->enabled)
-		__timer_snapshot_state(vtimer);
+	if (timer->enabled) {
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+	}
 
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
 	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }
 
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+	int r;
+	r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+	WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	bool phys_active;
-	int ret;
-
-	phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
 
-	ret = irq_set_irqchip_state(host_vtimer_irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    phys_active);
-	WARN_ON(ret);
+	if (irqchip_in_kernel(vcpu->kvm))
+		phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+	else
+		phys_active = vtimer->irq.level;
+	set_vtimer_irq_phys_active(vcpu, phys_active);
 }
 
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-	kvm_vtimer_update_mask_user(vcpu);
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	/*
+	 * When using a userspace irqchip with the architected timers and a
+	 * host interrupt controller that doesn't support an active state, we
+	 * must still prevent continuously exiting from the guest, and
+	 * therefore mask the physical interrupt by disabling it on the host
+	 * interrupt controller when the virtual level is high, such that the
+	 * guest can make forward progress.  Once we detect the output level
+	 * being de-asserted, we unmask the interrupt again so that we exit
+	 * from the guest when the timer fires.
+	 */
+	if (vtimer->irq.level)
+		disable_percpu_irq(host_vtimer_irq);
+	else
+		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_timer_vcpu_load_user(vcpu);
+	if (static_branch_likely(&has_gic_active_state))
+		kvm_timer_vcpu_load_gic(vcpu);
 	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+		kvm_timer_vcpu_load_nogic(vcpu);
 
 	set_cntvoff(vtimer->cntvoff);
@@ -557,22 +561,29 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-		__timer_snapshot_state(vtimer);
-		if (!kvm_timer_should_fire(vtimer)) {
-			kvm_timer_update_irq(vcpu, false, vtimer);
-			kvm_vtimer_update_mask_user(vcpu);
-		}
+	if (!kvm_timer_should_fire(vtimer)) {
+		kvm_timer_update_irq(vcpu, false, vtimer);
+		if (static_branch_likely(&has_gic_active_state))
+			set_vtimer_irq_phys_active(vcpu, false);
+		else
+			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 	}
 }
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	unmask_vtimer_irq_user(vcpu);
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	if (unlikely(!timer->enabled))
+		return;
+
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
@@ -586,6 +597,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	ptimer->cnt_ctl = 0;
 	kvm_timer_update_state(vcpu);
 
+	if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
+		kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
+
 	return 0;
 }
@@ -755,9 +769,11 @@ int kvm_timer_hyp_init(bool has_gic)
 			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 			goto out_free_irq;
 		}
+
+		static_branch_enable(&has_gic_active_state);
 	}
 
-	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
+	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
 
 	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
 			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
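The core of the timer rework above is the has_gic_active_state static key: it is flipped at most once, during init, so the per-vcpu-load choice between the GIC path and the mask/unmask fallback compiles down to a patched branch instead of a load and test. A kernel-style sketch of the pattern (illustration only; the stub names are made up, the jump-label API is the real one):

#include <linux/jump_label.h>

/* starts false; branch sites are live-patched when the key flips */
static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static void load_gic(void)   { /* forward active state to the host GIC */ }
static void load_nogic(void) { /* mask/unmask the percpu timer IRQ */ }

static void timer_hyp_init(bool has_gic)
{
	if (has_gic)
		static_branch_enable(&has_gic_active_state);	/* one-time flip */
}

static void timer_vcpu_load(void)
{
	/* patched to a direct branch once the key is enabled */
	if (static_branch_likely(&has_gic_active_state))
		load_gic();
	else
		load_nogic();
}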
virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -216,8 +216,10 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	 * LRs, and when reading back the VMCR on non-VHE systems.
 	 */
 	if (used_lrs || !has_vhe()) {
-		if (!cpu_if->vgic_sre)
-			dsb(st);
+		if (!cpu_if->vgic_sre) {
+			dsb(sy);
+			isb();
+		}
 	}
 
 	if (used_lrs) {
virt/kvm/arm/vgic/vgic-mmio.c
@@ -113,9 +113,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 	/* Loop over all IRQs affected by this read */
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+		unsigned long flags;
 
+		spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq_is_pending(irq))
 			value |= (1U << i);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
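The reason the read above now takes irq_lock: for level-triggered interrupts, irq_is_pending() (see the vgic.h hunk at the end) combines two fields, pending_latch and line_level, and an injector running concurrently could change them between the two reads. A condensed kernel-style sketch of the pattern (illustration only; the struct is trimmed to the relevant fields):

#include <linux/spinlock.h>

struct irq_state {
	spinlock_t lock;
	bool pending_latch;	/* written by the injection path */
	bool line_level;	/* written by the injection path */
};

/* snapshot both fields under the lock so the pair is coherent */
static bool read_pending(struct irq_state *irq)
{
	unsigned long flags;
	bool val;

	spin_lock_irqsave(&irq->lock, flags);
	val = irq->pending_latch || irq->line_level;
	spin_unlock_irqrestore(&irq->lock, flags);

	return val;
}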
virt/kvm/arm/vgic/vgic-v2.c
@@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void)
 		vgic_v2_write_lr(i, 0);
 }
 
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	cpuif->vgic_hcr |= GICH_HCR_NPIE;
+}
+
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+	cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
@@ -396,7 +403,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
 	kvm_vgic_global_state.type = VGIC_V2;
 	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
 
-	kvm_info("vgic-v2@%llx\n", info->vctrl.start);
+	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
 
 	return 0;
 out:
virt/kvm/arm/vgic/vgic-v3.c
@@ -27,6 +27,13 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;
 
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	cpuif->vgic_hcr |= ICH_HCR_NPIE;
+}
+
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -48,7 +55,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+	cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u64 val = cpuif->vgic_lr[lr];
virt/kvm/arm/vgic/vgic.c
@@ -496,6 +496,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 	return ret;
 }
 
+/**
+ * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
+ * @vcpu: The VCPU pointer
+ * @vintid: The INTID of the interrupt
+ *
+ * Reset the active and pending states of a mapped interrupt. Kernel
+ * subsystems injecting mapped interrupts should reset their interrupt lines
+ * when we are doing a reset of the VM.
+ */
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	unsigned long flags;
+
+	if (!irq->hw)
+		goto out;
+
+	spin_lock_irqsave(&irq->irq_lock, flags);
+	irq->active = false;
+	irq->pending_latch = false;
+	irq->line_level = false;
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
+out:
+	vgic_put_irq(vcpu->kvm, irq);
+}
+
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
 	struct vgic_irq *irq;
@@ -685,22 +711,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
 		vgic_v3_set_underflow(vcpu);
 }
 
+static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_set_npie(vcpu);
+	else
+		vgic_v3_set_npie(vcpu);
+}
+
 /* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
+				 bool *multi_sgi)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
 	int count = 0;
 
+	*multi_sgi = false;
+
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
-		if (vgic_irq_is_sgi(irq->intid) && irq->source)
-			count += hweight8(irq->source);
-		else
+		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
+			int w = hweight8(irq->source);
+
+			count += w;
+			*multi_sgi |= (w > 1);
+		} else {
 			count++;
+		}
 		spin_unlock(&irq->irq_lock);
 	}
 	return count;
@@ -711,28 +752,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
-	int count = 0;
+	int count;
+	bool npie = false;
+	bool multi_sgi;
+	u8 prio = 0xff;
 
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
-	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
+	count = compute_ap_list_depth(vcpu, &multi_sgi);
+	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
 		vgic_sort_ap_list(vcpu);
 
+	count = 0;
+
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
 
-		if (unlikely(vgic_target_oracle(irq) != vcpu))
-			goto next;
-
 		/*
-		 * If we get an SGI with multiple sources, try to get
-		 * them in all at once.
+		 * If we have multi-SGIs in the pipeline, we need to
+		 * guarantee that they are all seen before any IRQ of
+		 * lower priority. In that case, we need to filter out
+		 * these interrupts by exiting early. This is easy as
+		 * the AP list has been sorted already.
 		 */
-		do {
+		if (multi_sgi && irq->priority > prio) {
+			spin_unlock(&irq->irq_lock);
+			break;
+		}
+
+		if (likely(vgic_target_oracle(irq) == vcpu)) {
 			vgic_populate_lr(vcpu, irq, count++);
-		} while (irq->source && count < kvm_vgic_global_state.nr_lr);
 
-next:
+			if (irq->source) {
+				npie = true;
+				prio = irq->priority;
+			}
+		}
+
 		spin_unlock(&irq->irq_lock);
 
 		if (count == kvm_vgic_global_state.nr_lr) {
@@ -743,6 +799,9 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	if (npie)
+		vgic_set_npie(vcpu);
+
 	vcpu->arch.vgic_cpu.used_lrs = count;
 
 	/* Nuke remaining LRs */
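The accounting in compute_ap_list_depth() above weighs a GICv2 SGI by its number of source CPUs, since each pending source ultimately needs its own List Register slot. A standalone sketch of that arithmetic (illustration only; hweight8 is modeled with the GCC popcount builtin):

#include <stdbool.h>
#include <stdio.h>

/* number of set bits in the 8-bit source-CPU mask (kernel: hweight8) */
static int hweight8(unsigned int w)
{
	return __builtin_popcount(w & 0xff);
}

int main(void)
{
	/* one AP-list entry: an SGI pending from source CPUs 0, 2 and 5 */
	unsigned int source = 0x25;	/* 0b100101 */
	bool multi_sgi = false;
	int count = 0;
	int w = hweight8(source);	/* 3 LRs needed for this one entry */

	count += w;
	multi_sgi |= (w > 1);

	printf("count=%d multi_sgi=%d\n", count, multi_sgi);	/* count=3 multi_sgi=1 */
	return 0;
}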
virt/kvm/arm/vgic/vgic.h
@@ -96,6 +96,7 @@
 /* we only support 64 kB translation table page size */
 #define KVM_ITS_L1E_ADDR_MASK		GENMASK_ULL(51, 16)
 
+/* Requires the irq_lock to be held by the caller. */
 static inline bool irq_is_pending(struct vgic_irq *irq)
 {
 	if (irq->config == VGIC_CONFIG_EDGE)
@@ -159,6 +160,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
 int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
 			 int offset, u32 *val);
@@ -191,6 +193,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_enable(struct kvm_vcpu *vcpu);
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录