Commit 2e02cbb2
Authored Sep 29, 2020 by Marc Zyngier

Merge branch 'kvm-arm64/pmu-5.9' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>

Parents: 41fa0f59, 51dd2eb9
Showing 7 changed files with 257 additions and 30 deletions.
Documentation/virt/kvm/devices/vcpu.rst    +53   -4
arch/arm64/include/asm/kvm_host.h           +7   -0
arch/arm64/include/uapi/asm/kvm.h          +16   -0
arch/arm64/kvm/arm.c                        +2   -0
arch/arm64/kvm/pmu-emul.c                 +173  -22
arch/arm64/kvm/sys_regs.c                   +1   -4
include/kvm/arm_pmu.h                       +5   -0
Documentation/virt/kvm/devices/vcpu.rst

@@ -25,8 +25,10 @@ Returns:
  =======  ========================================================
  -EBUSY   The PMU overflow interrupt is already set
- -ENXIO   The overflow interrupt not set when attempting to get it
- -ENODEV  PMUv3 not supported
  -EFAULT  Error reading interrupt number
+ -ENXIO   PMUv3 not supported or the overflow interrupt not set
+          when attempting to get it
+ -ENODEV  KVM_ARM_VCPU_PMU_V3 feature missing from VCPU
  -EINVAL  Invalid PMU overflow interrupt number supplied or
           trying to set the IRQ number without using an in-kernel
           irqchip.

@@ -45,9 +47,10 @@ all vcpus, while as an SPI it must be a separate number per vcpu.
 Returns:
 
  =======  ======================================================
  -EEXIST  Interrupt number already used
  -ENODEV  PMUv3 not supported or GIC not initialized
- -ENXIO   PMUv3 not properly configured or in-kernel irqchip not
-          configured as required prior to calling this attribute
+ -ENXIO   PMUv3 not supported, missing VCPU feature or interrupt
+          number not set
  -EBUSY   PMUv3 already initialized
  =======  ======================================================

@@ -55,6 +58,52 @@ Request the initialization of the PMUv3. If using the PMUv3 with an in-kernel
 virtual GIC implementation, this must be done after initializing the in-kernel
 irqchip.
 
+1.3 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_FILTER
+-----------------------------------------
+
+:Parameters: in kvm_device_attr.addr the address for a PMU event filter is a
+             pointer to a struct kvm_pmu_event_filter
+
+:Returns:
+
+	 =======  ======================================================
+	 -ENODEV: PMUv3 not supported or GIC not initialized
+	 -ENXIO:  PMUv3 not properly configured or in-kernel irqchip not
+		  configured as required prior to calling this attribute
+	 -EBUSY:  PMUv3 already initialized
+	 -EINVAL: Invalid filter range
+	 =======  ======================================================
+
+Request the installation of a PMU event filter described as follows:
+
+struct kvm_pmu_event_filter {
+	__u16	base_event;
+	__u16	nevents;
+
+#define KVM_PMU_EVENT_ALLOW	0
+#define KVM_PMU_EVENT_DENY	1
+
+	__u8	action;
+	__u8	pad[3];
+};
+
+A filter range is defined as the range [@base_event, @base_event + @nevents),
+together with an @action (KVM_PMU_EVENT_ALLOW or KVM_PMU_EVENT_DENY). The
+first registered range defines the global policy (global ALLOW if the first
+@action is DENY, global DENY if the first @action is ALLOW). Multiple ranges
+can be programmed, and must fit within the event space defined by the PMU
+architecture (10 bits on ARMv8.0, 16 bits from ARMv8.1 onwards).
+
+Note: "Cancelling" a filter by registering the opposite action for the same
+range doesn't change the default action. For example, installing an ALLOW
+filter for event range [0:10) as the first filter and then applying a DENY
+action for the same range will leave the whole range as disabled.
+
+Restrictions: Event 0 (SW_INCR) is never filtered, as it doesn't count a
+hardware event. Filtering event 0x1E (CHAIN) has no effect either, as it
+isn't strictly speaking an event. Filtering the cycle counter is possible
+using event 0x11 (CPU_CYCLES).
+
 2. GROUP: KVM_ARM_VCPU_TIMER_CTRL
 =================================
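The new 1.3 section documents the uAPI but stops short of a call sequence. As a minimal userspace sketch (my addition, not part of the commit), assuming vcpu_fd is a vcpu created with the KVM_ARM_VCPU_PMU_V3 feature and headers that carry this series, a filter range is installed through the generic KVM_SET_DEVICE_ATTR ioctl using the group and attribute constants added below:

/*
 * Hypothetical helper, not from the patch: install an ALLOW range on a
 * vcpu fd. Because the first registered range fixes the global policy,
 * an initial ALLOW call leaves every event outside [base, base + n) denied.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int pmu_allow_events(int vcpu_fd, __u16 base, __u16 n)
{
        struct kvm_pmu_event_filter filter = {
                .base_event = base,
                .nevents    = n,
                .action     = KVM_PMU_EVENT_ALLOW,
        };
        struct kvm_device_attr attr = {
                .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                .attr  = KVM_ARM_VCPU_PMU_V3_FILTER,
                .addr  = (__u64)(unsigned long)&filter,
        };

        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Further ranges can be stacked with additional calls before KVM_ARM_VCPU_PMU_V3_INIT; once the PMU has been initialized, the set_attr handler in pmu-emul.c below returns -EBUSY.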
arch/arm64/include/asm/kvm_host.h

@@ -111,6 +111,13 @@ struct kvm_arch {
 	 * supported.
 	 */
 	bool return_nisv_io_abort_to_user;
 
+	/*
+	 * VM-wide PMU filter, implemented as a bitmap and big enough for
+	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
+	 */
+	unsigned long *pmu_filter;
+	unsigned int pmuver;
 };
 
 struct kvm_vcpu_fault_info {
arch/arm64/include/uapi/asm/kvm.h

@@ -159,6 +159,21 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };
 
+/*
+ * PMU filter structure. Describe a range of events with a particular
+ * action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
+ */
+struct kvm_pmu_event_filter {
+	__u16	base_event;
+	__u16	nevents;
+
+#define KVM_PMU_EVENT_ALLOW	0
+#define KVM_PMU_EVENT_DENY	1
+
+	__u8	action;
+	__u8	pad[3];
+};
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
 	struct {

@@ -329,6 +344,7 @@ struct kvm_vcpu_events {
 #define KVM_ARM_VCPU_PMU_V3_CTRL	0
 #define   KVM_ARM_VCPU_PMU_V3_IRQ	0
 #define   KVM_ARM_VCPU_PMU_V3_INIT	1
+#define   KVM_ARM_VCPU_PMU_V3_FILTER	2
 #define KVM_ARM_VCPU_TIMER_CTRL		1
 #define   KVM_ARM_VCPU_TIMER_IRQ_VTIMER		0
 #define   KVM_ARM_VCPU_TIMER_IRQ_PTIMER		1
arch/arm64/kvm/arm.c

@@ -147,6 +147,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
+	bitmap_free(kvm->arch.pmu_filter);
+
 	kvm_vgic_destroy(kvm);
 
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
arch/arm64/kvm/pmu-emul.c

@@ -20,6 +20,21 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
 #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
 
+static u32 kvm_pmu_event_mask(struct kvm *kvm)
+{
+	switch (kvm->arch.pmuver) {
+	case 1:			/* ARMv8.0 */
+		return GENMASK(9, 0);
+	case 4:			/* ARMv8.1 */
+	case 5:			/* ARMv8.4 */
+	case 6:			/* ARMv8.5 */
+		return GENMASK(15, 0);
+	default:		/* Shouldn't be here, just for sanity */
+		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
+		return 0;
+	}
+}
+
 /**
  * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
  * @vcpu: The vcpu pointer
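A gloss on kvm_pmu_event_mask() (my reading, not text from the commit): the switch values are ID_AA64DFR0_EL1.PMUVer encodings (1 = baseline PMUv3, 4/5/6 = the ARMv8.1/8.4/8.5 extensions, 0xf = IMPLEMENTATION DEFINED), which is why kvm_pmu_probe_pmuver() further down treats 0xf as a failure value. The mask also sizes the filter bitmap, since the FILTER handler computes nr_events as kvm_pmu_event_mask(kvm) + 1. A standalone illustration:

/* Hypothetical userspace-buildable sketch of the event-space sizing. */
#include <stdio.h>

static unsigned int event_mask(unsigned int pmuver)
{
        switch (pmuver) {
        case 1:                 /* PMUVer == 0b0001: ARMv8.0 */
                return 0x3ff;   /* GENMASK(9, 0): 10 event bits */
        case 4:                 /* ARMv8.1 */
        case 5:                 /* ARMv8.4 */
        case 6:                 /* ARMv8.5: 16 event bits */
                return 0xffff;  /* GENMASK(15, 0) */
        default:
                return 0;
        }
}

int main(void)
{
        printf("ARMv8.0:  %u events\n", event_mask(1) + 1);     /* 1024 */
        printf("ARMv8.1+: %u events\n", event_mask(4) + 1);     /* 65536 */
        return 0;
}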
@@ -100,7 +115,7 @@ static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
 		return false;
 
 	reg = PMEVTYPER0_EL0 + select_idx;
-	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;
+	eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);
 
 	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
 }
@@ -495,7 +510,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 		/* PMSWINC only applies to ... SW_INC! */
 		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
-		type &= ARMV8_PMU_EVTYPE_EVENT;
+		type &= kvm_pmu_event_mask(vcpu->kvm);
 		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
 			continue;
@@ -578,11 +593,21 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 	data = __vcpu_sys_reg(vcpu, reg);
 
 	kvm_pmu_stop_counter(vcpu, pmc);
-	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
+	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
+		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
+	else
+		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
 
-	/* Software increment event does't need to be backed by a perf event */
-	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
-	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
+	/* Software increment event doesn't need to be backed by a perf event */
+	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
 		return;
 
+	/*
+	 * If we have a filter in place and that the event isn't allowed, do
+	 * not install a perf event either.
+	 */
+	if (vcpu->kvm->arch.pmu_filter &&
+	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
+		return;
+
 	memset(&attr, 0, sizeof(struct perf_event_attr));
@@ -594,8 +619,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
 	attr.exclude_hv = 1; /* Don't count EL2 events */
 	attr.exclude_host = 1; /* Don't count host events */
-	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
-		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
+	attr.config = eventsel;
 
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
@@ -679,17 +703,95 @@ static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 				    u64 select_idx)
 {
-	u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;
+	u64 reg, mask;
+
+	mask  =  ARMV8_PMU_EVTYPE_MASK;
+	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
+	mask |= kvm_pmu_event_mask(vcpu->kvm);
 
 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
 
-	__vcpu_sys_reg(vcpu, reg) = event_type;
+	__vcpu_sys_reg(vcpu, reg) = data & mask;
 
 	kvm_pmu_update_pmc_chained(vcpu, select_idx);
 	kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
+static int kvm_pmu_probe_pmuver(void)
+{
+	struct perf_event_attr attr = { };
+	struct perf_event *event;
+	struct arm_pmu *pmu;
+	int pmuver = 0xf;
+
+	/*
+	 * Create a dummy event that only counts user cycles. As we'll never
+	 * leave this function with the event being live, it will never
+	 * count anything. But it allows us to probe some of the PMU
+	 * details. Yes, this is terrible.
+	 */
+	attr.type = PERF_TYPE_RAW;
+	attr.size = sizeof(attr);
+	attr.pinned = 1;
+	attr.disabled = 0;
+	attr.exclude_user = 0;
+	attr.exclude_kernel = 1;
+	attr.exclude_hv = 1;
+	attr.exclude_host = 1;
+	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
+	attr.sample_period = GENMASK(63, 0);
+
+	event = perf_event_create_kernel_counter(&attr, -1, current,
+						 kvm_pmu_perf_overflow, &attr);
+
+	if (IS_ERR(event)) {
+		pr_err_once("kvm: pmu event creation failed %ld\n",
+			    PTR_ERR(event));
+		return 0xf;
+	}
+
+	if (event->pmu) {
+		pmu = to_arm_pmu(event->pmu);
+		if (pmu->pmuver)
+			pmuver = pmu->pmuver;
+	}
+
+	perf_event_disable(event);
+	perf_event_release_kernel(event);
+
+	return pmuver;
+}
+
+u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
+{
+	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
+	u64 val, mask = 0;
+	int base, i;
+
+	if (!pmceid1) {
+		val = read_sysreg(pmceid0_el0);
+		base = 0;
+	} else {
+		val = read_sysreg(pmceid1_el0);
+		base = 32;
+	}
+
+	if (!bmap)
+		return val;
+
+	for (i = 0; i < 32; i += 8) {
+		u64 byte;
+
+		byte = bitmap_get_value8(bmap, base + i);
+		mask |= byte << i;
+		byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+		mask |= byte << (32 + i);
+	}
+
+	return val & mask;
+}
+
 bool kvm_arm_support_pmu_v3(void)
 {
 	/*
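The byte-wise loop in kvm_pmu_get_pmceid() above folds the filter bitmap into a 64-bit mask that is ANDed against the hardware PMCEID value. As an explanatory note (my reading of the PMUv3 spec, not text from the commit): PMCEID0_EL0 and PMCEID1_EL0 advertise common events 0..31 and 32..63 in their low 32 bits, and the extended event range starting at 0x4000 in their top 32 bits, which is where the 0x4000 offset comes from. A bit-by-bit model of the same fold:

/*
 * Hypothetical userspace model of the fold, with filter_test() standing
 * in for test_bit(ev, kvm->arch.pmu_filter).
 */
#include <stdint.h>

static uint64_t pmceid_mask(int (*filter_test)(unsigned int ev),
                            unsigned int base) /* 0 for PMCEID0, 32 for PMCEID1 */
{
        uint64_t mask = 0;
        unsigned int i;

        for (i = 0; i < 32; i++) {
                if (filter_test(base + i))              /* common events */
                        mask |= UINT64_C(1) << i;
                if (filter_test(0x4000 + base + i))     /* extended events */
                        mask |= UINT64_C(1) << (32 + i);
        }
        return mask; /* AND against the hardware PMCEID value */
}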
@@ -735,15 +837,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 
 static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_arm_support_pmu_v3())
-		return -ENODEV;
-
-	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
-		return -ENXIO;
-
-	if (vcpu->arch.pmu.created)
-		return -EBUSY;
-
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		int ret;
@@ -796,6 +889,19 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
+	if (!kvm_arm_support_pmu_v3() ||
+	    !test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+		return -ENODEV;
+
+	if (vcpu->arch.pmu.created)
+		return -EBUSY;
+
+	if (!vcpu->kvm->arch.pmuver)
+		vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();
+
+	if (vcpu->kvm->arch.pmuver == 0xf)
+		return -ENODEV;
+
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
 		int __user *uaddr = (int __user *)(long)attr->addr;
@@ -804,9 +910,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		if (!irqchip_in_kernel(vcpu->kvm))
 			return -EINVAL;
 
-		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
-			return -ENODEV;
-
 		if (get_user(irq, uaddr))
 			return -EFAULT;
@@ -824,6 +927,53 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		vcpu->arch.pmu.irq_num = irq;
 		return 0;
 	}
+	case KVM_ARM_VCPU_PMU_V3_FILTER: {
+		struct kvm_pmu_event_filter __user *uaddr;
+		struct kvm_pmu_event_filter filter;
+		int nr_events;
+
+		nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
+		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
+
+		if (copy_from_user(&filter, uaddr, sizeof(filter)))
+			return -EFAULT;
+
+		if (((u32)filter.base_event + filter.nevents) > nr_events ||
+		    (filter.action != KVM_PMU_EVENT_ALLOW &&
+		     filter.action != KVM_PMU_EVENT_DENY))
+			return -EINVAL;
+
+		mutex_lock(&vcpu->kvm->lock);
+
+		if (!vcpu->kvm->arch.pmu_filter) {
+			vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL);
+			if (!vcpu->kvm->arch.pmu_filter) {
+				mutex_unlock(&vcpu->kvm->lock);
+				return -ENOMEM;
+			}
+
+			/*
+			 * The default depends on the first applied filter.
+			 * If it allows events, the default is to deny.
+			 * Conversely, if the first filter denies a set of
+			 * events, the default is to allow.
+			 */
+			if (filter.action == KVM_PMU_EVENT_ALLOW)
+				bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
+			else
+				bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
+		}
+
+		if (filter.action == KVM_PMU_EVENT_ALLOW)
+			bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+		else
+			bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+
+		mutex_unlock(&vcpu->kvm->lock);
+
+		return 0;
+	}
 	case KVM_ARM_VCPU_PMU_V3_INIT:
 		return kvm_arm_pmu_v3_init(vcpu);
 	}
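To trace the bitmap logic above with concrete numbers (a hypothetical walk-through, not from the commit): suppose nr_events is 65536 and userspace first installs {base_event = 0x11, nevents = 1, action = ALLOW}. The bitmap is allocated and zeroed (deny-by-default), then bit 0x11 is set, so only CPU_CYCLES survives the test_bit() check in kvm_pmu_create_perf_event(). A second call with the same range but action = DENY clears that bit again while leaving the zeroed default in place, which is exactly the "cancelling" caveat spelled out in the documentation above.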
@@ -860,6 +1010,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ:
 	case KVM_ARM_VCPU_PMU_V3_INIT:
+	case KVM_ARM_VCPU_PMU_V3_FILTER:
 		if (kvm_arm_support_pmu_v3() &&
 		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
 			return 0;
arch/arm64/kvm/sys_regs.c

@@ -769,10 +769,7 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
 
-	if (!(p->Op2 & 1))
-		pmceid = read_sysreg(pmceid0_el0);
-	else
-		pmceid = read_sysreg(pmceid1_el0);
+	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
 
 	p->regval = pmceid;
include/kvm/arm_pmu.h

@@ -34,6 +34,7 @@ struct kvm_pmu {
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);

@@ -108,6 +109,10 @@ static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
+static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
+{
+	return 0;
+}
 #endif
 #endif