Commit 7ff775ac authored by Jim Mattson, committed by Paolo Bonzini

KVM: x86/pmu: Use binary search to check filtered events

The PMU event filter may contain up to 300 events. Replace the linear
search in reprogram_gp_counter() with a binary search.
Signed-off-by: Jim Mattson <jmattson@google.com>
Message-Id: <20220115052431.447232-2-jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 1a1d1dbc
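Before the diff itself, a minimal, self-contained userspace sketch of the pattern this commit adopts: sort the filter once when it is installed, then answer each lookup with bsearch() in O(log n) instead of scanning all (up to 300) entries. The names here (filter_allows_event, EVENT_ALLOW/EVENT_DENY) are illustrative, not KVM's, and the comparator deliberately uses an explicit three-way compare rather than the subtraction in the patch, since a 64-bit difference truncated to int can misorder distant values.

/* Illustrative sketch of sort-once / bsearch-per-lookup filtering. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u64(const void *a, const void *b)
{
	uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;

	/* Explicit three-way compare; returning x - y cast to int
	 * would be wrong for values whose difference overflows int. */
	return x < y ? -1 : x > y ? 1 : 0;
}

enum { EVENT_ALLOW, EVENT_DENY };

/* One sort at setup, then O(log n) per lookup instead of O(n). */
static bool filter_allows_event(const uint64_t *events, size_t nevents,
				int action, uint64_t eventsel)
{
	bool found = bsearch(&eventsel, events, nevents,
			     sizeof(*events), cmp_u64) != NULL;

	return action == EVENT_ALLOW ? found : !found;
}

int main(void)
{
	uint64_t events[] = { 0xc4, 0x3c, 0xc0, 0x2e };	/* raw event selects */
	size_t n = sizeof(events) / sizeof(events[0]);

	qsort(events, n, sizeof(events[0]), cmp_u64);	/* sort once, as the ioctl path does */
	printf("0xc0 allowed? %d\n", filter_allows_event(events, n, EVENT_ALLOW, 0xc0));
	printf("0x99 allowed? %d\n", filter_allows_event(events, n, EVENT_ALLOW, 0x99));
	return 0;
}

Sorting once at filter-installation time is the key design choice: installation is rare and guest counter reprogramming is frequent, so the O(n log n) sort is paid where it is cheap.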
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -13,6 +13,8 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
 #include <asm/perf_event.h>
 #include "x86.h"
 #include "cpuid.h"
@@ -172,12 +174,16 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 	return true;
 }
 
+static int cmp_u64(const void *a, const void *b)
+{
+	return *(__u64 *)a - *(__u64 *)b;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
 	unsigned config, type = PERF_TYPE_RAW;
 	struct kvm *kvm = pmc->vcpu->kvm;
 	struct kvm_pmu_event_filter *filter;
-	int i;
 	bool allow_event = true;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -192,16 +198,13 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
 	if (filter) {
-		for (i = 0; i < filter->nevents; i++)
-			if (filter->events[i] ==
-			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
-				break;
-		if (filter->action == KVM_PMU_EVENT_ALLOW &&
-		    i == filter->nevents)
-			allow_event = false;
-		if (filter->action == KVM_PMU_EVENT_DENY &&
-		    i < filter->nevents)
-			allow_event = false;
+		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;
+
+		if (bsearch(&key, filter->events, filter->nevents,
+			    sizeof(__u64), cmp_u64))
+			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
+		else
+			allow_event = filter->action == KVM_PMU_EVENT_DENY;
 	}
 	if (!allow_event)
 		return;
@@ -576,6 +579,11 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	/* Ensure nevents can't be changed between the user copies. */
 	*filter = tmp;
 
+	/*
+	 * Sort the in-kernel list so that we can search it with bsearch.
+	 */
+	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
+
 	mutex_lock(&kvm->lock);
 	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
 				     mutex_is_locked(&kvm->lock));
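For context on where the filter being searched comes from, here is a hedged userspace sketch of installing an allow-list through the KVM_SET_PMU_EVENT_FILTER ioctl on a VM. It assumes an existing vm_fd obtained via KVM_CREATE_VM; set_allow_filter is an illustrative helper name, not a real API, and error handling is trimmed.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_allow_filter(int vm_fd, const __u64 *events, __u32 nevents)
{
	struct kvm_pmu_event_filter *f;
	int ret;

	/* Flexible-array allocation; calloc zeroes flags and
	 * fixed_counter_bitmap, which must be 0 here. */
	f = calloc(1, sizeof(*f) + nevents * sizeof(f->events[0]));
	if (!f)
		return -1;

	f->action = KVM_PMU_EVENT_ALLOW;	/* count only the listed events */
	f->nevents = nevents;
	memcpy(f->events, events, nevents * sizeof(f->events[0]));

	/* After this commit the kernel sorts the list itself, so
	 * userspace need not pre-sort it. */
	ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
	return ret;
}

Note that the sort happens once per ioctl, on the kernel's private copy taken after the "nevents can't be changed between the user copies" check, so a racing userspace writer cannot unsort the list the bsearch later relies on.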