Commit 15c7ad51 authored by Robert Richter, committed by Ingo Molnar

perf/x86: Rename Intel specific macros

There are macros that are Intel-specific and not x86-generic. Rename
them to INTEL_*.

This patch removes X86_PMC_IDX_GENERIC and does:

 $ sed -i -e 's/X86_PMC_MAX_/INTEL_PMC_MAX_/g'           \
         arch/x86/include/asm/kvm_host.h                 \
         arch/x86/include/asm/perf_event.h               \
         arch/x86/kernel/cpu/perf_event.c                \
         arch/x86/kernel/cpu/perf_event_p4.c             \
         arch/x86/kvm/pmu.c
 $ sed -i -e 's/X86_PMC_IDX_FIXED/INTEL_PMC_IDX_FIXED/g' \
         arch/x86/include/asm/perf_event.h               \
         arch/x86/kernel/cpu/perf_event.c                \
         arch/x86/kernel/cpu/perf_event_intel.c          \
         arch/x86/kernel/cpu/perf_event_intel_ds.c       \
         arch/x86/kvm/pmu.c
 $ sed -i -e 's/X86_PMC_MSK_/INTEL_PMC_MSK_/g'           \
         arch/x86/include/asm/perf_event.h               \
         arch/x86/kernel/cpu/perf_event.c
Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340217996-2254-2-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 1070505d
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -313,8 +313,8 @@ struct kvm_pmu {
 	u64 counter_bitmask[2];
 	u64 global_ctrl_mask;
 	u8 version;
-	struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
-	struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	u64 reprogram_pmi;
 };
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,11 +5,10 @@
  * Performance event hw details:
  */

-#define X86_PMC_MAX_GENERIC			32
-#define X86_PMC_MAX_FIXED			 3
+#define INTEL_PMC_MAX_GENERIC			32
+#define INTEL_PMC_MAX_FIXED			 3
+#define INTEL_PMC_IDX_FIXED			32

-#define X86_PMC_IDX_GENERIC			 0
-#define X86_PMC_IDX_FIXED			32
 #define X86_PMC_IDX_MAX				64

 #define MSR_ARCH_PERFMON_PERFCTR0		0xc1
@@ -121,16 +120,16 @@ struct x86_pmu_capability {

 /* Instr_Retired.Any: */
 #define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS		(X86_PMC_IDX_FIXED + 0)
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

 /* CPU_CLK_Unhalted.Core: */
 #define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES		(X86_PMC_IDX_FIXED + 1)
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES		(INTEL_PMC_IDX_FIXED + 1)

 /* CPU_CLK_Unhalted.Ref: */
 #define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
-#define X86_PMC_IDX_FIXED_REF_CYCLES		(X86_PMC_IDX_FIXED + 2)
-#define X86_PMC_MSK_FIXED_REF_CYCLES		(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES		(INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES		(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

 /*
  * We model BTS tracing as another fixed-mode PMC.
@@ -139,7 +138,7 @@ struct x86_pmu_capability {
  * values are used by actual fixed events and higher values are used
  * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
  */
-#define X86_PMC_IDX_FIXED_BTS			(X86_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS			(INTEL_PMC_IDX_FIXED + 16)

 /*
  * IBS cpuid feature detection
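Taken together, the renamed constants carve up one 64-bit counter-index space: generic counters sit at bits 0..31, fixed counters start at bit INTEL_PMC_IDX_FIXED (32), and BTS is modelled as a pseudo fixed counter at bit 48 (INTEL_PMC_IDX_FIXED + 16), above any real fixed event. A minimal standalone sketch of how this partition yields the PERF_GLOBAL_CTRL enable mask; the same arithmetic appears below in init_hw_perf_events() and kvm_pmu_cpuid_update(), but the helper name here is illustrative, not kernel API:

#include <stdint.h>

#define INTEL_PMC_IDX_FIXED	32

/*
 * Enable bits for n_gp generic counters (bits 0..n_gp-1) and n_fixed
 * fixed counters (bits 32..32+n_fixed-1), as written to
 * MSR_CORE_PERF_GLOBAL_CTRL.  Sketch only; assumes n_gp, n_fixed < 32.
 */
static uint64_t global_ctrl(unsigned int n_gp, unsigned int n_fixed)
{
	return ((1ULL << n_gp) - 1) |
	       (((1ULL << n_fixed) - 1) << INTEL_PMC_IDX_FIXED);
}

/* e.g. global_ctrl(4, 3) == 0x70000000fULL */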
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -63,7 +63,7 @@ u64 x86_perf_event_update(struct perf_event *event)
 	int idx = hwc->idx;
 	s64 delta;

-	if (idx == X86_PMC_IDX_FIXED_BTS)
+	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;

 	/*
@@ -626,8 +626,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	c = sched->constraints[sched->state.event];

 	/* Prefer fixed purpose counters */
-	if (c->idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) {
-		idx = X86_PMC_IDX_FIXED;
+	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
+		idx = INTEL_PMC_IDX_FIXED;
 		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 			if (!__test_and_set_bit(idx, sched->state.used))
 				goto done;
@@ -635,7 +635,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	}
 	/* Grab the first unused counter starting with idx */
 	idx = sched->state.counter;
-	for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
 		if (!__test_and_set_bit(idx, sched->state.used))
 			goto done;
 	}
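The two loops above encode a priority rule: constraint bits at or above INTEL_PMC_IDX_FIXED denote fixed counters and are tried first; generic counters are scanned only below INTEL_PMC_IDX_FIXED. A condensed sketch of that selection under the same bit layout (the function name and flat-bitmask interface are ours, not the kernel's):

#include <stdint.h>

#define INTEL_PMC_IDX_FIXED	32

/*
 * Pick the first counter allowed by 'allowed' and not yet in 'used',
 * preferring fixed counters (bits 32..63) over generic ones (bits
 * 0..31).  Returns -1 if nothing is free.
 */
static int pick_counter(uint64_t allowed, uint64_t used)
{
	uint64_t free = allowed & ~used;
	uint64_t fixed = free & (~0ULL << INTEL_PMC_IDX_FIXED);

	if (fixed)
		free = fixed;			/* prefer fixed purpose counters */
	if (!free)
		return -1;
	return __builtin_ctzll(free);		/* lowest allowed free index */
}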
@@ -813,13 +813,13 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 	hwc->last_cpu = smp_processor_id();
 	hwc->last_tag = ++cpuc->tags[i];

-	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
+	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
 		hwc->config_base = 0;
 		hwc->event_base	= 0;
-	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
+	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
-		hwc->event_base_rdpmc = (hwc->idx - X86_PMC_IDX_FIXED) | 1<<30;
+		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
+		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
 	} else {
 		hwc->config_base = x86_pmu_config_addr(hwc->idx);
 		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
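For a fixed counter, event_base_rdpmc above becomes (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30: in the ECX operand of the RDPMC instruction, bit 30 selects the fixed-counter class and the low bits select the counter within that class (the same encoding reappears in x86_pmu_event_idx() below). A sketch of reading such a counter directly, assuming user-space RDPMC is permitted (CR4.PCE set) and GCC/Clang inline asm:

#include <stdint.h>

/* Read a PMC via RDPMC; 'ecx' encodes counter class and index. */
static inline uint64_t rdpmc(uint32_t ecx)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (ecx));
	return ((uint64_t)hi << 32) | lo;
}

/* e.g. fixed counter 0 (instructions retired): rdpmc(0 | 1U << 30) */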
@@ -921,7 +921,7 @@ int x86_perf_event_set_period(struct perf_event *event)
 	s64 period = hwc->sample_period;
 	int ret = 0, idx = hwc->idx;

-	if (idx == X86_PMC_IDX_FIXED_BTS)
+	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;

 	/*
@@ -1338,21 +1338,21 @@ static int __init init_hw_perf_events(void)
 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
 		quirk->func();

-	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
+	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
 	}
 	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

-	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
+	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
 	}

 	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1368,7 +1368,7 @@ static int __init init_hw_perf_events(void)
 	 */
 	for_each_event_constraint(c, x86_pmu.event_constraints) {
 		if (c->cmask != X86_RAW_EVENT_MASK
-		    || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
+		    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
 			continue;
 		}

@@ -1611,8 +1611,8 @@ static int x86_pmu_event_idx(struct perf_event *event)
 	if (!x86_pmu.attr_rdpmc)
 		return 0;

-	if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
-		idx -= X86_PMC_IDX_FIXED;
+	if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
+		idx -= INTEL_PMC_IDX_FIXED;
 		idx |= 1 << 30;
 	}
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -747,7 +747,7 @@ static void intel_pmu_disable_all(void)

 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

-	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
 		intel_pmu_disable_bts();

 	intel_pmu_pebs_disable_all();
@@ -763,9 +763,9 @@ static void intel_pmu_enable_all(int added)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
 			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

-	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
 		struct perf_event *event =
-			cpuc->events[X86_PMC_IDX_FIXED_BTS];
+			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

 		if (WARN_ON_ONCE(!event))
 			return;
@@ -871,7 +871,7 @@ static inline void intel_pmu_ack_status(u64 ack)

 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = hwc->idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;

 	mask = 0xfULL << (idx * 4);
@@ -886,7 +886,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

-	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
 		intel_pmu_drain_bts_buffer();
 		return;
@@ -915,7 +915,7 @@ static void intel_pmu_disable_event(struct perf_event *event)

 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = hwc->idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;

 	/*
@@ -949,7 +949,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

-	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
 		if (!__this_cpu_read(cpu_hw_events.enabled))
 			return;

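The idx * 4 arithmetic in intel_pmu_disable_fixed() and intel_pmu_enable_fixed() follows the layout of MSR_ARCH_PERFMON_FIXED_CTR_CTRL: each fixed counter owns a 4-bit control field (bit 0 count in ring 0, bit 1 count in ring 3, bit 2 any-thread, bit 3 PMI on overflow). A sketch of composing one such field; the macro and flag names here are ours, not the kernel's:

#include <stdint.h>

#define FIXED_EN_OS	0x1	/* count in kernel mode (ring 0) */
#define FIXED_EN_USR	0x2	/* count in user mode (ring 3) */
#define FIXED_ANY	0x4	/* count across SMT siblings */
#define FIXED_PMI	0x8	/* raise PMI on overflow */

/* 4 control bits per fixed counter, packed by counter index. */
#define FIXED_CTRL(idx, bits)	((uint64_t)(bits) << ((idx) * 4))

/* e.g. fixed counter 2 (ref cycles), user+kernel, with PMI:
 *	FIXED_CTRL(2, FIXED_EN_OS | FIXED_EN_USR | FIXED_PMI) == 0xb00 */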
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -248,7 +248,7 @@ void reserve_ds_buffers(void)
  */

 struct event_constraint bts_constraint =
-	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

 void intel_pmu_enable_bts(u64 config)
 {
@@ -295,7 +295,7 @@ int intel_pmu_drain_bts_buffer(void)
 		u64	to;
 		u64	flags;
 	};
-	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
 	struct bts_record *at, *top;
 	struct perf_output_handle handle;
 	struct perf_event_header header;
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1325,7 +1325,7 @@ __init int p4_pmu_init(void)
 	unsigned int low, high;

 	/* If we get stripped -- indexing fails */
-	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);

 	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
 	if (!(low & (1 << 7))) {
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -80,10 +80,10 @@ static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)

 static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
 {
-	if (idx < X86_PMC_IDX_FIXED)
+	if (idx < INTEL_PMC_IDX_FIXED)
 		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
 	else
-		return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
 }

 void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
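global_idx_to_pmc() shows that KVM mirrors the hardware numbering toward its guests: a PERF_GLOBAL_CTRL bit number below INTEL_PMC_IDX_FIXED names a GP counter, anything at or above it names a fixed counter. A trivial decode sketch under that convention (names are illustrative):

#include <stdbool.h>

#define INTEL_PMC_IDX_FIXED	32

/* Split a global index into (is_fixed, index within its class). */
static bool decode_global_idx(int idx, int *local)
{
	bool fixed = idx >= INTEL_PMC_IDX_FIXED;

	*local = fixed ? idx - INTEL_PMC_IDX_FIXED : idx;
	return fixed;
}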
@@ -291,7 +291,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
 	if (pmc_is_gp(pmc))
 		reprogram_gp_counter(pmc, pmc->eventsel);
 	else {
-		int fidx = idx - X86_PMC_IDX_FIXED;
+		int fidx = idx - INTEL_PMC_IDX_FIXED;
 		reprogram_fixed_counter(pmc,
 				fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
 	}
@@ -452,7 +452,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
 		return;

 	pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
-			X86_PMC_MAX_GENERIC);
+			INTEL_PMC_MAX_GENERIC);
 	pmu->counter_bitmask[KVM_PMC_GP] =
 		((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
 	bitmap_len = (entry->eax >> 24) & 0xff;
@@ -462,13 +462,13 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
 		pmu->nr_arch_fixed_counters = 0;
 	} else {
 		pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
-				X86_PMC_MAX_FIXED);
+				INTEL_PMC_MAX_FIXED);
 		pmu->counter_bitmask[KVM_PMC_FIXED] =
 			((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
 	}

 	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
-		(((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED);
+		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
 	pmu->global_ctrl_mask = ~pmu->global_ctrl;
 }
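The shifts and masks in kvm_pmu_cpuid_update() decode CPUID leaf 0xA, the architectural perfmon enumeration: EAX[7:0] version, EAX[15:8] number of GP counters, EAX[23:16] GP counter width, EAX[31:24] length of the EBX event-availability mask, EDX[4:0] number of fixed counters, EDX[12:5] fixed counter width. A standalone decode sketch mirroring those fields (struct and function names are ours):

#include <stdint.h>

struct perfmon_caps {
	unsigned int version, nr_gp, gp_width, nr_fixed, fixed_width;
};

static struct perfmon_caps decode_cpuid_0xa(uint32_t eax, uint32_t edx)
{
	struct perfmon_caps c = {
		.version     =  eax        & 0xff,
		.nr_gp       = (eax >>  8) & 0xff,
		.gp_width    = (eax >> 16) & 0xff,	/* counter_bitmask = (1 << width) - 1 */
		.nr_fixed    =  edx        & 0x1f,
		.fixed_width = (edx >>  5) & 0xff,
	};
	return c;
}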
@@ -478,15 +478,15 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;

 	memset(pmu, 0, sizeof(*pmu));
-	for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
 		pmu->gp_counters[i].type = KVM_PMC_GP;
 		pmu->gp_counters[i].vcpu = vcpu;
 		pmu->gp_counters[i].idx = i;
 	}
-	for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
 		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
 		pmu->fixed_counters[i].vcpu = vcpu;
-		pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
 	}
 	init_irq_work(&pmu->irq_work, trigger_pmi);
 	kvm_pmu_cpuid_update(vcpu);
@@ -498,13 +498,13 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 	int i;

 	irq_work_sync(&pmu->irq_work);
-	for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
 		struct kvm_pmc *pmc = &pmu->gp_counters[i];

 		stop_counter(pmc);
 		pmc->counter = pmc->eventsel = 0;
 	}

-	for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
 		stop_counter(&pmu->fixed_counters[i]);

 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =