提交 89e07569 编写于 作者: A Andi Kleen 提交者: Andi Kleen

[PATCH] x86-64: Let oprofile reserve MSR on all CPUs

The MSR reservation is per CPU and oprofile would only allocate them
on the CPU it was initialized on. Change this to handle all CPUs.

This also fixes a warning about unprotected use of smp_processor_id()
in preemptible kernels.
Signed-off-by: Andi Kleen <ak@suse.de>
上级 3556ddfa
...@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) ...@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* checks for a bit availability (hack for oprofile) */ /* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{ {
int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
for_each_possible_cpu (cpu) {
return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner))); if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 0;
}
return 1;
} }
/* checks the an msr for availability */ /* checks the an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr) int avail_to_resrv_perfctr_nmi(unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
int cpu;
counter = nmi_perfctr_msr_to_bit(msr); counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner))); for_each_possible_cpu (cpu) {
if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 0;
}
return 1;
} }
int reserve_perfctr_nmi(unsigned int msr) static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr); counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner))) if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1; return 1;
return 0; return 0;
} }
void release_perfctr_nmi(unsigned int msr) static void __release_perfctr_nmi(int cpu, unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr); counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner)); clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
} }
int reserve_evntsel_nmi(unsigned int msr) int reserve_perfctr_nmi(unsigned int msr)
{
int cpu, i;
for_each_possible_cpu (cpu) {
if (!__reserve_perfctr_nmi(cpu, msr)) {
for_each_possible_cpu (i) {
if (i >= cpu)
break;
__release_perfctr_nmi(i, msr);
}
return 0;
}
}
return 1;
}
void release_perfctr_nmi(unsigned int msr)
{
int cpu;
for_each_possible_cpu (cpu) {
__release_perfctr_nmi(cpu, msr);
}
}
int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr); counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0])) if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1; return 1;
return 0; return 0;
} }
void release_evntsel_nmi(unsigned int msr) static void __release_evntsel_nmi(int cpu, unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr); counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]); clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
}
int reserve_evntsel_nmi(unsigned int msr)
{
int cpu, i;
for_each_possible_cpu (cpu) {
if (!__reserve_evntsel_nmi(cpu, msr)) {
for_each_possible_cpu (i) {
if (i >= cpu)
break;
__release_evntsel_nmi(i, msr);
}
return 0;
}
}
return 1;
}
void release_evntsel_nmi(unsigned int msr)
{
int cpu;
for_each_possible_cpu (cpu) {
__release_evntsel_nmi(cpu, msr);
}
} }
static __cpuinit inline int nmi_known_cpu(void) static __cpuinit inline int nmi_known_cpu(void)
...@@ -507,10 +572,10 @@ static int setup_k7_watchdog(void) ...@@ -507,10 +572,10 @@ static int setup_k7_watchdog(void)
perfctr_msr = MSR_K7_PERFCTR0; perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0; evntsel_msr = MSR_K7_EVNTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr)) if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail; goto fail;
if (!reserve_evntsel_nmi(evntsel_msr)) if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1; goto fail1;
wrmsrl(perfctr_msr, 0UL); wrmsrl(perfctr_msr, 0UL);
...@@ -533,7 +598,7 @@ static int setup_k7_watchdog(void) ...@@ -533,7 +598,7 @@ static int setup_k7_watchdog(void)
wd->check_bit = 1ULL<<63; wd->check_bit = 1ULL<<63;
return 1; return 1;
fail1: fail1:
release_perfctr_nmi(perfctr_msr); __release_perfctr_nmi(-1, perfctr_msr);
fail: fail:
return 0; return 0;
} }
...@@ -544,8 +609,8 @@ static void stop_k7_watchdog(void) ...@@ -544,8 +609,8 @@ static void stop_k7_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0); wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr); __release_evntsel_nmi(-1, wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr); __release_perfctr_nmi(-1, wd->perfctr_msr);
} }
#define P6_EVNTSEL0_ENABLE (1 << 22) #define P6_EVNTSEL0_ENABLE (1 << 22)
...@@ -563,10 +628,10 @@ static int setup_p6_watchdog(void) ...@@ -563,10 +628,10 @@ static int setup_p6_watchdog(void)
perfctr_msr = MSR_P6_PERFCTR0; perfctr_msr = MSR_P6_PERFCTR0;
evntsel_msr = MSR_P6_EVNTSEL0; evntsel_msr = MSR_P6_EVNTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr)) if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail; goto fail;
if (!reserve_evntsel_nmi(evntsel_msr)) if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1; goto fail1;
wrmsrl(perfctr_msr, 0UL); wrmsrl(perfctr_msr, 0UL);
...@@ -590,7 +655,7 @@ static int setup_p6_watchdog(void) ...@@ -590,7 +655,7 @@ static int setup_p6_watchdog(void)
wd->check_bit = 1ULL<<39; wd->check_bit = 1ULL<<39;
return 1; return 1;
fail1: fail1:
release_perfctr_nmi(perfctr_msr); __release_perfctr_nmi(-1, perfctr_msr);
fail: fail:
return 0; return 0;
} }
...@@ -601,8 +666,8 @@ static void stop_p6_watchdog(void) ...@@ -601,8 +666,8 @@ static void stop_p6_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0); wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr); __release_evntsel_nmi(-1, wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr); __release_perfctr_nmi(-1, wd->perfctr_msr);
} }
/* Note that these events don't tick when the CPU idles. This means /* Note that these events don't tick when the CPU idles. This means
...@@ -668,10 +733,10 @@ static int setup_p4_watchdog(void) ...@@ -668,10 +733,10 @@ static int setup_p4_watchdog(void)
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4); cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
} }
if (!reserve_perfctr_nmi(perfctr_msr)) if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail; goto fail;
if (!reserve_evntsel_nmi(evntsel_msr)) if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1; goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F) evntsel = P4_ESCR_EVENT_SELECT(0x3F)
...@@ -695,7 +760,7 @@ static int setup_p4_watchdog(void) ...@@ -695,7 +760,7 @@ static int setup_p4_watchdog(void)
wd->check_bit = 1ULL<<39; wd->check_bit = 1ULL<<39;
return 1; return 1;
fail1: fail1:
release_perfctr_nmi(perfctr_msr); __release_perfctr_nmi(-1, perfctr_msr);
fail: fail:
return 0; return 0;
} }
...@@ -707,8 +772,8 @@ static void stop_p4_watchdog(void) ...@@ -707,8 +772,8 @@ static void stop_p4_watchdog(void)
wrmsr(wd->cccr_msr, 0, 0); wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0); wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr); __release_evntsel_nmi(-1, wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr); __release_perfctr_nmi(-1, wd->perfctr_msr);
} }
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
...@@ -736,10 +801,10 @@ static int setup_intel_arch_watchdog(void) ...@@ -736,10 +801,10 @@ static int setup_intel_arch_watchdog(void)
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0; perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0; evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr)) if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail; goto fail;
if (!reserve_evntsel_nmi(evntsel_msr)) if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1; goto fail1;
wrmsrl(perfctr_msr, 0UL); wrmsrl(perfctr_msr, 0UL);
...@@ -764,7 +829,7 @@ static int setup_intel_arch_watchdog(void) ...@@ -764,7 +829,7 @@ static int setup_intel_arch_watchdog(void)
wd->check_bit = 1ULL << (eax.split.bit_width - 1); wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1; return 1;
fail1: fail1:
release_perfctr_nmi(perfctr_msr); __release_perfctr_nmi(-1, perfctr_msr);
fail: fail:
return 0; return 0;
} }
...@@ -787,8 +852,8 @@ static void stop_intel_arch_watchdog(void) ...@@ -787,8 +852,8 @@ static void stop_intel_arch_watchdog(void)
return; return;
wrmsr(wd->evntsel_msr, 0, 0); wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr); __release_evntsel_nmi(-1, wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr); __release_perfctr_nmi(-1, wd->perfctr_msr);
} }
void setup_apic_nmi_watchdog (void *unused) void setup_apic_nmi_watchdog (void *unused)
......
...@@ -108,64 +108,128 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) ...@@ -108,64 +108,128 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* checks for a bit availability (hack for oprofile) */ /* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{ {
int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
for_each_possible_cpu (cpu) {
return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner))); if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 0;
}
return 1;
} }
/* checks the an msr for availability */ /* checks the an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr) int avail_to_resrv_perfctr_nmi(unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
int cpu;
counter = nmi_perfctr_msr_to_bit(msr); counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner))); for_each_possible_cpu (cpu) {
if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 0;
}
return 1;
} }
int reserve_perfctr_nmi(unsigned int msr) static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr); counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner))) if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1; return 1;
return 0; return 0;
} }
void release_perfctr_nmi(unsigned int msr) static void __release_perfctr_nmi(int cpu, unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr); counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner)); clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
} }
int reserve_evntsel_nmi(unsigned int msr) int reserve_perfctr_nmi(unsigned int msr)
{
int cpu, i;
for_each_possible_cpu (cpu) {
if (!__reserve_perfctr_nmi(cpu, msr)) {
for_each_possible_cpu (i) {
if (i >= cpu)
break;
__release_perfctr_nmi(i, msr);
}
return 0;
}
}
return 1;
}
void release_perfctr_nmi(unsigned int msr)
{
int cpu;
for_each_possible_cpu (cpu)
__release_perfctr_nmi(cpu, msr);
}
int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr); counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner))) if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1; return 1;
return 0; return 0;
} }
void release_evntsel_nmi(unsigned int msr) static void __release_evntsel_nmi(int cpu, unsigned int msr)
{ {
unsigned int counter; unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr); counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS); BUG_ON(counter > NMI_MAX_COUNTER_BITS);
clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)); clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
}
int reserve_evntsel_nmi(unsigned int msr)
{
int cpu, i;
for_each_possible_cpu (cpu) {
if (!__reserve_evntsel_nmi(cpu, msr)) {
for_each_possible_cpu (i) {
if (i >= cpu)
break;
__release_evntsel_nmi(i, msr);
}
return 0;
}
}
return 1;
}
void release_evntsel_nmi(unsigned int msr)
{
int cpu;
for_each_possible_cpu (cpu) {
__release_evntsel_nmi(cpu, msr);
}
} }
static __cpuinit inline int nmi_known_cpu(void) static __cpuinit inline int nmi_known_cpu(void)
...@@ -472,10 +536,10 @@ static int setup_k7_watchdog(void) ...@@ -472,10 +536,10 @@ static int setup_k7_watchdog(void)
perfctr_msr = MSR_K7_PERFCTR0; perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0; evntsel_msr = MSR_K7_EVNTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr)) if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail; goto fail;
if (!reserve_evntsel_nmi(evntsel_msr)) if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1; goto fail1;
/* Simulator may not support it */ /* Simulator may not support it */
...@@ -501,9 +565,9 @@ static int setup_k7_watchdog(void) ...@@ -501,9 +565,9 @@ static int setup_k7_watchdog(void)
wd->check_bit = 1ULL<<63; wd->check_bit = 1ULL<<63;
return 1; return 1;
fail2: fail2:
release_evntsel_nmi(evntsel_msr); __release_evntsel_nmi(-1, evntsel_msr);
fail1: fail1:
release_perfctr_nmi(perfctr_msr); __release_perfctr_nmi(-1, perfctr_msr);
fail: fail:
return 0; return 0;
} }
...@@ -514,8 +578,8 @@ static void stop_k7_watchdog(void) ...@@ -514,8 +578,8 @@ static void stop_k7_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0); wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr); __release_evntsel_nmi(-1, wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr); __release_perfctr_nmi(-1, wd->perfctr_msr);
} }
/* Note that these events don't tick when the CPU idles. This means /* Note that these events don't tick when the CPU idles. This means
...@@ -581,10 +645,10 @@ static int setup_p4_watchdog(void) ...@@ -581,10 +645,10 @@ static int setup_p4_watchdog(void)
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4); cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
} }
if (!reserve_perfctr_nmi(perfctr_msr)) if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail; goto fail;
if (!reserve_evntsel_nmi(evntsel_msr)) if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1; goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F) evntsel = P4_ESCR_EVENT_SELECT(0x3F)
...@@ -609,7 +673,7 @@ static int setup_p4_watchdog(void) ...@@ -609,7 +673,7 @@ static int setup_p4_watchdog(void)
wd->check_bit = 1ULL<<39; wd->check_bit = 1ULL<<39;
return 1; return 1;
fail1: fail1:
release_perfctr_nmi(perfctr_msr); __release_perfctr_nmi(-1, perfctr_msr);
fail: fail:
return 0; return 0;
} }
...@@ -621,8 +685,8 @@ static void stop_p4_watchdog(void) ...@@ -621,8 +685,8 @@ static void stop_p4_watchdog(void)
wrmsr(wd->cccr_msr, 0, 0); wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0); wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr); __release_evntsel_nmi(-1, wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr); __release_perfctr_nmi(-1, wd->perfctr_msr);
} }
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
...@@ -650,10 +714,10 @@ static int setup_intel_arch_watchdog(void) ...@@ -650,10 +714,10 @@ static int setup_intel_arch_watchdog(void)
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0; perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0; evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr)) if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail; goto fail;
if (!reserve_evntsel_nmi(evntsel_msr)) if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1; goto fail1;
wrmsrl(perfctr_msr, 0UL); wrmsrl(perfctr_msr, 0UL);
...@@ -680,7 +744,7 @@ static int setup_intel_arch_watchdog(void) ...@@ -680,7 +744,7 @@ static int setup_intel_arch_watchdog(void)
wd->check_bit = 1ULL << (eax.split.bit_width - 1); wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1; return 1;
fail1: fail1:
release_perfctr_nmi(perfctr_msr); __release_perfctr_nmi(-1, perfctr_msr);
fail: fail:
return 0; return 0;
} }
...@@ -704,8 +768,8 @@ static void stop_intel_arch_watchdog(void) ...@@ -704,8 +768,8 @@ static void stop_intel_arch_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0); wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr); __release_evntsel_nmi(-1, wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr); __release_perfctr_nmi(-1, wd->perfctr_msr);
} }
void setup_apic_nmi_watchdog(void *unused) void setup_apic_nmi_watchdog(void *unused)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册