commit 01b2838c
Author: Ingo Molnar

perf counters: consolidate hw_perf save/restore APIs

Impact: cleanup

Rename them to better match the usual IRQ disable/enable APIs:

 hw_perf_disable_all()  => hw_perf_save_disable()
 hw_perf_restore_ctrl() => hw_perf_restore()

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: 5c92d124
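The new pair follows the usual IRQ-style save/restore pattern (cf. local_irq_save()/local_irq_restore()): hw_perf_save_disable() returns the previous counter-enable state and disables all counters, and hw_perf_restore() re-enables them from that saved state. A minimal sketch of a caller, patterned on the call sites updated below:

	u64 perf_flags;

	/* disable all counters, saving the previous enable state: */
	perf_flags = hw_perf_save_disable();

	/* ... counter lists can now be modified NMI-safely ... */

	/* re-enable counters from the saved state: */
	hw_perf_restore(perf_flags);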
@@ -118,13 +118,13 @@ void hw_perf_enable_all(void)
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
 }
 
-void hw_perf_restore_ctrl(u64 ctrl)
+void hw_perf_restore(u64 ctrl)
 {
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
 }
-EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
+EXPORT_SYMBOL_GPL(hw_perf_restore);
 
-u64 hw_perf_disable_all(void)
+u64 hw_perf_save_disable(void)
 {
 	u64 ctrl;
 
@@ -132,7 +132,7 @@ u64 hw_perf_disable_all(void)
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
 	return ctrl;
 }
-EXPORT_SYMBOL_GPL(hw_perf_disable_all);
+EXPORT_SYMBOL_GPL(hw_perf_save_disable);
 
 static inline void
 __x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
...
@@ -270,11 +270,11 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
-	u64 pctrl;
+	u64 perf_flags;
 
 	/* Don't trace irqs off for idle */
 	stop_critical_timings();
-	pctrl = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	if (cstate->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cstate);
@@ -287,7 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
-	hw_perf_restore_ctrl(pctrl);
+	hw_perf_restore(perf_flags);
 	start_critical_timings();
 }
 #endif /* !CONFIG_CPU_IDLE */
@@ -1433,7 +1433,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 
 	/* Don't trace irqs off for idle */
 	stop_critical_timings();
-	pctrl = hw_perf_disable_all();
+	pctrl = hw_perf_save_disable();
 	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
@@ -1448,7 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
-	hw_perf_restore_ctrl(pctrl);
+	hw_perf_restore(pctrl);
 	start_critical_timings();
 }
...
@@ -67,7 +67,7 @@ enum perf_counter_record_type {
  * Hardware event to monitor via a performance monitoring counter:
  */
 struct perf_counter_hw_event {
-	u64			type;
+	s64			type;
 
 	u64			irq_period;
 	u32			record_type;
@@ -206,8 +206,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *task);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
-extern void hw_perf_restore_ctrl(u64 ctrl);
-extern u64 hw_perf_disable_all(void);
+extern u64 hw_perf_save_disable(void);
+extern void hw_perf_restore(u64 ctrl);
 extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
 extern u64 atomic64_counter_read(struct perf_counter *counter);
@@ -221,8 +221,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu) { }
 static inline void perf_counter_init_task(struct task_struct *task) { }
 static inline void perf_counter_notify(struct pt_regs *regs) { }
 static inline void perf_counter_print_debug(void) { }
-static inline void hw_perf_restore_ctrl(u64 ctrl) { }
-static inline u64 hw_perf_disable_all(void) { return 0; }
+static inline void hw_perf_restore(u64 ctrl) { }
+static inline u64 hw_perf_save_disable(void) { return 0; }
 
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
...
@@ -43,8 +43,8 @@ hw_perf_counter_init(struct perf_counter *counter)
 	return ERR_PTR(-EINVAL);
 }
 
-u64 __weak hw_perf_disable_all(void) { return 0; }
-void __weak hw_perf_restore_ctrl(u64 ctrl) { }
+u64 __weak hw_perf_save_disable(void) { return 0; }
+void __weak hw_perf_restore(u64 ctrl) { }
 void __weak hw_perf_counter_setup(void) { }
 
 #if BITS_PER_LONG == 64
@@ -180,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_del_counter(counter, ctx);
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	if (!ctx->task) {
 		/*
@@ -273,9 +273,9 @@ static void __perf_install_in_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_add_counter(counter, ctx);
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	ctx->nr_counters++;
@@ -495,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	/*
 	 * Rotate the first entry last (works just fine for group counters too):
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		list_del(&counter->list_entry);
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 		break;
 	}
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	spin_unlock(&ctx->lock);
...