Commit eddc0e92 authored by Seiji Aguchi, committed by H. Peter Anvin

x86, trace: Introduce entering/exiting_irq()

When implementing tracepoints in interrupt handlers, simply adding them
to the performance-sensitive path of the handlers may cause a
performance problem due to the time penalty.

To solve the problem, the idea is to prepare both non-trace and trace irq
handlers and to switch the IDT between them when tracing is enabled or disabled.

So, let's introduce entering_irq()/exiting_irq() for the pre/post-
processing of each irq handler.

A way to use them is as follows.

Non-trace irq handler:
smp_irq_handler()
{
	entering_irq();		/* pre-processing of this handler */
	__smp_irq_handler();	/*
				 * common logic between non-trace and trace handlers
				 * in a vector.
				 */
	exiting_irq();		/* post-processing of this handler */

}

Trace irq handler:
smp_trace_irq_handler()
{
	entering_irq();		/* pre-processing of this handler */
	trace_irq_entry();	/* tracepoint for irq entry */
	__smp_irq_handler();	/*
				 * common logic between non-trace and trace handlers
				 * in a vector.
				 */
	trace_irq_exit();	/* tracepoint for irq exit */
	exiting_irq();		/* post-processing of this handler */

}

If the tracepoints could be placed outside entering_irq()/exiting_irq() as
follows, it would look cleaner.

smp_trace_irq_handler()
{
	trace_irq_entry();
	smp_irq_handler();
	trace_irq_exit();
}

But it doesn't work.
The problem is with how irq_enter()/irq_exit() are called. They must be
called before trace_irq_entry()/trace_irq_exit(), because rcu_irq_enter()
must run before any tracepoint is used, as tracepoints use RCU to
synchronize.
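
To make the constraint concrete, here is a minimal sketch, for
illustration only and not code from this patch (the probe name and
signature are assumptions): a tracepoint probe typically runs inside an
RCU read-side critical section, which is only honored once
rcu_irq_enter() (called from irq_enter()) has told RCU that this CPU is
no longer idle.

	/* Hypothetical probe attached to trace_irq_entry() */
	static void probe_irq_entry(void *data, int vector)
	{
		rcu_read_lock();	/* honored only after rcu_irq_enter()
					 * has marked this CPU as non-idle */
		/* ... record the irq event ... */
		rcu_read_unlock();
	}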

As a possible alternative, we might be able to call irq_enter() first, as
follows, if irq_enter() could nest.

smp_trace_irq_handler()
{
	irq_enter();
	trace_irq_entry();
	smp_irq_handler();
	trace_irq_exit();
	irq_exit();
}

But that doesn't work, either.
If irq_enter() were nestable, it would incur a time penalty because it would
have to check whether it had already been called. That penalty is not
desirable in performance-sensitive paths, even if it is tiny.
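
For illustration, a rough sketch of the check a nestable irq_enter()
would need; this is an assumption about a hypothetical implementation,
not the actual kernel code:

	irq_enter()
	{
		if (!in_interrupt())	/* extra branch taken on every
					 * interrupt, traced or not */
			rcu_irq_enter();
		...
	}
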
Signed-off-by: Seiji Aguchi <seiji.aguchi@hds.com>
Link: http://lkml.kernel.org/r/51C3238D.9040706@hds.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Parent f5abaa1b
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -12,6 +12,7 @@
 #include <asm/fixmap.h>
 #include <asm/mpspec.h>
 #include <asm/msr.h>
+#include <asm/idle.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3 1
 
@@ -687,5 +688,31 @@ extern int default_check_phys_apicid_present(int phys_apicid);
 #endif
 
 #endif /* CONFIG_X86_LOCAL_APIC */
+
+extern void irq_enter(void);
+extern void irq_exit(void);
+
+static inline void entering_irq(void)
+{
+	irq_enter();
+	exit_idle();
+}
+
+static inline void entering_ack_irq(void)
+{
+	ack_APIC_irq();
+	entering_irq();
+}
+
+static inline void exiting_irq(void)
+{
+	irq_exit();
+}
+
+static inline void exiting_ack_irq(void)
+{
+	irq_exit();
+	/* Ack only at the end to avoid potential reentry */
+	ack_APIC_irq();
+}
+
 #endif /* _ASM_X86_APIC_H */
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -919,17 +919,14 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
 	/*
 	 * NOTE! We'd better ACK the irq immediately,
 	 * because timer handling can be slow.
-	 */
-	ack_APIC_irq();
-	/*
+	 *
 	 * update_process_times() expects us to have done irq_enter().
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
-	irq_enter();
-	exit_idle();
+	entering_ack_irq();
 	local_apic_timer_interrupt();
-	irq_exit();
+	exiting_irq();
 
 	set_irq_regs(old_regs);
 }
@@ -1907,12 +1904,10 @@ int __init APIC_init_uniprocessor(void)
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-void smp_spurious_interrupt(struct pt_regs *regs)
+static inline void __smp_spurious_interrupt(void)
 {
 	u32 v;
 
-	irq_enter();
-	exit_idle();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it
 	 * if it is a vectored one. Just in case...
@@ -1927,13 +1922,19 @@ void smp_spurious_interrupt(struct pt_regs *regs)
 	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
 	pr_info("spurious APIC interrupt on CPU#%d, "
 		"should never happen.\n", smp_processor_id());
-	irq_exit();
+}
+
+void smp_spurious_interrupt(struct pt_regs *regs)
+{
+	entering_irq();
+	__smp_spurious_interrupt();
+	exiting_irq();
 }
 
 /*
  * This interrupt should never happen with our APIC/SMP architecture
  */
-void smp_error_interrupt(struct pt_regs *regs)
+static inline void __smp_error_interrupt(struct pt_regs *regs)
 {
 	u32 v0, v1;
 	u32 i = 0;
@@ -1948,8 +1949,6 @@ void smp_error_interrupt(struct pt_regs *regs)
 		"Illegal register address",	/* APIC Error Bit 7 */
 	};
 
-	irq_enter();
-	exit_idle();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v0 = apic_read(APIC_ESR);
 	apic_write(APIC_ESR, 0);
@@ -1970,7 +1969,13 @@
 	apic_printk(APIC_DEBUG, KERN_CONT "\n");
 
-	irq_exit();
+}
+
+void smp_error_interrupt(struct pt_regs *regs)
+{
+	entering_irq();
+	__smp_error_interrupt(regs);
+	exiting_irq();
 }
 
 /**
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -378,15 +378,17 @@ static void unexpected_thermal_interrupt(void)
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
 
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+static inline void __smp_thermal_interrupt(void)
 {
-	irq_enter();
-	exit_idle();
 	inc_irq_stat(irq_thermal_count);
 	smp_thermal_vector();
-	irq_exit();
-	/* Ack only at the end to avoid potential reentry */
-	ack_APIC_irq();
+}
+
+asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+{
+	entering_irq();
+	__smp_thermal_interrupt();
+	exiting_ack_irq();
 }
 
 /* Thermal monitoring depends on APIC, ACPI and clock modulation */
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -17,13 +17,15 @@ static void default_threshold_interrupt(void)
 
 void (*mce_threshold_vector)(void) = default_threshold_interrupt;
 
-asmlinkage void smp_threshold_interrupt(void)
+static inline void __smp_threshold_interrupt(void)
 {
-	irq_enter();
-	exit_idle();
 	inc_irq_stat(irq_threshold_count);
 	mce_threshold_vector();
-	irq_exit();
-	/* Ack only at the end to avoid potential reentry */
-	ack_APIC_irq();
+}
+
+asmlinkage void smp_threshold_interrupt(void)
+{
+	entering_irq();
+	__smp_threshold_interrupt();
+	exiting_ack_irq();
 }
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -204,23 +204,21 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 /*
  * Handler for X86_PLATFORM_IPI_VECTOR.
  */
-void smp_x86_platform_ipi(struct pt_regs *regs)
+void __smp_x86_platform_ipi(void)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	ack_APIC_irq();
-
-	irq_enter();
-
-	exit_idle();
-
 	inc_irq_stat(x86_platform_ipis);
 
 	if (x86_platform_ipi_callback)
 		x86_platform_ipi_callback();
+}
 
-	irq_exit();
+void smp_x86_platform_ipi(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
+	entering_ack_irq();
+	__smp_x86_platform_ipi();
+	exiting_irq();
 	set_irq_regs(old_regs);
 }
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -9,13 +9,23 @@
 #include <linux/hardirq.h>
 #include <asm/apic.h>
 
-void smp_irq_work_interrupt(struct pt_regs *regs)
+static inline void irq_work_entering_irq(void)
 {
 	irq_enter();
 	ack_APIC_irq();
+}
+
+static inline void __smp_irq_work_interrupt(void)
+{
 	inc_irq_stat(apic_irq_work_irqs);
 	irq_work_run();
-	irq_exit();
+}
+
+void smp_irq_work_interrupt(struct pt_regs *regs)
+{
+	irq_work_entering_irq();
+	__smp_irq_work_interrupt();
+	exiting_irq();
 }
 
 void arch_irq_work_raise(void)
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -249,32 +249,51 @@ static void native_stop_other_cpus(int wait)
 /*
  * Reschedule call back.
  */
-void smp_reschedule_interrupt(struct pt_regs *regs)
+static inline void __smp_reschedule_interrupt(void)
 {
-	ack_APIC_irq();
 	inc_irq_stat(irq_resched_count);
 	scheduler_ipi();
+}
+
+void smp_reschedule_interrupt(struct pt_regs *regs)
+{
+	ack_APIC_irq();
+	__smp_reschedule_interrupt();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
 }
 
-void smp_call_function_interrupt(struct pt_regs *regs)
+static inline void call_function_entering_irq(void)
 {
 	ack_APIC_irq();
 	irq_enter();
+}
+
+static inline void __smp_call_function_interrupt(void)
+{
 	generic_smp_call_function_interrupt();
 	inc_irq_stat(irq_call_count);
-	irq_exit();
 }
 
-void smp_call_function_single_interrupt(struct pt_regs *regs)
+void smp_call_function_interrupt(struct pt_regs *regs)
+{
+	call_function_entering_irq();
+	__smp_call_function_interrupt();
+	exiting_irq();
+}
+
+static inline void __smp_call_function_single_interrupt(void)
 {
-	ack_APIC_irq();
-	irq_enter();
 	generic_smp_call_function_single_interrupt();
 	inc_irq_stat(irq_call_count);
-	irq_exit();
+}
+
+void smp_call_function_single_interrupt(struct pt_regs *regs)
+{
+	call_function_entering_irq();
+	__smp_call_function_single_interrupt();
+	exiting_irq();
 }
 
 static int __init nonmi_ipi_setup(char *str)