提交 95833c83 编写于 作者: A Andi Kleen 提交者: Linus Torvalds

[PATCH] x86_64: Add idle notifiers

This adds a new notifier chain that is called with IDLE_START
when a CPU goes idle and IDLE_END when it goes out of idle.
The context can be idle thread or interrupt context.

Since we cannot rely on MONITOR/MWAIT existing, the idle-end
check currently has to be done in all interrupt
handlers.

They were originally inspired by the similar s390 implementation.

They have a variety of applications:
- They will be needed for CONFIG_NO_IDLE_HZ
- They can be used for oprofile to fix up the missing time
in idle when performance counters don't tick.
- They can be used for better C state management in ACPI
- They could be used for microstate accounting.

This is just infrastructure so far, no users.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 6b050f80
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/mach_apic.h> #include <asm/mach_apic.h>
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/idle.h>
int apic_verbosity; int apic_verbosity;
...@@ -922,6 +923,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs) ...@@ -922,6 +923,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* Besides, if we don't timer interrupts ignore the global * Besides, if we don't timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do. * interrupt lock, which is the WrongThing (tm) to do.
*/ */
exit_idle();
irq_enter(); irq_enter();
smp_local_timer_interrupt(regs); smp_local_timer_interrupt(regs);
irq_exit(); irq_exit();
...@@ -981,6 +983,7 @@ __init int oem_force_hpet_timer(void) ...@@ -981,6 +983,7 @@ __init int oem_force_hpet_timer(void)
asmlinkage void smp_spurious_interrupt(void) asmlinkage void smp_spurious_interrupt(void)
{ {
unsigned int v; unsigned int v;
exit_idle();
irq_enter(); irq_enter();
/* /*
* Check if this really is a spurious interrupt and ACK it * Check if this really is a spurious interrupt and ACK it
...@@ -1016,6 +1019,7 @@ asmlinkage void smp_error_interrupt(void) ...@@ -1016,6 +1019,7 @@ asmlinkage void smp_error_interrupt(void)
{ {
unsigned int v, v1; unsigned int v, v1;
exit_idle();
irq_enter(); irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */ /* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR); v = apic_read(APIC_ESR);
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/io_apic.h> #include <asm/io_apic.h>
#include <asm/idle.h>
atomic_t irq_err_count; atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_X86_IO_APIC
...@@ -98,6 +99,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs) ...@@ -98,6 +99,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
/* high bits used in ret_from_ code */ /* high bits used in ret_from_ code */
unsigned irq = regs->orig_rax & 0xff; unsigned irq = regs->orig_rax & 0xff;
exit_idle();
irq_enter(); irq_enter();
__do_IRQ(irq, regs); __do_IRQ(irq, regs);
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/idle.h>
#define PFX "mce_threshold: " #define PFX "mce_threshold: "
#define VERSION "version 1.00.9" #define VERSION "version 1.00.9"
...@@ -140,6 +141,7 @@ asmlinkage void mce_threshold_interrupt(void) ...@@ -140,6 +141,7 @@ asmlinkage void mce_threshold_interrupt(void)
struct mce m; struct mce m;
ack_APIC_irq(); ack_APIC_irq();
exit_idle();
irq_enter(); irq_enter();
memset(&m, 0, sizeof(m)); memset(&m, 0, sizeof(m));
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/hw_irq.h> #include <asm/hw_irq.h>
#include <asm/idle.h>
static DEFINE_PER_CPU(unsigned long, next_check); static DEFINE_PER_CPU(unsigned long, next_check);
...@@ -19,6 +20,7 @@ asmlinkage void smp_thermal_interrupt(void) ...@@ -19,6 +20,7 @@ asmlinkage void smp_thermal_interrupt(void)
ack_APIC_irq(); ack_APIC_irq();
exit_idle();
irq_enter(); irq_enter();
if (time_before(jiffies, __get_cpu_var(next_check))) if (time_before(jiffies, __get_cpu_var(next_check)))
goto done; goto done;
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/notifier.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -50,6 +51,7 @@ ...@@ -50,6 +51,7 @@
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/ia32.h> #include <asm/ia32.h>
#include <asm/idle.h>
asmlinkage extern void ret_from_fork(void); asmlinkage extern void ret_from_fork(void);
...@@ -64,6 +66,50 @@ EXPORT_SYMBOL(boot_option_idle_override); ...@@ -64,6 +66,50 @@ EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_idle)(void); void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state); static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
/*
 * Idle notifier infrastructure: callers register to be told when this
 * CPU enters idle (IDLE_START) and leaves it (IDLE_END).  Notifiers may
 * run in idle-thread or interrupt context (see exit_idle below).
 */
static struct notifier_block *idle_notifier;
/* Serializes register/unregister against each other (not against callers). */
static DEFINE_SPINLOCK(idle_notifier_lock);
/* Add @n to the idle notifier chain; safe from any context (irqsave lock). */
void idle_notifier_register(struct notifier_block *n)
{
unsigned long flags;
spin_lock_irqsave(&idle_notifier_lock, flags);
notifier_chain_register(&idle_notifier, n);
spin_unlock_irqrestore(&idle_notifier_lock, flags);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
/* Remove @n from the idle notifier chain. */
void idle_notifier_unregister(struct notifier_block *n)
{
unsigned long flags;
spin_lock_irqsave(&idle_notifier_lock, flags);
notifier_chain_unregister(&idle_notifier, n);
spin_unlock_irqrestore(&idle_notifier_lock, flags);
}
/* NOTE(review): register is EXPORT_SYMBOL_GPL but unregister is plain
 * EXPORT_SYMBOL -- inconsistent module licensing exposure; confirm which
 * is intended and unify. */
EXPORT_SYMBOL(idle_notifier_unregister);
/* Per-CPU idle bookkeeping so exit_idle can tell whether IDLE_END is due. */
enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
/* Mark this CPU idle and fire IDLE_START; called from cpu_idle(). */
void enter_idle(void)
{
__get_cpu_var(idle_state) = CPU_IDLE;
notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
/* Unconditionally mark not-idle and fire IDLE_END. */
static void __exit_idle(void)
{
__get_cpu_var(idle_state) = CPU_NOT_IDLE;
notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
/* Called from interrupts to signify idle end */
void exit_idle(void)
{
/* Bitwise '|' is intentional: both operands are non-negative, so this is
 * a branch-free OR.  Bail unless we interrupted the idle task (pid 0,
 * presumably -- confirm) at the outermost interrupt level (irqcount 0
 * from the PDA). */
if (current->pid | read_pda(irqcount))
return;
__exit_idle();
}
/* /*
* We use this if we don't have any better * We use this if we don't have any better
* idle routine.. * idle routine..
...@@ -180,7 +226,9 @@ void cpu_idle (void) ...@@ -180,7 +226,9 @@ void cpu_idle (void)
idle = default_idle; idle = default_idle;
if (cpu_is_offline(smp_processor_id())) if (cpu_is_offline(smp_processor_id()))
play_dead(); play_dead();
enter_idle();
idle(); idle();
__exit_idle();
} }
preempt_enable_no_resched(); preempt_enable_no_resched();
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/apicdef.h> #include <asm/apicdef.h>
#include <asm/idle.h>
/* /*
* Smarter SMP flushing macros. * Smarter SMP flushing macros.
...@@ -512,6 +513,7 @@ asmlinkage void smp_call_function_interrupt(void) ...@@ -512,6 +513,7 @@ asmlinkage void smp_call_function_interrupt(void)
/* /*
* At this point the info structure may be out of scope unless wait==1 * At this point the info structure may be out of scope unless wait==1
*/ */
exit_idle();
irq_enter(); irq_enter();
(*func)(info); (*func)(info);
irq_exit(); irq_exit();
......
/*
 * asm-x86_64/idle.h: idle notifier interface.  Notifiers registered here
 * are called with IDLE_START when a CPU enters idle and IDLE_END when it
 * leaves (possibly from interrupt context via exit_idle()).
 */
#ifndef _ASM_X86_64_IDLE_H
#define _ASM_X86_64_IDLE_H 1
/* Event codes passed as the 'val' argument to idle notifier callbacks. */
#define IDLE_START 1
#define IDLE_END 2
/* Forward declaration only; include <linux/notifier.h> to define blocks. */
struct notifier_block;
void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
/* enter_idle: mark CPU idle and notify; exit_idle: interrupt-safe idle end. */
void enter_idle(void);
void exit_idle(void);
#endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册