提交 495ab9c0 编写于 作者: Andi Kleen 提交者: Linus Torvalds

[PATCH] i386/x86-64/ia64: Move polling flag into thread_info_status

During some profiling I noticed that default_idle causes a lot of
memory traffic. I think that is caused by the atomic operations
to clear/set the polling flag in thread_info. There is actually
no reason to make this atomic - only the idle thread does it
to itself, other CPUs only read it. So I moved it into ti->status.

Converted i386/x86-64/ia64 for now because that was the easiest
way to fix ACPI which also manipulates these flags in its idle
function.

Cc: Nick Piggin <npiggin@novell.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Len Brown <len.brown@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 d9005b52
@@ -764,9 +764,9 @@ static int apm_do_idle(void)
 	int idled = 0;
 	int polling;
 
-	polling = test_thread_flag(TIF_POLLING_NRFLAG);
+	polling = !!(current_thread_info()->status & TS_POLLING);
 	if (polling) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status &= ~TS_POLLING;
 		smp_mb__after_clear_bit();
 	}
 	if (!need_resched()) {
@@ -774,7 +774,7 @@ static int apm_do_idle(void)
 		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
 	}
 	if (polling)
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 
 	if (!idled)
 		return 0;
......
@@ -102,7 +102,7 @@ void default_idle(void)
 	local_irq_enable();
 
 	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status &= ~TS_POLLING;
 		smp_mb__after_clear_bit();
 		while (!need_resched()) {
 			local_irq_disable();
@@ -111,7 +111,7 @@ void default_idle(void)
 			else
 				local_irq_enable();
 		}
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 	} else {
 		while (!need_resched())
 			cpu_relax();
@@ -174,7 +174,7 @@ void cpu_idle(void)
 {
 	int cpu = smp_processor_id();
 
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status |= TS_POLLING;
 
 	/* endless idle loop with no priority at all */
 	while (1) {
......
@@ -272,9 +272,9 @@ cpu_idle (void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		if (can_do_pal_halt)
-			clear_thread_flag(TIF_POLLING_NRFLAG);
+			current_thread_info()->status &= ~TS_POLLING;
 		else
-			set_thread_flag(TIF_POLLING_NRFLAG);
+			current_thread_info()->status |= TS_POLLING;
 
 		if (!need_resched()) {
 			void (*idle)(void);
......
@@ -110,7 +110,7 @@ static void default_idle(void)
 {
 	local_irq_enable();
 
-	clear_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status &= ~TS_POLLING;
 	smp_mb__after_clear_bit();
 	while (!need_resched()) {
 		local_irq_disable();
@@ -119,7 +119,7 @@ static void default_idle(void)
 		else
 			local_irq_enable();
 	}
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status |= TS_POLLING;
 }
 
 /*
@@ -202,8 +202,7 @@ static inline void play_dead(void)
  */
 void cpu_idle (void)
 {
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
+	current_thread_info()->status |= TS_POLLING;
 	/* endless idle loop with no priority at all */
 	while (1) {
 		while (!need_resched()) {
......
@@ -206,11 +206,11 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 
 static void acpi_safe_halt(void)
 {
-	clear_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status &= ~TS_POLLING;
 	smp_mb__after_clear_bit();
 	if (!need_resched())
 		safe_halt();
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status |= TS_POLLING;
 }
 
 static atomic_t c3_cpu_count;
@@ -330,10 +330,10 @@ static void acpi_processor_idle(void)
 	 * Invoke the current Cx state to put the processor to sleep.
 	 */
 	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status &= ~TS_POLLING;
 		smp_mb__after_clear_bit();
 		if (need_resched()) {
-			set_thread_flag(TIF_POLLING_NRFLAG);
+			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
 			return;
 		}
@@ -371,7 +371,7 @@ static void acpi_processor_idle(void)
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Re-enable interrupts */
 		local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
@@ -411,7 +411,7 @@ static void acpi_processor_idle(void)
 		/* Re-enable interrupts */
 		local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
......
@@ -140,8 +140,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
-#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE		17
+#define TIF_MEMDIE		16
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -153,7 +152,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
@@ -170,6 +168,9 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
  * have to worry about atomic accesses.
  */
 #define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
+#define TS_POLLING		0x0002	/* True if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
......
@@ -27,6 +27,7 @@ struct thread_info {
 	__u32 flags;			/* thread_info flags (see TIF_*) */
 	__u32 cpu;			/* current CPU */
 	__u32 last_cpu;			/* Last CPU thread ran on */
+	__u32 status;			/* Thread synchronous flags */
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
@@ -103,4 +104,8 @@ struct thread_info {
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
 
+#define TS_POLLING		1	/* true if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+
 #endif /* _ASM_IA64_THREAD_INFO_H */
@@ -101,7 +101,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TIF_IRET		5	/* force IRET */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
-#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+/* 16 free */
 #define TIF_IA32		17	/* 32bit process */
 #define TIF_FORK		18	/* ret_from_fork */
 #define TIF_ABI_PENDING		19
@@ -115,7 +115,6 @@ static inline struct thread_info *stack_thread_info(void)
 #define _TIF_IRET		(1<<TIF_IRET)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
-#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_IA32		(1<<TIF_IA32)
 #define _TIF_FORK		(1<<TIF_FORK)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
@@ -137,6 +136,9 @@ static inline struct thread_info *stack_thread_info(void)
  */
 #define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
 #define TS_COMPAT		0x0002	/* 32bit syscall active */
+#define TS_POLLING		0x0004	/* true if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
......
@@ -818,6 +818,11 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
  * the target CPU.
  */
 #ifdef CONFIG_SMP
+
+#ifndef tsk_is_polling
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#endif
+
 static void resched_task(task_t *p)
 {
 	int cpu;
@@ -833,9 +838,9 @@ static void resched_task(task_t *p)
 	if (cpu == smp_processor_id())
 		return;
 
-	/* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+	/* NEED_RESCHED must be visible before we test polling */
 	smp_mb();
-	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+	if (!tsk_is_polling(p))
 		smp_send_reschedule(cpu);
 }
 #else
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册