Commit a64692a3 authored by Mike Galbraith, committed by Ingo Molnar

sched: Cleanup/optimize clock updates

Now that we no longer depend on the clock being updated prior to enqueueing
on migratory wakeup, we can clean up a bit, placing calls to update_rq_clock()
exactly where they are needed, ie on enqueue, dequeue and schedule events.

In the case of a freshly enqueued task immediately preempting, we can skip the
update during preemption, as the clock was just updated by the enqueue event.
We also save an unneeded call during a migratory wakeup by not updating the
previous runqueue, where update_curr() won't be invoked.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301199.6785.32.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent e12f31d3
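
Before the diff, here is a minimal standalone sketch of the mechanism the patch introduces. It is not kernel code: struct rq, fake_sched_clock(), check_preempt() and put_prev_task() below are simplified stand-ins. It only models how a skip_clock_update flag, set when a queue event is about to trigger a reschedule, turns the immediately following clock update in the schedule path into a no-op before being cleared.

/*
 * Minimal sketch (not kernel code) of the skip_clock_update idea:
 * a queue event that is about to cause a reschedule marks the clock
 * as fresh, so the back-to-back update in the schedule path is skipped.
 */
#include <stdio.h>

struct rq {
	unsigned long long clock;
	unsigned int skip_clock_update;
};

/* Stand-in for sched_clock_cpu(): a monotonically increasing counter. */
static unsigned long long fake_sched_clock(void)
{
	static unsigned long long now;
	now += 1000;
	return now;
}

static void update_rq_clock(struct rq *rq)
{
	if (!rq->skip_clock_update)
		rq->clock = fake_sched_clock();
}

/*
 * Queue event that decides to preempt: the clock was just updated by
 * the enqueue path, so flag the next update as redundant.
 */
static void check_preempt(struct rq *rq, int need_resched)
{
	if (need_resched)
		rq->skip_clock_update = 1;
}

/* Schedule path: skip the redundant update, then clear the flag. */
static void put_prev_task(struct rq *rq)
{
	update_rq_clock(rq);	/* no-op while skip_clock_update is set */
	rq->skip_clock_update = 0;
}

int main(void)
{
	struct rq rq = { 0, 0 };

	update_rq_clock(&rq);	/* enqueue event: clock advances */
	check_preempt(&rq, 1);	/* wakeup preempts the current task */
	put_prev_task(&rq);	/* schedule: duplicate update skipped */

	printf("clock=%llu, skip_clock_update=%u\n",
	       rq.clock, rq.skip_clock_update);
	return 0;
}

In the real patch, as the diff below shows, the flag is set in check_preempt_curr() only when test_tsk_need_resched(p) is true, and it is consumed and cleared in put_prev_task().
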
@@ -495,6 +495,8 @@ struct rq {
 	u64 nohz_stamp;
 	unsigned char in_nohz_recently;
 #endif
+	unsigned int skip_clock_update;
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -592,6 +594,13 @@ static inline
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
 	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+
+	/*
+	 * A queue event has occurred, and we're going to schedule. In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(p))
+		rq->skip_clock_update = 1;
 }
 
 static inline int cpu_of(struct rq *rq)
@@ -626,6 +635,7 @@ static inline int cpu_of(struct rq *rq)
 inline void update_rq_clock(struct rq *rq)
 {
-	rq->clock = sched_clock_cpu(cpu_of(rq));
+	if (!rq->skip_clock_update)
+		rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -1782,8 +1792,6 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
-	update_rq_clock(rq1);
-	update_rq_clock(rq2);
 }
 
 /*
@@ -1880,6 +1888,7 @@ static void update_avg(u64 *avg, u64 sample)
 static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
+	update_rq_clock(rq);
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
@@ -1887,6 +1896,7 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
+	update_rq_clock(rq);
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
@@ -2366,7 +2376,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	smp_wmb();
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2407,7 +2416,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	rq = cpu_rq(cpu);
 	raw_spin_lock(&rq->lock);
-	update_rq_clock(rq);
 	/*
 	 * We migrated the task without holding either rq->lock, however
@@ -2624,7 +2632,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	BUG_ON(p->state != TASK_WAKING);
 	p->state = TASK_RUNNING;
-	update_rq_clock(rq);
 	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
@@ -3578,6 +3585,9 @@ static inline void schedule_debug(struct task_struct *prev)
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
+	if (prev->se.on_rq)
+		update_rq_clock(rq);
+	rq->skip_clock_update = 0;
 	prev->sched_class->put_prev_task(rq, prev);
 }
@@ -3640,7 +3650,6 @@ asmlinkage void __sched schedule(void)
 		hrtick_clear(rq);
 	raw_spin_lock_irq(&rq->lock);
-	update_rq_clock(rq);
 	clear_tsk_need_resched(prev);
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -4197,7 +4206,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
@@ -4240,7 +4248,6 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -4523,7 +4530,6 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
-	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
@@ -5530,7 +5536,6 @@ void sched_idle_next(void)
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
-	update_rq_clock(rq);
 	activate_task(rq, p, 0);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -5585,7 +5590,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 	for ( ; ; ) {
 		if (!rq->nr_running)
 			break;
-		update_rq_clock(rq);
 		next = pick_next_task(rq);
 		if (!next)
 			break;
@@ -5869,7 +5873,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		raw_spin_lock_irq(&rq->lock);
-		update_rq_clock(rq);
 		deactivate_task(rq, rq->idle, 0);
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
@@ -7815,7 +7818,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 {
 	int on_rq;
-	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	if (on_rq)
 		deactivate_task(rq, p, 0);
@@ -8177,8 +8179,6 @@ void sched_move_task(struct task_struct *tsk)
 	rq = task_rq_lock(tsk, &flags);
-	update_rq_clock(rq);
-
 	running = task_current(rq, tsk);
 	on_rq = tsk->se.on_rq;
...
@@ -3064,8 +3064,6 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	/* move a task from busiest_rq to target_rq */
 	double_lock_balance(busiest_rq, target_rq);
-	update_rq_clock(busiest_rq);
-	update_rq_clock(target_rq);
 	/* Search for an sd spanning us and the target CPU. */
 	for_each_domain(target_cpu, sd) {
...