提交 2509940f 编写于 作者: A Alex Shi 提交者: Ingo Molnar

sched/cfs_rq: Change atomic64_t removed_load to atomic_long_t

Similar to the runnable_load_avg and blocked_load_avg variables, the long
type is enough for removed_load on both 64-bit and 32-bit machines.

Then we avoid the expensive atomic64 operations on 32-bit machines.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Reviewed-by: Paul Turner <pjt@google.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-12-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 bf5b986e
...@@ -1517,8 +1517,9 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) ...@@ -1517,8 +1517,9 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
if (!decays && !force_update) if (!decays && !force_update)
return; return;
if (atomic64_read(&cfs_rq->removed_load)) { if (atomic_long_read(&cfs_rq->removed_load)) {
u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0); unsigned long removed_load;
removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
subtract_blocked_load_contrib(cfs_rq, removed_load); subtract_blocked_load_contrib(cfs_rq, removed_load);
} }
...@@ -3480,7 +3481,8 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu) ...@@ -3480,7 +3481,8 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
*/ */
if (se->avg.decay_count) { if (se->avg.decay_count) {
se->avg.decay_count = -__synchronize_entity_decay(se); se->avg.decay_count = -__synchronize_entity_decay(se);
atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load); atomic_long_add(se->avg.load_avg_contrib,
&cfs_rq->removed_load);
} }
} }
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
...@@ -5942,7 +5944,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq) ...@@ -5942,7 +5944,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
atomic64_set(&cfs_rq->decay_counter, 1); atomic64_set(&cfs_rq->decay_counter, 1);
atomic64_set(&cfs_rq->removed_load, 0); atomic_long_set(&cfs_rq->removed_load, 0);
#endif #endif
} }
......
...@@ -278,8 +278,9 @@ struct cfs_rq { ...@@ -278,8 +278,9 @@ struct cfs_rq {
* the FAIR_GROUP_SCHED case). * the FAIR_GROUP_SCHED case).
*/ */
unsigned long runnable_load_avg, blocked_load_avg; unsigned long runnable_load_avg, blocked_load_avg;
atomic64_t decay_counter, removed_load; atomic64_t decay_counter;
u64 last_decay; u64 last_decay;
atomic_long_t removed_load;
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_FAIR_GROUP_SCHED
/* Required to track per-cpu representation of a task_group */ /* Required to track per-cpu representation of a task_group */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册