Commit 2ed41a55 authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Optimize update_stats_*()

These functions are already gated by schedstats_enabled(), there is no
point in then issuing another static_branch for every individual
update in them.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent b85c8b71
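As a rough illustration (not part of the patch), here is a minimal userspace sketch of the pattern the commit message describes: the update_stats_*() helpers bail out early when schedstat_enabled() is false, so the individual updates inside them can use the raw __schedstat_*() variants and skip a redundant per-update check. A plain bool stands in for the kernel's static_branch_unlikely() jump label, and the struct and its field are illustrative only.

/* Sketch only: a plain bool replaces the kernel's static branch. */
#include <stdbool.h>
#include <stdio.h>

static bool sched_schedstats;                   /* the single runtime switch */

#define schedstat_enabled()     (sched_schedstats)
/* checked variant: tests the switch on every single update */
#define schedstat_set(var, val)         do { if (schedstat_enabled()) { (var) = (val); } } while (0)
/* raw variant: the caller has already tested the switch once */
#define __schedstat_set(var, val)       do { (var) = (val); } while (0)

struct sketch_stats { unsigned long long wait_start; };  /* illustrative */

static void update_stats_wait_start(struct sketch_stats *st, unsigned long long now)
{
        if (!schedstat_enabled())       /* one gate for the whole function... */
                return;

        __schedstat_set(st->wait_start, now);   /* ...so no per-update re-check */
}

int main(void)
{
        struct sketch_stats st = { 0 };

        sched_schedstats = true;
        update_stats_wait_start(&st, 12345);
        printf("wait_start = %llu\n", st.wait_start);
        return 0;
}

With the kernel's real jump label, schedstat_enabled() compiles down to a patched branch, so testing it once per function instead of once per statistic is exactly the saving this patch is after.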
@@ -871,7 +871,7 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
             likely(wait_start > prev_wait_start))
                 wait_start -= prev_wait_start;
 
-        schedstat_set(se->statistics.wait_start, wait_start);
+        __schedstat_set(se->statistics.wait_start, wait_start);
 }
 
 static inline void
@@ -893,17 +893,17 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
                         * time stamp can be adjusted to accumulate wait time
                         * prior to migration.
                         */
-                       schedstat_set(se->statistics.wait_start, delta);
+                       __schedstat_set(se->statistics.wait_start, delta);
                        return;
                }
                trace_sched_stat_wait(p, delta);
        }
 
-       schedstat_set(se->statistics.wait_max,
+       __schedstat_set(se->statistics.wait_max,
                      max(schedstat_val(se->statistics.wait_max), delta));
-       schedstat_inc(se->statistics.wait_count);
-       schedstat_add(se->statistics.wait_sum, delta);
-       schedstat_set(se->statistics.wait_start, 0);
+       __schedstat_inc(se->statistics.wait_count);
+       __schedstat_add(se->statistics.wait_sum, delta);
+       __schedstat_set(se->statistics.wait_start, 0);
 }
 
 static inline void
@@ -928,10 +928,10 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                        delta = 0;
 
                if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
-                       schedstat_set(se->statistics.sleep_max, delta);
+                       __schedstat_set(se->statistics.sleep_max, delta);
 
-               schedstat_set(se->statistics.sleep_start, 0);
-               schedstat_add(se->statistics.sum_sleep_runtime, delta);
+               __schedstat_set(se->statistics.sleep_start, 0);
+               __schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
                if (tsk) {
                        account_scheduler_latency(tsk, delta >> 10, 1);
@@ -945,15 +945,15 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                        delta = 0;
 
                if (unlikely(delta > schedstat_val(se->statistics.block_max)))
-                       schedstat_set(se->statistics.block_max, delta);
+                       __schedstat_set(se->statistics.block_max, delta);
 
-               schedstat_set(se->statistics.block_start, 0);
-               schedstat_add(se->statistics.sum_sleep_runtime, delta);
+               __schedstat_set(se->statistics.block_start, 0);
+               __schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
                if (tsk) {
                        if (tsk->in_iowait) {
-                               schedstat_add(se->statistics.iowait_sum, delta);
-                               schedstat_inc(se->statistics.iowait_count);
+                               __schedstat_add(se->statistics.iowait_sum, delta);
+                               __schedstat_inc(se->statistics.iowait_count);
                                trace_sched_stat_iowait(tsk, delta);
                        }
 
@@ -1012,10 +1012,10 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                struct task_struct *tsk = task_of(se);
 
                if (tsk->state & TASK_INTERRUPTIBLE)
-                       schedstat_set(se->statistics.sleep_start,
+                       __schedstat_set(se->statistics.sleep_start,
                                      rq_clock(rq_of(cfs_rq)));
                if (tsk->state & TASK_UNINTERRUPTIBLE)
-                       schedstat_set(se->statistics.block_start,
+                       __schedstat_set(se->statistics.block_start,
                                      rq_clock(rq_of(cfs_rq)));
        }
 }
@@ -33,7 +33,9 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 #define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
 #define __schedstat_inc(var) do { var++; } while (0)
 #define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
+#define __schedstat_add(var, amt) do { var += (amt); } while (0)
 #define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
+#define __schedstat_set(var, val) do { var = (val); } while (0)
 #define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
 #define schedstat_val(var) (var)
 #define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
@@ -51,7 +53,9 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 #define schedstat_enabled() 0
 #define __schedstat_inc(var) do { } while (0)
 #define schedstat_inc(var) do { } while (0)
+#define __schedstat_add(var, amt) do { } while (0)
 #define schedstat_add(var, amt) do { } while (0)
+#define __schedstat_set(var, val) do { } while (0)
 #define schedstat_set(var, val) do { } while (0)
 #define schedstat_val(var) 0
 #define schedstat_val_or_zero(var) 0