提交 11d4afd4 编写于 作者: V Vincent Guittot 提交者: Ingo Molnar

sched/pelt: Fix warning and clean up IRQ PELT config

Create a config for enabling irq load tracking in the scheduler.
irq load tracking is useful only when irq or paravirtual time is
accounted but it's only possible with SMP for now.

Also use __maybe_unused to remove the compilation warning in
update_rq_clock_task() that has been introduced by:

  2e62c474 ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
Suggested-by: Ingo Molnar <mingo@redhat.com>
Reported-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Reported-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@alien8.de
Cc: dou_liyang@163.com
Fixes: 2e62c474 ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
Link: http://lkml.kernel.org/r/1537867062-27285-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 b429f71b
...@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING ...@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
If in doubt, say N here. If in doubt, say N here.
config HAVE_SCHED_AVG_IRQ
def_bool y
depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
depends on SMP
config BSD_PROCESS_ACCT config BSD_PROCESS_ACCT
bool "BSD Process Accounting" bool "BSD Process Accounting"
depends on MULTIUSER depends on MULTIUSER
......
...@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) ...@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
* In theory, the compile should just see 0 here, and optimize out the call * In theory, the compile should just see 0 here, and optimize out the call
* to sched_rt_avg_update. But I don't trust it... * to sched_rt_avg_update. But I don't trust it...
*/ */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) s64 __maybe_unused steal = 0, irq_delta = 0;
s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING #ifdef CONFIG_IRQ_TIME_ACCOUNTING
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
...@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) ...@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
rq->clock_task += delta; rq->clock_task += delta;
#ifdef HAVE_SCHED_AVG_IRQ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
update_irq_load_avg(rq, irq_delta + steal); update_irq_load_avg(rq, irq_delta + steal);
#endif #endif
......
...@@ -7317,7 +7317,7 @@ static inline bool others_have_blocked(struct rq *rq) ...@@ -7317,7 +7317,7 @@ static inline bool others_have_blocked(struct rq *rq)
if (READ_ONCE(rq->avg_dl.util_avg)) if (READ_ONCE(rq->avg_dl.util_avg))
return true; return true;
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if (READ_ONCE(rq->avg_irq.util_avg)) if (READ_ONCE(rq->avg_irq.util_avg))
return true; return true;
#endif #endif
......
...@@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) ...@@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
return 0; return 0;
} }
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/* /*
* irq: * irq:
* *
......
...@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq); ...@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running); int update_irq_load_avg(struct rq *rq, u64 running);
#else #else
static inline int static inline int
......
...@@ -862,8 +862,7 @@ struct rq { ...@@ -862,8 +862,7 @@ struct rq {
struct sched_avg avg_rt; struct sched_avg avg_rt;
struct sched_avg avg_dl; struct sched_avg avg_dl;
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
#define HAVE_SCHED_AVG_IRQ
struct sched_avg avg_irq; struct sched_avg avg_irq;
#endif #endif
u64 idle_stamp; u64 idle_stamp;
...@@ -2223,7 +2222,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq) ...@@ -2223,7 +2222,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
} }
#endif #endif
#ifdef HAVE_SCHED_AVG_IRQ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq) static inline unsigned long cpu_util_irq(struct rq *rq)
{ {
return rq->avg_irq.util_avg; return rq->avg_irq.util_avg;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册