Commit 55e12e5e, authored by Dhaval Giani and committed by Ingo Molnar

sched: make sched_{rt,fair}.c ifdefs more readable

Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: f5bfb7d9
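The change below annotates the #else and #endif of long preprocessor conditionals with a comment naming the controlling config option, so a reader landing in the middle of the file can tell which #ifdef is being closed without scrolling back. A minimal sketch of the pattern, using a hypothetical CONFIG_EXAMPLE_FEATURE and example_feature_init() that are not part of this patch:

#ifdef CONFIG_EXAMPLE_FEATURE
static void example_feature_init(void)
{
	/* real work when the option is compiled in */
}
#else /* !CONFIG_EXAMPLE_FEATURE */
static inline void example_feature_init(void)
{
	/* empty stub when the option is compiled out */
}
#endif /* CONFIG_EXAMPLE_FEATURE */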
sched_fair.c:

@@ -921,7 +921,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		hrtick_start(rq, delta, requeue);
 	}
 }
-#else
+#else /* !CONFIG_SCHED_HRTICK */
 static inline void
 hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {

@@ -1062,7 +1062,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 	}
 	return cpu;
 }
-#else
+#else /* !ARCH_HAS_SCHED_WAKE_IDLE*/
 static inline int wake_idle(int cpu, struct task_struct *p)
 {
 	return cpu;

@@ -1586,7 +1586,7 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
-#endif
+#endif /* CONFIG_SMP */
 /*
  * scheduler tick hitting a task of our scheduling class:

sched_rt.c:

@@ -161,7 +161,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 	return &rt_rq->tg->rt_bandwidth;
 }
-#else
+#else /* !CONFIG_RT_GROUP_SCHED */
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {

@@ -226,7 +226,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 	return &def_rt_bandwidth;
 }
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 #ifdef CONFIG_SMP
 static int do_balance_runtime(struct rt_rq *rt_rq)

@@ -374,12 +374,12 @@ static int balance_runtime(struct rt_rq *rt_rq)
 	return more;
 }
-#else
+#else /* !CONFIG_SMP */
 static inline int balance_runtime(struct rt_rq *rt_rq)
 {
 	return 0;
 }
-#endif
+#endif /* CONFIG_SMP */
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {

@@ -1472,4 +1472,4 @@ static void print_rt_stats(struct seq_file *m, int cpu)
 		print_rt_rq(m, cpu, rt_rq);
 	rcu_read_unlock();
 }
-#endif
+#endif /* CONFIG_SCHED_DEBUG */