Commit a9957449 authored by Alexey Dobriyan, committed by Ingo Molnar

sched: uninline scheduler

* save ~300 bytes
* activate_idle_task() was moved to avoid a warning

bloat-o-meter output:

add/remove: 6/0 grow/shrink: 0/16 up/down: 438/-733 (-295)		<===
function                                     old     new   delta
__enqueue_entity                               -     165    +165
finish_task_switch                             -     110    +110
update_curr_rt                                 -      79     +79
__load_balance_iterator                        -      32     +32
__task_rq_unlock                               -      28     +28
find_process_by_pid                            -      24     +24
do_sched_setscheduler                        133     123     -10
sys_sched_rr_get_interval                    176     165     -11
sys_sched_getparam                           156     145     -11
normalize_rt_tasks                           482     470     -12
sched_getaffinity                            112      99     -13
sys_sched_getscheduler                        86      72     -14
sched_setaffinity                            226     212     -14
sched_setscheduler                           666     642     -24
load_balance_start_fair                       33       9     -24
load_balance_next_fair                        33       9     -24
dequeue_task_rt                              133      67     -66
put_prev_task_rt                              97      28     -69
schedule_tail                                133      50     -83
schedule                                     682     594     -88
enqueue_entity                               499     366    -133
task_new_fair                                317     180    -137
Signed-off-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 155bb293
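The table in the commit message is standard bloat-o-meter output: the kernel's scripts/bloat-o-meter tool diffs per-symbol sizes between two builds and reports the net text-size change. A typical invocation from the kernel tree (the vmlinux file names here are only illustrative) would be:

    ./scripts/bloat-o-meter vmlinux.before vmlinux.after

where vmlinux.before and vmlinux.after are the images built without and with the patch; the "(-295)" figure is the net delta in bytes, matching the "save ~300 bytes" claim above.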
@@ -608,7 +608,7 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	return rq;
 }
 
-static inline void __task_rq_unlock(struct rq *rq)
+static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
 	spin_unlock(&rq->lock);
@@ -623,7 +623,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
-static inline struct rq *this_rq_lock(void)
+static struct rq *this_rq_lock(void)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
@@ -985,20 +985,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 	inc_nr_running(p, rq);
 }
 
-/*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-	update_rq_clock(rq);
-
-	if (p->state == TASK_UNINTERRUPTIBLE)
-		rq->nr_uninterruptible--;
-
-	enqueue_task(rq, p, 0);
-	inc_nr_running(p, rq);
-}
-
 /*
  * deactivate_task - remove a task from the runqueue.
  */
@@ -1206,7 +1192,7 @@ void kick_process(struct task_struct *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static unsigned long source_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1221,7 +1207,7 @@ static inline unsigned long source_load(int cpu, int type)
  * Return a high guess at the load of a migration-target cpu weighted
  * according to the scheduling class and "nice" value.
  */
-static inline unsigned long target_load(int cpu, int type)
+static unsigned long target_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1813,7 +1799,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
@@ -3020,7 +3006,7 @@ static DEFINE_SPINLOCK(balancing);
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
@@ -4140,7 +4126,7 @@ struct task_struct *idle_task(int cpu)
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
@@ -5156,6 +5142,20 @@ static void migrate_live_tasks(int src_cpu)
 	write_unlock_irq(&tasklist_lock);
 }
 
+/*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+	update_rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, 0);
+	inc_nr_running(p, rq);
+}
+
 /*
  * Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
@@ -6494,7 +6494,7 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
 #ifdef CONFIG_FAIR_GROUP_SCHED
...
@@ -892,7 +892,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * achieve that by always pre-iterating before returning
  * the current task:
  */
-static inline struct task_struct *
+static struct task_struct *
 __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
 {
 	struct task_struct *p;
...
@@ -7,7 +7,7 @@
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
-static inline void update_curr_rt(struct rq *rq)
+static void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	u64 delta_exec;
...
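Every hunk above follows the same pattern: the inline keyword is dropped from a static function so the compiler can emit a single out-of-line copy instead of (potentially) duplicating the body at each call site. A minimal stand-alone sketch of that trade-off, using a hypothetical helper that is not part of the scheduler code:

#include <stdio.h>

/*
 * Illustrative only, not from the patch: the same helper written both
 * ways.  With "static inline" the compiler is encouraged to paste the
 * body into every caller; with plain "static" it is free to emit one
 * out-of-line copy and call it, which is the size-for-speed trade the
 * patch makes for the scheduler functions above.
 */
static inline int clamp_inline(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int clamp_uninlined(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Both return the same result; only the generated code differs. */
	printf("%d %d\n", clamp_inline(15, 0, 10), clamp_uninlined(15, 0, 10));
	return 0;
}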