提交 917b627d 编写于 作者: G Gregory Haskins

sched: create "pushable_tasks" list to limit pushing to one attempt

The RT scheduler employs a "push/pull" design to actively balance tasks
within the system (on a per disjoint cpuset basis).  When a task is
awoken, it is immediately determined if there are any lower priority
cpus which should be preempted.  This is opposed to the way normal
SCHED_OTHER tasks behave, which will wait for a periodic rebalancing
operation to occur before spreading out load.

When a particular RQ has more than 1 active RT task, it is said to
be in an "overloaded" state.  Once this occurs, the system enters
the active balancing mode, where it will try to push the task away,
or persuade a different cpu to pull it over.  The system will stay
in this state until the number of queued RT tasks falls back to
<= 1 per RQ.

However, the current implementation suffers from a limitation in the
push logic.  Once overloaded, all tasks (other than current) on the
RQ are analyzed on every push operation, even if it was previously
unpushable (due to affinity, etc.).  What's more, the operation stops
at the first task that is unpushable and will not look at items
lower in the queue.  This causes two problems:

1) We can have the same tasks analyzed over and over again during each
   push, which extends out the fast path in the scheduler for no
   gain.  Consider a RQ that has dozens of tasks that are bound to a
   core.  Each one of those tasks will be encountered and skipped
   for each push operation while they are queued.

2) There may be lower-priority tasks under the unpushable task that
   could have been successfully pushed, but will never be considered
   until either the unpushable task is cleared, or a pull operation
   succeeds.  The net result is a potential latency source for mid
   priority tasks.

This patch aims to rectify these two conditions by introducing a new
priority sorted list: "pushable_tasks".  A task is added to the list
each time a task is activated or preempted.  It is removed from the
list any time it is deactivated, made current, or fails to push.

This works because a task only needs to be attempted to push once.
After an initial failure to push, the other cpus will eventually try to
pull the task when the conditions are proper.  This also solves the
problem that we don't completely analyze all tasks due to encountering
an unpushable task.  Now every task will have a push attempted (when
appropriate).

This reduces latency both by shortening the critical section of the
rq->lock for certain workloads, and by making sure the algorithm
considers all eligible tasks in the system.

[ rostedt: added a couple more BUG_ONs ]
Signed-off-by: NGregory Haskins <ghaskins@novell.com>
Acked-by: NSteven Rostedt <srostedt@redhat.com>
上级 4075134e
...@@ -140,6 +140,7 @@ extern struct group_info init_groups; ...@@ -140,6 +140,7 @@ extern struct group_info init_groups;
.nr_cpus_allowed = NR_CPUS, \ .nr_cpus_allowed = NR_CPUS, \
}, \ }, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \ .tasks = LIST_HEAD_INIT(tsk.tasks), \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \ .real_parent = &tsk, \
......
...@@ -1179,6 +1179,7 @@ struct task_struct { ...@@ -1179,6 +1179,7 @@ struct task_struct {
#endif #endif
struct list_head tasks; struct list_head tasks;
struct plist_node pushable_tasks;
struct mm_struct *mm, *active_mm; struct mm_struct *mm, *active_mm;
......
...@@ -471,6 +471,7 @@ struct rt_rq { ...@@ -471,6 +471,7 @@ struct rt_rq {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
unsigned long rt_nr_migratory; unsigned long rt_nr_migratory;
int overloaded; int overloaded;
struct plist_head pushable_tasks;
#endif #endif
int rt_throttled; int rt_throttled;
u64 rt_time; u64 rt_time;
...@@ -2481,6 +2482,8 @@ void sched_fork(struct task_struct *p, int clone_flags) ...@@ -2481,6 +2482,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
/* Want to start with kernel preemption disabled. */ /* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1; task_thread_info(p)->preempt_count = 1;
#endif #endif
plist_node_init(&p->pushable_tasks, MAX_PRIO);
put_cpu(); put_cpu();
} }
...@@ -8237,6 +8240,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) ...@@ -8237,6 +8240,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0; rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0; rt_rq->overloaded = 0;
plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
#endif #endif
rt_rq->rt_time = 0; rt_rq->rt_time = 0;
......
...@@ -49,6 +49,24 @@ static void update_rt_migration(struct rq *rq) ...@@ -49,6 +49,24 @@ static void update_rt_migration(struct rq *rq)
rq->rt.overloaded = 0; rq->rt.overloaded = 0;
} }
} }
/*
 * enqueue_pushable_task - (re)insert @p into @rq's priority-sorted
 * "pushable_tasks" plist, keyed on p->prio.
 *
 * push_rt_task() takes the head of this list (via
 * pick_next_pushable_task()) as its next migration candidate.  The
 * node is deleted and re-initialized before insertion so the entry is
 * re-keyed if p->prio changed since it was last queued.
 * NOTE(review): assumes plist_del() is harmless on a node that is not
 * currently on the list -- confirm against the plist implementation.
 */
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
/* drop any stale entry; the priority key may have changed */
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* re-key the node with the task's current priority */
plist_node_init(&p->pushable_tasks, p->prio);
plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}
/*
 * dequeue_pushable_task - remove @p from @rq's "pushable_tasks" plist.
 * Callers in this patch invoke it when the task is dequeued, becomes
 * current, or fails to push, so it stops being a push candidate.
 */
static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}
#else
#define enqueue_pushable_task(rq, p) do { } while (0)
#define dequeue_pushable_task(rq, p) do { } while (0)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
...@@ -751,6 +769,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) ...@@ -751,6 +769,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
enqueue_rt_entity(rt_se); enqueue_rt_entity(rt_se);
if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
inc_cpu_load(rq, p->se.load.weight); inc_cpu_load(rq, p->se.load.weight);
} }
...@@ -761,6 +782,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) ...@@ -761,6 +782,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
update_curr_rt(rq); update_curr_rt(rq);
dequeue_rt_entity(rt_se); dequeue_rt_entity(rt_se);
dequeue_pushable_task(rq, p);
dec_cpu_load(rq, p->se.load.weight); dec_cpu_load(rq, p->se.load.weight);
} }
...@@ -911,7 +934,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, ...@@ -911,7 +934,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
return next; return next;
} }
static struct task_struct *pick_next_task_rt(struct rq *rq) static struct task_struct *_pick_next_task_rt(struct rq *rq)
{ {
struct sched_rt_entity *rt_se; struct sched_rt_entity *rt_se;
struct task_struct *p; struct task_struct *p;
...@@ -933,6 +956,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq) ...@@ -933,6 +956,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
p = rt_task_of(rt_se); p = rt_task_of(rt_se);
p->se.exec_start = rq->clock; p->se.exec_start = rq->clock;
return p;
}
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
struct task_struct *p = _pick_next_task_rt(rq);
/* The running task is never eligible for pushing */
if (p)
dequeue_pushable_task(rq, p);
return p; return p;
} }
...@@ -940,6 +975,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) ...@@ -940,6 +975,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{ {
update_curr_rt(rq); update_curr_rt(rq);
p->se.exec_start = 0; p->se.exec_start = 0;
/*
* The previous task needs to be made eligible for pushing
* if it is still active
*/
if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -1116,6 +1158,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) ...@@ -1116,6 +1158,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
return lowest_rq; return lowest_rq;
} }
/*
 * has_pushable_tasks - nonzero if @rq has at least one queued RT task
 * that is a candidate for being pushed to another cpu.
 */
static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}
/*
 * pick_next_pushable_task - return the best push candidate on @rq, or
 * NULL if the pushable list is empty.
 *
 * The plist is priority sorted, so the first entry is the
 * highest-priority pushable task.  The BUG_ONs assert the list's
 * invariants: every queued task belongs to this rq, is not the running
 * task, is allowed on more than one cpu, is on the runqueue, and is an
 * RT task.
 */
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->rt.nr_cpus_allowed <= 1);
BUG_ON(!p->se.on_rq);
BUG_ON(!rt_task(p));
return p;
}
/* /*
* If the current CPU has more than one RT task, see if the non * If the current CPU has more than one RT task, see if the non
* running task can migrate over to a CPU that is running a task * running task can migrate over to a CPU that is running a task
...@@ -1125,13 +1192,12 @@ static int push_rt_task(struct rq *rq) ...@@ -1125,13 +1192,12 @@ static int push_rt_task(struct rq *rq)
{ {
struct task_struct *next_task; struct task_struct *next_task;
struct rq *lowest_rq; struct rq *lowest_rq;
int ret = 0;
int paranoid = RT_MAX_TRIES; int paranoid = RT_MAX_TRIES;
if (!rq->rt.overloaded) if (!rq->rt.overloaded)
return 0; return 0;
next_task = pick_next_highest_task_rt(rq, -1); next_task = pick_next_pushable_task(rq);
if (!next_task) if (!next_task)
return 0; return 0;
...@@ -1163,12 +1229,19 @@ static int push_rt_task(struct rq *rq) ...@@ -1163,12 +1229,19 @@ static int push_rt_task(struct rq *rq)
* so it is possible that next_task has changed. * so it is possible that next_task has changed.
* If it has, then try again. * If it has, then try again.
*/ */
task = pick_next_highest_task_rt(rq, -1); task = pick_next_pushable_task(rq);
if (unlikely(task != next_task) && task && paranoid--) { if (unlikely(task != next_task) && task && paranoid--) {
put_task_struct(next_task); put_task_struct(next_task);
next_task = task; next_task = task;
goto retry; goto retry;
} }
/*
* Once we have failed to push this task, we will not
* try again, since the other cpus will pull from us
* when they are ready
*/
dequeue_pushable_task(rq, next_task);
goto out; goto out;
} }
...@@ -1180,23 +1253,12 @@ static int push_rt_task(struct rq *rq) ...@@ -1180,23 +1253,12 @@ static int push_rt_task(struct rq *rq)
double_unlock_balance(rq, lowest_rq); double_unlock_balance(rq, lowest_rq);
ret = 1;
out: out:
put_task_struct(next_task); put_task_struct(next_task);
return ret; return 1;
} }
/*
* TODO: Currently we just use the second highest prio task on
* the queue, and stop when it can't migrate (or there's
* no more RT tasks). There may be a case where a lower
* priority RT task has a different affinity than the
* higher RT task. In this case the lower RT task could
* possibly be able to migrate where as the higher priority
* RT task could not. We currently ignore this issue.
* Enhancements are welcome!
*/
static void push_rt_tasks(struct rq *rq) static void push_rt_tasks(struct rq *rq)
{ {
/* push_rt_task will return true if it moved an RT */ /* push_rt_task will return true if it moved an RT */
...@@ -1295,7 +1357,7 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) ...@@ -1295,7 +1357,7 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
*/ */
static int needs_post_schedule_rt(struct rq *rq) static int needs_post_schedule_rt(struct rq *rq)
{ {
return rq->rt.overloaded ? 1 : 0; return has_pushable_tasks(rq);
} }
static void post_schedule_rt(struct rq *rq) static void post_schedule_rt(struct rq *rq)
...@@ -1317,7 +1379,7 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p) ...@@ -1317,7 +1379,7 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{ {
if (!task_running(rq, p) && if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) && !test_tsk_need_resched(rq->curr) &&
rq->rt.overloaded && has_pushable_tasks(rq) &&
p->rt.nr_cpus_allowed > 1) p->rt.nr_cpus_allowed > 1)
push_rt_tasks(rq); push_rt_tasks(rq);
} }
...@@ -1354,6 +1416,24 @@ static void set_cpus_allowed_rt(struct task_struct *p, ...@@ -1354,6 +1416,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
struct rq *rq = task_rq(p); struct rq *rq = task_rq(p);
if (!task_current(rq, p)) {
/*
* Make sure we dequeue this task from the pushable list
* before going further. It will either remain off of
* the list because we are no longer pushable, or it
* will be requeued.
*/
if (p->rt.nr_cpus_allowed > 1)
dequeue_pushable_task(rq, p);
/*
* Requeue if our weight is changing and still > 1
*/
if (weight > 1)
enqueue_pushable_task(rq, p);
}
if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
rq->rt.rt_nr_migratory++; rq->rt.rt_nr_migratory++;
} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
...@@ -1538,6 +1618,9 @@ static void set_curr_task_rt(struct rq *rq) ...@@ -1538,6 +1618,9 @@ static void set_curr_task_rt(struct rq *rq)
struct task_struct *p = rq->curr; struct task_struct *p = rq->curr;
p->se.exec_start = rq->clock; p->se.exec_start = rq->clock;
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
} }
static const struct sched_class rt_sched_class = { static const struct sched_class rt_sched_class = {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册