提交 2161573e 编写于 作者: Davidlohr Bueso 提交者: Linus Torvalds

sched/deadline: replace earliest dl and rq leftmost caching

... with the generic rbtree flavor instead. No changes
in semantics whatsoever.

Link: http://lkml.kernel.org/r/20170719014603.19029-9-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 bfb06889
...@@ -296,7 +296,7 @@ static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) ...@@ -296,7 +296,7 @@ static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{ {
struct sched_dl_entity *dl_se = &p->dl; struct sched_dl_entity *dl_se = &p->dl;
return dl_rq->rb_leftmost == &dl_se->rb_node; return dl_rq->root.rb_leftmost == &dl_se->rb_node;
} }
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime) void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
...@@ -320,7 +320,7 @@ void init_dl_bw(struct dl_bw *dl_b) ...@@ -320,7 +320,7 @@ void init_dl_bw(struct dl_bw *dl_b)
void init_dl_rq(struct dl_rq *dl_rq) void init_dl_rq(struct dl_rq *dl_rq)
{ {
dl_rq->rb_root = RB_ROOT; dl_rq->root = RB_ROOT_CACHED;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* zero means no -deadline tasks */ /* zero means no -deadline tasks */
...@@ -328,7 +328,7 @@ void init_dl_rq(struct dl_rq *dl_rq) ...@@ -328,7 +328,7 @@ void init_dl_rq(struct dl_rq *dl_rq)
dl_rq->dl_nr_migratory = 0; dl_rq->dl_nr_migratory = 0;
dl_rq->overloaded = 0; dl_rq->overloaded = 0;
dl_rq->pushable_dl_tasks_root = RB_ROOT; dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else #else
init_dl_bw(&dl_rq->dl_bw); init_dl_bw(&dl_rq->dl_bw);
#endif #endif
...@@ -410,10 +410,10 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) ...@@ -410,10 +410,10 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{ {
struct dl_rq *dl_rq = &rq->dl; struct dl_rq *dl_rq = &rq->dl;
struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node; struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
struct task_struct *entry; struct task_struct *entry;
int leftmost = 1; bool leftmost = true;
BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
...@@ -425,17 +425,16 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) ...@@ -425,17 +425,16 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
link = &parent->rb_left; link = &parent->rb_left;
else { else {
link = &parent->rb_right; link = &parent->rb_right;
leftmost = 0; leftmost = false;
} }
} }
if (leftmost) { if (leftmost)
dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
dl_rq->earliest_dl.next = p->dl.deadline; dl_rq->earliest_dl.next = p->dl.deadline;
}
rb_link_node(&p->pushable_dl_tasks, parent, link); rb_link_node(&p->pushable_dl_tasks, parent, link);
rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); rb_insert_color_cached(&p->pushable_dl_tasks,
&dl_rq->pushable_dl_tasks_root, leftmost);
} }
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
...@@ -445,24 +444,23 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) ...@@ -445,24 +444,23 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
return; return;
if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) { if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
struct rb_node *next_node; struct rb_node *next_node;
next_node = rb_next(&p->pushable_dl_tasks); next_node = rb_next(&p->pushable_dl_tasks);
dl_rq->pushable_dl_tasks_leftmost = next_node;
if (next_node) { if (next_node) {
dl_rq->earliest_dl.next = rb_entry(next_node, dl_rq->earliest_dl.next = rb_entry(next_node,
struct task_struct, pushable_dl_tasks)->dl.deadline; struct task_struct, pushable_dl_tasks)->dl.deadline;
} }
} }
rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
RB_CLEAR_NODE(&p->pushable_dl_tasks); RB_CLEAR_NODE(&p->pushable_dl_tasks);
} }
static inline int has_pushable_dl_tasks(struct rq *rq) static inline int has_pushable_dl_tasks(struct rq *rq)
{ {
return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root); return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
} }
static int push_dl_task(struct rq *rq); static int push_dl_task(struct rq *rq);
...@@ -1266,7 +1264,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) ...@@ -1266,7 +1264,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
dl_rq->earliest_dl.next = 0; dl_rq->earliest_dl.next = 0;
cpudl_clear(&rq->rd->cpudl, rq->cpu); cpudl_clear(&rq->rd->cpudl, rq->cpu);
} else { } else {
struct rb_node *leftmost = dl_rq->rb_leftmost; struct rb_node *leftmost = dl_rq->root.rb_leftmost;
struct sched_dl_entity *entry; struct sched_dl_entity *entry;
entry = rb_entry(leftmost, struct sched_dl_entity, rb_node); entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
...@@ -1313,7 +1311,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) ...@@ -1313,7 +1311,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{ {
struct dl_rq *dl_rq = dl_rq_of_se(dl_se); struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rb_node **link = &dl_rq->rb_root.rb_node; struct rb_node **link = &dl_rq->root.rb_root.rb_node;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
struct sched_dl_entity *entry; struct sched_dl_entity *entry;
int leftmost = 1; int leftmost = 1;
...@@ -1331,11 +1329,8 @@ static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) ...@@ -1331,11 +1329,8 @@ static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
} }
} }
if (leftmost)
dl_rq->rb_leftmost = &dl_se->rb_node;
rb_link_node(&dl_se->rb_node, parent, link); rb_link_node(&dl_se->rb_node, parent, link);
rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root); rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
inc_dl_tasks(dl_se, dl_rq); inc_dl_tasks(dl_se, dl_rq);
} }
...@@ -1347,14 +1342,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se) ...@@ -1347,14 +1342,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
if (RB_EMPTY_NODE(&dl_se->rb_node)) if (RB_EMPTY_NODE(&dl_se->rb_node))
return; return;
if (dl_rq->rb_leftmost == &dl_se->rb_node) { rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
struct rb_node *next_node;
next_node = rb_next(&dl_se->rb_node);
dl_rq->rb_leftmost = next_node;
}
rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
RB_CLEAR_NODE(&dl_se->rb_node); RB_CLEAR_NODE(&dl_se->rb_node);
dec_dl_tasks(dl_se, dl_rq); dec_dl_tasks(dl_se, dl_rq);
...@@ -1647,7 +1635,7 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p) ...@@ -1647,7 +1635,7 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
struct dl_rq *dl_rq) struct dl_rq *dl_rq)
{ {
struct rb_node *left = dl_rq->rb_leftmost; struct rb_node *left = rb_first_cached(&dl_rq->root);
if (!left) if (!left)
return NULL; return NULL;
...@@ -1771,7 +1759,7 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) ...@@ -1771,7 +1759,7 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
*/ */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu) static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{ {
struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost; struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
struct task_struct *p = NULL; struct task_struct *p = NULL;
if (!has_pushable_dl_tasks(rq)) if (!has_pushable_dl_tasks(rq))
...@@ -1945,7 +1933,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) ...@@ -1945,7 +1933,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
if (!has_pushable_dl_tasks(rq)) if (!has_pushable_dl_tasks(rq))
return NULL; return NULL;
p = rb_entry(rq->dl.pushable_dl_tasks_leftmost, p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
struct task_struct, pushable_dl_tasks); struct task_struct, pushable_dl_tasks);
BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(rq->cpu != task_cpu(p));
......
...@@ -549,8 +549,7 @@ struct rt_rq { ...@@ -549,8 +549,7 @@ struct rt_rq {
/* Deadline class' related fields in a runqueue */ /* Deadline class' related fields in a runqueue */
struct dl_rq { struct dl_rq {
/* runqueue is an rbtree, ordered by deadline */ /* runqueue is an rbtree, ordered by deadline */
struct rb_root rb_root; struct rb_root_cached root;
struct rb_node *rb_leftmost;
unsigned long dl_nr_running; unsigned long dl_nr_running;
...@@ -574,8 +573,7 @@ struct dl_rq { ...@@ -574,8 +573,7 @@ struct dl_rq {
* an rb-tree, ordered by tasks' deadlines, with caching * an rb-tree, ordered by tasks' deadlines, with caching
* of the leftmost (earliest deadline) element. * of the leftmost (earliest deadline) element.
*/ */
struct rb_root pushable_dl_tasks_root; struct rb_root_cached pushable_dl_tasks_root;
struct rb_node *pushable_dl_tasks_leftmost;
#else #else
struct dl_bw dl_bw; struct dl_bw dl_bw;
#endif #endif
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册