Commit 36da4fe9 authored by Xunlei Pang, committed by Yihao Wu

alinux: sched: Maintain "nr_uninterruptible" in runqueue

to #26424323

It's relatively easy to maintain nr_uninterruptible in the scheduler
compared to doing it in cpuacct; we assume that the "cpu" and "cpuacct"
controllers are bound together, so the counter can be used for
per-cgroup load.

This will be needed to calculate the per-cgroup load average later.
Reviewed-by: Michael Wang <yun.wang@linux.alibaba.com>
Signed-off-by: Xunlei Pang <xlpang@linux.alibaba.com>
Signed-off-by: Yihao Wu <wuyihao@linux.alibaba.com>
Parent 50fb11b1
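Why the counter matters for load: the kernel's classic load average samples nr_running plus nr_uninterruptible roughly every 5 seconds and folds the sample into three fixed-point, exponentially decaying averages. A per-cgroup load average can apply the same math to the per-group counters introduced here. Below is a minimal, self-contained sketch of that fixed-point update; the constants mirror kernel/sched/loadavg.c, while the per-cgroup sampling shown is only an assumed later consumer, not code from this patch.

```c
/*
 * Sketch of the avenrun-style fixed-point update that a per-cgroup
 * "nr_running + nr_uninterruptible" sample could feed into.  The sampled
 * values below are invented for illustration.
 */
#include <stdio.h>

#define FSHIFT  11                      /* bits of fixed-point precision */
#define FIXED_1 (1 << FSHIFT)           /* 1.0 in fixed point */
#define EXP_1   1884                    /* 1/exp(5sec/1min) in fixed point */
#define EXP_5   2014                    /* 1/exp(5sec/5min) */
#define EXP_15  2037                    /* 1/exp(5sec/15min) */

static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        unsigned long newload;

        newload = load * exp + active * (FIXED_1 - exp);
        if (active >= load)
                newload += FIXED_1 - 1;
        return newload / FIXED_1;
}

int main(void)
{
        /* Pretend these were sampled every 5s from one cgroup's runqueues. */
        unsigned long samples[] = { 2, 3, 3, 1, 0, 4, 4, 4 };
        unsigned long avn1 = 0, avn5 = 0, avn15 = 0;

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                unsigned long active = samples[i] * FIXED_1;

                avn1  = calc_load(avn1,  EXP_1,  active);
                avn5  = calc_load(avn5,  EXP_5,  active);
                avn15 = calc_load(avn15, EXP_15, active);
        }

        printf("load averages: %lu.%02lu %lu.%02lu %lu.%02lu\n",
               avn1 >> FSHIFT, ((avn1 & (FIXED_1 - 1)) * 100) >> FSHIFT,
               avn5 >> FSHIFT, ((avn5 & (FIXED_1 - 1)) * 100) >> FSHIFT,
               avn15 >> FSHIFT, ((avn15 & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
}
```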
@@ -117,6 +117,8 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                      struct pid *pid, struct task_struct *tsk);
+extern struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp,
+                                                     int ssid);
 
 void cgroup_fork(struct task_struct *p);
 extern int cgroup_can_fork(struct task_struct *p);
@@ -472,6 +472,12 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
         return &cgrp->self;
 }
 
+struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp,
+                                              int ssid)
+{
+        return cgroup_css(cgrp, cgroup_subsys[(ssid)]);
+}
+
 /**
  * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
  * @cgrp: the cgroup of interest
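global_cgroup_css() only exposes the existing cgroup_css() lookup by subsystem id. Since the commit message assumes "cpu" and "cpuacct" are bound together, a later per-cgroup-load patch could presumably call it from cpuacct to reach the cpu controller's task_group. A hedged sketch of such a call site (cpu_cgrp_id and container_of() are existing kernel identifiers, but this call site is hypothetical and not part of this commit):

```c
/* Hypothetical consumer, not in this patch: from the cgroup a cpuacct file
 * was read on, reach the co-mounted cpu controller's css and its task_group,
 * whose per-CPU cfs_rq/rt_rq now carry nr_uninterruptible.
 */
struct cgroup_subsys_state *css = global_cgroup_css(cgrp, cpu_cgrp_id);
struct task_group *tg = css ? container_of(css, struct task_group, css) : NULL;
```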
@@ -744,18 +744,28 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
         p->sched_class->dequeue_task(rq, p, flags);
 }
 
+static void update_nr_uninterruptible(struct task_struct *tsk, long inc)
+{
+        if (tsk->sched_class->update_nr_uninterruptible)
+                tsk->sched_class->update_nr_uninterruptible(tsk, inc);
+}
+
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
-        if (task_contributes_to_load(p))
+        if (task_contributes_to_load(p)) {
+                update_nr_uninterruptible(p, -1);
                 rq->nr_uninterruptible--;
+        }
 
         enqueue_task(rq, p, flags);
 }
 
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
-        if (task_contributes_to_load(p))
+        if (task_contributes_to_load(p)) {
+                update_nr_uninterruptible(p, 1);
                 rq->nr_uninterruptible++;
+        }
 
         dequeue_task(rq, p, flags);
 }
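One way to read the hunk above (and the wakeup hunk below): every deactivation of a load-contributing task adds one to its group's counter, and every (re)activation subtracts one, so the per-group counter always equals the number of that group's tasks currently in uninterruptible sleep. A minimal standalone model of that invariant, with illustrative names rather than kernel APIs:

```c
#include <assert.h>
#include <stdio.h>

/* Toy stand-in for one scheduling group's bookkeeping. */
struct group {
        long nr_uninterruptible;
};

enum state { RUNNABLE, UNINTERRUPTIBLE };

struct task {
        enum state state;
        struct group *grp;
};

/* deactivate_task() analog: the task goes to uninterruptible sleep. */
static void block_task(struct task *t)
{
        t->state = UNINTERRUPTIBLE;
        t->grp->nr_uninterruptible += 1;        /* update_nr_uninterruptible(p, 1) */
}

/* activate_task()/ttwu_do_activate() analog: the task is runnable again. */
static void wake_task(struct task *t)
{
        t->grp->nr_uninterruptible -= 1;        /* update_nr_uninterruptible(p, -1) */
        t->state = RUNNABLE;
}

int main(void)
{
        struct group g = { 0 };
        struct task a = { RUNNABLE, &g }, b = { RUNNABLE, &g };

        block_task(&a);
        block_task(&b);
        assert(g.nr_uninterruptible == 2);      /* two tasks in D state */

        wake_task(&a);
        wake_task(&b);
        assert(g.nr_uninterruptible == 0);      /* accounting balanced */

        printf("per-group D-state accounting balanced\n");
        return 0;
}
```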
@@ -1689,8 +1699,10 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
         lockdep_assert_held(&rq->lock);
 
 #ifdef CONFIG_SMP
-        if (p->sched_contributes_to_load)
+        if (p->sched_contributes_to_load) {
+                update_nr_uninterruptible(p, -1);
                 rq->nr_uninterruptible--;
+        }
 
         if (wake_flags & WF_MIGRATED)
                 en_flags |= ENQUEUE_MIGRATED;
@@ -6378,8 +6390,18 @@ void sched_move_task(struct task_struct *tsk)
         if (running)
                 put_prev_task(rq, tsk);
 
+        /* decrease old group */
+        if ((!queued && task_contributes_to_load(tsk)) ||
+            (tsk->state == TASK_WAKING && tsk->sched_contributes_to_load))
+                update_nr_uninterruptible(tsk, -1);
+
         sched_change_group(tsk, TASK_MOVE_GROUP);
 
+        /* increase new group after change */
+        if ((!queued && task_contributes_to_load(tsk)) ||
+            (tsk->state == TASK_WAKING && tsk->sched_contributes_to_load))
+                update_nr_uninterruptible(tsk, 1);
+
         if (queued)
                 enqueue_task(rq, tsk, queue_flags);
         if (running)
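The sched_move_task() hunk covers a task that changes cgroups while it is still blocked (or mid-wakeup in TASK_WAKING): its contribution has to be removed from the old group before sched_change_group() and added to the new group afterwards, otherwise the old group's counter would stay elevated forever. A small sketch of that transfer, again with stand-in names rather than kernel APIs:

```c
#include <assert.h>

struct group {
        long nr_uninterruptible;
};

struct task {
        int blocked;            /* contributes to load (uninterruptible) */
        struct group *grp;
};

/* sched_move_task() analog: move a possibly-blocked task to another group. */
static void move_task_group(struct task *t, struct group *newgrp)
{
        if (t->blocked)
                t->grp->nr_uninterruptible -= 1;        /* decrease old group */

        t->grp = newgrp;                                /* sched_change_group() */

        if (t->blocked)
                t->grp->nr_uninterruptible += 1;        /* increase new group */
}

int main(void)
{
        struct group old = { 1 }, new = { 0 };
        struct task t = { 1, &old };    /* blocked task accounted in old group */

        move_task_group(&t, &new);
        assert(old.nr_uninterruptible == 0 && new.nr_uninterruptible == 1);
        return 0;
}
```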
@@ -10233,6 +10233,16 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
         return rr_interval;
 }
 
+#ifdef CONFIG_SCHED_SLI
+static void update_nr_uninterruptible_fair(struct task_struct *p, long inc)
+{
+        struct sched_entity *se = &p->se;
+
+        for_each_sched_entity(se)
+                cfs_rq_of(se)->nr_uninterruptible += inc;
+}
+#endif
+
 /*
  * All the scheduling class methods:
  */
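In the fair class the hook walks for_each_sched_entity(), so with CONFIG_FAIR_GROUP_SCHED the increment lands on the cfs_rq of the task's own group and of every ancestor group up to the root; each level of the hierarchy therefore counts the blocked tasks of its entire subtree (the rt hook further below mirrors this with for_each_sched_rt_entity()). A standalone sketch of that upward propagation, where a plain parent pointer stands in for the scheduling-entity chain:

```c
#include <assert.h>
#include <stddef.h>

/* Toy per-group runqueue with a parent link, like a task_group hierarchy. */
struct grp_rq {
        long nr_uninterruptible;
        struct grp_rq *parent;
};

/* update_nr_uninterruptible_fair() analog: charge every ancestor level. */
static void charge_hierarchy(struct grp_rq *rq, long inc)
{
        for (; rq; rq = rq->parent)
                rq->nr_uninterruptible += inc;
}

int main(void)
{
        struct grp_rq root   = { 0, NULL };
        struct grp_rq parent = { 0, &root };
        struct grp_rq leaf   = { 0, &parent };

        charge_hierarchy(&leaf, 1);     /* a leaf-group task blocks */
        assert(leaf.nr_uninterruptible == 1);
        assert(parent.nr_uninterruptible == 1);
        assert(root.nr_uninterruptible == 1);

        charge_hierarchy(&leaf, -1);    /* it wakes up again */
        assert(root.nr_uninterruptible == 0);
        return 0;
}
```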
@@ -10274,6 +10284,10 @@ const struct sched_class fair_sched_class = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
         .task_change_group = task_change_group_fair,
 #endif
+
+#ifdef CONFIG_SCHED_SLI
+        .update_nr_uninterruptible = update_nr_uninterruptible_fair,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -2374,6 +2374,16 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
         return 0;
 }
 
+#ifdef CONFIG_SCHED_SLI
+static void update_nr_uninterruptible_rt(struct task_struct *p, long inc)
+{
+        struct sched_rt_entity *se = &p->rt;
+
+        for_each_sched_rt_entity(se)
+                rt_rq_of_se(se)->nr_uninterruptible += inc;
+}
+#endif
+
 const struct sched_class rt_sched_class = {
         .next           = &fair_sched_class,
         .enqueue_task   = enqueue_task_rt,
@@ -2404,6 +2414,10 @@ const struct sched_class rt_sched_class = {
         .switched_to    = switched_to_rt,
 
         .update_curr    = update_curr_rt,
+
+#ifdef CONFIG_SCHED_SLI
+        .update_nr_uninterruptible = update_nr_uninterruptible_rt,
+#endif
 };
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -575,6 +575,8 @@ struct cfs_rq {
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+        unsigned long nr_uninterruptible;
+
         ALI_HOTFIX_RESERVE(1)
         ALI_HOTFIX_RESERVE(2)
         ALI_HOTFIX_RESERVE(3)
@@ -625,6 +627,8 @@ struct rt_rq {
         struct rq *rq;
         struct task_group *tg;
 #endif
+
+        unsigned long nr_uninterruptible;
 };
 
 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
@@ -1671,6 +1675,8 @@ struct sched_class {
 #ifdef CONFIG_FAIR_GROUP_SCHED
         void (*task_change_group)(struct task_struct *p, int type);
 #endif
+
+        void (*update_nr_uninterruptible)(struct task_struct *p, long inc);
 };
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
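The new counters live in each task_group's per-CPU cfs_rq and rt_rq, so a consumer such as the per-cgroup load average this commit prepares for would presumably sum them across CPUs and combine them with the group's runnable count to obtain one "active" sample. A hedged sketch of that aggregation; the layout and names below are illustrative, not the kernel's:

```c
#include <stdio.h>

#define NR_CPUS 4

/* Illustrative per-CPU counters for one group (fair + rt classes). */
struct grp_cpu_stats {
        unsigned long cfs_nr_uninterruptible;
        unsigned long rt_nr_uninterruptible;
        unsigned long nr_running;
};

/* Sum the per-CPU counters into one group-wide "active" sample. */
static unsigned long group_active_count(const struct grp_cpu_stats *pcpu)
{
        unsigned long active = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                active += pcpu[cpu].cfs_nr_uninterruptible +
                          pcpu[cpu].rt_nr_uninterruptible +
                          pcpu[cpu].nr_running;
        return active;
}

int main(void)
{
        struct grp_cpu_stats stats[NR_CPUS] = {
                { 1, 0, 2 }, { 0, 1, 0 }, { 0, 0, 1 }, { 2, 0, 0 },
        };

        /* This sample is what a per-cgroup calc_load() could consume. */
        printf("group active count: %lu\n", group_active_count(stats));
        return 0;
}
```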