Commit bf8d90ce authored by Zheng Zucheng, committed by Zheng Zengkai

sched: Fix offline tasks not being killed in a timely manner

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZX4D
CVE: NA

--------------------------------

If online tasks occupy 100% of the CPU, offline tasks are throttled and
never get scheduled; as a result, an offline task cannot respond to a
SIGKILL signal in a timely manner.
Signed-off-by: Zheng Zucheng <zhengzucheng@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 2686c74f
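For context, the failure mode is easy to demonstrate from userspace. Below is a minimal reproduction sketch, not part of the patch; the cgroup layout, the group names, and the cpu.qos_level values (0 = online, -1 = offline) are assumptions about the test environment:

/*
 * repro.c - sketch only. Assumes an openEuler kernel with
 * CONFIG_QOS_SCHED and two pre-created cpu cgroups (hypothetical
 * names): /sys/fs/cgroup/cpu/online with cpu.qos_level = 0 and
 * /sys/fs/cgroup/cpu/offline with cpu.qos_level = -1.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

static void join_cgroup(const char *procs, pid_t pid)
{
	FILE *f = fopen(procs, "w");

	if (!f) {
		perror(procs);
		exit(1);
	}
	fprintf(f, "%d\n", (int)pid);
	fclose(f);
}

int main(void)
{
	long i, ncpu = sysconf(_SC_NPROCESSORS_ONLN);
	struct timespec t0, t1;
	pid_t victim;

	/* one spinning "online" task per CPU: 100% CPU pressure */
	for (i = 0; i < ncpu; i++) {
		if (fork() == 0) {
			join_cgroup("/sys/fs/cgroup/cpu/online/cgroup.procs",
				    getpid());
			for (;;)
				;
		}
	}

	/* one spinning "offline" task: throttled while the hogs run */
	victim = fork();
	if (victim == 0) {
		join_cgroup("/sys/fs/cgroup/cpu/offline/cgroup.procs",
			    getpid());
		for (;;)
			;
	}

	sleep(1);
	clock_gettime(CLOCK_MONOTONIC, &t0);
	kill(victim, SIGKILL);
	waitpid(victim, NULL, 0);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	/* without the fix this can stall far beyond a scheduling period */
	printf("SIGKILL-to-exit latency: %.3f s\n",
	       (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9);

	kill(0, SIGKILL);	/* tear down the process group, ourselves included */
	return 0;
}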
include/linux/cgroup.h
@@ -959,4 +959,8 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
 
 #endif /* CONFIG_CGROUP_BPF */
 
+#ifdef CONFIG_QOS_SCHED
+void cgroup_move_task_to_root(struct task_struct *tsk);
+#endif
+
 #endif /* _LINUX_CGROUP_H */
include/linux/sched.h
@@ -2091,4 +2091,8 @@ int sched_trace_rq_nr_running(struct rq *rq);
 
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 
+#ifdef CONFIG_QOS_SCHED
+void sched_move_offline_task(struct task_struct *p);
+#endif
+
 #endif
kernel/cgroup/cgroup.c
@@ -2842,6 +2842,28 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
 			ss->post_attach();
 }
 
+#ifdef CONFIG_QOS_SCHED
+void cgroup_move_task_to_root(struct task_struct *tsk)
+{
+	struct css_set *css;
+	struct cgroup *cpu_cgrp;
+	struct cgroup *cpu_root_cgrp;
+
+	mutex_lock(&cgroup_mutex);
+	percpu_down_write(&cgroup_threadgroup_rwsem);
+	spin_lock_irq(&css_set_lock);
+	css = task_css_set(tsk);
+	cpu_cgrp = css->subsys[cpu_cgrp_id]->cgroup;
+	cpu_root_cgrp = &cpu_cgrp->root->cgrp;
+	spin_unlock_irq(&css_set_lock);
+
+	(void)cgroup_attach_task(cpu_root_cgrp, tsk, false);
+
+	percpu_up_write(&cgroup_threadgroup_rwsem);
+	mutex_unlock(&cgroup_mutex);
+}
+#endif
+
 static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
 {
 	struct cgroup_subsys *ss;
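The cgroup_attach_task() call above is, in effect, the in-kernel counterpart of writing the task's PID into the root cpu cgroup from userspace. A rough analogue, assuming a cgroup-v1 cpu hierarchy mounted at /sys/fs/cgroup/cpu:

/* Userspace analogue of cgroup_move_task_to_root(); the mount point
 * is an assumption about the test environment. */
#include <stdio.h>
#include <sys/types.h>

static int move_pid_to_cpu_root(pid_t pid)
{
	FILE *f = fopen("/sys/fs/cgroup/cpu/cgroup.procs", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", (int)pid);	/* the kernel reparents the task */
	return fclose(f);
}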
kernel/sched/core.c
@@ -7451,6 +7451,38 @@ static void sched_change_qos_group(struct task_struct *tsk, struct task_group *tg)
 		__setscheduler(rq, tsk, &attr, 0);
 	}
 }
+
+struct offline_args {
+	struct work_struct work;
+	struct task_struct *p;
+};
+
+static void sched_move_work(struct work_struct *work)
+{
+	struct sched_param param = { .sched_priority = 0 };
+	struct offline_args *args = container_of(work, struct offline_args, work);
+
+	cgroup_move_task_to_root(args->p);
+	sched_setscheduler(args->p, SCHED_NORMAL, &param);
+	put_task_struct(args->p);
+	kfree(args);
+}
+
+void sched_move_offline_task(struct task_struct *p)
+{
+	struct offline_args *args;
+
+	if (unlikely(task_group(p)->qos_level != -1))
+		return;
+
+	args = kmalloc(sizeof(struct offline_args), GFP_ATOMIC);
+	if (args) {
+		get_task_struct(p);
+		args->p = p;
+		INIT_WORK(&args->work, sched_move_work);
+		queue_work(system_highpri_wq, &args->work);
+	}
+}
 #endif
 
 static inline void alloc_uclamp_sched_group(struct task_group *tg,
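complete_signal() is called with the target's sighand->siglock held, so the move cannot be done synchronously: attaching a task to a cgroup takes cgroup_mutex and may sleep. The patch therefore packages the task into a small GFP_ATOMIC allocation and defers the sleeping part to system_highpri_wq. The same pattern, reduced to a standalone sketch with illustrative names:

#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* carrier for one deferred operation on a task */
struct deferred_op {
	struct work_struct work;
	struct task_struct *p;
};

static void deferred_op_fn(struct work_struct *work)
{
	struct deferred_op *op = container_of(work, struct deferred_op, work);

	/* process context: safe to take mutexes and sleep here */

	put_task_struct(op->p);		/* drop the ref taken at queue time */
	kfree(op);
}

/* callable from atomic context, e.g. under a spinlock */
static void defer_op_on_task(struct task_struct *p)
{
	struct deferred_op *op = kmalloc(sizeof(*op), GFP_ATOMIC);

	if (!op)
		return;			/* best effort: drop on OOM */
	get_task_struct(p);		/* keep *p alive until the work runs */
	op->p = p;
	INIT_WORK(&op->work, deferred_op_fn);
	queue_work(system_highpri_wq, &op->work);
}

Pinning the task with get_task_struct() matters because a SIGKILLed task may be reaped before the workqueue gets to it; the reference keeps the task_struct valid until sched_move_work() finishes.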
kernel/signal.c
@@ -1047,6 +1047,9 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 			signal->group_stop_count = 0;
 			t = p;
 			do {
+#ifdef CONFIG_QOS_SCHED
+				sched_move_offline_task(t);
+#endif
 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 				sigaddset(&t->pending.signal, SIGKILL);
 				signal_wake_up(t, 1);
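After the fix, each thread of a SIGKILLed offline group is pulled back to the root cpu cgroup and reset to SCHED_NORMAL, so it can consume the signal promptly even under full online load. One way to observe the effect from userspace, a sketch that is racy by nature since the move happens from a workqueue (the PID argument is assumed to be a throttled offline task):

/* observe.c - sample the victim's policy and cgroup right after SIGKILL */
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
	pid_t pid;
	char path[64], line[256];
	FILE *f;

	if (argc < 2)
		return 1;
	pid = (pid_t)atoi(argv[1]);

	kill(pid, SIGKILL);

	/* SCHED_NORMAL is 0; reads back once sched_move_work() has run */
	printf("policy: %d\n", sched_getscheduler(pid));

	snprintf(path, sizeof(path), "/proc/%d/cgroup", pid);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* the cpu line should now be "/" */
		fclose(f);
	}
	return 0;
}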