openeuler / Kernel · commit e56bd0b6
Authored May 07, 2021 by Cheng Jian
Parent: d7315323

Revert "sched: Introduce qos scheduler for co-location"

This reverts commit 8abded2a.
3 changed files with 0 additions and 104 deletions (+0 −104):

  init/Kconfig           +0  −5
  kernel/sched/core.c    +0  −95
  kernel/sched/sched.h   +0  −4
init/Kconfig
@@ -777,11 +777,6 @@ menuconfig CGROUP_SCHED
           tasks.

 if CGROUP_SCHED

-config QOS_SCHED
-       bool "Qos task scheduling"
-       depends on CGROUP_SCHED
-       default n
-
 config FAIR_GROUP_SCHED
        bool "Group scheduling for SCHED_OTHER"
        depends on CGROUP_SCHED
...
kernel/sched/core.c
@@ -6367,15 +6367,6 @@ void ia64_set_curr_task(int cpu, struct task_struct *p)
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);

-#ifdef CONFIG_QOS_SCHED
-static int alloc_qos_sched_group(struct task_group *tg, struct task_group *parent)
-{
-       tg->qos_level = parent->qos_level;
-
-       return 1;
-}
-#endif
-
 static void sched_free_group(struct task_group *tg)
 {
        free_fair_sched_group(tg);
...
@@ -6396,11 +6387,6 @@ struct task_group *sched_create_group(struct task_group *parent)
        if (!alloc_fair_sched_group(tg, parent))
                goto err;

-#ifdef CONFIG_QOS_SCHED
-       if (!alloc_qos_sched_group(tg, parent))
-               goto err;
-#endif
-
        if (!alloc_rt_sched_group(tg, parent))
                goto err;
...
@@ -6469,30 +6455,6 @@ static void sched_change_group(struct task_struct *tsk, int type)
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;

-#ifdef CONFIG_QOS_SCHED
-       /*
-        * No need to re-setcheduler when a task is exiting or the task
-        * is in an autogroup.
-        */
-       if (!rt_task(tsk)
-           && !(tsk->flags & PF_EXITING)
-           && !task_group_is_autogroup(tg)) {
-               struct rq *rq = task_rq(tsk);
-               struct sched_attr attr = {
-                       .sched_priority = 0,
-               };
-
-               if (tg->qos_level == -1) {
-                       attr.sched_policy = SCHED_IDLE;
-               } else {
-                       attr.sched_policy = SCHED_NORMAL;
-               }
-               attr.sched_nice = PRIO_TO_NICE(tsk->static_prio);
-
-               __setscheduler(rq, tsk, &attr, 0);
-       }
-#endif
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_change_group)
                tsk->sched_class->task_change_group(tsk, type);
...
@@ -6953,56 +6915,6 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
 }
 #endif /* CONFIG_RT_GROUP_SCHED */

-#ifdef CONFIG_QOS_SCHED
-static int cpu_qos_write(struct cgroup_subsys_state *css,
-                        struct cftype *cftype, s64 qos_level)
-{
-       struct css_task_iter it;
-       struct task_struct *tsk;
-       struct task_group *tg;
-       struct sched_param param;
-       int pid, policy;
-
-       tg = css_tg(css);
-       if (!tg->se[0])
-               return -EINVAL;
-
-       if (qos_level != -1 && qos_level != 0)
-               return -EINVAL;
-
-       if (tg->qos_level == qos_level)
-               goto done;
-
-       if (qos_level == -1) {
-               policy = SCHED_IDLE;
-       } else {
-               policy = SCHED_NORMAL;
-       }
-
-       tg->qos_level = qos_level;
-
-       param.sched_priority = 0;
-       css_task_iter_start(css, 0, &it);
-       while ((tsk = css_task_iter_next(&it))) {
-               pid = task_tgid_vnr(tsk);
-               if (pid > 0 && !rt_task(tsk)) {
-                       sched_setscheduler(tsk, policy, &param);
-               }
-       }
-       css_task_iter_end(&it);
-
-done:
-       return 0;
-}
-
-static s64 cpu_qos_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct task_group *tg = css_tg(css);
-
-       return tg->qos_level;
-}
-#endif /* CONFIG_QOS_SCHED */
-
 static struct cftype cpu_legacy_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
        {
...
@@ -7038,13 +6950,6 @@ static struct cftype cpu_legacy_files[] = {
                .read_u64 = cpu_rt_period_read_uint,
                .write_u64 = cpu_rt_period_write_uint,
        },
 #endif
-#ifdef CONFIG_QOS_SCHED
-       {
-               .name = "qos_level",
-               .read_s64 = cpu_qos_read,
-               .write_s64 = cpu_qos_write,
-       },
-#endif
        { }     /* Terminate */
 };
...
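The core of the removed cpu_qos_write() is an in-kernel sched_setscheduler() call that flips every eligible task in the cgroup between SCHED_IDLE (qos_level == -1) and SCHED_NORMAL (qos_level == 0), always with sched_priority 0; the loop skips RT tasks and tasks with no visible PID in the current namespace. The same per-task demotion can be expressed from userspace with the standard POSIX/glibc API; a small sketch using ordinary syscall wrappers, not code from this patch:

#define _GNU_SOURCE     /* SCHED_IDLE is exposed by glibc's <sched.h> under _GNU_SOURCE */
#include <sched.h>
#include <stdio.h>

int main(void)
{
        /* SCHED_IDLE requires sched_priority == 0, as in the reverted code. */
        struct sched_param param = { .sched_priority = 0 };

        /* A pid of 0 means the calling thread. */
        if (sched_setscheduler(0, SCHED_IDLE, &param) == -1) {
                perror("sched_setscheduler");
                return 1;
        }
        printf("policy is now %d\n", sched_getscheduler(0));
        return 0;
}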
kernel/sched/sched.h
@@ -400,11 +400,7 @@ struct task_group {
        struct cfs_bandwidth    cfs_bandwidth;

-#if defined(CONFIG_QOS_SCHED) && !defined(__GENKSYMS__)
-       long qos_level;
-#else
        KABI_RESERVE(1)
-#endif
        KABI_RESERVE(2)
 };
...
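The sched.h hunk gives the slot that qos_level had claimed back to KABI_RESERVE(1). KABI_RESERVE is openEuler's padding macro for kABI stability: it keeps struct task_group at the size and layout that out-of-tree modules were built against, and the !defined(__GENKSYMS__) guard meant genksyms (which runs with __GENKSYMS__ defined when computing symbol CRCs) still saw the reserved field, so the kABI checksum never changed. Conceptually each invocation reserves one unused slot, roughly like this stand-in (the expansion shown is an assumption for illustration, not the tree's actual definition):

/* Hypothetical stand-in: pad the struct with one pointer-sized field so a
 * later patch can claim the slot without shifting other members' offsets. */
#define KABI_RESERVE(n) unsigned long kabi_reserve##n;

struct task_group_like {
        long some_member;
        KABI_RESERVE(1) /* was reclaimed by qos_level before this revert */
        KABI_RESERVE(2)
};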
登录