openeuler / Kernel · Commit d7315323
Authored on May 07, 2021 by Cheng Jian
Revert "sched: Throttle qos cfs_rq when current cpu is running online task"
This reverts commit a10c09df.

Parent: 1061396b
Showing 2 changed files with 0 additions and 148 deletions:

kernel/sched/core.c: +0 −2
kernel/sched/fair.c: +0 −146
kernel/sched/core.c

@@ -6975,10 +6975,8 @@ static int cpu_qos_write(struct cgroup_subsys_state *css,
 	if (qos_level == -1) {
 		policy = SCHED_IDLE;
-		cfs_bandwidth_usage_inc();
 	} else {
 		policy = SCHED_NORMAL;
-		cfs_bandwidth_usage_dec();
 	}
 
 	tg->qos_level = qos_level;
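For context on the two calls dropped from cpu_qos_write(): cfs_bandwidth_used() is a fast-path gate in the fair scheduler (in mainline, a static key, i.e. a jump label), and cfs_bandwidth_usage_inc()/cfs_bandwidth_usage_dec() reference-count the users that need that gate open. The reverted feature held the gate open while any group sat at qos_level == -1 so that check_qos_cfs_rq() (see the fair.c hunks below) could bail out cheaply when the feature was unused. A minimal userspace sketch of the gate-by-refcount idea, assuming only C11 atomics; the names mirror the kernel's, but this is an illustration, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative analog of the kernel's cfs_bandwidth_used() gate.
 * The real kernel uses a static key (jump label), so the disabled
 * fast path costs a patched NOP rather than an atomic load. */
static atomic_int bandwidth_users;

static bool cfs_bandwidth_used(void)
{
	return atomic_load_explicit(&bandwidth_users, memory_order_relaxed) > 0;
}

static void cfs_bandwidth_usage_inc(void)
{
	atomic_fetch_add(&bandwidth_users, 1);
}

static void cfs_bandwidth_usage_dec(void)
{
	atomic_fetch_sub(&bandwidth_users, 1);
}

int main(void)
{
	printf("used before inc: %d\n", cfs_bandwidth_used()); /* 0 */
	cfs_bandwidth_usage_inc();  /* e.g. a group switching to qos_level == -1 */
	printf("used after inc:  %d\n", cfs_bandwidth_used()); /* 1 */
	cfs_bandwidth_usage_dec();  /* the group switching back */
	printf("used after dec:  %d\n", cfs_bandwidth_used()); /* 0 */
	return 0;
}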
kernel/sched/fair.c

@@ -99,10 +99,6 @@ int __weak arch_asym_cpu_priority(int cpu)
 }
 #endif
 
-#ifdef CONFIG_QOS_SCHED
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct list_head, qos_throttled_cfs_rq);
-#endif
-
 #ifdef CONFIG_CFS_BANDWIDTH
 /*
  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
@@ -6873,128 +6869,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 		set_last_buddy(se);
 }
 
-#ifdef CONFIG_QOS_SCHED
-static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
-{
-	struct rq *rq = rq_of(cfs_rq);
-	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
-
-	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
-
-	/* freeze hierarchy runnable averages while throttled */
-	rcu_read_lock();
-	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
-	rcu_read_unlock();
-
-	task_delta = cfs_rq->h_nr_running;
-	idle_task_delta = cfs_rq->idle_h_nr_running;
-	for_each_sched_entity(se) {
-		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
-		/* throttled entity or throttle-on-deactivate */
-		if (!se->on_rq)
-			break;
-
-		if (dequeue)
-			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
-		qcfs_rq->h_nr_running -= task_delta;
-		qcfs_rq->idle_h_nr_running -= idle_task_delta;
-
-		if (qcfs_rq->load.weight)
-			dequeue = 0;
-	}
-
-	if (!se) {
-		sub_nr_running(rq, task_delta);
-	}
-
-	cfs_rq->throttled = 1;
-	cfs_rq->throttled_clock = rq_clock(rq);
-	list_add(&cfs_rq->throttled_list,
-		 &per_cpu(qos_throttled_cfs_rq, cpu_of(rq)));
-}
-
-static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
-{
-	struct rq *rq = rq_of(cfs_rq);
-	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
-	struct sched_entity *se;
-	int enqueue = 1;
-	long task_delta, idle_task_delta;
-
-	se = cfs_rq->tg->se[cpu_of(rq)];
-
-	cfs_rq->throttled = 0;
-
-	update_rq_clock(rq);
-
-	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
-	list_del_init(&cfs_rq->throttled_list);
-
-	/* update hierarchical throttle state */
-	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
-
-	if (!cfs_rq->load.weight)
-		return;
-
-	task_delta = cfs_rq->h_nr_running;
-	idle_task_delta = cfs_rq->idle_h_nr_running;
-	for_each_sched_entity(se) {
-		if (se->on_rq)
-			enqueue = 0;
-
-		cfs_rq = cfs_rq_of(se);
-		if (enqueue)
-			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
-		cfs_rq->h_nr_running += task_delta;
-		cfs_rq->idle_h_nr_running += idle_task_delta;
-
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-	}
-
-	assert_list_leaf_cfs_rq(rq);
-
-	if (!se) {
-		add_nr_running(rq, task_delta);
-	}
-
-	/* Determine whether we need to wake up potentially idle CPU: */
-	if (rq->curr == rq->idle && rq->cfs.nr_running)
-		resched_curr(rq);
-}
-
-static int unthrottle_qos_cfs_rqs(int cpu)
-{
-	struct cfs_rq *cfs_rq, *tmp_rq;
-	int res = 0;
-
-	list_for_each_entry_safe(cfs_rq, tmp_rq,
-				 &per_cpu(qos_throttled_cfs_rq, cpu), throttled_list) {
-		if (cfs_rq_throttled(cfs_rq)) {
-			unthrottle_qos_cfs_rq(cfs_rq);
-			res++;
-		}
-	}
-
-	return res;
-}
-
-static bool check_qos_cfs_rq(struct cfs_rq *cfs_rq)
-{
-	if (!cfs_bandwidth_used())
-		return false;
-
-	if (cfs_rq && cfs_rq->tg->qos_level < 0 &&
-	    !sched_idle_cpu(cpu_of(rq_of(cfs_rq)))) {
-		throttle_qos_cfs_rq(cfs_rq);
-		return true;
-	}
-
-	return false;
-}
-#endif
-
 static struct task_struct *
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -7053,12 +6927,6 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 		se = pick_next_entity(cfs_rq, curr);
 		cfs_rq = group_cfs_rq(se);
-
-#ifdef CONFIG_QOS_SCHED
-		if (check_qos_cfs_rq(cfs_rq)) {
-			cfs_rq = &rq->cfs;
-			BUG_ON(cfs_rq->nr_running == 0);
-		}
-#endif
 	} while (cfs_rq);
 
 	p = task_of(se);
@@ -7148,12 +7016,6 @@ done: __maybe_unused;
 	if (new_tasks > 0)
 		goto again;
 
-#ifdef CONFIG_QOS_SCHED
-	if (unthrottle_qos_cfs_rqs(cpu_of(rq))) {
-		goto again;
-	}
-#endif
-
 	return NULL;
 }
@@ -10821,14 +10683,6 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
 __init void init_sched_fair_class(void)
 {
-#ifdef CONFIG_QOS_SCHED
-	int i;
-
-	for_each_possible_cpu(i) {
-		INIT_LIST_HEAD(&per_cpu(qos_throttled_cfs_rq, i));
-	}
-#endif
-
 #ifdef CONFIG_SMP
 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
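The backbone of the reverted machinery is a per-CPU intrusive list: throttle_qos_cfs_rq() links a cfs_rq into its CPU's qos_throttled_cfs_rq list, and unthrottle_qos_cfs_rqs() drains that list with list_for_each_entry_safe(), which caches the next node because each iteration deletes the node it stands on via list_del_init(). A small self-contained sketch of that safe-drain pattern, with the kernel's list primitives re-implemented in userspace for illustration (toy types, hardcoded element type; not the kernel headers):

#include <stddef.h>
#include <stdio.h>

/* Minimal userspace re-implementation of the kernel's intrusive list,
 * just enough to show why the drain loop needs the _safe variant. */
struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

/* Iterate while caching the next node, so the body may delete 'pos'.
 * Element type is hardcoded here; the kernel macro is generic. */
#define list_for_each_entry_safe(pos, tmp, head, member)                      \
	for (pos = container_of((head)->next, struct cfs_rq, member),          \
	     tmp = container_of(pos->member.next, struct cfs_rq, member);      \
	     &pos->member != (head);                                           \
	     pos = tmp, tmp = container_of(tmp->member.next, struct cfs_rq, member))

/* Toy stand-in for the scheduler's cfs_rq. */
struct cfs_rq {
	int id;
	int throttled;
	struct list_head throttled_list;
};

static struct list_head qos_throttled_cfs_rq;   /* one CPU's list */

static void throttle(struct cfs_rq *cfs_rq)
{
	cfs_rq->throttled = 1;
	list_add(&cfs_rq->throttled_list, &qos_throttled_cfs_rq);
}

static int unthrottle_all(void)
{
	struct cfs_rq *cfs_rq, *tmp_rq;
	int res = 0;

	list_for_each_entry_safe(cfs_rq, tmp_rq, &qos_throttled_cfs_rq,
				 throttled_list) {
		cfs_rq->throttled = 0;
		list_del_init(&cfs_rq->throttled_list); /* deletes the node we stand on */
		res++;
	}
	return res;
}

int main(void)
{
	struct cfs_rq a = { .id = 1 }, b = { .id = 2 };

	INIT_LIST_HEAD(&qos_throttled_cfs_rq);
	throttle(&a);
	throttle(&b);
	printf("unthrottled %d cfs_rq(s)\n", unthrottle_all()); /* 2 */
	return 0;
}

Draining in pick_next_task_fair() (rather than from a timer) meant a CPU that found no runnable task re-checked its throttled QoS groups before reporting idle, which is why unthrottle_qos_cfs_rqs() returned a count that gated the "goto again" retry.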