openeuler / Kernel

Commit 59e1678c
Authored Nov 12, 2018 by Ingo Molnar

    Merge branch 'sched/urgent' into sched/core, to pick up dependent fixes

    Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: ff1cdc94, c469933e

Showing 1 changed file with 50 additions and 16 deletions:

    kernel/sched/fair.c (+50, -16)
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 		local = 1;
 
 	/*
-	 * Retry task to preferred node migration periodically, in case it
-	 * case it previously failed, or the scheduler moved us.
+	 * Retry to migrate task to preferred node periodically, in case it
+	 * previously failed, or the scheduler moved us.
 	 */
 	if (time_after(jiffies, p->numa_migrate_retry)) {
 		task_numa_placement(p);
@@ -5674,11 +5674,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 	return target;
 }
 
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
+static unsigned long cpu_util_without(int cpu, struct task_struct *p);
 
-static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
 {
-	return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
+	return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
 }
 
 /*
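The renamed helper keeps the same semantics: spare capacity is the CPU's capacity minus the utilization computed without the task, clamped at zero so an over-utilized CPU never reports negative spare capacity. The following user-space sketch of that arithmetic is not part of the diff; it uses hypothetical capacity and utilization values in place of the kernel's capacity_of() and cpu_util_without() helpers.

    #include <assert.h>

    /* Hypothetical stand-in for capacity_of(cpu) - cpu_util_without(cpu, p). */
    static long spare_capacity(long capacity, long util_without_p)
    {
            long spare = capacity - util_without_p;

            /* Clamp at zero, mirroring max_t(long, ..., 0) in the diff. */
            return spare > 0 ? spare : 0;
    }

    int main(void)
    {
            assert(spare_capacity(1024, 300) == 724); /* normal case       */
            assert(spare_capacity(1024, 1400) == 0);  /* over-utilized CPU */
            return 0;
    }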
@@ -5738,7 +5738,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
 
-			spare_cap = capacity_spare_wake(i, p);
+			spare_cap = capacity_spare_without(i, p);
 
 			if (spare_cap > max_spare_cap)
 				max_spare_cap = spare_cap;
@@ -5889,8 +5889,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 		return prev_cpu;
 
 	/*
-	 * We need task's util for capacity_spare_wake, sync it up to prev_cpu's
-	 * last_update_time.
+	 * We need task's util for capacity_spare_without, sync it up to
+	 * prev_cpu's last_update_time.
 	 */
 	if (!(sd_flag & SD_BALANCE_FORK))
 		sync_entity_load_avg(&p->se);
@@ -6216,10 +6216,19 @@ static inline unsigned long cpu_util(int cpu)
 }
 
 /*
- * cpu_util_wake: Compute CPU utilization with any contributions from
- * the waking task p removed.
+ * cpu_util_without: compute cpu utilization without any contributions from *p
+ * @cpu: the CPU which utilization is requested
+ * @p: the task which utilization should be discounted
+ *
+ * The utilization of a CPU is defined by the utilization of tasks currently
+ * enqueued on that CPU as well as tasks which are currently sleeping after an
+ * execution on that CPU.
+ *
+ * This method returns the utilization of the specified CPU by discounting the
+ * utilization of the specified task, whenever the task is currently
+ * contributing to the CPU utilization.
  */
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq;
 	unsigned int util;
@@ -6231,7 +6240,7 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
 	cfs_rq = &cpu_rq(cpu)->cfs;
 	util = READ_ONCE(cfs_rq->avg.util_avg);
 
-	/* Discount task's blocked util from CPU's util */
+	/* Discount task's util from CPU's util */
 	util -= min_t(unsigned int, util, task_util(p));
 
 	/*
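The kerneldoc added above describes the intent: report the CPU's utilization as if *p were not contributing to it, and the first step is the unconditional discount of the task's utilization shown in this hunk. The sketch below is not part of the diff; it models only that step with plain integers instead of the kernel's cfs_rq and task_struct accessors.

    #include <assert.h>

    /*
     * Simplified model of the discount in cpu_util_without(): subtract the
     * task's utilization, but never go below zero, which is what
     * util -= min_t(unsigned int, util, task_util(p)) guarantees.
     */
    static unsigned int discount_task_util(unsigned int cpu_util,
                                           unsigned int task_util)
    {
            unsigned int delta = task_util < cpu_util ? task_util : cpu_util;

            return cpu_util - delta;
    }

    int main(void)
    {
            assert(discount_task_util(500, 300) == 200);
            assert(discount_task_util(250, 300) == 0); /* cannot underflow */
            return 0;
    }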
@@ -6240,14 +6249,14 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
 	 * a) if *p is the only task sleeping on this CPU, then:
 	 *      cpu_util (== task_util) > util_est (== 0)
 	 *    and thus we return:
-	 *      cpu_util_wake = (cpu_util - task_util) = 0
+	 *      cpu_util_without = (cpu_util - task_util) = 0
 	 *
 	 * b) if other tasks are SLEEPING on this CPU, which is now exiting
 	 *    IDLE, then:
 	 *      cpu_util >= task_util
 	 *      cpu_util > util_est (== 0)
 	 *    and thus we discount *p's blocked utilization to return:
-	 *      cpu_util_wake = (cpu_util - task_util) >= 0
+	 *      cpu_util_without = (cpu_util - task_util) >= 0
 	 *
 	 * c) if other tasks are RUNNABLE on that CPU and
 	 *      util_est > cpu_util
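With illustrative numbers: in case a) the sleeping task is the CPU's only contribution, so cpu_util == task_util (say both 300) and util_est == 0, and the function returns 300 - 300 = 0; in case b) other sleepers keep cpu_util at, say, 500 while task_util is still 300, so the discounted result is 500 - 300 = 200.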
@@ -6260,8 +6269,33 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
 	 * covered by the following code when estimated utilization is
 	 * enabled.
 	 */
-	if (sched_feat(UTIL_EST))
-		util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+	if (sched_feat(UTIL_EST)) {
+		unsigned int estimated =
+			READ_ONCE(cfs_rq->avg.util_est.enqueued);
+
+		/*
+		 * Despite the following checks we still have a small window
+		 * for a possible race, when an execl's select_task_rq_fair()
+		 * races with LB's detach_task():
+		 *
+		 *   detach_task()
+		 *     p->on_rq = TASK_ON_RQ_MIGRATING;
+		 *     ---------------------------------- A
+		 *     deactivate_task()                  \
+		 *       dequeue_task()                    + RaceTime
+		 *         util_est_dequeue()             /
+		 *     ---------------------------------- B
+		 *
+		 * The additional check on "current == p" it's required to
+		 * properly fix the execl regression and it helps in further
+		 * reducing the chances for the above race.
+		 */
+		if (unlikely(task_on_rq_queued(p) || current == p)) {
+			estimated -= min_t(unsigned int, estimated,
+					   (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+		}
+		util = max(util, estimated);
+	}
 
 	/*
 	 * Utilization (estimated) can exceed the CPU capacity, thus let's
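The new UTIL_EST branch only subtracts *p's estimated utilization when the task is still queued on that CPU or is the current task, which is what addresses the execl case described in the comment. The sketch below is not part of the diff; it is a simplified user-space model of that decision, with the kernel's READ_ONCE() and _task_util_est() machinery replaced by plain parameters.

    #include <assert.h>
    #include <stdbool.h>

    /*
     * Simplified model of the UTIL_EST handling in cpu_util_without():
     * only discount the task's estimated utilization when it is still
     * accounted in the CPU's util_est (queued there, or currently running).
     */
    static unsigned int util_est_without(unsigned int estimated,
                                         unsigned int task_util_est,
                                         bool task_queued_or_current)
    {
            if (task_queued_or_current) {
                    unsigned int delta =
                            task_util_est < estimated ? task_util_est : estimated;

                    estimated -= delta;
            }
            return estimated;
    }

    int main(void)
    {
            /* Task still enqueued here: its estimate is removed. */
            assert(util_est_without(400, 150, true) == 250);
            /* Task already dequeued elsewhere: nothing to discount. */
            assert(util_est_without(400, 150, false) == 400);
            return 0;
    }

The real code then takes max(util, estimated), so whichever of the PELT-based and estimated signals is larger is what gets reported.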