gsplhtlxg / clone-Linux
Commit a4133765

Authored on Aug 13, 2012 by Thomas Gleixner

Merge branch 'sched/urgent' into sched/core

Parents: 3bf671af, 8f618968
Showing 5 changed files with 70 additions and 19 deletions (+70, -19)
kernel/sched/core.c        +21  -14
kernel/sched/fair.c         +9   -2
kernel/sched/rt.c          +13   -0
kernel/sched/sched.h        +6   -2
kernel/sched/stop_task.c   +21   -1
kernel/sched/core.c
@@ -3142,6 +3142,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 # define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
 #endif

+static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+{
+	u64 temp = (__force u64) rtime;
+
+	temp *= (__force u64) utime;
+
+	if (sizeof(cputime_t) == 4)
+		temp = div_u64(temp, (__force u32) total);
+	else
+		temp = div64_u64(temp, (__force u64) total);
+
+	return (__force cputime_t) temp;
+}
+
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
 	cputime_t rtime, utime = p->utime, total = utime + p->stime;
@@ -3151,13 +3165,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	 */
 	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);

-	if (total) {
-		u64 temp = (__force u64) rtime;
-
-		temp *= (__force u64) utime;
-		do_div(temp, (__force u32) total);
-		utime = (__force cputime_t) temp;
-	} else
+	if (total)
+		utime = scale_utime(utime, rtime, total);
+	else
 		utime = rtime;

 	/*
@@ -3184,13 +3194,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	total = cputime.utime + cputime.stime;
 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);

-	if (total) {
-		u64 temp = (__force u64) rtime;
-
-		temp *= (__force u64) cputime.utime;
-		do_div(temp, (__force u32) total);
-		utime = (__force cputime_t) temp;
-	} else
+	if (total)
+		utime = scale_utime(cputime.utime, rtime, total);
+	else
 		utime = rtime;

 	sig->prev_utime = max(sig->prev_utime, utime);
@@ -7246,6 +7252,7 @@ int in_sched_functions(unsigned long addr)
 #ifdef CONFIG_CGROUP_SCHED
 struct task_group root_task_group;
+LIST_HEAD(task_groups);
 #endif

 DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
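For context on the cputime hunks: the new scale_utime() helper factors out the arithmetic that task_times() and thread_group_times() previously open-coded, namely rescaling the sampled utime so that utime + stime adds up to the measured runtime, i.e. utime_scaled = rtime * utime / (utime + stime). Choosing div_u64() or div64_u64() based on sizeof(cputime_t) avoids do_div(), which assumes a 32-bit divisor and is therefore unsuitable once cputime_t is 64 bits wide. Below is a minimal userspace sketch of the same scaling with plain uint64_t instead of cputime_t and hypothetical sample values; it illustrates the formula only and is not the kernel code.

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of the scaling done by scale_utime(): rescale utime so
 * that utime + stime matches the measured runtime rtime. */
static uint64_t scale_utime_sketch(uint64_t utime, uint64_t rtime, uint64_t total)
{
	uint64_t temp = rtime;

	temp *= utime;		/* rtime * utime          */
	return temp / total;	/* ... / (utime + stime)  */
}

int main(void)
{
	uint64_t utime = 300, stime = 100;	/* sampled tick counts (hypothetical) */
	uint64_t rtime = 600;			/* actual runtime, e.g. from sum_exec_runtime */
	uint64_t total = utime + stime;

	/* 600 * 300 / 400 = 450: utime keeps its 3:1 ratio against stime. */
	printf("scaled utime = %llu\n",
	       (unsigned long long)scale_utime_sketch(utime, rtime, total));
	return 0;
}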
kernel/sched/fair.c
@@ -3387,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data)

 static void update_h_load(long cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long now = jiffies;
+
+	if (rq->h_load_throttle == now)
+		return;
+
+	rq->h_load_throttle = now;
+
 	rcu_read_lock();
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 	rcu_read_unlock();
@@ -4293,11 +4301,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	env.src_rq    = busiest;
 	env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);

+	update_h_load(env.src_cpu);
 more_balance:
 	local_irq_save(flags);
 	double_rq_lock(this_rq, busiest);
-	if (!env.loop)
-		update_h_load(env.src_cpu);

 	/*
 	 * cur_ld_moved - load moved in current iteration
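Read together, the two fair.c hunks rate-limit the h_load recomputation: update_h_load() records the jiffy it last ran in the new rq->h_load_throttle field and returns early if called again within the same tick, and the call site in load_balance() is hoisted above the locked section so the walk_tg_tree() pass no longer runs under double_rq_lock(). The following is a minimal userspace sketch of the once-per-jiffy throttling pattern only, with a hypothetical recompute() standing in for the hierarchy walk.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's jiffies counter and the
 * expensive walk_tg_tree() recomputation. */
static unsigned long jiffies;

struct rq_sketch {
	unsigned long h_load_throttle;	/* jiffy of the last recompute */
	unsigned long recomputes;	/* how often the expensive path ran */
};

static void recompute(struct rq_sketch *rq)
{
	rq->recomputes += 1;	/* placeholder for the real hierarchy walk */
}

/* Run the expensive part at most once per jiffy, like update_h_load(). */
static void update_h_load_sketch(struct rq_sketch *rq)
{
	unsigned long now = jiffies;

	if (rq->h_load_throttle == now)
		return;		/* already refreshed during this tick */

	rq->h_load_throttle = now;
	recompute(rq);
}

int main(void)
{
	struct rq_sketch rq = { .h_load_throttle = (unsigned long)-1 };

	jiffies = 100;
	update_h_load_sketch(&rq);	/* recomputes */
	update_h_load_sketch(&rq);	/* skipped: same jiffy */
	jiffies = 101;
	update_h_load_sketch(&rq);	/* recomputes again */

	printf("recomputations: %lu\n", rq.recomputes);	/* prints 2 */
	return 0;
}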
kernel/sched/rt.c
@@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 	const struct cpumask *span;

 	span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * FIXME: isolated CPUs should really leave the root task group,
+	 * whether they are isolcpus or were isolated via cpusets, lest
+	 * the timer run on a CPU which does not service all runqueues,
+	 * potentially leaving other CPUs indefinitely throttled.  If
+	 * isolation is really required, the user will turn the throttle
+	 * off to kill the perturbations it causes anyway.  Meanwhile,
+	 * this maintains functionality for boot and/or troubleshooting.
+	 */
+	if (rt_b == &root_task_group.rt_bandwidth)
+		span = cpu_online_mask;
+#endif
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
kernel/sched/sched.h
@@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex;
 struct cfs_rq;
 struct rt_rq;

-static LIST_HEAD(task_groups);
+extern struct list_head task_groups;

 struct cfs_bandwidth {
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -374,7 +374,11 @@ struct rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
-#endif
+#ifdef CONFIG_SMP
+	unsigned long h_load_throttle;
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct list_head leaf_rt_rq_list;
 #endif
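The first sched.h hunk pairs with the LIST_HEAD(task_groups) addition in kernel/sched/core.c above: `static LIST_HEAD(task_groups)` in a shared header gives every translation unit that includes sched.h its own private list head, so different .c files could end up operating on different lists; the fix declares the list extern in the header and defines it exactly once in core.c. A small illustration of the underlying C pitfall, written as a hypothetical two-file example (labels in comments; not a single runnable program):

/* shared.h (hypothetical) */
static int counter;	/* every .c that includes this header gets its OWN copy */

/* a.c (hypothetical) */
#include "shared.h"
void bump(void) { counter++; }			/* increments a.c's copy */

/* b.c (hypothetical) */
#include "shared.h"
int read_counter(void) { return counter; }	/* reads b.c's copy: still 0 */

/*
 * The fix mirrors the sched.h hunk: put a declaration in the header,
 *	extern int counter;
 * and a single definition in one .c file,
 *	int counter;
 * so all translation units share one object -- just as task_groups is now
 * declared extern in sched.h and defined via LIST_HEAD() in core.c.
 */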
kernel/sched/stop_task.c
@@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
 	struct task_struct *stop = rq->stop;

-	if (stop && stop->on_rq)
+	if (stop && stop->on_rq) {
+		stop->se.exec_start = rq->clock_task;
 		return stop;
+	}

 	return NULL;
 }
@@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)

 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
+	struct task_struct *curr = rq->curr;
+	u64 delta_exec;
+
+	delta_exec = rq->clock_task - curr->se.exec_start;
+	if (unlikely((s64)delta_exec < 0))
+		delta_exec = 0;
+
+	schedstat_set(curr->se.statistics.exec_max,
+			max(curr->se.statistics.exec_max, delta_exec));
+
+	curr->se.sum_exec_runtime += delta_exec;
+	account_group_exec_runtime(curr, delta_exec);
+
+	curr->se.exec_start = rq->clock_task;
+	cpuacct_charge(curr, delta_exec);
 }

 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
@@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)

 static void set_curr_task_stop(struct rq *rq)
 {
+	struct task_struct *stop = rq->stop;
+
+	stop->se.exec_start = rq->clock_task;
 }

 static void switched_to_stop(struct rq *rq, struct task_struct *p)
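The stop_task.c hunks give the per-CPU stop task the runtime accounting the other scheduling classes already perform: exec_start is stamped when the stop task is picked or made current, and put_prev_task_stop() charges the elapsed rq->clock_task delta to sum_exec_runtime, the group runtime and cpuacct. A minimal userspace sketch of that stamp-on-start / account-on-stop pattern, with hypothetical names and nanosecond values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for rq->clock_task and the accounted entity. */
struct entity_sketch {
	uint64_t exec_start;		/* clock value when we started running */
	uint64_t sum_exec_runtime;	/* accumulated runtime */
};

/* Stamp the start time, as pick_next_task_stop()/set_curr_task_stop() now do. */
static void start_running(struct entity_sketch *se, uint64_t clock_task)
{
	se->exec_start = clock_task;
}

/* Account the elapsed slice, as put_prev_task_stop() now does. */
static void stop_running(struct entity_sketch *se, uint64_t clock_task)
{
	int64_t delta = (int64_t)(clock_task - se->exec_start);

	if (delta < 0)			/* guard against the clock going backwards */
		delta = 0;

	se->sum_exec_runtime += (uint64_t)delta;
	se->exec_start = clock_task;	/* re-stamp for the next slice */
}

int main(void)
{
	struct entity_sketch se = { 0 };

	start_running(&se, 1000);
	stop_running(&se, 1500);	/* accounts 500 ns of runtime */
	printf("runtime = %llu\n", (unsigned long long)se.sum_exec_runtime);
	return 0;
}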