Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
OpenHarmony
kernel_linux
提交
1cdad715
K
kernel_linux
项目概览
OpenHarmony
/
kernel_linux
上一次同步 3 年多
通知
13
Star
8
Fork
2
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
K
kernel_linux
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
1cdad715
编写于
6月 19, 2008
作者:
I
Ingo Molnar
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'sched' into sched-devel
Conflicts: kernel/sched_rt.c Signed-off-by:
N
Ingo Molnar
<
mingo@elte.hu
>
上级
20b6331b
15a8641e
变更
2
隐藏空白更改
内联
并排
Showing 2 changed files with 40 additions and 30 deletions
+40
-30
kernel/sched.c
kernel/sched.c
+1
-2
kernel/sched_rt.c
kernel/sched_rt.c
+39
-28
未找到文件。
kernel/sched.c
浏览文件 @
1cdad715
...
...
@@ -7697,7 +7697,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
else
rt_se
->
rt_rq
=
parent
->
my_q
;
rt_se
->
rt_rq
=
&
rq
->
rt
;
rt_se
->
my_q
=
rt_rq
;
rt_se
->
parent
=
parent
;
INIT_LIST_HEAD
(
&
rt_se
->
run_list
);
...
...
@@ -8420,7 +8419,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
#ifdef CONFIG_CGROUP_SCHED
static
int
__rt_schedulable
(
struct
task_group
*
tg
,
u64
period
,
u64
runtime
)
{
struct
task_group
*
tgi
,
*
parent
=
tg
->
parent
;
struct
task_group
*
tgi
,
*
parent
=
tg
?
tg
->
parent
:
NULL
;
unsigned
long
total
=
0
;
if
(
!
parent
)
{
...
...
kernel/sched_rt.c
浏览文件 @
1cdad715
...
...
@@ -571,14 +571,20 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
#endif
}
static
void
enqueue_rt_entity
(
struct
sched_rt_entity
*
rt_se
)
static
void
__
enqueue_rt_entity
(
struct
sched_rt_entity
*
rt_se
)
{
struct
rt_rq
*
rt_rq
=
rt_rq_of_se
(
rt_se
);
struct
rt_prio_array
*
array
=
&
rt_rq
->
active
;
struct
rt_rq
*
group_rq
=
group_rt_rq
(
rt_se
);
struct
list_head
*
queue
=
array
->
queue
+
rt_se_prio
(
rt_se
);
if
(
group_rq
&&
rt_rq_throttled
(
group_rq
))
/*
* Don't enqueue the group if it's throttled, or when empty.
* The latter is a consequence of the former when a child group
* get throttled and the current group doesn't have any other
* active members.
*/
if
(
group_rq
&&
(
rt_rq_throttled
(
group_rq
)
||
!
group_rq
->
rt_nr_running
))
return
;
if
(
rt_se
->
nr_cpus_allowed
==
1
)
...
...
@@ -591,7 +597,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
inc_rt_tasks
(
rt_se
,
rt_rq
);
}
static
void
dequeue_rt_entity
(
struct
sched_rt_entity
*
rt_se
)
static
void
__
dequeue_rt_entity
(
struct
sched_rt_entity
*
rt_se
)
{
struct
rt_rq
*
rt_rq
=
rt_rq_of_se
(
rt_se
);
struct
rt_prio_array
*
array
=
&
rt_rq
->
active
;
...
...
@@ -607,11 +613,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top - down.
*/
static
void
dequeue_rt_stack
(
struct
task_struct
*
p
)
static
void
dequeue_rt_stack
(
struct
sched_rt_entity
*
rt_se
)
{
struct
sched_rt_entity
*
rt_se
,
*
back
=
NULL
;
struct
sched_rt_entity
*
back
=
NULL
;
rt_se
=
&
p
->
rt
;
for_each_sched_rt_entity
(
rt_se
)
{
rt_se
->
back
=
back
;
back
=
rt_se
;
...
...
@@ -619,7 +624,26 @@ static void dequeue_rt_stack(struct task_struct *p)
for
(
rt_se
=
back
;
rt_se
;
rt_se
=
rt_se
->
back
)
{
if
(
on_rt_rq
(
rt_se
))
dequeue_rt_entity
(
rt_se
);
__dequeue_rt_entity
(
rt_se
);
}
}
/*
 * Enqueue an RT scheduling entity and every ancestor entity, bottom-up.
 *
 * The whole hierarchy is first removed top-down via dequeue_rt_stack()
 * (the prio of an upper entry depends on the lower entries, so removal
 * must happen top-down — see the comment above dequeue_rt_stack()),
 * then each level is re-inserted with __enqueue_rt_entity(), which
 * skips throttled or empty group runqueues.
 */
static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);
	/* for_each_sched_rt_entity() walks rt_se up its parent chain */
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se);
}
/*
 * Dequeue an RT scheduling entity.
 *
 * dequeue_rt_stack() removes the entity and all of its ancestors
 * top-down; we then walk back up the hierarchy and re-enqueue any
 * group runqueue that still has runnable tasks (rt_nr_running != 0),
 * so that non-empty parent groups remain queued after this entity
 * leaves.
 */
static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		/* group_rt_rq() is NULL for a task entity (leaf) */
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		/* re-enqueue only non-empty group runqueues */
		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se);
	}
}
...
...
@@ -633,32 +657,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
if
(
wakeup
)
rt_se
->
timeout
=
0
;
dequeue_rt_stack
(
p
);
/*
* enqueue everybody, bottom - up.
*/
for_each_sched_rt_entity
(
rt_se
)
enqueue_rt_entity
(
rt_se
);
enqueue_rt_entity
(
rt_se
);
}
static
void
dequeue_task_rt
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
sleep
)
{
struct
sched_rt_entity
*
rt_se
=
&
p
->
rt
;
struct
rt_rq
*
rt_rq
;
update_curr_rt
(
rq
);
dequeue_rt_stack
(
p
);
/*
* re-enqueue all non-empty rt_rq entities.
*/
for_each_sched_rt_entity
(
rt_se
)
{
rt_rq
=
group_rt_rq
(
rt_se
);
if
(
rt_rq
&&
rt_rq
->
rt_nr_running
)
enqueue_rt_entity
(
rt_se
);
}
dequeue_rt_entity
(
rt_se
);
}
/*
...
...
@@ -669,9 +676,13 @@ static
void
requeue_rt_entity
(
struct
rt_rq
*
rt_rq
,
struct
sched_rt_entity
*
rt_se
)
{
struct
rt_prio_array
*
array
=
&
rt_rq
->
active
;
struct
list_head
*
queue
=
array
->
queue
+
rt_se_prio
(
rt_se
);
list_del_init
(
&
rt_se
->
run_list
);
list_add_tail
(
&
rt_se
->
run_list
,
array
->
queue
+
rt_se_prio
(
rt_se
));
if
(
on_rt_rq
(
rt_se
))
{
list_del_init
(
&
rt_se
->
run_list
);
list_add_tail
(
&
rt_se
->
run_list
,
array
->
queue
+
rt_se_prio
(
rt_se
));
}
}
static
void
requeue_task_rt
(
struct
rq
*
rq
,
struct
task_struct
*
p
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录