openeuler / raspberrypi-kernel
Commit 8075b542
Authored Dec 03, 2015 by Tejun Heo

Merge branch 'for-4.4-fixes' into for-4.5
Parents: d2b43658, 67cde9c4

Showing 13 changed files with 169 additions and 161 deletions:
block/blk-cgroup.c              +3   -3
include/linux/cgroup-defs.h     +3   -6
include/linux/cgroup.h          +22  -11
kernel/cgroup.c                 +44  -13
kernel/cgroup_freezer.c         +10  -13
kernel/cgroup_pids.c            +20  -57
kernel/cpuset.c                 +21  -12
kernel/events/core.c            +3   -3
kernel/fork.c                   +3   -6
kernel/sched/core.c             +6   -6
mm/memcontrol.c                 +23  -22
net/core/netclassid_cgroup.c    +6   -5
net/core/netprio_cgroup.c       +5   -4
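
The bulk of the diff below is one mechanical API conversion, repeated subsystem by subsystem: the ->can_attach()/->cancel_attach()/->attach() methods lose their struct cgroup_subsys_state *css parameter, and the taskset iterators (cgroup_taskset_first(), cgroup_taskset_next(), cgroup_taskset_for_each()) instead hand back a destination css for each task. A minimal userspace sketch of the new iteration shape — the struct layouts and names below are simplified stand-ins for illustration, not the kernel's definitions:

#include <stdio.h>
#include <stddef.h>

struct cgroup_subsys_state { const char *name; };

struct tset_entry {
        const char *task;
        struct cgroup_subsys_state *dst_css;    /* may differ per task */
};

struct cgroup_taskset {
        struct tset_entry *entries;
        size_t nr, cur;
};

/* rough analogue of cgroup_taskset_first()/cgroup_taskset_next() */
static const char *taskset_next(struct cgroup_taskset *tset,
                                struct cgroup_subsys_state **dst_cssp)
{
        if (tset->cur >= tset->nr)
                return NULL;
        *dst_cssp = tset->entries[tset->cur].dst_css;
        return tset->entries[tset->cur++].task;
}

int main(void)
{
        struct cgroup_subsys_state a = { "css-A" }, b = { "css-B" };
        struct tset_entry entries[] = {
                { "task-1", &a }, { "task-2", &a }, { "task-3", &b },
        };
        struct cgroup_taskset tset = { entries, 3, 0 };
        struct cgroup_subsys_state *dst_css;
        const char *task;

        /* shape of: cgroup_taskset_for_each(task, dst_css, tset) { ... } */
        while ((task = taskset_next(&tset, &dst_css)))
                printf("%s -> %s\n", task, dst_css->name);
        return 0;
}

As the updated documentation in include/linux/cgroup.h below spells out, tasks in one taskset need not share a destination css on the v2 hierarchy, which is why a single css argument per callback no longer suffices.
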
block/blk-cgroup.c
@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkcg_can_attach(struct cgroup_subsys_state *css,
-                            struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_taskset *tset)
 {
         struct task_struct *task;
+        struct cgroup_subsys_state *dst_css;
         struct io_context *ioc;
         int ret = 0;

         /* task_lock() is needed to avoid races with exit_io_context() */
-        cgroup_taskset_for_each(task, tset) {
+        cgroup_taskset_for_each(task, dst_css, tset) {
                 task_lock(task);
                 ioc = task->io_context;
                 if (ioc && atomic_read(&ioc->nr_tasks) > 1)

include/linux/cgroup-defs.h
@@ -421,12 +421,9 @@ struct cgroup_subsys {
         void (*css_reset)(struct cgroup_subsys_state *css);
         void (*css_e_css_changed)(struct cgroup_subsys_state *css);

-        int (*can_attach)(struct cgroup_subsys_state *css,
-                          struct cgroup_taskset *tset);
-        void (*cancel_attach)(struct cgroup_subsys_state *css,
-                              struct cgroup_taskset *tset);
-        void (*attach)(struct cgroup_subsys_state *css,
-                       struct cgroup_taskset *tset);
+        int (*can_attach)(struct cgroup_taskset *tset);
+        void (*cancel_attach)(struct cgroup_taskset *tset);
+        void (*attach)(struct cgroup_taskset *tset);
         int (*can_fork)(struct task_struct *task, void **priv_p);
         void (*cancel_fork)(struct task_struct *task, void *priv);
         void (*fork)(struct task_struct *task, void *priv);

include/linux/cgroup.h
@@ -120,8 +120,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state
 struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
                                                      struct cgroup_subsys_state *css);

-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+                                         struct cgroup_subsys_state **dst_cssp);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+                                        struct cgroup_subsys_state **dst_cssp);

 void css_task_iter_start(struct cgroup_subsys_state *css,
                          struct css_task_iter *it);
@@ -236,30 +238,39 @@ void css_task_iter_end(struct css_task_iter *it);
 /**
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
+ * @dst_css: the destination css
  * @tset: taskset to iterate
  *
  * @tset may contain multiple tasks and they may belong to multiple
- * processes. When there are multiple tasks in @tset, if a task of a
- * process is in @tset, all tasks of the process are in @tset. Also, all
- * are guaranteed to share the same source and destination csses.
+ * processes.
+ *
+ * On the v2 hierarchy, there may be tasks from multiple processes and they
+ * may not share the source or destination csses.
+ *
+ * On traditional hierarchies, when there are multiple tasks in @tset, if a
+ * task of a process is in @tset, all tasks of the process are in @tset.
+ * Also, all are guaranteed to share the same source and destination csses.
  *
  * Iteration is not in any specific order.
  */
-#define cgroup_taskset_for_each(task, tset) \
-        for ((task) = cgroup_taskset_first((tset)); (task); \
-             (task) = cgroup_taskset_next((tset)))
+#define cgroup_taskset_for_each(task, dst_css, tset) \
+        for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
+             (task); \
+             (task) = cgroup_taskset_next((tset), &(dst_css)))

 /**
  * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
  * @leader: the loop cursor
+ * @dst_css: the destination css
  * @tset: takset to iterate
  *
  * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
  * may not contain any.
  */
-#define cgroup_taskset_for_each_leader(leader, tset) \
-        for ((leader) = cgroup_taskset_first((tset)); (leader); \
-             (leader) = cgroup_taskset_next((tset))) \
+#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
+        for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
+             (leader); \
+             (leader) = cgroup_taskset_next((tset), &(dst_css))) \
                 if ((leader) != (leader)->group_leader) \
                         ; \
                 else
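
The trailing if ((leader) != (leader)->group_leader) ; else in cgroup_taskset_for_each_leader() is a filtering-macro idiom: the empty if arm swallows non-leader tasks, and the caller's braced block becomes the else arm, so the macro still composes with an ordinary { ... } body. A self-contained toy of the same trick (for_each_even is an invented name, used purely for illustration):

#include <stdio.h>

/* same "if (skip) ; else" tail as the kernel macro above */
#define for_each_even(i, n) \
        for ((i) = 0; (i) < (n); (i)++) \
                if ((i) % 2 != 0) \
                        ; \
                else

int main(void)
{
        int i;

        for_each_even(i, 6) {
                printf("%d\n", i);      /* prints 0, 2, 4 */
        }
        return 0;
}
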
kernel/cgroup.c
@@ -761,9 +761,11 @@ static void put_css_set_locked(struct css_set *cset)
         if (!atomic_dec_and_test(&cset->refcount))
                 return;

-        /* This css_set is dead. unlink it and release cgroup refcounts */
-        for_each_subsys(ss, ssid)
+        /* This css_set is dead. unlink it and release cgroup and css refs */
+        for_each_subsys(ss, ssid) {
                 list_del(&cset->e_cset_node[ssid]);
+                css_put(cset->subsys[ssid]);
+        }
         hash_del(&cset->hlist);
         css_set_count--;
@@ -1063,9 +1065,13 @@ static struct css_set *find_css_set(struct css_set *old_cset,
         key = css_set_hash(cset->subsys);
         hash_add(css_set_table, &cset->hlist, key);

-        for_each_subsys(ss, ssid)
+        for_each_subsys(ss, ssid) {
+                struct cgroup_subsys_state *css = cset->subsys[ssid];
+
                 list_add_tail(&cset->e_cset_node[ssid],
-                              &cset->subsys[ssid]->cgroup->e_csets[ssid]);
+                              &css->cgroup->e_csets[ssid]);
+                css_get(css);
+        }

         spin_unlock_bh(&css_set_lock);
@@ -2229,6 +2235,9 @@ struct cgroup_taskset {
         struct list_head        src_csets;
         struct list_head        dst_csets;

+        /* the subsys currently being processed */
+        int                     ssid;
+
         /*
          * Fields for cgroup_taskset_*() iteration.
          *
@@ -2291,25 +2300,29 @@ static void cgroup_taskset_add(struct task_struct *task,
 /**
  * cgroup_taskset_first - reset taskset and return the first task
  * @tset: taskset of interest
+ * @dst_cssp: output variable for the destination css
  *
  * @tset iteration is initialized and the first task is returned.
  */
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+                                         struct cgroup_subsys_state **dst_cssp)
 {
         tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
         tset->cur_task = NULL;

-        return cgroup_taskset_next(tset);
+        return cgroup_taskset_next(tset, dst_cssp);
 }

 /**
  * cgroup_taskset_next - iterate to the next task in taskset
  * @tset: taskset of interest
+ * @dst_cssp: output variable for the destination css
  *
  * Return the next task in @tset. Iteration must have been initialized
  * with cgroup_taskset_first().
  */
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+                                        struct cgroup_subsys_state **dst_cssp)
 {
         struct css_set *cset = tset->cur_cset;
         struct task_struct *task = tset->cur_task;
@@ -2324,6 +2337,18 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
                 if (&task->cg_list != &cset->mg_tasks) {
                         tset->cur_cset = cset;
                         tset->cur_task = task;
+
+                        /*
+                         * This function may be called both before and
+                         * after cgroup_taskset_migrate(). The two cases
+                         * can be distinguished by looking at whether @cset
+                         * has its ->mg_dst_cset set.
+                         */
+                        if (cset->mg_dst_cset)
+                                *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
+                        else
+                                *dst_cssp = cset->subsys[tset->ssid];
+
                         return task;
                 }
@@ -2359,7 +2384,8 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
         /* check that we can legitimately attach to the cgroup */
         for_each_e_css(css, i, dst_cgrp) {
                 if (css->ss->can_attach) {
-                        ret = css->ss->can_attach(css, tset);
+                        tset->ssid = i;
+                        ret = css->ss->can_attach(tset);
                         if (ret) {
                                 failed_css = css;
                                 goto out_cancel_attach;
@@ -2392,9 +2418,12 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
          */
         tset->csets = &tset->dst_csets;

-        for_each_e_css(css, i, dst_cgrp)
-                if (css->ss->attach)
-                        css->ss->attach(css, tset);
+        for_each_e_css(css, i, dst_cgrp) {
+                if (css->ss->attach) {
+                        tset->ssid = i;
+                        css->ss->attach(tset);
+                }
+        }

         ret = 0;
         goto out_release_tset;
@@ -2403,8 +2432,10 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
         for_each_e_css(css, i, dst_cgrp) {
                 if (css == failed_css)
                         break;
-                if (css->ss->cancel_attach)
-                        css->ss->cancel_attach(css, tset);
+                if (css->ss->cancel_attach) {
+                        tset->ssid = i;
+                        css->ss->cancel_attach(tset);
+                }
         }

 out_release_tset:
         spin_lock_bh(&css_set_lock);
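
The new tset->ssid field added above is what lets the now single-argument callbacks recover "their" css: cgroup_taskset_migrate() records the subsystem index right before each ->can_attach()/->attach()/->cancel_attach() invocation, and cgroup_taskset_next() uses the stored index to pick subsys[tset->ssid] from either the destination or the source css_set. A toy record-then-dispatch sketch of the same pattern, with simplified types rather than the kernel API:

#include <stdio.h>

#define NSUBSYS 2

struct taskset {
        int ssid;                       /* subsys currently being processed */
        const char *dst[NSUBSYS];       /* per-subsystem destination */
};

/* analogue of the iterator consulting tset->ssid */
static const char *taskset_dst(const struct taskset *tset)
{
        return tset->dst[tset->ssid];
}

/* single-argument callback, as in the new ->attach(tset) */
static void toy_attach(struct taskset *tset)
{
        printf("subsys %d attaches to %s\n", tset->ssid, taskset_dst(tset));
}

int main(void)
{
        struct taskset tset = { 0, { "cpu-dest", "memory-dest" } };
        int i;

        /* shape of: for_each_e_css(...) { tset->ssid = i; ss->attach(tset); } */
        for (i = 0; i < NSUBSYS; i++) {
                tset.ssid = i;
                toy_attach(&tset);
        }
        return 0;
}
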
kernel/cgroup_freezer.c
@@ -155,12 +155,10 @@ static void freezer_css_free(struct cgroup_subsys_state *css)
  * @freezer->lock. freezer_attach() makes the new tasks conform to the
  * current state and all following state changes can see the new tasks.
  */
-static void freezer_attach(struct cgroup_subsys_state *new_css,
-                           struct cgroup_taskset *tset)
+static void freezer_attach(struct cgroup_taskset *tset)
 {
-        struct freezer *freezer = css_freezer(new_css);
         struct task_struct *task;
-        bool clear_frozen = false;
+        struct cgroup_subsys_state *new_css;

         mutex_lock(&freezer_mutex);
@@ -174,22 +172,21 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
          * current state before executing the following - !frozen tasks may
          * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
          */
-        cgroup_taskset_for_each(task, tset) {
+        cgroup_taskset_for_each(task, new_css, tset) {
+                struct freezer *freezer = css_freezer(new_css);
+
                 if (!(freezer->state & CGROUP_FREEZING)) {
                         __thaw_task(task);
                 } else {
                         freeze_task(task);
-                        freezer->state &= ~CGROUP_FROZEN;
-                        clear_frozen = true;
+                        /* clear FROZEN and propagate upwards */
+                        while (freezer && (freezer->state & CGROUP_FROZEN)) {
+                                freezer->state &= ~CGROUP_FROZEN;
+                                freezer = parent_freezer(freezer);
+                        }
                 }
         }

-        /* propagate FROZEN clearing upwards */
-        while (clear_frozen && (freezer = parent_freezer(freezer))) {
-                freezer->state &= ~CGROUP_FROZEN;
-                clear_frozen = freezer->state & CGROUP_FREEZING;
-        }
-
         mutex_unlock(&freezer_mutex);
 }

kernel/cgroup_pids.c
@@ -106,7 +106,7 @@ static void pids_uncharge(struct pids_cgroup *pids, int num)
 {
         struct pids_cgroup *p;

-        for (p = pids; p; p = parent_pids(p))
+        for (p = pids; parent_pids(p); p = parent_pids(p))
                 pids_cancel(p, num);
 }
@@ -123,7 +123,7 @@ static void pids_charge(struct pids_cgroup *pids, int num)
 {
         struct pids_cgroup *p;

-        for (p = pids; p; p = parent_pids(p))
+        for (p = pids; parent_pids(p); p = parent_pids(p))
                 atomic64_add(num, &p->counter);
 }
@@ -140,7 +140,7 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
 {
         struct pids_cgroup *p, *q;

-        for (p = pids; p; p = parent_pids(p)) {
+        for (p = pids; parent_pids(p); p = parent_pids(p)) {
                 int64_t new = atomic64_add_return(num, &p->counter);

                 /*
@@ -162,13 +162,13 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
         return -EAGAIN;
 }

-static int pids_can_attach(struct cgroup_subsys_state *css,
-                           struct cgroup_taskset *tset)
+static int pids_can_attach(struct cgroup_taskset *tset)
 {
-        struct pids_cgroup *pids = css_pids(css);
         struct task_struct *task;
+        struct cgroup_subsys_state *dst_css;

-        cgroup_taskset_for_each(task, tset) {
+        cgroup_taskset_for_each(task, dst_css, tset) {
+                struct pids_cgroup *pids = css_pids(dst_css);
                 struct cgroup_subsys_state *old_css;
                 struct pids_cgroup *old_pids;
@@ -187,13 +187,13 @@ static int pids_can_attach(struct cgroup_subsys_state *css,
         return 0;
 }

-static void pids_cancel_attach(struct cgroup_subsys_state *css,
-                               struct cgroup_taskset *tset)
+static void pids_cancel_attach(struct cgroup_taskset *tset)
 {
-        struct pids_cgroup *pids = css_pids(css);
         struct task_struct *task;
+        struct cgroup_subsys_state *dst_css;

-        cgroup_taskset_for_each(task, tset) {
+        cgroup_taskset_for_each(task, dst_css, tset) {
+                struct pids_cgroup *pids = css_pids(dst_css);
                 struct cgroup_subsys_state *old_css;
                 struct pids_cgroup *old_pids;
@@ -205,65 +205,28 @@ static void pids_cancel_attach(struct cgroup_subsys_state *css,
         }
 }

+/*
+ * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
+ * on threadgroup_change_begin() held by the copy_process().
+ */
 static int pids_can_fork(struct task_struct *task, void **priv_p)
 {
         struct cgroup_subsys_state *css;
         struct pids_cgroup *pids;
-        int err;

-        /*
-         * Use the "current" task_css for the pids subsystem as the tentative
-         * css. It is possible we will charge the wrong hierarchy, in which
-         * case we will forcefully revert/reapply the charge on the right
-         * hierarchy after it is committed to the task proper.
-         */
-        css = task_get_css(current, pids_cgrp_id);
+        css = task_css_check(current, pids_cgrp_id, true);
         pids = css_pids(css);
-
-        err = pids_try_charge(pids, 1);
-        if (err)
-                goto err_css_put;
-
-        *priv_p = css;
-        return 0;
-
-err_css_put:
-        css_put(css);
-        return err;
+        return pids_try_charge(pids, 1);
 }

 static void pids_cancel_fork(struct task_struct *task, void *priv)
 {
-        struct cgroup_subsys_state *css = priv;
-        struct pids_cgroup *pids = css_pids(css);
-
-        pids_uncharge(pids, 1);
-        css_put(css);
-}
-
-static void pids_fork(struct task_struct *task, void *priv)
-{
         struct cgroup_subsys_state *css;
-        struct cgroup_subsys_state *old_css = priv;
         struct pids_cgroup *pids;
-        struct pids_cgroup *old_pids = css_pids(old_css);

-        css = task_get_css(task, pids_cgrp_id);
+        css = task_css_check(current, pids_cgrp_id, true);
         pids = css_pids(css);

-        /*
-         * If the association has changed, we have to revert and reapply the
-         * charge/uncharge on the wrong hierarchy to the current one. Since
-         * the association can only change due to an organisation event, its
-         * okay for us to ignore the limit in this case.
-         */
-        if (pids != old_pids) {
-                pids_uncharge(old_pids, 1);
-                pids_charge(pids, 1);
-        }
-
-        css_put(css);
-        css_put(old_css);
+        pids_uncharge(pids, 1);
 }

 static void pids_free(struct task_struct *task)
@@ -335,6 +298,7 @@ static struct cftype pids_files[] = {
         {
                 .name = "current",
                 .read_s64 = pids_current_read,
+                .flags = CFTYPE_NOT_ON_ROOT,
         },
         { }     /* terminate */
 };
@@ -346,7 +310,6 @@ struct cgroup_subsys pids_cgrp_subsys = {
         .cancel_attach  = pids_cancel_attach,
         .can_fork       = pids_can_fork,
         .cancel_fork    = pids_cancel_fork,
-        .fork           = pids_fork,
         .free           = pids_free,
         .legacy_cftypes = pids_files,
         .dfl_cftypes    = pids_files,
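
Separate from the attach-path rework, the three hierarchy walks in this file change their loop condition from p to parent_pids(p), so charge, uncharge and try_charge now stop one level short of the top; the effect is that the topmost (root) pids group is no longer accounted. A standalone illustration of the two loop shapes — the node type and names are invented for the example:

#include <stdio.h>
#include <stddef.h>

struct node { const char *name; struct node *parent; };

static struct node *parent_of(const struct node *n) { return n->parent; }

int main(void)
{
        struct node root = { "root", NULL };
        struct node mid  = { "mid",  &root };
        struct node leaf = { "leaf", &mid };
        const struct node *p;

        /* old shape: visits leaf, mid and root */
        for (p = &leaf; p; p = parent_of(p))
                printf("old: %s\n", p->name);

        /* new shape: the parent_of(p) condition stops before root */
        for (p = &leaf; parent_of(p); p = parent_of(p))
                printf("new: %s\n", p->name);   /* leaf, mid only */
        return 0;
}
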
kernel/cpuset.c
@@ -1433,15 +1433,16 @@ static int fmeter_getrate(struct fmeter *fmp)
 static struct cpuset *cpuset_attach_old_cs;

 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys_state *css,
-                             struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup_taskset *tset)
 {
-        struct cpuset *cs = css_cs(css);
+        struct cgroup_subsys_state *css;
+        struct cpuset *cs;
         struct task_struct *task;
         int ret;

         /* used later by cpuset_attach() */
-        cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));
+        cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
+        cs = css_cs(css);

         mutex_lock(&cpuset_mutex);
@@ -1451,7 +1452,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
             (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
                 goto out_unlock;

-        cgroup_taskset_for_each(task, tset) {
+        cgroup_taskset_for_each(task, css, tset) {
                 ret = task_can_attach(task, cs->cpus_allowed);
                 if (ret)
                         goto out_unlock;
@@ -1471,9 +1472,14 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
         return ret;
 }

-static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
-                                 struct cgroup_taskset *tset)
+static void cpuset_cancel_attach(struct cgroup_taskset *tset)
 {
+        struct cgroup_subsys_state *css;
+        struct cpuset *cs;
+
+        cgroup_taskset_first(tset, &css);
+        cs = css_cs(css);
+
         mutex_lock(&cpuset_mutex);
         css_cs(css)->attach_in_progress--;
         mutex_unlock(&cpuset_mutex);
@@ -1486,16 +1492,19 @@ static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
  */
 static cpumask_var_t cpus_attach;

-static void cpuset_attach(struct cgroup_subsys_state *css,
-                          struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup_taskset *tset)
 {
         /* static buf protected by cpuset_mutex */
         static nodemask_t cpuset_attach_nodemask_to;
         struct task_struct *task;
         struct task_struct *leader;
-        struct cpuset *cs = css_cs(css);
+        struct cgroup_subsys_state *css;
+        struct cpuset *cs;
         struct cpuset *oldcs = cpuset_attach_old_cs;

+        cgroup_taskset_first(tset, &css);
+        cs = css_cs(css);
+
         mutex_lock(&cpuset_mutex);

         /* prepare for attach */
@@ -1506,7 +1515,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
         guarantee_online_mems(cs, &cpuset_attach_nodemask_to);

-        cgroup_taskset_for_each(task, tset) {
+        cgroup_taskset_for_each(task, css, tset) {
                 /*
                  * can_attach beforehand should guarantee that this doesn't
                  * fail. TODO: have a better way to handle failure here
@@ -1522,7 +1531,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
          * sleep and should be moved outside migration path proper.
          */
         cpuset_attach_nodemask_to = cs->effective_mems;
-        cgroup_taskset_for_each_leader(leader, tset) {
+        cgroup_taskset_for_each_leader(leader, css, tset) {
                 struct mm_struct *mm = get_task_mm(leader);

                 if (mm) {

kernel/events/core.c
@@ -9456,12 +9456,12 @@ static int __perf_cgroup_move(void *info)
         return 0;
 }

-static void perf_cgroup_attach(struct cgroup_subsys_state *css,
-                               struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup_taskset *tset)
 {
         struct task_struct *task;
+        struct cgroup_subsys_state *css;

-        cgroup_taskset_for_each(task, tset)
+        cgroup_taskset_for_each(task, css, tset)
                 task_function_call(task, __perf_cgroup_move, task);
 }

kernel/fork.c
@@ -1368,8 +1368,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         p->real_start_time = ktime_get_boot_ns();
         p->io_context = NULL;
         p->audit_context = NULL;
-        if (clone_flags & CLONE_THREAD)
-                threadgroup_change_begin(current);
+        threadgroup_change_begin(current);
         cgroup_fork(p);
 #ifdef CONFIG_NUMA
         p->mempolicy = mpol_dup(p->mempolicy);
@@ -1610,8 +1609,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         proc_fork_connector(p);
         cgroup_post_fork(p, cgrp_ss_priv);
-        if (clone_flags & CLONE_THREAD)
-                threadgroup_change_end(current);
+        threadgroup_change_end(current);
         perf_event_fork(p);

         trace_task_newtask(p, clone_flags);
@@ -1652,8 +1650,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
 #endif
-        if (clone_flags & CLONE_THREAD)
-                threadgroup_change_end(current);
+        threadgroup_change_end(current);
         delayacct_tsk_free(p);
 bad_fork_cleanup_count:
         atomic_dec(&p->cred->user->processes);
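
The kernel/fork.c hunks drop the CLONE_THREAD test, so copy_process() now brackets every fork with threadgroup_change_begin()/threadgroup_change_end() rather than only thread creation, and cgroup's fork-side hooks always run inside that critical section. A toy pthread sketch of the unconditional-bracketing idea — illustrative only; these are not the kernel's locking primitives:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t threadgroup_lock = PTHREAD_RWLOCK_INITIALIZER;

static void change_begin(void) { pthread_rwlock_rdlock(&threadgroup_lock); }
static void change_end(void)   { pthread_rwlock_unlock(&threadgroup_lock); }

static void toy_copy_process(int clone_flags)
{
        (void)clone_flags;      /* no longer consulted for the locking */

        change_begin();         /* unconditional, as in the hunks above */
        puts("fork path runs under the threadgroup lock");
        change_end();
}

int main(void)
{
        toy_copy_process(0);    /* build with: cc demo.c -lpthread */
        return 0;
}
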
kernel/sched/core.c
@@ -8217,12 +8217,12 @@ static void cpu_cgroup_fork(struct task_struct *task, void *private)
         sched_move_task(task);
 }

-static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
-                                 struct cgroup_taskset *tset)
+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
         struct task_struct *task;
+        struct cgroup_subsys_state *css;

-        cgroup_taskset_for_each(task, tset) {
+        cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
                 if (!sched_rt_can_attach(css_tg(css), task))
                         return -EINVAL;
@@ -8235,12 +8235,12 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
         return 0;
 }

-static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
-                              struct cgroup_taskset *tset)
+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 {
         struct task_struct *task;
+        struct cgroup_subsys_state *css;

-        cgroup_taskset_for_each(task, tset)
+        cgroup_taskset_for_each(task, css, tset)
                 sched_move_task(task);
 }

mm/memcontrol.c
@@ -4779,23 +4779,18 @@ static void mem_cgroup_clear_mc(void)
         spin_unlock(&mc.lock);
 }

-static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
-                                 struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 {
-        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+        struct cgroup_subsys_state *css;
+        struct mem_cgroup *memcg;
         struct mem_cgroup *from;
         struct task_struct *leader, *p;
         struct mm_struct *mm;
         unsigned long move_flags;
         int ret = 0;

-        /*
-         * We are now commited to this value whatever it is. Changes in this
-         * tunable will only affect upcoming migrations, not the current one.
-         * So we need to save it, and keep it going.
-         */
-        move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
-        if (!move_flags)
+        /* charge immigration isn't supported on the default hierarchy */
+        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                 return 0;

         /*
@@ -4805,13 +4800,23 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
          * multiple.
          */
         p = NULL;
-        cgroup_taskset_for_each_leader(leader, tset) {
+        cgroup_taskset_for_each_leader(leader, css, tset) {
                 WARN_ON_ONCE(p);
                 p = leader;
+                memcg = mem_cgroup_from_css(css);
         }
         if (!p)
                 return 0;

+        /*
+         * We are now commited to this value whatever it is. Changes in this
+         * tunable will only affect upcoming migrations, not the current one.
+         * So we need to save it, and keep it going.
+         */
+        move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
+        if (!move_flags)
+                return 0;
+
         from = mem_cgroup_from_task(p);
         VM_BUG_ON(from == memcg);
@@ -4842,8 +4847,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
         return ret;
 }

-static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
-                                     struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
         if (mc.to)
                 mem_cgroup_clear_mc();
@@ -4985,10 +4989,10 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
         atomic_dec(&mc.from->moving_account);
 }

-static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
-                                 struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
 {
-        struct task_struct *p = cgroup_taskset_first(tset);
+        struct cgroup_subsys_state *css;
+        struct task_struct *p = cgroup_taskset_first(tset, &css);
         struct mm_struct *mm = get_task_mm(p);

         if (mm) {
@@ -5000,17 +5004,14 @@ static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
                 mem_cgroup_clear_mc();
 }
 #else   /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
-                                 struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 {
         return 0;
 }
-static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
-                                     struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
-                                 struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
 {
 }
 #endif

net/core/netclassid_cgroup.c
@@ -67,14 +67,15 @@ static int update_classid(const void *v, struct file *file, unsigned n)
         return 0;
 }

-static void cgrp_attach(struct cgroup_subsys_state *css,
-                        struct cgroup_taskset *tset)
+static void cgrp_attach(struct cgroup_taskset *tset)
 {
-        struct cgroup_cls_state *cs = css_cls_state(css);
-        void *v = (void *)(unsigned long)cs->classid;
         struct task_struct *p;
+        struct cgroup_subsys_state *css;
+
+        cgroup_taskset_for_each(p, css, tset) {
+                struct cgroup_cls_state *cs = css_cls_state(css);
+                void *v = (void *)(unsigned long)cs->classid;

-        cgroup_taskset_for_each(p, tset) {
                 task_lock(p);
                 iterate_fd(p->files, 0, update_classid, v);
                 task_unlock(p);

net/core/netprio_cgroup.c
@@ -218,13 +218,14 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
         return 0;
 }

-static void net_prio_attach(struct cgroup_subsys_state *css,
-                            struct cgroup_taskset *tset)
+static void net_prio_attach(struct cgroup_taskset *tset)
 {
         struct task_struct *p;
-        void *v = (void *)(unsigned long)css->cgroup->id;
+        struct cgroup_subsys_state *css;

-        cgroup_taskset_for_each(p, tset) {
+        cgroup_taskset_for_each(p, css, tset) {
+                void *v = (void *)(unsigned long)css->cgroup->id;
+
                 task_lock(p);
                 iterate_fd(p->files, 0, update_netprio, v);
                 task_unlock(p);