Commit 053d8f66
Authored Sep 09, 2010 by David S. Miller

Merge branch 'vhost-net' of
git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Parents: c9cedbba 615cc221

Showing 3 changed files with 73 additions and 27 deletions (+73 -27).
drivers/vhost/vhost.c    +58  -22
include/linux/cgroup.h   +10   -1
kernel/cgroup.c           +5   -4
drivers/vhost/vhost.c

@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev)
 {
-	struct vhost_work *work = &poll->work;
-
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
 
-	INIT_LIST_HEAD(&work->node);
-	work->fn = fn;
-	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
+	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	struct vhost_work *work = &poll->work;
 	unsigned seq;
 	int left;
 	int flushing;
 
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	wait_event(work->done, ({
-		   spin_lock_irq(&poll->dev->work_lock);
+		   spin_lock_irq(&dev->work_lock);
 		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&poll->dev->work_lock);
+		   spin_unlock_irq(&dev->work_lock);
 		   left;
 	}));
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+	vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+				    struct vhost_work *work)
 {
-	struct vhost_dev *dev = poll->dev;
-	struct vhost_work *work = &poll->work;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+	vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+	struct vhost_attach_cgroups_struct attach;
+	attach.owner = current;
+	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_work_queue(dev, &attach.work);
+	vhost_work_flush(dev, &attach.work);
+	return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	}
 
 	dev->worker = worker;
-	err = cgroup_attach_task_current_cg(worker);
+	wake_up_process(worker);	/* avoid contributing to loadavg */
+
+	err = vhost_attach_cgroups(dev);
 	if (err)
 		goto err_cgroup;
-	wake_up_process(worker);	/* avoid contributing to loadavg */
 
 	return 0;
 err_cgroup:
 	kthread_stop(worker);
+	dev->worker = NULL;
 err_worker:
 	if (dev->mm)
 		mmput(dev->mm);
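The refactoring above splits the work-item mechanics (vhost_work_init/vhost_work_queue/vhost_work_flush) out of the poll helpers, so a caller can queue an on-stack vhost_work on the device's worker thread and flush it synchronously. vhost_attach_cgroups() uses exactly that to run cgroup_attach_task_all() in the worker's context and read back the result. The following is a hypothetical userspace sketch of this queue/flush pattern, with a pthread, a mutex, and a condition variable standing in for the vhost kthread, dev->work_lock, and the wait queues; all toy_* names are invented for the demo, and only the queue_seq/done_seq flush logic mirrors the patch.

/* Hypothetical userspace sketch of the vhost_work pattern above, with a
 * pthread standing in for the vhost worker kthread and a mutex for
 * dev->work_lock. Everything named toy_* is invented for this demo. */
#include <pthread.h>
#include <stdio.h>

struct toy_work;
typedef void (*toy_work_fn_t)(struct toy_work *);

struct toy_work {
	struct toy_work *next;
	toy_work_fn_t fn;
	unsigned queue_seq;	/* bumped when queued */
	unsigned done_seq;	/* bumped by the worker when fn returns */
};

struct toy_dev {
	pthread_mutex_t lock;	/* plays the role of dev->work_lock */
	pthread_cond_t cond;	/* wakes the worker and any flushers */
	struct toy_work *head;
};

static void toy_work_init(struct toy_work *w, toy_work_fn_t fn)
{
	w->next = NULL;
	w->fn = fn;
	w->queue_seq = w->done_seq = 0;
}

static void toy_work_queue(struct toy_dev *d, struct toy_work *w)
{
	pthread_mutex_lock(&d->lock);
	w->queue_seq++;
	w->next = d->head;
	d->head = w;
	pthread_cond_broadcast(&d->cond);
	pthread_mutex_unlock(&d->lock);
}

/* Wait for the submission seen at entry to finish; the signed subtraction
 * is wraparound-safe, like "seq - work->done_seq <= 0" in the patch. */
static void toy_work_flush(struct toy_dev *d, struct toy_work *w)
{
	pthread_mutex_lock(&d->lock);
	unsigned seq = w->queue_seq;
	while ((int)(seq - w->done_seq) > 0)
		pthread_cond_wait(&d->cond, &d->lock);
	pthread_mutex_unlock(&d->lock);
}

static void *toy_worker(void *arg)	/* runs until process exit */
{
	struct toy_dev *d = arg;
	pthread_mutex_lock(&d->lock);
	for (;;) {
		struct toy_work *w = d->head;
		if (!w) {
			pthread_cond_wait(&d->cond, &d->lock);
			continue;
		}
		d->head = w->next;
		unsigned seq = w->queue_seq;	/* sample before unlocking */
		pthread_mutex_unlock(&d->lock);
		w->fn(w);			/* run the callback unlocked */
		pthread_mutex_lock(&d->lock);
		w->done_seq = seq;		/* mark that submission done */
		pthread_cond_broadcast(&d->cond);
	}
	return NULL;
}

/* On-stack work with a result slot, like vhost_attach_cgroups_struct. */
struct toy_attach {
	struct toy_work work;	/* first member, so the cast below is valid */
	int ret;
};

static void toy_attach_fn(struct toy_work *w)
{
	struct toy_attach *a = (struct toy_attach *)w;	/* container_of */
	a->ret = 0;		/* stand-in for cgroup_attach_task_all() */
}

int main(void)
{
	struct toy_dev d = { PTHREAD_MUTEX_INITIALIZER,
			     PTHREAD_COND_INITIALIZER, NULL };
	pthread_t t;
	pthread_create(&t, NULL, toy_worker, &d);

	struct toy_attach a = { .ret = -1 };
	toy_work_init(&a.work, toy_attach_fn);
	toy_work_queue(&d, &a.work);	/* runs on the worker thread */
	toy_work_flush(&d, &a.work);	/* a.ret is stable after this */
	printf("worker result: %d\n", a.ret);
	return 0;			/* process exit reaps the worker */
}

The queue-then-flush pairing is what makes the on-stack work item safe: the stack frame cannot unwind before the worker is done with it, and the flush also orders the worker's write to a.ret before the caller's read.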
include/linux/cgroup.h

@@ -578,7 +578,11 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+	return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +640,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+					 struct task_struct *t)
+{
+	return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
 	return 0;
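The first hunk generalizes the API: cgroup_attach_task_all() takes an explicit source task, and the old entry point survives as a static inline wrapper that passes current. This matters for vhost because the attach runs on the worker thread, where current is the worker itself, not the device owner. The second hunk adds a no-op stub for the !CONFIG_CGROUPS build, so callers such as vhost compile without #ifdefs either way. Below is a small, compilable illustration of that header pattern; CONFIG_FEATURE and feature_attach_all() are invented stand-ins, not names from the patch.

#include <stdio.h>

/* #define CONFIG_FEATURE 1 -- in the kernel this is CONFIG_CGROUPS,
 * set by Kconfig rather than by hand. */

#ifdef CONFIG_FEATURE
int feature_attach_all(int from, int t);	/* real version elsewhere */
#else
/* Feature compiled out: succeed as a no-op, exactly like the
 * cgroup_attach_task_all() stub above. */
static inline int feature_attach_all(int from, int t)
{
	(void)from;
	(void)t;
	return 0;
}
#endif

int main(void)
{
	/* The caller compiles unchanged whether the feature exists. */
	printf("attach -> %d\n", feature_attach_all(1, 2));
	return 0;
}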
kernel/cgroup.c

@@ -1791,10 +1791,11 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
 	struct cgroupfs_root *root;
 	struct cgroup *cur_cg;
@@ -1802,7 +1803,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 	cgroup_lock();
 	for_each_active_root(root) {
-		cur_cg = task_cgroup_from_root(current, root);
+		cur_cg = task_cgroup_from_root(from, root);
 		retval = cgroup_attach_task(cur_cg, tsk);
 		if (retval)
 			break;
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 	return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
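cgroup_attach_task_all() walks each active hierarchy, looks up 'from's cgroup in it, and attaches 'tsk' there, stopping at the first failure. As a rough userspace analogue (a sketch, not the kernel's method), one can parse /proc/<from>/cgroup and write <tsk> into the matching cgroup.procs files; this assumes cgroup v1 hierarchies mounted at /sys/fs/cgroup/<controllers> and does only minimal error handling.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy every cgroup (v1) membership of pid 'from' to pid 'tsk', the way
 * cgroup_attach_task_all() walks each active hierarchy. Assumes the
 * hierarchies are mounted at /sys/fs/cgroup/<controllers>. */
static int attach_all(long from, long tsk)
{
	char path[256], line[512];
	snprintf(path, sizeof(path), "/proc/%ld/cgroup", from);
	FILE *f = fopen(path, "r");
	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		/* line format: hierarchy-id:controllers:cgroup-path */
		char *ctrls = strchr(line, ':');
		char *cg = ctrls ? strchr(ctrls + 1, ':') : NULL;
		if (!cg)
			continue;
		*cg++ = '\0';
		cg[strcspn(cg, "\n")] = '\0';
		snprintf(path, sizeof(path),
			 "/sys/fs/cgroup/%s%s/cgroup.procs", ctrls + 1, cg);
		FILE *p = fopen(path, "w");
		if (!p) {		/* first failure aborts, like the
					 * "if (retval) break;" above */
			fclose(f);
			return -1;
		}
		fprintf(p, "%ld\n", tsk);
		fclose(p);
	}
	fclose(f);
	return 0;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <from-pid> <tsk-pid>\n", argv[0]);
		return 1;
	}
	return attach_all(atol(argv[1]), atol(argv[2])) ? 1 : 0;
}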