Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openanolis
cloud-kernel
提交
b646fc59
cloud-kernel
项目概览
openanolis
/
cloud-kernel
1 年多 前同步成功
通知
160
Star
36
Fork
7
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
10
列表
看板
标记
里程碑
合并请求
2
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
cloud-kernel
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
10
Issue
10
列表
看板
标记
里程碑
合并请求
2
合并请求
2
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
b646fc59
编写于
7月 28, 2008
作者:
J
Jens Axboe
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
block: split softirq handling into blk-softirq.c
Signed-off-by:
N
Jens Axboe
<
jens.axboe@oracle.com
>
上级
0835da67
变更
3
显示空白变更内容
内联
并排
Showing
3 changed files
with
105 additions
and
90 deletions
+105
-90
block/Makefile
block/Makefile
+2
-2
block/blk-core.c
block/blk-core.c
+0
-88
block/blk-softirq.c
block/blk-softirq.c
+103
-0
未找到文件。
block/Makefile
浏览文件 @
b646fc59
...
...
@@ -4,8 +4,8 @@
obj-$(CONFIG_BLOCK)
:=
elevator.o blk-core.o blk-tag.o blk-sysfs.o
\
blk-barrier.o blk-settings.o blk-ioc.o blk-map.o
\
blk-exec.o blk-merge.o
ioctl.o genhd.o scsi_ioctl
.o
\
cmd-filter.o
blk-exec.o blk-merge.o
blk-softirq.o ioctl.o genhd
.o
\
scsi_ioctl.o
cmd-filter.o
obj-$(CONFIG_BLK_DEV_BSG)
+=
bsg.o
obj-$(CONFIG_IOSCHED_NOOP)
+=
noop-iosched.o
...
...
block/blk-core.c
浏览文件 @
b646fc59
...
...
@@ -26,8 +26,6 @@
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
...
...
@@ -50,8 +48,6 @@ struct kmem_cache *blk_requestq_cachep;
*/
static
struct
workqueue_struct
*
kblockd_workqueue
;
static
DEFINE_PER_CPU
(
struct
list_head
,
blk_cpu_done
);
static
void
drive_stat_acct
(
struct
request
*
rq
,
int
new_io
)
{
struct
hd_struct
*
part
;
...
...
@@ -1643,82 +1639,6 @@ static int __end_that_request_first(struct request *req, int error,
return
1
;
}
/*
* splice the completion data to a local structure and hand off to
* process_completion_queue() to complete the requests
*/
static
void
blk_done_softirq
(
struct
softirq_action
*
h
)
{
struct
list_head
*
cpu_list
,
local_list
;
local_irq_disable
();
cpu_list
=
&
__get_cpu_var
(
blk_cpu_done
);
list_replace_init
(
cpu_list
,
&
local_list
);
local_irq_enable
();
while
(
!
list_empty
(
&
local_list
))
{
struct
request
*
rq
;
rq
=
list_entry
(
local_list
.
next
,
struct
request
,
donelist
);
list_del_init
(
&
rq
->
donelist
);
rq
->
q
->
softirq_done_fn
(
rq
);
}
}
static
int
__cpuinit
blk_cpu_notify
(
struct
notifier_block
*
self
,
unsigned
long
action
,
void
*
hcpu
)
{
/*
* If a CPU goes away, splice its entries to the current CPU
* and trigger a run of the softirq
*/
if
(
action
==
CPU_DEAD
||
action
==
CPU_DEAD_FROZEN
)
{
int
cpu
=
(
unsigned
long
)
hcpu
;
local_irq_disable
();
list_splice_init
(
&
per_cpu
(
blk_cpu_done
,
cpu
),
&
__get_cpu_var
(
blk_cpu_done
));
raise_softirq_irqoff
(
BLOCK_SOFTIRQ
);
local_irq_enable
();
}
return
NOTIFY_OK
;
}
static
struct
notifier_block
blk_cpu_notifier
__cpuinitdata
=
{
.
notifier_call
=
blk_cpu_notify
,
};
/**
* blk_complete_request - end I/O on a request
* @req: the request being processed
*
* Description:
* Ends all I/O on a request. It does not handle partial completions,
* unless the driver actually implements this in its completion callback
* through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
**/
void
blk_complete_request
(
struct
request
*
req
)
{
struct
list_head
*
cpu_list
;
unsigned
long
flags
;
BUG_ON
(
!
req
->
q
->
softirq_done_fn
);
local_irq_save
(
flags
);
cpu_list
=
&
__get_cpu_var
(
blk_cpu_done
);
list_add_tail
(
&
req
->
donelist
,
cpu_list
);
raise_softirq_irqoff
(
BLOCK_SOFTIRQ
);
local_irq_restore
(
flags
);
}
EXPORT_SYMBOL
(
blk_complete_request
);
/*
* queue lock must be held
*/
...
...
@@ -2053,8 +1973,6 @@ EXPORT_SYMBOL(kblockd_flush_work);
int
__init
blk_dev_init
(
void
)
{
int
i
;
kblockd_workqueue
=
create_workqueue
(
"kblockd"
);
if
(
!
kblockd_workqueue
)
panic
(
"Failed to create kblockd
\n
"
);
...
...
@@ -2065,12 +1983,6 @@ int __init blk_dev_init(void)
blk_requestq_cachep
=
kmem_cache_create
(
"blkdev_queue"
,
sizeof
(
struct
request_queue
),
0
,
SLAB_PANIC
,
NULL
);
for_each_possible_cpu
(
i
)
INIT_LIST_HEAD
(
&
per_cpu
(
blk_cpu_done
,
i
));
open_softirq
(
BLOCK_SOFTIRQ
,
blk_done_softirq
);
register_hotcpu_notifier
(
&
blk_cpu_notifier
);
return
0
;
}
block/blk-softirq.c
0 → 100644
浏览文件 @
b646fc59
/*
 * Functions related to softirq rq completions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include "blk.h"

/*
 * Per-CPU list of requests whose completion is pending; entries are
 * queued by blk_complete_request() and drained by the BLOCK_SOFTIRQ
 * handler on the same CPU.
 */
static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
/*
 * CPU hotplug callback. When a CPU is taken offline, any completions
 * still sitting on its blk_cpu_done list would otherwise be stranded,
 * so splice them onto the current CPU's list and re-raise the softirq.
 */
static int __cpuinit blk_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long) hcpu;

	/* Only CPU removal needs handling; ignore every other event. */
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	list_splice_init(&per_cpu(blk_cpu_done, cpu),
			 &__get_cpu_var(blk_cpu_done));
	raise_softirq_irqoff(BLOCK_SOFTIRQ);
	local_irq_enable();

	return NOTIFY_OK;
}

static struct notifier_block blk_cpu_notifier __cpuinitdata = {
	.notifier_call	= blk_cpu_notify,
};
/*
* splice the completion data to a local structure and hand off to
* process_completion_queue() to complete the requests
*/
static
void
blk_done_softirq
(
struct
softirq_action
*
h
)
{
struct
list_head
*
cpu_list
,
local_list
;
local_irq_disable
();
cpu_list
=
&
__get_cpu_var
(
blk_cpu_done
);
list_replace_init
(
cpu_list
,
&
local_list
);
local_irq_enable
();
while
(
!
list_empty
(
&
local_list
))
{
struct
request
*
rq
;
rq
=
list_entry
(
local_list
.
next
,
struct
request
,
donelist
);
list_del_init
(
&
rq
->
donelist
);
rq
->
q
->
softirq_done_fn
(
rq
);
}
}
/**
 * blk_complete_request - end I/O on a request
 * @req: the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
	unsigned long flags;

	/* A completion callback is mandatory for softirq completion. */
	BUG_ON(!req->q->softirq_done_fn);

	local_irq_save(flags);
	list_add_tail(&req->donelist, &__get_cpu_var(blk_cpu_done));
	raise_softirq_irqoff(BLOCK_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_complete_request);
/*
 * Boot-time setup: give every possible CPU an empty completion list,
 * install the BLOCK_SOFTIRQ handler, and hook CPU hotplug so a dying
 * CPU's pending completions are rescued.
 */
int __init blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);

	return 0;
}
subsys_initcall(blk_softirq_init);
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录