openeuler / raspberrypi-kernel
Commit 8d76d101

Authored Nov 17, 2014 by Jens Axboe

Merge branch 'for-3.19/core' into for-3.19/drivers

Parents: e805b983, 7c7f2f2b
Showing 4 changed files with 53 additions and 22 deletions (+53 -22):

Documentation/block/biodoc.txt   +5  -1
block/blk-mq.c                   +25 -14
fs/fs-writeback.c                +22 -7
include/linux/blk-mq.h           +1  -0
Documentation/block/biodoc.txt
@@ -946,7 +946,11 @@ elevator_allow_merge_fn		called whenever the block layer determines
 				request safely. The io scheduler may still
 				want to stop a merge at this point if it
 				results in some sort of conflict internally,
-				this hook allows it to do that.
+				this hook allows it to do that. Note however
+				that two *requests* can still be merged at later
+				time. Currently the io scheduler has no way to
+				prevent that. It can only learn about the fact
+				from elevator_merge_req_fn callback.

 elevator_dispatch_fn*		fills the dispatch queue with ready requests.
 				I/O schedulers are free to postpone requests by
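For context on the hook this note documents: a 3.19-era I/O scheduler could veto a bio-into-request merge through its allow_merge hook. A minimal sketch, not part of this commit; "my_sched" and the my_sched_same_group() predicate are hypothetical stand-ins for whatever internal bookkeeping a scheduler keys merges on (CFQ, for instance, compares the cfq_queue the bio maps to against the one owning the request):

static int my_sched_allow_merge(struct request_queue *q,
				struct request *rq, struct bio *bio)
{
	/*
	 * Returning 0 refuses this particular bio/request merge, e.g.
	 * because the two belong to different internal service queues.
	 * As the note above says, this does NOT prevent a later
	 * request/request merge; the scheduler only learns about that
	 * one after the fact, via its elevator_merge_req_fn callback.
	 */
	if (!my_sched_same_group(q, rq, bio))	/* hypothetical helper */
		return 0;

	return 1;
}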
block/blk-mq.c
@@ -269,17 +269,25 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_queue_exit(q);
 }

-void blk_mq_free_request(struct request *rq)
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx;
-	struct request_queue *q = rq->q;

 	ctx->rq_completed[rq_is_sync(rq)]++;
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	__blk_mq_free_request(hctx, ctx, rq);
+
+}
+EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
+
+void blk_mq_free_request(struct request *rq)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct request_queue *q = rq->q;
+
+	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+	blk_mq_free_hctx_request(hctx, rq);
 }
+EXPORT_SYMBOL_GPL(blk_mq_free_request);

 inline void __blk_mq_end_request(struct request *rq, int error)
 {
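The hunk above splits request freeing so that a caller which already holds the hardware-context pointer can skip the map_queue() lookup that blk_mq_free_request() performs. A sketch of the intended use, assuming a hypothetical driver (this caller is not part of the commit; only blk_mq_free_hctx_request() and its signature come from the diff above):

#include <linux/blk-mq.h>

/*
 * Hypothetical blk-mq driver path: the driver already knows the hctx
 * (e.g. from its queue_rq callback), so it frees an internal request
 * directly against it instead of redoing the map_queue() lookup that
 * blk_mq_free_request() would perform.
 */
static void my_drv_put_internal_rq(struct blk_mq_hw_ctx *hctx,
				   struct request *rq)
{
	/* ... hypothetical driver-private teardown ... */

	blk_mq_free_hctx_request(hctx, rq);	/* export added by this merge */
}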
@@ -801,9 +809,18 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;

-	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
-		__blk_mq_run_hw_queue(hctx);
-	else if (hctx->queue->nr_hw_queues == 1)
+	if (!async) {
+		int cpu = get_cpu();
+		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+			__blk_mq_run_hw_queue(hctx);
+			put_cpu();
+			return;
+		}
+
+		put_cpu();
+	}
+
+	if (hctx->queue->nr_hw_queues == 1)
 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
 	else {
 		unsigned int cpu;
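The rewrite above replaces a bare smp_processor_id() test with get_cpu()/put_cpu(), so the cpumask check and the direct queue run happen with preemption disabled, and the function is safe to call from preemptible context. This is also why the callers in the hunks below can drop their own preempt_disable()/preempt_enable() wrappers. The pattern in isolation, as a sketch; "mask" and run_here() are placeholder names, not kernel API:

	int cpu = get_cpu();		/* preemption disabled from here */

	if (cpumask_test_cpu(cpu, mask))
		run_here();		/* still guaranteed to run on 'cpu' */

	put_cpu();			/* re-enable preemption */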
@@ -824,9 +841,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;

-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_run_queues);
@@ -853,9 +868,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

-	preempt_disable();
 	blk_mq_run_hw_queue(hctx, false);
-	preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -880,9 +893,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 			continue;

 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
fs/fs-writeback.c
@@ -479,12 +479,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	 * write_inode()
 	 */
 	spin_lock(&inode->i_lock);
-	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
-	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-		inode->i_state &= ~I_DIRTY_PAGES;
+
 	dirty = inode->i_state & I_DIRTY;
-	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+	inode->i_state &= ~I_DIRTY;
+
+	/*
+	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
+	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
+	 * either they see the I_DIRTY bits cleared or we see the dirtied
+	 * inode.
+	 *
+	 * I_DIRTY_PAGES is always cleared together above even if @mapping
+	 * still has dirty pages.  The flag is reinstated after smp_mb() if
+	 * necessary.  This guarantees that either __mark_inode_dirty()
+	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
+	 */
+	smp_mb();
+
+	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+		inode->i_state |= I_DIRTY_PAGES;
+
 	spin_unlock(&inode->i_lock);
+
 	/* Don't write the inode if only I_DIRTY_PAGES was set */
 	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
 		int err = write_inode(inode, wbc);
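The new comment documents a classic store/fence/load pairing. As an illustration only, a userspace analogue of the guarantee it relies on can be written with C11 atomics, under the assumption that a seq_cst fence stands in for smp_mb(); the names are placeholders, not kernel code:

#include <stdatomic.h>

static atomic_int state_dirty = 1;	/* ~ the I_DIRTY bits in i_state */
static atomic_int tag_dirty;		/* ~ PAGECACHE_TAG_DIRTY */

static void writeback_side(void)	/* ~ __writeback_single_inode() */
{
	atomic_store_explicit(&state_dirty, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	/* Reinstate the flag if pages were dirtied concurrently. */
	if (atomic_load_explicit(&tag_dirty, memory_order_relaxed))
		atomic_store_explicit(&state_dirty, 1, memory_order_relaxed);
}

static void dirtying_side(void)		/* ~ __mark_inode_dirty() */
{
	atomic_store_explicit(&tag_dirty, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	if (atomic_load_explicit(&state_dirty, memory_order_relaxed))
		return;		/* inode still tracked as dirty */
	/* else: take the lock and re-mark the inode dirty */
}

Because each side stores, fences, then loads, the outcome where both sides read the stale value is forbidden: at least one side observes the other's store, so a page dirtied concurrently with writeback can never leave the inode untracked.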
@@ -1148,12 +1164,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 	}

 	/*
-	 * make sure that changes are seen by all cpus before we test i_state
-	 * -- mikulas
+	 * Paired with smp_mb() in __writeback_single_inode() for the
+	 * following lockless i_state test.  See there for details.
 	 */
 	smp_mb();

-	/* avoid the locking if we can */
 	if ((inode->i_state & flags) == flags)
 		return;
include/linux/blk-mq.h
@@ -169,6 +169,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		gfp_t gfp, bool reserved);