openeuler / raspberrypi-kernel

Commit 851c9f38
Authored Mar 31, 2015 by Mike Snitzer
Merge remote-tracking branch 'jens/for-4.1/core' into dm/for-next
Parents: e9637415, c76cbbcf
Showing 4 changed files with 52 additions and 25 deletions:
block/blk-core.c        +16  -3
block/blk-mq-sysfs.c     +1  -0
block/blk-mq.c          +32 -22
include/linux/blk-mq.h   +3  -0
未找到文件。
block/blk-core.c

@@ -557,6 +557,18 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+/* Allocate memory local to the request queue */
+static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+{
+	int nid = (int)(long)data;
+	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+}
+
+static void free_request_struct(void *element, void *unused)
+{
+	kmem_cache_free(request_cachep, element);
+}
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask)
 {
@@ -569,9 +581,10 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-					  mempool_free_slab, request_cachep,
-					  gfp_mask, q->node);
+	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
+					  free_request_struct,
+					  (void *)(long)q->node, gfp_mask,
+					  q->node);
 	if (!rl->rq_pool)
 		return -ENOMEM;
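The blk-core.c change replaces the generic mempool_alloc_slab/mempool_free_slab helpers with queue-local callbacks: the queue's NUMA node is passed through the mempool's pool_data pointer, so the pool's spare request structures are allocated with kmem_cache_alloc_node() on q->node instead of on whichever node happens to refill the pool. A minimal sketch of the same pattern, with a hypothetical cache and minimum-element count standing in for request_cachep and BLKDEV_MIN_RQ:

#include <linux/mempool.h>
#include <linux/slab.h>

#define MY_MIN_ELEMS	4		/* hypothetical, like BLKDEV_MIN_RQ */

static struct kmem_cache *my_cachep;	/* hypothetical, like request_cachep */

/* Allocate an element on the NUMA node smuggled through pool_data. */
static void *my_alloc_node(gfp_t gfp_mask, void *data)
{
	int nid = (int)(long)data;

	return kmem_cache_alloc_node(my_cachep, gfp_mask, nid);
}

static void my_free(void *element, void *unused)
{
	kmem_cache_free(my_cachep, element);
}

/* Create a pool whose pre-allocated elements are kept local to @nid. */
static mempool_t *my_create_pool(int nid, gfp_t gfp_mask)
{
	return mempool_create_node(MY_MIN_ELEMS, my_alloc_node, my_free,
				   (void *)(long)nid, gfp_mask, nid);
}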
block/blk-mq-sysfs.c

@@ -436,6 +436,7 @@ int blk_mq_register_disk(struct gendisk *disk)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
block/blk-mq.c

@@ -33,7 +33,6 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static void blk_mq_run_queues(struct request_queue *q);
 
 /*
  * Check if any of the ctx's have pending work in this hardware queue
@@ -78,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
 	while (true) {
 		int ret;
@@ -86,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;
 
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
 		ret = wait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -118,7 +120,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 
 	if (freeze) {
 		percpu_ref_kill(&q->mq_usage_counter);
-		blk_mq_run_queues(q);
+		blk_mq_run_hw_queues(q, false);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -257,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q);
+	ret = blk_mq_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
@@ -904,7 +906,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 			&hctx->run_work, 0);
 }
 
-static void blk_mq_run_queues(struct request_queue *q)
+void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
@@ -915,9 +917,10 @@ static void blk_mq_run_queues(struct request_queue *q)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		blk_mq_run_hw_queue(hctx, false);
+		blk_mq_run_hw_queue(hctx, async);
 	}
 }
+EXPORT_SYMBOL(blk_mq_run_hw_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q))) {
+	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
 		bio_endio(bio, -EIO);
 		return NULL;
 	}
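Taken together, the hunks above make queue entry gfp-aware: blk_mq_queue_enter() now receives the caller's allocation mask, and when the queue is frozen a caller without __GFP_WAIT gets -EBUSY immediately instead of sleeping on mq_freeze_wq (the bio submission path keeps the old blocking behaviour by passing GFP_KERNEL). A hedged sketch of what a caller now observes through blk_mq_alloc_request(); the helper name is hypothetical:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* With GFP_ATOMIC (no __GFP_WAIT) a frozen queue now fails fast with
 * -EBUSY; with GFP_KERNEL the call sleeps until the freeze is lifted. */
static struct request *my_alloc_rq_nowait(struct request_queue *q, int rw)
{
	struct request *rq = blk_mq_alloc_request(q, rw, GFP_ATOMIC, false);

	if (IS_ERR(rq))
		return NULL;	/* e.g. -EBUSY while frozen, or queue dying */
	return rq;
}

The remaining blk-mq.c hunks below deal with queue initialisation.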
@@ -1890,10 +1893,26 @@ void blk_mq_release(struct request_queue *q)
 }
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+	struct request_queue *uninit_q, *q;
+
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+	if (!uninit_q)
+		return ERR_PTR(-ENOMEM);
+
+	q = blk_mq_init_allocated_queue(set, uninit_q);
+	if (IS_ERR(q))
+		blk_cleanup_queue(uninit_q);
+
+	return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						  struct request_queue *q)
 {
 	struct blk_mq_hw_ctx **hctxs;
 	struct blk_mq_ctx __percpu *ctx;
-	struct request_queue *q;
 	unsigned int *map;
 	int i;
@@ -1928,20 +1947,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 		hctxs[i]->queue_num = i;
 	}
 
-	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
-	if (!q)
-		goto err_hctxs;
-
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
	 */
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_mq_usage;
+		goto err_hctxs;
 
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-	blk_queue_rq_timeout(q, 30000);
+	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
 
 	q->nr_queues = nr_cpu_ids;
 	q->nr_hw_queues = set->nr_hw_queues;
@@ -1967,9 +1982,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	else
 		blk_queue_make_request(q, blk_sq_make_request);
 
-	if (set->timeout)
-		blk_queue_rq_timeout(q, set->timeout);
-
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
 	 */
@@ -1981,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_mq_usage;
+		goto err_hctxs;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,8 +2005,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	return q;
 
-err_mq_usage:
-	blk_cleanup_queue(q);
 err_hctxs:
 	kfree(map);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2009,7 +2019,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	free_percpu(ctx);
 	return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
 void blk_mq_free_queue(struct request_queue *q)
 {
@@ -2161,7 +2171,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+	if (!set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
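The other half of the merge splits queue initialisation: blk_mq_init_queue() becomes a thin wrapper that allocates a request_queue and hands it to the new blk_mq_init_allocated_queue(), so a driver can allocate the queue itself and only then wire it up for blk-mq. It also folds the request timeout into one place (set->timeout ? set->timeout : 30000) instead of patching it up after the fact. A hedged sketch of a driver using the split directly; everything except the exported functions is illustrative:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct request_queue *my_make_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	/* driver-private setup on the still-uninitialised queue ... */

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);	/* as the new wrapper does */

	return q;
}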
include/linux/blk-mq.h

@@ -164,6 +164,8 @@ enum {
 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						  struct request_queue *q);
 void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
@@ -218,6 +220,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv);
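With blk_mq_run_hw_queues() declared here next to the other run/stop/start helpers, a driver can kick all hardware queues itself rather than relying on the previously static blk_mq_run_queues(). A hedged sketch; the helper name is hypothetical:

#include <linux/blk-mq.h>

/* When driver resources free up (e.g. a device-side queue drains),
 * re-run all hardware queues to dispatch requests left pending.
 * 'true' requests async dispatch via each hctx's run_work rather
 * than running the queues in the caller's context. */
static void my_kick_queues(struct request_queue *q)
{
	blk_mq_run_hw_queues(q, true);
}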