openanolis / cloud-kernel, commit cdd972b1
Authored Sep 10, 2013 by Kent Overstreet
Parent: 84f0db03

bcache: Refactor read request code a bit

More refactoring, and renaming.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Showing 1 changed file with 35 additions and 36 deletions:

drivers/md/bcache/request.c  (+35, -36)
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -884,7 +884,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct search *s)
 
 /* Process reads */
 
-static void cached_dev_read_complete(struct closure *cl)
+static void cached_dev_cache_miss_done(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 
@@ -902,9 +902,10 @@ static void cached_dev_read_complete(struct closure *cl)
 	cached_dev_bio_complete(cl);
 }
 
-static void request_read_error(struct closure *cl)
+static void cached_dev_read_error(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
+	struct bio *bio = &s->bio.bio;
 	struct bio_vec *bv;
 	int i;
 
@@ -928,20 +929,20 @@ static void request_read_error(struct closure *cl)
 		/* XXX: invalidate cache */
 
-		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+		closure_bio_submit(bio, cl, s->d);
 	}
 
-	continue_at(cl, cached_dev_read_complete, NULL);
+	continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void request_read_done(struct closure *cl)
+static void cached_dev_read_done(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	/*
-	 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
-	 * contains data ready to be inserted into the cache.
+	 * We had a cache miss; cache_bio now contains data ready to be inserted
+	 * into the cache.
 	 *
 	 * First, we copy the data we just read from cache_bio's bounce buffers
 	 * to the buffers the original bio pointed to:
@@ -971,10 +972,10 @@ static void request_read_done(struct closure *cl)
 		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
 	}
 
-	continue_at(cl, cached_dev_read_complete, NULL);
+	continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void request_read_done_bh(struct closure *cl)
+static void cached_dev_read_done_bh(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
@@ -983,11 +984,11 @@ static void request_read_done_bh(struct closure *cl)
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.bypass);
 
 	if (s->error)
-		continue_at_nobarrier(cl, request_read_error, bcache_wq);
+		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
-		continue_at_nobarrier(cl, request_read_done, bcache_wq);
+		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 	else
-		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 }
 
 static int cached_dev_cache_miss(struct btree *b, struct search *s,
@@ -996,7 +997,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	int ret = 0;
 	unsigned reada = 0;
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-	struct bio *miss;
+	struct bio *miss, *cache_bio;
 
 	if (s->cache_miss || s->op.bypass) {
 		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
@@ -1027,33 +1028,31 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	/* btree_search_recurse()'s btree iterator is no good anymore */
 	ret = -EINTR;
 
-	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
 			dc->disk.bio_split);
-
-	if (!s->op.cache_bio)
+	if (!cache_bio)
 		goto out_submit;
 
-	s->op.cache_bio->bi_sector	= miss->bi_sector;
-	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
-	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
+	cache_bio->bi_sector	= miss->bi_sector;
+	cache_bio->bi_bdev	= miss->bi_bdev;
+	cache_bio->bi_size	= s->cache_bio_sectors << 9;
 
-	s->op.cache_bio->bi_end_io	= request_endio;
-	s->op.cache_bio->bi_private	= &s->cl;
+	cache_bio->bi_end_io	= request_endio;
+	cache_bio->bi_private	= &s->cl;
 
-	bch_bio_map(s->op.cache_bio, NULL);
-	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+	bch_bio_map(cache_bio, NULL);
+	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 		goto out_put;
 
 	s->cache_miss	= miss;
-	bio_get(s->op.cache_bio);
-
-	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+	s->op.cache_bio	= cache_bio;
+	bio_get(cache_bio);
+	closure_bio_submit(cache_bio, &s->cl, s->d);
 
 	return ret;
 out_put:
-	bio_put(s->op.cache_bio);
-	s->op.cache_bio = NULL;
+	bio_put(cache_bio);
 out_submit:
 	miss->bi_end_io		= request_endio;
 	miss->bi_private	= &s->cl;
@@ -1061,12 +1060,12 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	return ret;
 }
 
-static void request_read(struct cached_dev *dc, struct search *s)
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
 	closure_call(&s->op.cl, btree_read_async, NULL, cl);
-	continue_at(cl, request_read_done_bh, NULL);
+	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
 /* Process writes */
@@ -1080,7 +1079,7 @@ static void cached_dev_write_complete(struct closure *cl)
 	cached_dev_bio_complete(cl);
 }
 
-static void request_write(struct cached_dev *dc, struct search *s)
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
@@ -1152,7 +1151,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
 	continue_at(cl, cached_dev_write_complete, NULL);
 }
 
-static void request_nodata(struct cached_dev *dc, struct search *s)
+static void cached_dev_nodata(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
@@ -1188,14 +1187,14 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 		trace_bcache_request_start(s, bio);
 
 		if (!bio->bi_size)
-			request_nodata(dc, s);
+			cached_dev_nodata(dc, s);
 		else {
 			s->op.bypass = check_should_bypass(dc, s);
 
 			if (rw)
-				request_write(dc, s);
+				cached_dev_write(dc, s);
 			else
-				request_read(dc, s);
+				cached_dev_read(dc, s);
 		}
 	} else {
 		if ((bio->bi_rw & REQ_DISCARD) &&
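For orientation, the sketch below is a standalone model (not kernel code) of the read-path dispatch order that the renamed functions follow after this patch, i.e. the chain set up by the continue_at()/continue_at_nobarrier() calls in the hunks above. It replaces the bcache closure machinery with direct calls, and struct fake_search with its fields is a hypothetical stand-in for s->error, s->op.cache_bio and verify(dc, &s->bio.bio); it only illustrates control flow.

/*
 * Illustrative only: mirrors the post-patch read path
 * cached_dev_read -> cached_dev_read_done_bh ->
 *   { cached_dev_read_error | cached_dev_read_done | cached_dev_bio_complete }
 * with error/read_done funneling into cached_dev_cache_miss_done.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_search {
	bool error;      /* stand-in for s->error */
	bool cache_bio;  /* stand-in for s->op.cache_bio != NULL (cache miss) */
	bool verify;     /* stand-in for verify(dc, &s->bio.bio) */
};

static void cached_dev_bio_complete(struct fake_search *s)
{
	(void)s;
	printf("cached_dev_bio_complete\n");
}

static void cached_dev_cache_miss_done(struct fake_search *s)
{
	/* was cached_dev_read_complete before this patch */
	printf("cached_dev_cache_miss_done\n");
	cached_dev_bio_complete(s);
}

static void cached_dev_read_error(struct fake_search *s)
{
	/* was request_read_error: retry from the backing device, then finish */
	printf("cached_dev_read_error\n");
	cached_dev_cache_miss_done(s);
}

static void cached_dev_read_done(struct fake_search *s)
{
	/* was request_read_done: copy bounce buffers, maybe insert into cache */
	printf("cached_dev_read_done\n");
	cached_dev_cache_miss_done(s);
}

static void cached_dev_read_done_bh(struct fake_search *s)
{
	/* was request_read_done_bh: same three-way dispatch as in the patch */
	if (s->error)
		cached_dev_read_error(s);
	else if (s->cache_bio || s->verify)
		cached_dev_read_done(s);
	else
		cached_dev_bio_complete(s);
}

static void cached_dev_read(struct fake_search *s)
{
	/* was request_read: kick off the lookup, then continue in _done_bh */
	printf("cached_dev_read\n");
	cached_dev_read_done_bh(s);
}

int main(void)
{
	struct fake_search hit  = { false, false, false };
	struct fake_search miss = { false, true,  false };

	cached_dev_read(&hit);   /* clean hit: straight to bio_complete */
	cached_dev_read(&miss);  /* miss: read_done then cache_miss_done */
	return 0;
}

Running it prints the hit path (cached_dev_read, cached_dev_bio_complete) followed by the miss path (cached_dev_read, cached_dev_read_done, cached_dev_cache_miss_done, cached_dev_bio_complete), which matches the renames introduced here.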