Commit cb7a583e in openeuler/Kernel
Authored Dec 16, 2013 by Kent Overstreet
bcache: kill closure locking usage
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Parent: a5ae4300
Showing 7 changed files with 98 additions and 55 deletions (+98 -55)
drivers/md/bcache/bcache.h    +6  -3
drivers/md/bcache/btree.c     +34 -18
drivers/md/bcache/btree.h     +2  -1
drivers/md/bcache/debug.c     +2  -5
drivers/md/bcache/journal.c   +14 -13
drivers/md/bcache/journal.h   +1  -0
drivers/md/bcache/super.c     +39 -15
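Every file in this commit applies the same conversion: a struct closure_with_waitlist that doubled as a lock becomes a plain struct closure plus a struct semaphore initialized to 1; closure_lock() becomes down() on that semaphore followed by closure_init(); and closure_return() becomes closure_return_with_destructor() with a destructor whose only job is to up() the semaphore. A condensed before/after sketch of the pattern, assembled from the sb_write hunks in super.c below (illustrative only, not a standalone compilable unit):

        /* Before: the closure_with_waitlist is itself the lock. */
        struct closure *cl = &dc->sb_write.cl;

        closure_lock(&dc->sb_write, parent);    /* take the lock */
        /* ... build and submit the superblock bio ... */
        closure_return(cl);                     /* completion releases the lock */

        /* After: a bare closure, serialized by dc->sb_write_mutex
         * (set up with sema_init(&dc->sb_write_mutex, 1)); the closure's
         * destructor releases the semaphore once the write completes. */
        static void bch_write_bdev_super_unlock(struct closure *cl)
        {
                struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

                up(&dc->sb_write_mutex);
        }

        struct closure *cl = &dc->sb_write;

        down(&dc->sb_write_mutex);              /* serialize superblock writers */
        closure_init(cl, parent);
        /* ... build and submit the superblock bio ... */
        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);

The waitlist that closure_with_waitlist provided is no longer needed: callers that used to wait on the closure's waitlist (closure_wait_event() in debug.c and mca_reap()) now simply sleep in down().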
drivers/md/bcache/bcache.h

@@ -309,7 +309,8 @@ struct cached_dev {
         struct cache_sb         sb;
         struct bio              sb_bio;
         struct bio_vec          sb_bv[1];
-        struct closure_with_waitlist sb_write;
+        struct closure          sb_write;
+        struct semaphore        sb_write_mutex;
 
         /* Refcount on the cache set. Always nonzero when we're caching. */
         atomic_t                count;
@@ -514,7 +515,8 @@ struct cache_set {
         uint64_t                cached_dev_sectors;
         struct closure          caching;
 
-        struct closure_with_waitlist sb_write;
+        struct closure          sb_write;
+        struct semaphore        sb_write_mutex;
 
         mempool_t               *search;
         mempool_t               *bio_meta;
@@ -635,7 +637,8 @@ struct cache_set {
         unsigned                nr_uuids;
         struct uuid_entry       *uuids;
         BKEY_PADDED(uuid_bucket);
-        struct closure_with_waitlist uuid_write;
+        struct closure          uuid_write;
+        struct semaphore        uuid_write_mutex;
 
         /*
          * A btree node on disk could have too many bsets for an iterator to fit
drivers/md/bcache/btree.c

@@ -340,9 +340,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
         w->journal = NULL;
 }
 
+static void btree_node_write_unlock(struct closure *cl)
+{
+        struct btree *b = container_of(cl, struct btree, io);
+
+        up(&b->io_mutex);
+}
+
 static void __btree_node_write_done(struct closure *cl)
 {
-        struct btree *b = container_of(cl, struct btree, io.cl);
+        struct btree *b = container_of(cl, struct btree, io);
         struct btree_write *w = btree_prev_write(b);
 
         bch_bbio_free(b->bio, b->c);
@@ -353,12 +360,12 @@ static void __btree_node_write_done(struct closure *cl)
                 queue_delayed_work(btree_io_wq, &b->work,
                                    msecs_to_jiffies(30000));
 
-        closure_return(cl);
+        closure_return_with_destructor(cl, btree_node_write_unlock);
 }
 
 static void btree_node_write_done(struct closure *cl)
 {
-        struct btree *b = container_of(cl, struct btree, io.cl);
+        struct btree *b = container_of(cl, struct btree, io);
         struct bio_vec *bv;
         int n;
 
@@ -371,7 +378,7 @@ static void btree_node_write_done(struct closure *cl)
 static void btree_node_write_endio(struct bio *bio, int error)
 {
         struct closure *cl = bio->bi_private;
-        struct btree *b = container_of(cl, struct btree, io.cl);
+        struct btree *b = container_of(cl, struct btree, io);
 
         if (error)
                 set_btree_node_io_error(b);
@@ -382,7 +389,7 @@ static void btree_node_write_endio(struct bio *bio, int error)
 
 static void do_btree_node_write(struct btree *b)
 {
-        struct closure *cl = &b->io.cl;
+        struct closure *cl = &b->io;
         struct bset *i = b->sets[b->nsets].data;
         BKEY_PADDED(key) k;
 
@@ -435,7 +442,7 @@ static void do_btree_node_write(struct btree *b)
                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
                 closure_sync(cl);
-                __btree_node_write_done(cl);
+                continue_at_nobarrier(cl, __btree_node_write_done, NULL);
         }
 }
 
@@ -454,7 +461,8 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
         cancel_delayed_work(&b->work);
 
         /* If caller isn't waiting for write, parent refcount is cache set */
-        closure_lock(&b->io, parent ?: &b->c->cl);
+        down(&b->io_mutex);
+        closure_init(&b->io, parent ?: &b->c->cl);
 
         clear_bit(BTREE_NODE_dirty, &b->flags);
         change_bit(BTREE_NODE_write_idx, &b->flags);
@@ -554,7 +562,8 @@ static void mca_reinit(struct btree *b)
 static void mca_data_free(struct btree *b)
 {
         struct bset_tree *t = b->sets;
-        BUG_ON(!closure_is_unlocked(&b->io.cl));
+
+        BUG_ON(b->io_mutex.count != 1);
 
         if (bset_prev_bytes(b) < PAGE_SIZE)
                 kfree(t->prev);
@@ -635,7 +644,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
         INIT_LIST_HEAD(&b->list);
         INIT_DELAYED_WORK(&b->work, btree_node_write_work);
         b->c = c;
-        closure_init_unlocked(&b->io);
+        sema_init(&b->io_mutex, 1);
 
         mca_data_alloc(b, k, gfp);
         return b;
@@ -653,22 +662,29 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
 
         BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
 
-        if (b->page_order < min_order ||
-            (!flush &&
-             (btree_node_dirty(b) ||
-              atomic_read(&b->io.cl.remaining) != -1))) {
-                rw_unlock(true, b);
-                return -ENOMEM;
+        if (b->page_order < min_order)
+                goto out_unlock;
+
+        if (!flush) {
+                if (btree_node_dirty(b))
+                        goto out_unlock;
+
+                if (down_trylock(&b->io_mutex))
+                        goto out_unlock;
+                up(&b->io_mutex);
         }
 
         if (btree_node_dirty(b))
                 bch_btree_node_write_sync(b);
 
         /* wait for any in flight btree write */
-        closure_wait_event(&b->io.wait, &cl,
-                           atomic_read(&b->io.cl.remaining) == -1);
+        down(&b->io_mutex);
+        up(&b->io_mutex);
 
         return 0;
+out_unlock:
+        rw_unlock(true, b);
+        return -ENOMEM;
 }
 
 static unsigned long bch_mca_scan(struct shrinker *shrink,
@@ -918,7 +934,7 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
         if (!b->sets->data)
                 goto err;
 out:
-        BUG_ON(!closure_is_unlocked(&b->io.cl));
+        BUG_ON(b->io_mutex.count != 1);
 
         bkey_copy(&b->key, k);
         list_move(&b->list, &c->btree_cache);
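Two small semaphore idioms in the mca_reap() hunk above are worth calling out: down_trylock() immediately followed by up() probes whether a btree write currently holds io_mutex without blocking, and down() followed by up() blocks until any in-flight write releases it, replacing the old closure_wait_event() on io.wait. A minimal userspace analogue using POSIX semaphores (an illustration of the idiom only; the program and its names are hypothetical, not bcache code):

        #include <semaphore.h>
        #include <stdbool.h>
        #include <stdio.h>

        static sem_t io_mutex;          /* binary semaphore, like sema_init(&b->io_mutex, 1) */

        /* Non-blocking probe: is a "write" currently holding the semaphore?
         * Mirrors the down_trylock()/up() pair in mca_reap(). */
        static bool write_in_flight(void)
        {
                if (sem_trywait(&io_mutex))     /* non-zero: someone else holds it */
                        return true;
                sem_post(&io_mutex);            /* we only probed, so give it straight back */
                return false;
        }

        /* Blocking drain: wait for any in-flight holder to finish.
         * Mirrors the down()/up() pair in mca_reap(). */
        static void wait_for_write(void)
        {
                sem_wait(&io_mutex);
                sem_post(&io_mutex);
        }

        int main(void)
        {
                sem_init(&io_mutex, 0, 1);
                printf("write in flight? %s\n", write_in_flight() ? "yes" : "no");
                wait_for_write();
                sem_destroy(&io_mutex);
                return 0;
        }

Build with cc -pthread; the point is only that a semaphore initialized to 1 can stand in for the old closure's "locked" state, with the probe and the drain expressed directly as semaphore operations.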
drivers/md/bcache/btree.h

@@ -143,7 +143,8 @@ struct btree {
         struct bset_tree        sets[MAX_BSETS];
 
         /* For outstanding btree writes, used as a lock - protects write_idx */
-        struct closure_with_waitlist    io;
+        struct closure          io;
+        struct semaphore        io_mutex;
 
         struct list_head        list;
         struct delayed_work     work;
drivers/md/bcache/debug.c

@@ -127,9 +127,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
         if (!b->c->verify)
                 return;
 
-        closure_wait_event(&b->io.wait, &cl,
-                           atomic_read(&b->io.cl.remaining) == -1);
-
+        down(&b->io_mutex);
         mutex_lock(&b->c->verify_lock);
 
         bkey_copy(&v->key, &b->key);
@@ -137,8 +135,6 @@ void bch_btree_verify(struct btree *b, struct bset *new)
         v->level = b->level;
         bch_btree_node_read(v);
-        closure_wait_event(&v->io.wait, &cl,
-                           atomic_read(&b->io.cl.remaining) == -1);
 
         if (new->keys != v->sets[0].data->keys ||
             memcmp(new->start,
@@ -167,6 +163,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
         }
 
         mutex_unlock(&b->c->verify_lock);
+        up(&b->io_mutex);
 }
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
drivers/md/bcache/journal.c

@@ -564,6 +564,14 @@ static void journal_write_done(struct closure *cl)
         continue_at_nobarrier(cl, journal_write, system_wq);
 }
 
+static void journal_write_unlock(struct closure *cl)
+{
+        struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+        c->journal.io_in_flight = 0;
+        spin_unlock(&c->journal.lock);
+}
+
 static void journal_write_unlocked(struct closure *cl)
         __releases(c->journal.lock)
 {
@@ -578,15 +586,7 @@ static void journal_write_unlocked(struct closure *cl)
         bio_list_init(&list);
 
         if (!w->need_write) {
-                /*
-                 * XXX: have to unlock closure before we unlock journal lock,
-                 * else we race with bch_journal(). But this way we race
-                 * against cache set unregister. Doh.
-                 */
-                set_closure_fn(cl, NULL, NULL);
-                closure_sub(cl, CLOSURE_RUNNING + 1);
-                spin_unlock(&c->journal.lock);
-                return;
+                closure_return_with_destructor(cl, journal_write_unlock);
         } else if (journal_full(&c->journal)) {
                 journal_reclaim(c);
                 spin_unlock(&c->journal.lock);
@@ -662,10 +662,12 @@ static void journal_try_write(struct cache_set *c)
 
         w->need_write = true;
 
-        if (closure_trylock(cl, &c->cl))
-                journal_write_unlocked(cl);
-        else
+        if (!c->journal.io_in_flight) {
+                c->journal.io_in_flight = 1;
+                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+        } else {
                 spin_unlock(&c->journal.lock);
+        }
 }
 
 static struct journal_write *journal_wait_for_write(struct cache_set *c,
@@ -793,7 +795,6 @@ int bch_journal_alloc(struct cache_set *c)
 {
         struct journal *j = &c->journal;
 
-        closure_init_unlocked(&j->io);
         spin_lock_init(&j->lock);
         INIT_DELAYED_WORK(&j->work, journal_write_work);
 
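The journal side can no longer "trylock" its closure, so the commit adds an io_in_flight flag that is read and set under c->journal.lock: journal_try_write() only starts a write if no journal write closure is already running, and the destructor installed by closure_return_with_destructor() clears the flag and drops the spinlock in one place, presumably removing the ordering problem the deleted XXX comment described. A condensed view of the two sides of that handshake, taken from the hunks above (not a standalone compilable unit):

        /* Submission side (journal_try_write(), entered with c->journal.lock held):
         * start a write only if none is already in flight. */
        if (!c->journal.io_in_flight) {
                c->journal.io_in_flight = 1;
                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
        } else {
                spin_unlock(&c->journal.lock);
        }

        /* Completion side: the closure destructor clears the flag and drops
         * c->journal.lock, which journal_write_unlocked() still holds on the
         * path that calls closure_return_with_destructor(). */
        static void journal_write_unlock(struct closure *cl)
        {
                struct cache_set *c = container_of(cl, struct cache_set, journal.io);

                c->journal.io_in_flight = 0;
                spin_unlock(&c->journal.lock);
        }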
drivers/md/bcache/journal.h

@@ -104,6 +104,7 @@ struct journal {
         /* used when waiting because the journal was full */
         struct closure_waitlist wait;
         struct closure          io;
+        int                     io_in_flight;
         struct delayed_work     work;
 
         /* Number of blocks free in the bucket(s) we're currently writing to */
drivers/md/bcache/super.c

@@ -225,7 +225,7 @@ static void write_bdev_super_endio(struct bio *bio, int error)
         struct cached_dev *dc = bio->bi_private;
         /* XXX: error checking */
 
-        closure_put(&dc->sb_write.cl);
+        closure_put(&dc->sb_write);
 }
 
 static void __write_super(struct cache_sb *sb, struct bio *bio)
@@ -263,12 +263,20 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
         submit_bio(REQ_WRITE, bio);
 }
 
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+        struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+        up(&dc->sb_write_mutex);
+}
+
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
 {
-        struct closure *cl = &dc->sb_write.cl;
+        struct closure *cl = &dc->sb_write;
         struct bio *bio = &dc->sb_bio;
 
-        closure_lock(&dc->sb_write, parent);
+        down(&dc->sb_write_mutex);
+        closure_init(cl, parent);
 
         bio_reset(bio);
         bio->bi_bdev = dc->bdev;
@@ -278,7 +286,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
         closure_get(cl);
         __write_super(&dc->sb, bio);
 
-        closure_return(cl);
+        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
 
 static void write_super_endio(struct bio *bio, int error)
@@ -286,16 +294,24 @@ static void write_super_endio(struct bio *bio, int error)
         struct cache *ca = bio->bi_private;
 
         bch_count_io_errors(ca, error, "writing superblock");
-        closure_put(&ca->set->sb_write.cl);
+        closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+        struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+        up(&c->sb_write_mutex);
 }
 
 void bcache_write_super(struct cache_set *c)
 {
-        struct closure *cl = &c->sb_write.cl;
+        struct closure *cl = &c->sb_write;
         struct cache *ca;
         unsigned i;
 
-        closure_lock(&c->sb_write, &c->cl);
+        down(&c->sb_write_mutex);
+        closure_init(cl, &c->cl);
 
         c->sb.seq++;
@@ -317,7 +333,7 @@ void bcache_write_super(struct cache_set *c)
                 __write_super(&ca->sb, bio);
         }
 
-        closure_return(cl);
+        closure_return_with_destructor(cl, bcache_write_super_unlock);
 }
 
 /* UUID io */
@@ -325,23 +341,31 @@ void bcache_write_super(struct cache_set *c)
 static void uuid_endio(struct bio *bio, int error)
 {
         struct closure *cl = bio->bi_private;
-        struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
         cache_set_err_on(error, c, "accessing uuids");
         bch_bbio_free(bio, c);
         closure_put(cl);
 }
 
+static void uuid_io_unlock(struct closure *cl)
+{
+        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+        up(&c->uuid_write_mutex);
+}
+
 static void uuid_io(struct cache_set *c, unsigned long rw,
                     struct bkey *k, struct closure *parent)
 {
-        struct closure *cl = &c->uuid_write.cl;
+        struct closure *cl = &c->uuid_write;
         struct uuid_entry *u;
         unsigned i;
         char buf[80];
 
         BUG_ON(!parent);
-        closure_lock(&c->uuid_write, parent);
+        down(&c->uuid_write_mutex);
+        closure_init(cl, parent);
 
         for (i = 0; i < KEY_PTRS(k); i++) {
                 struct bio *bio = bch_bbio_alloc(c);
@@ -368,7 +392,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                  u - c->uuids, u->uuid, u->label,
                  u->first_reg, u->last_reg, u->invalidated);
 
-        closure_return(cl);
+        closure_return_with_destructor(cl, uuid_io_unlock);
 }
 
 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
@@ -1098,7 +1122,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
         set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
         kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
         INIT_WORK(&dc->detach, cached_dev_detach_finish);
-        closure_init_unlocked(&dc->sb_write);
+        sema_init(&dc->sb_write_mutex, 1);
         INIT_LIST_HEAD(&dc->io_lru);
         spin_lock_init(&dc->io_lock);
         bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
@@ -1454,11 +1478,11 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
         c->sort_crit_factor = int_sqrt(c->btree_pages);
 
-        closure_init_unlocked(&c->sb_write);
+        sema_init(&c->sb_write_mutex, 1);
         mutex_init(&c->bucket_lock);
         init_waitqueue_head(&c->try_wait);
         init_waitqueue_head(&c->bucket_wait);
-        closure_init_unlocked(&c->uuid_write);
+        sema_init(&c->uuid_write_mutex, 1);
         mutex_init(&c->sort_lock);
 
         spin_lock_init(&c->sort_time.lock);