OpenHarmony / kernel_linux

Commit 2599b53b
Authored Jul 24, 2013 by Kent Overstreet
bcache: Move sector allocator to alloc.c
Just reorganizing things a bit.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Parent: 220bb38c
Showing 4 changed files with 189 additions and 186 deletions (+189 −186)
drivers/md/bcache/alloc.c      +180  −0
drivers/md/bcache/bcache.h       +4  −0
drivers/md/bcache/request.c      +4  −182
drivers/md/bcache/request.h      +1  −4
drivers/md/bcache/alloc.c

@@ -487,8 +487,188 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 	return ret;
 }

+/* Sector allocator */
+
+struct open_bucket {
+	struct list_head	list;
+	unsigned		last_write_point;
+	unsigned		sectors_free;
+	BKEY_PADDED(key);
+};
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we look for a bucket where
+ * the last write to it was sequential with the current write, and failing that
+ * we look for a bucket that was last used by the same task.
+ *
+ * The ideas is if you've got multiple tasks pulling data into the cache at the
+ * same time, you'll get better cache utilization if you try to segregate their
+ * data and preserve locality.
+ *
+ * For example, say you've starting Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache awhile, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic.
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+					    const struct bkey *search,
+					    unsigned write_point,
+					    struct bkey *alloc)
+{
+	struct open_bucket *ret, *ret_task = NULL;
+
+	list_for_each_entry_reverse(ret, &c->data_buckets, list)
+		if (!bkey_cmp(&ret->key, search))
+			goto found;
+		else if (ret->last_write_point == write_point)
+			ret_task = ret;
+
+	ret = ret_task ?: list_first_entry(&c->data_buckets,
+					   struct open_bucket, list);
+found:
+	if (!ret->sectors_free && KEY_PTRS(alloc)) {
+		ret->sectors_free = c->sb.bucket_size;
+		bkey_copy(&ret->key, alloc);
+		bkey_init(alloc);
+	}
+
+	if (!ret->sectors_free)
+		ret = NULL;
+
+	return ret;
+}
+
+/*
+ * Allocates some space in the cache to write to, and k to point to the newly
+ * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
+ * end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If s->writeback is true, will not fail.
+ */
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+		       unsigned write_point, unsigned write_prio, bool wait)
+{
+	struct open_bucket *b;
+	BKEY_PADDED(key) alloc;
+	unsigned i;
+
+	/*
+	 * We might have to allocate a new bucket, which we can't do with a
+	 * spinlock held. So if we have to allocate, we drop the lock, allocate
+	 * and then retry. KEY_PTRS() indicates whether alloc points to
+	 * allocated bucket(s).
+	 */
+	bkey_init(&alloc.key);
+	spin_lock(&c->data_bucket_lock);
+
+	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
+		unsigned watermark = write_prio
+			? WATERMARK_MOVINGGC
+			: WATERMARK_NONE;
+
+		spin_unlock(&c->data_bucket_lock);
+
+		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+			return false;
+
+		spin_lock(&c->data_bucket_lock);
+	}
+
+	/*
+	 * If we had to allocate, we might race and not need to allocate the
+	 * second time we call find_data_bucket(). If we allocated a bucket but
+	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+	 */
+	if (KEY_PTRS(&alloc.key))
+		__bkey_put(c, &alloc.key);
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++)
+		EBUG_ON(ptr_stale(c, &b->key, i));
+
+	/* Set up the pointer to the space we're allocating: */
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++)
+		k->ptr[i] = b->key.ptr[i];
+
+	sectors = min(sectors, b->sectors_free);
+
+	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+	SET_KEY_SIZE(k, sectors);
+	SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+	/*
+	 * Move b to the end of the lru, and keep track of what this bucket was
+	 * last used for:
+	 */
+	list_move_tail(&b->list, &c->data_buckets);
+	bkey_copy_key(&b->key, k);
+	b->last_write_point = write_point;
+	b->sectors_free -= sectors;
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++) {
+		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+		atomic_long_add(sectors,
+				&PTR_CACHE(c, &b->key, i)->sectors_written);
+	}
+
+	if (b->sectors_free < c->sb.block_size)
+		b->sectors_free = 0;
+
+	/*
+	 * k takes refcounts on the buckets it points to until it's inserted
+	 * into the btree, but if we're done with this bucket we just transfer
+	 * get_data_bucket()'s refcount.
+	 */
+	if (b->sectors_free)
+		for (i = 0; i < KEY_PTRS(&b->key); i++)
+			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+	spin_unlock(&c->data_bucket_lock);
+	return true;
+}
+
 /* Init */

+void bch_open_buckets_free(struct cache_set *c)
+{
+	struct open_bucket *b;
+
+	while (!list_empty(&c->data_buckets)) {
+		b = list_first_entry(&c->data_buckets,
+				     struct open_bucket, list);
+		list_del(&b->list);
+		kfree(b);
+	}
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+	int i;
+
+	spin_lock_init(&c->data_bucket_lock);
+
+	for (i = 0; i < 6; i++) {
+		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+		if (!b)
+			return -ENOMEM;
+
+		list_add(&b->list, &c->data_buckets);
+	}
+
+	return 0;
+}
+
 int bch_cache_allocator_start(struct cache *ca)
 {
 	struct task_struct *k = kthread_run(bch_allocator_thread,
...
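With the allocator moved into alloc.c and exported, a submitter now passes the cache set and its write hints explicitly instead of handing the allocator a struct data_insert_op. The sketch below is illustration only, not part of this commit: example_write_to_cache is a hypothetical wrapper that simply mirrors the updated request.c call site shown further down in this diff, and it assumes the bcache-internal headers defining struct data_insert_op and bch_alloc_sectors() are included.

/* Hypothetical caller, for illustration: uses the new bch_alloc_sectors()
 * signature exactly as the updated request.c does below. */
static bool example_write_to_cache(struct data_insert_op *op, struct bkey *k,
				   unsigned sectors)
{
	/* On success, KEY_SIZE(k) and KEY_OFFSET(k) describe how much space
	 * was actually allocated (possibly fewer than @sectors). */
	return bch_alloc_sectors(op->c, k, sectors,
				 op->write_point, op->write_prio,
				 op->writeback);
}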
drivers/md/bcache/bcache.h

@@ -1170,6 +1170,8 @@ int __bch_bucket_alloc_set(struct cache_set *, unsigned,
 			   struct bkey *, int, bool);
 int bch_bucket_alloc_set(struct cache_set *, unsigned,
 			 struct bkey *, int, bool);
+bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
+		       unsigned, unsigned, bool);

 __printf(2, 3)
 bool bch_cache_set_error(struct cache_set *, const char *, ...);
@@ -1210,6 +1212,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
 void bch_moving_init_cache_set(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+void bch_open_buckets_free(struct cache_set *);

 int bch_cache_allocator_start(struct cache *ca);
 int bch_cache_allocator_init(struct cache *ca);
...
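bcache.h now exports both the sector allocator and the open-bucket pool lifecycle. Below is a hedged sketch of how the two lifecycle helpers are meant to be paired; the real call sites (the cache_set setup and teardown paths) are outside the files touched by this commit, and the function names are hypothetical.

/* Hypothetical pairing of the exported lifecycle helpers. */
static int example_open_buckets_setup(struct cache_set *c)
{
	/* Preallocates the small pool of struct open_bucket (returns -ENOMEM
	 * on failure) and initializes c->data_bucket_lock. */
	return bch_open_buckets_alloc(c);
}

static void example_open_buckets_teardown(struct cache_set *c)
{
	/* Frees every open_bucket remaining on c->data_buckets. */
	bch_open_buckets_free(c);
}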
drivers/md/bcache/request.c

@@ -255,186 +255,6 @@ static void bch_data_insert_keys(struct closure *cl)
 	closure_return(cl);
 }

-struct open_bucket {
-	struct list_head	list;
-	struct task_struct	*last;
-	unsigned		sectors_free;
-	BKEY_PADDED(key);
-};
-
-void bch_open_buckets_free(struct cache_set *c)
-{
-	struct open_bucket *b;
-
-	while (!list_empty(&c->data_buckets)) {
-		b = list_first_entry(&c->data_buckets,
-				     struct open_bucket, list);
-		list_del(&b->list);
-		kfree(b);
-	}
-}
-
-int bch_open_buckets_alloc(struct cache_set *c)
-{
-	int i;
-
-	spin_lock_init(&c->data_bucket_lock);
-
-	for (i = 0; i < 6; i++) {
-		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
-		if (!b)
-			return -ENOMEM;
-
-		list_add(&b->list, &c->data_buckets);
-	}
-
-	return 0;
-}
-
-/*
- * We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
- *
- * The ideas is if you've got multiple tasks pulling data into the cache at the
- * same time, you'll get better cache utilization if you try to segregate their
- * data and preserve locality.
- *
- * For example, say you've starting Firefox at the same time you're copying a
- * bunch of files. Firefox will likely end up being fairly hot and stay in the
- * cache awhile, but the data you copied might not be; if you wrote all that
- * data to the same buckets it'd get invalidated at the same time.
- *
- * Both of those tasks will be doing fairly random IO so we can't rely on
- * detecting sequential IO to segregate their data, but going off of the task
- * should be a sane heuristic.
- */
-static struct open_bucket *pick_data_bucket(struct cache_set *c,
-					    const struct bkey *search,
-					    struct task_struct *task,
-					    struct bkey *alloc)
-{
-	struct open_bucket *ret, *ret_task = NULL;
-
-	list_for_each_entry_reverse(ret, &c->data_buckets, list)
-		if (!bkey_cmp(&ret->key, search))
-			goto found;
-		else if (ret->last == task)
-			ret_task = ret;
-
-	ret = ret_task ?: list_first_entry(&c->data_buckets,
-					   struct open_bucket, list);
-found:
-	if (!ret->sectors_free && KEY_PTRS(alloc)) {
-		ret->sectors_free = c->sb.bucket_size;
-		bkey_copy(&ret->key, alloc);
-		bkey_init(alloc);
-	}
-
-	if (!ret->sectors_free)
-		ret = NULL;
-
-	return ret;
-}
-
-/*
- * Allocates some space in the cache to write to, and k to point to the newly
- * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
- * end of the newly allocated space).
- *
- * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
- * sectors were actually allocated.
- *
- * If s->writeback is true, will not fail.
- */
-static bool bch_alloc_sectors(struct data_insert_op *op,
-			      struct bkey *k, unsigned sectors)
-{
-	struct cache_set *c = op->c;
-	struct open_bucket *b;
-	BKEY_PADDED(key) alloc;
-	unsigned i;
-
-	/*
-	 * We might have to allocate a new bucket, which we can't do with a
-	 * spinlock held. So if we have to allocate, we drop the lock, allocate
-	 * and then retry. KEY_PTRS() indicates whether alloc points to
-	 * allocated bucket(s).
-	 */
-	bkey_init(&alloc.key);
-	spin_lock(&c->data_bucket_lock);
-
-	while (!(b = pick_data_bucket(c, k, op->task, &alloc.key))) {
-		unsigned watermark = op->write_prio
-			? WATERMARK_MOVINGGC
-			: WATERMARK_NONE;
-
-		spin_unlock(&c->data_bucket_lock);
-
-		if (bch_bucket_alloc_set(c, watermark,
-					 &alloc.key, 1, op->writeback))
-			return false;
-
-		spin_lock(&c->data_bucket_lock);
-	}
-
-	/*
-	 * If we had to allocate, we might race and not need to allocate the
-	 * second time we call find_data_bucket(). If we allocated a bucket but
-	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
-	 */
-	if (KEY_PTRS(&alloc.key))
-		__bkey_put(c, &alloc.key);
-
-	for (i = 0; i < KEY_PTRS(&b->key); i++)
-		EBUG_ON(ptr_stale(c, &b->key, i));
-
-	/* Set up the pointer to the space we're allocating: */
-
-	for (i = 0; i < KEY_PTRS(&b->key); i++)
-		k->ptr[i] = b->key.ptr[i];
-
-	sectors = min(sectors, b->sectors_free);
-
-	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
-	SET_KEY_SIZE(k, sectors);
-	SET_KEY_PTRS(k, KEY_PTRS(&b->key));
-
-	/*
-	 * Move b to the end of the lru, and keep track of what this bucket was
-	 * last used for:
-	 */
-	list_move_tail(&b->list, &c->data_buckets);
-	bkey_copy_key(&b->key, k);
-	b->last = op->task;
-	b->sectors_free -= sectors;
-
-	for (i = 0; i < KEY_PTRS(&b->key); i++) {
-		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
-
-		atomic_long_add(sectors,
-				&PTR_CACHE(c, &b->key, i)->sectors_written);
-	}
-
-	if (b->sectors_free < c->sb.block_size)
-		b->sectors_free = 0;
-
-	/*
-	 * k takes refcounts on the buckets it points to until it's inserted
-	 * into the btree, but if we're done with this bucket we just transfer
-	 * get_data_bucket()'s refcount.
-	 */
-	if (b->sectors_free)
-		for (i = 0; i < KEY_PTRS(&b->key); i++)
-			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
-
-	spin_unlock(&c->data_bucket_lock);
-	return true;
-}
-
 static void bch_data_invalidate(struct closure *cl)
 {
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
...
@@ -545,7 +365,9 @@ static void bch_data_insert_start(struct closure *cl)
 		SET_KEY_INODE(k, op->inode);
 		SET_KEY_OFFSET(k, bio->bi_sector);

-		if (!bch_alloc_sectors(op, k, bio_sectors(bio)))
+		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
+				       op->write_point, op->write_prio,
+				       op->writeback))
 			goto err;

 		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
@@ -968,7 +790,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 	s->iop.c		= d->c;
 	s->d			= d;
 	s->op.lock		= -1;
-	s->iop.task		= current;
+	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->orig_bio		= bio;
 	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
 	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
...
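On the request.c side, the stored task pointer is replaced by a 16-bit write point derived from the submitting task, so the allocator never has to compare or dereference task_struct pointers. A minimal sketch of that derivation follows; the helper name is hypothetical, while the hash expression is the one used by search_alloc() in the hunk above.

#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Hypothetical helper: derive the per-task write point the same way the
 * updated search_alloc() does, instead of stashing a task_struct pointer. */
static inline uint16_t example_current_write_point(void)
{
	return hash_long((unsigned long) current, 16);
}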
drivers/md/bcache/request.h

@@ -6,10 +6,10 @@
 struct data_insert_op {
 	struct closure		cl;
 	struct cache_set	*c;
-	struct task_struct	*task;
 	struct bio		*bio;

 	unsigned		inode;
+	uint16_t		write_point;
 	uint16_t		write_prio;
 	short			error;
@@ -31,9 +31,6 @@ struct data_insert_op {
 unsigned bch_get_congested(struct cache_set *);
 void bch_data_insert(struct closure *cl);

-void bch_open_buckets_free(struct cache_set *);
-int bch_open_buckets_alloc(struct cache_set *);
-
 void bch_cached_dev_request_init(struct cached_dev *dc);
 void bch_flash_dev_request_init(struct bcache_device *d);
...