Commit 444fc0b6 authored by Kent Overstreet

bcache: Initialize sectors_dirty when attaching

Previously, dirty_data wouldn't get initialized until the first garbage
collection... which was a bit of a problem for background writeback (as
the PD controller keys off of it) and also confusing for users.

This is also prep work for making background writeback aware of raid5/6
stripes.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Parent 6ded34d1
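
The first hunk below touches the fields that feed the background-writeback PD controller (sectors_dirty, sectors_dirty_last, sectors_dirty_derivative). As a rough illustration of why an uninitialized count matters (a toy sketch, not the bcache controller: the target field, the scaling constants, and pd_update() are invented for the example), a PD update driven by a count that is still zero produces no writeback rate until the first garbage collection corrects the count:

/*
 * Illustrative sketch only (not bcache code): a toy PD update driven by
 * the sectors_dirty fields shown in the first hunk below.  The "target"
 * field, the scaling constants, and pd_update() are invented here.
 */
#include <stdio.h>

struct pd_state {
    long sectors_dirty;            /* current dirty-sector count */
    long sectors_dirty_last;       /* count seen at the previous update */
    long sectors_dirty_derivative; /* smoothed rate of change */
    long target;                   /* dirty level the controller aims for */
};

/* Return a writeback rate (sectors per interval); never negative. */
static long pd_update(struct pd_state *s)
{
    long proportional = s->sectors_dirty - s->target;
    long change = s->sectors_dirty - s->sectors_dirty_last;

    /* crude exponential smoothing of the derivative term */
    s->sectors_dirty_derivative += (change - s->sectors_dirty_derivative) / 4;
    s->sectors_dirty_last = s->sectors_dirty;

    long rate = proportional / 16 + s->sectors_dirty_derivative;
    return rate > 0 ? rate : 0;
}

int main(void)
{
    /* With the count left at 0 the controller sees no dirty data and
     * computes a zero writeback rate, even though the backing device is
     * dirty; this is the window the patch closes. */
    struct pd_state s = { .sectors_dirty = 0, .target = 1024 };
    printf("rate with uninitialized count: %ld\n", pd_update(&s));

    s.sectors_dirty = 8192; /* count after initialization at attach */
    printf("rate with initialized count:   %ld\n", pd_update(&s));
    return 0;
}

The patch removes that window by walking the btree at attach time and summing the sizes of the device's dirty keys into sectors_dirty, so the controller starts from an accurate count instead of waiting for the first garbage collection.
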
@@ -438,7 +438,6 @@ struct bcache_device {
     atomic_t detaching;
     atomic_long_t sectors_dirty;
-    unsigned long sectors_dirty_gc;
     unsigned long sectors_dirty_last;
     long sectors_dirty_derivative;
@@ -1225,6 +1224,7 @@ void bch_cache_set_stop(struct cache_set *);
 struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
+void bch_sectors_dirty_init(struct cached_dev *);
 void bch_cached_dev_writeback_init(struct cached_dev *);
 void bch_moving_init_cache_set(struct cache_set *);
@@ -1119,11 +1119,8 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
         gc->nkeys++;
         gc->data += KEY_SIZE(k);
-        if (KEY_DIRTY(k)) {
+        if (KEY_DIRTY(k))
             gc->dirty += KEY_SIZE(k);
-            if (d)
-                d->sectors_dirty_gc += KEY_SIZE(k);
-        }
     }
     for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1377,7 +1374,6 @@ static void btree_gc_start(struct cache_set *c)
 {
     struct cache *ca;
     struct bucket *b;
-    struct bcache_device **d;
     unsigned i;
     if (!c->gc_mark_valid)
@@ -1395,12 +1391,6 @@ static void btree_gc_start(struct cache_set *c)
             SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
         }
-    for (d = c->devices;
-         d < c->devices + c->nr_uuids;
-         d++)
-        if (*d)
-            (*d)->sectors_dirty_gc = 0;
     mutex_unlock(&c->bucket_lock);
 }
@@ -1409,7 +1399,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
     size_t available = 0;
     struct bucket *b;
     struct cache *ca;
-    struct bcache_device **d;
     unsigned i;
     mutex_lock(&c->bucket_lock);
@@ -1452,22 +1441,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
         }
     }
-    for (d = c->devices;
-         d < c->devices + c->nr_uuids;
-         d++)
-        if (*d) {
-            unsigned long last =
-                atomic_long_read(&((*d)->sectors_dirty));
-            long difference = (*d)->sectors_dirty_gc - last;
-            pr_debug("sectors dirty off by %li", difference);
-            (*d)->sectors_dirty_last += difference;
-            atomic_long_set(&((*d)->sectors_dirty),
-                    (*d)->sectors_dirty_gc);
-        }
     mutex_unlock(&c->bucket_lock);
     return available;
 }
@@ -961,6 +961,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
     atomic_set(&dc->count, 1);
     if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+        bch_sectors_dirty_init(dc);
         atomic_set(&dc->has_dirty, 1);
         atomic_inc(&dc->count);
         bch_writeback_queue(dc);
@@ -377,6 +377,42 @@ static void read_dirty(struct closure *cl)
     refill_dirty(cl);
 }
+/* Init */
+static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
+                                        struct cached_dev *dc)
+{
+    struct bkey *k;
+    struct btree_iter iter;
+    bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
+    while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
+        if (!b->level) {
+            if (KEY_INODE(k) > dc->disk.id)
+                break;
+            if (KEY_DIRTY(k))
+                atomic_long_add(KEY_SIZE(k),
+                        &dc->disk.sectors_dirty);
+        } else {
+            btree(sectors_dirty_init, k, b, op, dc);
+            if (KEY_INODE(k) > dc->disk.id)
+                break;
+            cond_resched();
+        }
+    return 0;
+}
+void bch_sectors_dirty_init(struct cached_dev *dc)
+{
+    struct btree_op op;
+    bch_btree_op_init_stack(&op);
+    btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+}
 void bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
     closure_init_unlocked(&dc->writeback);