Commit b1a67b0f authored by Kent Overstreet, committed by Jens Axboe

bcache: Style/checkpatch fixes

Took out some nested functions, and fixed some more checkpatch
complaints.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: linux-bcache@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 07e86ccb
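Note: the recurring pattern in this patch is hoisting GCC-style nested functions (a non-standard C extension) out to file scope, either as static helpers that take their former captured variables as parameters or as simple macros. Below is a minimal standalone sketch of that before/after shape, using hypothetical names rather than the real bcache structures:

#include <stdio.h>

struct item {
	unsigned prio;
	unsigned used;
};

/*
 * Before: a nested comparison function defined inside its caller, capturing
 * surrounding locals (GCC extension).  After: a file-scope static helper
 * that receives everything it needs through its parameters.
 */
static int item_cmp(const struct item *l, const struct item *r)
{
	return (l->prio * l->used) < (r->prio * r->used);
}

int main(void)
{
	struct item a = { .prio = 2, .used = 3 };
	struct item b = { .prio = 4, .used = 1 };

	printf("a < b: %d\n", item_cmp(&a, &b));
	return 0;
}

The alloc.c and movinggc.c hunks below follow exactly this shape: bucket_prio()/bucket_max_cmp()/bucket_min_cmp() become macros, and bucket_cmp()/bucket_heap_top() become file-scope statics.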
@@ -229,24 +229,14 @@ static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
 	fifo_push(&ca->free_inc, b - ca->buckets);
 }
 
-static void invalidate_buckets_lru(struct cache *ca)
-{
-	unsigned bucket_prio(struct bucket *b)
-	{
-		return ((unsigned) (b->prio - ca->set->min_prio)) *
-			GC_SECTORS_USED(b);
-	}
-
-	bool bucket_max_cmp(struct bucket *l, struct bucket *r)
-	{
-		return bucket_prio(l) < bucket_prio(r);
-	}
-
-	bool bucket_min_cmp(struct bucket *l, struct bucket *r)
-	{
-		return bucket_prio(l) > bucket_prio(r);
-	}
-
+#define bucket_prio(b)						\
+	(((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+
+#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
+#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))
+
+static void invalidate_buckets_lru(struct cache *ca)
+{
 	struct bucket *b;
 	ssize_t i;
...
@@ -644,8 +644,8 @@ struct gc_stat {
  * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
  * flushing dirty data).
  *
- * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down the
- * allocation thread.
+ * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
+ * the allocation thread.
  */
 #define CACHE_SET_UNREGISTERING		0
 #define CACHE_SET_STOPPING		1
@@ -1012,11 +1012,11 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
  * searches - it told you where a key started. It's not used anymore,
  * and can probably be safely dropped.
  */
-#define KEY(dev, sector, len) (struct bkey)				\
-{									\
+#define KEY(dev, sector, len)						\
+((struct bkey) {							\
 	.high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev),	\
 	.low = (sector)							\
-}
+})
 
 static inline void bkey_init(struct bkey *k)
 {
...
@@ -161,9 +161,9 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
 #ifdef CONFIG_BCACHE_EDEBUG
 bug:
 	mutex_unlock(&b->c->bucket_lock);
-	btree_bug(b, "inconsistent pointer %s: bucket %zu pin %i "
-		  "prio %i gen %i last_gc %i mark %llu gc_gen %i", pkey(k),
-		  PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+	btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+		  pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
 		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
 	return true;
 #endif
@@ -1049,7 +1049,8 @@ void bch_btree_sort_partial(struct btree *b, unsigned start)
 		for (i = start; i <= b->nsets; i++)
 			keys += b->sets[i].data->keys;
 
-		order = roundup_pow_of_two(__set_bytes(b->sets->data, keys)) / PAGE_SIZE;
+		order = roundup_pow_of_two(__set_bytes(b->sets->data,
+						       keys)) / PAGE_SIZE;
 		if (order)
 			order = ilog2(order);
 	}
...
@@ -1021,8 +1021,8 @@ struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
 		goto err_free;
 
 	if (!b) {
-		cache_bug(c, "Tried to allocate bucket"
-			  " that was in btree cache");
+		cache_bug(c,
+			"Tried to allocate bucket that was in btree cache");
 		__bkey_put(c, &k.key);
 		goto retry;
 	}
...
@@ -217,8 +217,8 @@ void bch_data_verify(struct search *s)
 		if (memcmp(p1 + bv->bv_offset,
 			   p2 + bv->bv_offset,
 			   bv->bv_len))
-			printk(KERN_ERR "bcache (%s): verify failed"
-			       " at sector %llu\n",
+			printk(KERN_ERR
+			       "bcache (%s): verify failed at sector %llu\n",
 			       bdevname(dc->bdev, name),
 			       (uint64_t) s->orig_bio->bi_sector);
@@ -525,8 +525,8 @@ static ssize_t btree_fuzz(struct kobject *k, struct kobj_attribute *a,
 		     k = bkey_next(k), l = bkey_next(l))
 			if (bkey_cmp(k, l) ||
 			    KEY_SIZE(k) != KEY_SIZE(l))
-				pr_err("key %zi differs: %s "
-				       "!= %s", (uint64_t *) k - i->d,
+				pr_err("key %zi differs: %s != %s",
+				       (uint64_t *) k - i->d,
 				       pkey(k), pkey(l));
 
 		for (j = 0; j < 3; j++) {
...
@@ -293,9 +293,9 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
 
 		if (n != i->j.seq)
-			pr_err("journal entries %llu-%llu "
-			       "missing! (replaying %llu-%llu)\n",
+			pr_err(
+		"journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
 			       n, i->j.seq - 1, start, end);
 
 		for (k = i->j.start;
 		     k < end(&i->j);
@@ -439,7 +439,7 @@ static void do_journal_discard(struct cache *ca)
 		bio_init(bio);
 		bio->bi_sector		= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
 		bio->bi_bdev		= ca->bdev;
 		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
 		bio->bi_max_vecs	= 1;
...
@@ -183,6 +183,16 @@ err: if (!IS_ERR_OR_NULL(w->private))
 	closure_return(cl);
 }
 
+static bool bucket_cmp(struct bucket *l, struct bucket *r)
+{
+	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+}
+
+static unsigned bucket_heap_top(struct cache *ca)
+{
+	return GC_SECTORS_USED(heap_peek(&ca->heap));
+}
+
 void bch_moving_gc(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
@@ -190,16 +200,6 @@ void bch_moving_gc(struct closure *cl)
 	struct bucket *b;
 	unsigned i;
 
-	bool bucket_cmp(struct bucket *l, struct bucket *r)
-	{
-		return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
-	}
-
-	unsigned top(struct cache *ca)
-	{
-		return GC_SECTORS_USED(heap_peek(&ca->heap));
-	}
-
 	if (!c->copy_gc_enabled)
 		closure_return(cl);
@@ -220,7 +220,7 @@ void bch_moving_gc(struct closure *cl)
 				sectors_to_move += GC_SECTORS_USED(b);
 				heap_add(&ca->heap, b, bucket_cmp);
 			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-				sectors_to_move -= top(ca);
+				sectors_to_move -= bucket_heap_top(ca);
 				sectors_to_move += GC_SECTORS_USED(b);
 				ca->heap.data[0] = b;
@@ -233,7 +233,7 @@ void bch_moving_gc(struct closure *cl)
 			sectors_to_move -= GC_SECTORS_USED(b);
 		}
 
-		ca->gc_move_threshold = top(ca);
+		ca->gc_move_threshold = bucket_heap_top(ca);
 		pr_debug("threshold %u", ca->gc_move_threshold);
 	}
...
@@ -1117,11 +1117,13 @@ static void add_sequential(struct task_struct *t)
 	t->sequential_io = 0;
 }
 
-static void check_should_skip(struct cached_dev *dc, struct search *s)
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 {
-	struct hlist_head *iohash(uint64_t k)
-	{ return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; }
+	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
 
+static void check_should_skip(struct cached_dev *dc, struct search *s)
+{
 	struct cache_set *c = s->op.c;
 	struct bio *bio = &s->bio.bio;
@@ -1162,7 +1164,7 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
 		spin_lock(&dc->io_lock);
 
-		hlist_for_each_entry(i, iohash(bio->bi_sector), hash)
+		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
 			if (i->last == bio->bi_sector &&
 			    time_before(jiffies, i->jiffies))
 				goto found;
@@ -1180,7 +1182,7 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
 		s->task->sequential_io = i->sequential;
 
 		hlist_del(&i->hash);
-		hlist_add_head(&i->hash, iohash(i->last));
+		hlist_add_head(&i->hash, iohash(dc, i->last));
 		list_move_tail(&i->lru, &dc->io_lru);
 
 		spin_unlock(&dc->io_lock);
...
@@ -95,7 +95,8 @@ static KTYPE(bch_stats);
 
 static void scale_accounting(unsigned long data);
 
-void bch_cache_accounting_init(struct cache_accounting *acc, struct closure *parent)
+void bch_cache_accounting_init(struct cache_accounting *acc,
+			       struct closure *parent)
 {
 	kobject_init(&acc->total.kobj,		&bch_stats_ktype);
 	kobject_init(&acc->five_minute.kobj,	&bch_stats_ktype);
...
@@ -526,7 +526,8 @@ void bch_prio_write(struct cache *ca)
 	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
 		long bucket;
 		struct prio_set *p = ca->disk_buckets;
-		struct bucket_disk *d = p->data, *end = d + prios_per_bucket(ca);
+		struct bucket_disk *d = p->data;
+		struct bucket_disk *end = d + prios_per_bucket(ca);
 
 		for (b = ca->buckets + i * prios_per_bucket(ca);
 		     b < ca->buckets + ca->sb.nbuckets && d < end;
@@ -865,8 +866,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
 	if (dc->sb.block_size < c->sb.block_size) {
 		/* Will die */
-		pr_err("Couldn't attach %s: block size "
-		       "less than set's block size", buf);
+		pr_err("Couldn't attach %s: block size less than set's block size",
+		       buf);
 		return -EINVAL;
 	}
...
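Note: the other recurring fix in these hunks is rejoining user-visible format strings that had been split across lines to stay under 80 columns; kernel coding style prefers keeping such a message on one line so it stays greppable, even if the line runs long. A small illustrative sketch of that rule, with a plain printf standing in for pr_err()/printk() and a made-up helper name:

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical helper: the format string stays on a single line so the
 * exact message can be found with grep, even though it exceeds 80 columns.
 */
static void report_verify_failure(const char *dev, uint64_t sector)
{
	printf("bcache (%s): verify failed at sector %llu\n",
	       dev, (unsigned long long)sector);
}

int main(void)
{
	report_verify_failure("sdX", 123456);
	return 0;
}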