Commit 9a4fd6a2 authored by Bart Van Assche, committed by Jens Axboe

md/bcache: Combine two uuid_io() arguments

Improve uniformity in the kernel's handling of the request operation and
flags by passing these as a single argument.

Cc: Coly Li <colyli@suse.de>
Cc: Mingzhe Zou <mingzhe.zou@easystack.cn>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-33-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 4ce4c73f
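For context: a blk_opf_t value (include/linux/blk_types.h) packs the request operation into the low REQ_OP_BITS bits of bi_opf and the request flags into the bits above it, which is why the patched code recovers the operation with opf & REQ_OP_MASK. Below is a minimal userspace sketch of that encoding; the constant values and uuid_io_sketch() are illustrative stand-ins, not the kernel's actual bit assignments or API.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's encoding in
 * include/linux/blk_types.h; exact bit positions are assumptions. */
typedef unsigned int blk_opf_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1u << REQ_OP_BITS) - 1)
#define REQ_OP_READ	0u
#define REQ_OP_WRITE	1u
#define REQ_SYNC	(1u << REQ_OP_BITS)	/* flags live above the op bits */
#define REQ_META	(1u << (REQ_OP_BITS + 1))

/* Models the patched uuid_io(): one argument carries both op and flags. */
static void uuid_io_sketch(blk_opf_t opf)
{
	printf("%s UUIDs (flags 0x%x)\n",
	       (opf & REQ_OP_MASK) == REQ_OP_WRITE ? "wrote" : "read",
	       opf & ~REQ_OP_MASK);
}

int main(void)
{
	uuid_io_sketch(REQ_OP_READ);				/* read, no flags */
	uuid_io_sketch(REQ_OP_WRITE | REQ_SYNC | REQ_META);	/* write + flags */
	return 0;
}

This single-argument encoding is also why the redundant bio_set_op_attrs() call can be dropped in the diff below: the one assignment bio->bi_opf = opf | REQ_SYNC | REQ_META already sets both the operation and the flags.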
@@ -414,8 +414,8 @@ static void uuid_io_unlock(struct closure *cl)
 	up(&c->uuid_write_mutex);
 }
 
-static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
-		    struct bkey *k, struct closure *parent)
+static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
+		    struct closure *parent)
 {
 	struct closure *cl = &c->uuid_write;
 	struct uuid_entry *u;
@@ -429,22 +429,22 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
 	for (i = 0; i < KEY_PTRS(k); i++) {
 		struct bio *bio = bch_bbio_alloc(c);
 
-		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
+		bio->bi_opf = opf | REQ_SYNC | REQ_META;
 		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
 		bio->bi_end_io	= uuid_endio;
 		bio->bi_private = cl;
-		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
 		bch_bio_map(bio, c->uuids);
 
 		bch_submit_bbio(bio, c, k, i);
 
-		if (op != REQ_OP_WRITE)
+		if ((opf & REQ_OP_MASK) != REQ_OP_WRITE)
 			break;
 	}
 
 	bch_extent_to_text(buf, sizeof(buf), k);
-	pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
+	pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
+		 "wrote" : "read", buf);
 
 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
 		if (!bch_is_zero(u->uuid, 16))
@@ -463,7 +463,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 			return "bad uuid pointer";
 
 		bkey_copy(&c->uuid_bucket, k);
-		uuid_io(c, REQ_OP_READ, 0, k, cl);
+		uuid_io(c, REQ_OP_READ, k, cl);
 
 		if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
 			struct uuid_entry_v0	*u0 = (void *) c->uuids;
@@ -511,7 +511,7 @@ static int __uuid_write(struct cache_set *c)
 	size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
 	SET_KEY_SIZE(&k.key, size);
 
-	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
+	uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
 	closure_sync(&cl);
 
 	/* Only one bucket used for uuid write */