Commit 169ef1cf authored by Kent Overstreet, committed by Jens Axboe

bcache: Don't export utility code, prefix with bch_

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: linux-bcache@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 0b6ef416
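
The change itself is mechanical: the helpers in util.c lose their EXPORT_SYMBOL()/EXPORT_SYMBOL_GPL() lines and gain a bch_ prefix so they no longer collide with generic kernel names, and every in-tree caller is updated to match. As an illustrative sketch only (not part of the patch), a caller checksumming data with the renamed CRC helpers declared in util.h would look like this; example_checksum() and example_checksum_chunks() are hypothetical names used purely for illustration:

/*
 * Sketch only: bch_crc64()/bch_crc64_update() are the renamed helpers
 * declared in util.h below; the two example_* functions are hypothetical.
 */
#include <linux/types.h>

uint64_t bch_crc64(const void *data, size_t len);
uint64_t bch_crc64_update(uint64_t crc, const void *data, size_t len);

static uint64_t example_checksum(const void *buf, size_t len)
{
	/* One-shot CRC of a whole buffer. */
	return bch_crc64(buf, len);
}

static uint64_t example_checksum_chunks(const void *a, size_t a_len,
					const void *b, size_t b_len)
{
	/*
	 * Incremental CRC over two chunks, mirroring how bio_csum() in
	 * request.c folds each bio segment into a running value.
	 */
	uint64_t crc = 0xffffffffffffffff;

	crc = bch_crc64_update(crc, a, a_len);
	crc = bch_crc64_update(crc, b, b_len);

	return crc ^ 0xffffffffffffffff;
}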
@@ -1033,7 +1033,7 @@ static inline void bkey_init(struct bkey *k)
  * jset: The checksum is _always_ the first 8 bytes of these structs
  */
 #define csum_set(i) \
-	crc64(((void *) (i)) + sizeof(uint64_t), \
+	bch_crc64(((void *) (i)) + sizeof(uint64_t), \
 	      ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
 /* Error handling macros */
...
@@ -1026,7 +1026,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 	if (!start) {
 		spin_lock(&b->c->sort_time_lock);
-		time_stats_update(&b->c->sort_time, start_time);
+		bch_time_stats_update(&b->c->sort_time, start_time);
 		spin_unlock(&b->c->sort_time_lock);
 	}
 }
@@ -1076,7 +1076,7 @@ void bch_btree_sort_into(struct btree *b, struct btree *new)
 	btree_mergesort(b, new->sets->data, &iter, false, true);
 	spin_lock(&b->c->sort_time_lock);
-	time_stats_update(&b->c->sort_time, start_time);
+	bch_time_stats_update(&b->c->sort_time, start_time);
 	spin_unlock(&b->c->sort_time_lock);
 	bkey_copy_key(&new->key, &b->key);
...
@@ -129,7 +129,7 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i)
 	uint64_t crc = b->key.ptr[0];
 	void *data = (void *) i + 8, *end = end(i);
-	crc = crc64_update(crc, data, end - data);
+	crc = bch_crc64_update(crc, data, end - data);
 	return crc ^ 0xffffffffffffffff;
 }
@@ -231,7 +231,7 @@ void bch_btree_read_done(struct closure *cl)
 	mutex_unlock(&b->c->fill_lock);
 	spin_lock(&b->c->btree_read_time_lock);
-	time_stats_update(&b->c->btree_read_time, b->io_start_time);
+	bch_time_stats_update(&b->c->btree_read_time, b->io_start_time);
 	spin_unlock(&b->c->btree_read_time_lock);
 	smp_wmb(); /* read_done is our write lock */
@@ -259,7 +259,7 @@ void bch_btree_read(struct btree *b)
 	b->bio->bi_rw = REQ_META|READ_SYNC;
 	b->bio->bi_size = KEY_SIZE(&b->key) << 9;
-	bio_map(b->bio, b->sets[0].data);
+	bch_bio_map(b->bio, b->sets[0].data);
 	pr_debug("%s", pbtree(b));
 	trace_bcache_btree_read(b->bio);
@@ -327,12 +327,12 @@ static void do_btree_write(struct btree *b)
 	btree_bio_init(b);
 	b->bio->bi_rw = REQ_META|WRITE_SYNC;
 	b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
-	bio_map(b->bio, i);
+	bch_bio_map(b->bio, i);
 	bkey_copy(&k.key, &b->key);
 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
-	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
+	if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) {
 		int j;
 		struct bio_vec *bv;
 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -347,7 +347,7 @@ static void do_btree_write(struct btree *b)
 		continue_at(cl, btree_write_done, NULL);
 	} else {
 		b->bio->bi_vcnt = 0;
-		bio_map(b->bio, i);
+		bch_bio_map(b->bio, i);
 		trace_bcache_btree_write(b->bio);
 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
@@ -815,7 +815,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
 void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
 {
 	if (c->try_harder == cl) {
-		time_stats_update(&c->try_harder_time, c->try_harder_start);
+		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
 		c->try_harder = NULL;
 		__closure_wake_up(&c->try_wait);
 	}
@@ -1536,7 +1536,7 @@ static void bch_btree_gc(struct closure *cl)
 	available = bch_btree_gc_finish(c);
-	time_stats_update(&c->btree_gc_time, start_time);
+	bch_time_stats_update(&c->btree_gc_time, start_time);
 	stats.key_bytes *= sizeof(uint64_t);
 	stats.dirty <<= 9;
@@ -2007,7 +2007,7 @@ static int btree_split(struct btree *b, struct btree_op *op)
 	rw_unlock(true, n1);
 	btree_node_free(b, op);
-	time_stats_update(&b->c->btree_split_time, start_time);
+	bch_time_stats_update(&b->c->btree_split_time, start_time);
 	return 0;
 err_free2:
...
@@ -200,7 +200,7 @@ void bch_data_verify(struct search *s)
 	if (!check)
 		return;
-	if (bio_alloc_pages(check, GFP_NOIO))
+	if (bch_bio_alloc_pages(check, GFP_NOIO))
 		goto out_put;
 	check->bi_rw = READ_SYNC;
...
@@ -54,7 +54,7 @@ reread: left = ca->sb.bucket_size - offset;
 		bio->bi_end_io = journal_read_endio;
 		bio->bi_private = &op->cl;
-		bio_map(bio, data);
+		bch_bio_map(bio, data);
 		closure_bio_submit(bio, &op->cl, ca);
 		closure_sync(&op->cl);
@@ -621,7 +621,7 @@ static void journal_write_unlocked(struct closure *cl)
 		bio->bi_end_io = journal_write_endio;
 		bio->bi_private = w;
-		bio_map(bio, w->data);
+		bch_bio_map(bio, w->data);
 		trace_bcache_journal_write(bio);
 		bio_list_add(&list, bio);
...
@@ -85,7 +85,7 @@ static void moving_init(struct moving_io *io)
 			       PAGE_SECTORS);
 	bio->bi_private = &io->s.cl;
 	bio->bi_io_vec = bio->bi_inline_vecs;
-	bio_map(bio, NULL);
+	bch_bio_map(bio, NULL);
 }
 static void write_moving(struct closure *cl)
@@ -159,7 +159,7 @@ static void read_moving(struct closure *cl)
 		bio->bi_rw = READ;
 		bio->bi_end_io = read_moving_endio;
-		if (bio_alloc_pages(bio, GFP_KERNEL))
+		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
 			goto err;
 		pr_debug("%s", pkey(&w->key));
...
@@ -58,8 +58,8 @@ static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
 			      char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	char tmp[1024];
-	int len = snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
-				      cgroup_to_bcache(cgrp)->cache_mode + 1);
+	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
+					  cgroup_to_bcache(cgrp)->cache_mode + 1);
 	if (len < 0)
 		return len;
@@ -70,7 +70,7 @@ static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
 static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
 			    const char *buf)
 {
-	int v = read_string_list(buf, bch_cache_modes);
+	int v = bch_read_string_list(buf, bch_cache_modes);
 	if (v < 0)
 		return v;
@@ -205,7 +205,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 	bio_for_each_segment(bv, bio, i) {
 		void *d = kmap(bv->bv_page) + bv->bv_offset;
-		csum = crc64_update(csum, d, bv->bv_len);
+		csum = bch_crc64_update(csum, d, bv->bv_len);
 		kunmap(bv->bv_page);
 	}
@@ -835,7 +835,7 @@ static void request_read_done(struct closure *cl)
 		s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
 		s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
 		s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
-		bio_map(s->op.cache_bio, NULL);
+		bch_bio_map(s->op.cache_bio, NULL);
 		src = bio_iovec(s->op.cache_bio);
 		dst = bio_iovec(s->cache_miss);
@@ -962,8 +962,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
 		goto out_put;
-	bio_map(s->op.cache_bio, NULL);
-	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+	bch_bio_map(s->op.cache_bio, NULL);
+	if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
 		goto out_put;
 	s->cache_miss = miss;
...
@@ -142,7 +142,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
 		goto err;
 	err = "Bad UUID";
-	if (is_zero(sb->uuid, 16))
+	if (bch_is_zero(sb->uuid, 16))
 		goto err;
 	err = "Unsupported superblock version";
@@ -170,7 +170,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
 		goto out;
 	err = "Bad UUID";
-	if (is_zero(sb->set_uuid, 16))
+	if (bch_is_zero(sb->set_uuid, 16))
 		goto err;
 	err = "Bad cache device number in set";
@@ -218,7 +218,7 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
 	bio->bi_sector = SB_SECTOR;
 	bio->bi_rw = REQ_SYNC|REQ_META;
 	bio->bi_size = SB_SIZE;
-	bio_map(bio, NULL);
+	bch_bio_map(bio, NULL);
 	out->offset = cpu_to_le64(sb->offset);
 	out->version = cpu_to_le64(sb->version);
@@ -332,7 +332,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
 		bio->bi_end_io = uuid_endio;
 		bio->bi_private = cl;
-		bio_map(bio, c->uuids);
+		bch_bio_map(bio, c->uuids);
 		bch_submit_bbio(bio, c, k, i);
@@ -344,7 +344,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
 		 pkey(&c->uuid_bucket));
 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
-		if (!is_zero(u->uuid, 16))
+		if (!bch_is_zero(u->uuid, 16))
 			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
 				 u - c->uuids, u->uuid, u->label,
 				 u->first_reg, u->last_reg, u->invalidated);
@@ -491,7 +491,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 	bio->bi_end_io = prio_endio;
 	bio->bi_private = ca;
-	bio_map(bio, ca->disk_buckets);
+	bch_bio_map(bio, ca->disk_buckets);
 	closure_bio_submit(bio, &ca->prio, ca);
 	closure_sync(cl);
@@ -538,7 +538,7 @@ void bch_prio_write(struct cache *ca)
 		p->next_bucket = ca->prio_buckets[i + 1];
 		p->magic = pset_magic(ca);
-		p->csum = crc64(&p->magic, bucket_bytes(ca) - 8);
+		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
 		BUG_ON(bucket == -1);
@@ -585,7 +585,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 			prio_io(ca, bucket, READ_SYNC);
-			if (p->csum != crc64(&p->magic, bucket_bytes(ca) - 8))
+			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
 				pr_warn("bad csum reading priorities");
 			if (p->magic != pset_magic(ca))
@@ -898,7 +898,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
 	 sysfs_remove_file(&dc->kobj, &sysfs_attach);
 	 */
-	if (is_zero(u->uuid, 16)) {
+	if (bch_is_zero(u->uuid, 16)) {
 		struct closure cl;
 		closure_init_stack(&cl);
...
@@ -105,9 +105,9 @@ SHOW(__bch_cached_dev)
 #define var(stat) (dc->stat)
 	if (attr == &sysfs_cache_mode)
-		return snprint_string_list(buf, PAGE_SIZE,
-					   bch_cache_modes + 1,
-					   BDEV_CACHE_MODE(&dc->sb));
+		return bch_snprint_string_list(buf, PAGE_SIZE,
+					       bch_cache_modes + 1,
+					       BDEV_CACHE_MODE(&dc->sb));
 	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
 	var_printf(verify, "%i");
@@ -126,10 +126,10 @@ SHOW(__bch_cached_dev)
 		char dirty[20];
 		char derivative[20];
 		char target[20];
-		hprint(dirty,
-		       atomic_long_read(&dc->disk.sectors_dirty) << 9);
-		hprint(derivative, dc->writeback_rate_derivative << 9);
-		hprint(target, dc->writeback_rate_target << 9);
+		bch_hprint(dirty,
+			   atomic_long_read(&dc->disk.sectors_dirty) << 9);
+		bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+		bch_hprint(target, dc->writeback_rate_target << 9);
 		return sprintf(buf,
 			       "rate:\t\t%u\n"
@@ -202,7 +202,7 @@ STORE(__cached_dev)
 			bch_cached_dev_run(dc);
 	if (attr == &sysfs_cache_mode) {
-		ssize_t v = read_string_list(buf, bch_cache_modes + 1);
+		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
 		if (v < 0)
 			return v;
@@ -224,7 +224,7 @@ STORE(__cached_dev)
 	}
 	if (attr == &sysfs_attach) {
-		if (parse_uuid(buf, dc->sb.set_uuid) < 16)
+		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
 			return -EINVAL;
 		list_for_each_entry(c, &bch_cache_sets, list) {
@@ -657,9 +657,9 @@ SHOW(__bch_cache)
 					       ((size_t) ca->sb.nbuckets));
 	if (attr == &sysfs_cache_replacement_policy)
-		return snprint_string_list(buf, PAGE_SIZE,
-					   cache_replacement_policies,
-					   CACHE_REPLACEMENT(&ca->sb));
+		return bch_snprint_string_list(buf, PAGE_SIZE,
+					       cache_replacement_policies,
+					       CACHE_REPLACEMENT(&ca->sb));
 	if (attr == &sysfs_priority_stats) {
 		int cmp(const void *l, const void *r)
@@ -747,7 +747,7 @@ STORE(__bch_cache)
 	}
 	if (attr == &sysfs_cache_replacement_policy) {
-		ssize_t v = read_string_list(buf, cache_replacement_policies);
+		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);
 		if (v < 0)
 			return v;
...
@@ -62,7 +62,7 @@ do { \
 #define sysfs_hprint(file, val) \
 do { \
 	if (attr == &sysfs_ ## file) { \
-		ssize_t ret = hprint(buf, val); \
+		ssize_t ret = bch_hprint(buf, val); \
 		strcat(buf, "\n"); \
 		return ret + 1; \
 	} \
...
@@ -19,7 +19,7 @@
 #define simple_strtouint(c, end, base) simple_strtoul(c, end, base)
 #define STRTO_H(name, type) \
-int name ## _h(const char *cp, type *res) \
+int bch_ ## name ## _h(const char *cp, type *res) \
 { \
 	int u = 0; \
 	char *e; \
@@ -67,14 +67,13 @@ int name ## _h(const char *cp, type *res) \
 	*res = i; \
 	return 0; \
 } \
-EXPORT_SYMBOL_GPL(name ## _h);
 STRTO_H(strtoint, int)
 STRTO_H(strtouint, unsigned int)
 STRTO_H(strtoll, long long)
 STRTO_H(strtoull, unsigned long long)
-ssize_t hprint(char *buf, int64_t v)
+ssize_t bch_hprint(char *buf, int64_t v)
 {
 	static const char units[] = "?kMGTPEZY";
 	char dec[3] = "";
@@ -93,9 +92,8 @@ ssize_t hprint(char *buf, int64_t v)
 	return sprintf(buf, "%lli%s%c", v, dec, units[u]);
 }
-EXPORT_SYMBOL_GPL(hprint);
-ssize_t snprint_string_list(char *buf, size_t size, const char * const list[],
-			    size_t selected)
+ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+				size_t selected)
 {
 	char *out = buf;
@@ -108,9 +106,8 @@ ssize_t snprint_string_list(char *buf, size_t size, const char * const list[],
 	out[-1] = '\n';
 	return out - buf;
 }
-EXPORT_SYMBOL_GPL(snprint_string_list);
-ssize_t read_string_list(const char *buf, const char * const list[])
+ssize_t bch_read_string_list(const char *buf, const char * const list[])
 {
 	size_t i;
 	char *s, *d = kstrndup(buf, PAGE_SIZE - 1, GFP_KERNEL);
@@ -130,9 +127,8 @@ ssize_t read_string_list(const char *buf, const char * const list[])
 	return i;
 }
-EXPORT_SYMBOL_GPL(read_string_list);
-bool is_zero(const char *p, size_t n)
+bool bch_is_zero(const char *p, size_t n)
 {
 	size_t i;
@@ -141,9 +137,8 @@ bool is_zero(const char *p, size_t n)
 			return false;
 	return true;
 }
-EXPORT_SYMBOL_GPL(is_zero);
-int parse_uuid(const char *s, char *uuid)
+int bch_parse_uuid(const char *s, char *uuid)
 {
 	size_t i, j, x;
 	memset(uuid, 0, 16);
@@ -170,9 +165,8 @@ int parse_uuid(const char *s, char *uuid)
 	}
 	return i;
 }
-EXPORT_SYMBOL_GPL(parse_uuid);
-void time_stats_update(struct time_stats *stats, uint64_t start_time)
+void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
 {
 	uint64_t now = local_clock();
 	uint64_t duration = time_after64(now, start_time)
@@ -195,9 +189,8 @@ void time_stats_update(struct time_stats *stats, uint64_t start_time)
 	stats->last = now ?: 1;
 }
-EXPORT_SYMBOL_GPL(time_stats_update);
-unsigned next_delay(struct ratelimit *d, uint64_t done)
+unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
 {
 	uint64_t now = local_clock();
@@ -207,9 +200,8 @@ unsigned next_delay(struct ratelimit *d, uint64_t done)
 		? div_u64(d->next - now, NSEC_PER_SEC / HZ)
 		: 0;
 }
-EXPORT_SYMBOL_GPL(next_delay);
-void bio_map(struct bio *bio, void *base)
+void bch_bio_map(struct bio *bio, void *base)
 {
 	size_t size = bio->bi_size;
 	struct bio_vec *bv = bio->bi_io_vec;
@@ -235,9 +227,8 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
 		size -= bv->bv_len;
 	}
 }
-EXPORT_SYMBOL_GPL(bio_map);
-int bio_alloc_pages(struct bio *bio, gfp_t gfp)
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp)
 {
 	int i;
 	struct bio_vec *bv;
@@ -253,7 +244,6 @@ int bio_alloc_pages(struct bio *bio, gfp_t gfp)
 	return 0;
 }
-EXPORT_SYMBOL_GPL(bio_alloc_pages);
 /*
  * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
@@ -365,7 +355,7 @@ static const uint64_t crc_table[256] = {
 	0x9AFCE626CE85B507
 };
-uint64_t crc64_update(uint64_t crc, const void *_data, size_t len)
+uint64_t bch_crc64_update(uint64_t crc, const void *_data, size_t len)
 {
 	const unsigned char *data = _data;
@@ -376,14 +366,12 @@ uint64_t crc64_update(uint64_t crc, const void *_data, size_t len)
 	return crc;
 }
-EXPORT_SYMBOL(crc64_update);
-uint64_t crc64(const void *data, size_t len)
+uint64_t bch_crc64(const void *data, size_t len)
 {
 	uint64_t crc = 0xffffffffffffffff;
-	crc = crc64_update(crc, data, len);
+	crc = bch_crc64_update(crc, data, len);
 	return crc ^ 0xffffffffffffffff;
 }
-EXPORT_SYMBOL(crc64);
@@ -307,42 +307,42 @@ do { \
 #define ANYSINT_MAX(t) \
 	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
-int strtoint_h(const char *, int *);
-int strtouint_h(const char *, unsigned int *);
-int strtoll_h(const char *, long long *);
-int strtoull_h(const char *, unsigned long long *);
+int bch_strtoint_h(const char *, int *);
+int bch_strtouint_h(const char *, unsigned int *);
+int bch_strtoll_h(const char *, long long *);
+int bch_strtoull_h(const char *, unsigned long long *);
-static inline int strtol_h(const char *cp, long *res)
+static inline int bch_strtol_h(const char *cp, long *res)
 {
 #if BITS_PER_LONG == 32
-	return strtoint_h(cp, (int *) res);
+	return bch_strtoint_h(cp, (int *) res);
 #else
-	return strtoll_h(cp, (long long *) res);
+	return bch_strtoll_h(cp, (long long *) res);
 #endif
 }
-static inline int strtoul_h(const char *cp, long *res)
+static inline int bch_strtoul_h(const char *cp, long *res)
 {
 #if BITS_PER_LONG == 32
-	return strtouint_h(cp, (unsigned int *) res);
+	return bch_strtouint_h(cp, (unsigned int *) res);
 #else
-	return strtoull_h(cp, (unsigned long long *) res);
+	return bch_strtoull_h(cp, (unsigned long long *) res);
 #endif
 }
 #define strtoi_h(cp, res) \
 	(__builtin_types_compatible_p(typeof(*res), int) \
-	? strtoint_h(cp, (void *) res) \
+	? bch_strtoint_h(cp, (void *) res) \
 	: __builtin_types_compatible_p(typeof(*res), long) \
-	? strtol_h(cp, (void *) res) \
+	? bch_strtol_h(cp, (void *) res) \
 	: __builtin_types_compatible_p(typeof(*res), long long) \
-	? strtoll_h(cp, (void *) res) \
+	? bch_strtoll_h(cp, (void *) res) \
 	: __builtin_types_compatible_p(typeof(*res), unsigned int) \
-	? strtouint_h(cp, (void *) res) \
+	? bch_strtouint_h(cp, (void *) res) \
 	: __builtin_types_compatible_p(typeof(*res), unsigned long) \
-	? strtoul_h(cp, (void *) res) \
+	? bch_strtoul_h(cp, (void *) res) \
 	: __builtin_types_compatible_p(typeof(*res), unsigned long long)\
-	? strtoull_h(cp, (void *) res) : -EINVAL)
+	? bch_strtoull_h(cp, (void *) res) : -EINVAL)
 #define strtoul_safe(cp, var) \
 ({ \
@@ -379,15 +379,15 @@ static inline int strtoul_h(const char *cp, long *res)
 	__builtin_types_compatible_p(typeof(var), const char *) \
 	? "%s\n" : "%i\n", var)
-ssize_t hprint(char *buf, int64_t v);
+ssize_t bch_hprint(char *buf, int64_t v);
-bool is_zero(const char *p, size_t n);
+bool bch_is_zero(const char *p, size_t n);
-int parse_uuid(const char *s, char *uuid);
+int bch_parse_uuid(const char *s, char *uuid);
-ssize_t snprint_string_list(char *buf, size_t size, const char * const list[],
-			    size_t selected);
+ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+				size_t selected);
-ssize_t read_string_list(const char *buf, const char * const list[]);
+ssize_t bch_read_string_list(const char *buf, const char * const list[]);
 struct time_stats {
 	/*
@@ -400,7 +400,7 @@ struct time_stats {
 	uint64_t last;
 };
-void time_stats_update(struct time_stats *stats, uint64_t time);
+void bch_time_stats_update(struct time_stats *stats, uint64_t time);
 #define NSEC_PER_ns 1L
 #define NSEC_PER_us NSEC_PER_USEC
@@ -462,7 +462,7 @@ static inline void ratelimit_reset(struct ratelimit *d)
 	d->next = local_clock();
 }
-unsigned next_delay(struct ratelimit *d, uint64_t done);
+unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
 #define __DIV_SAFE(n, d, zero) \
 ({ \
@@ -568,9 +568,9 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 #define bio_end(bio) ((bio)->bi_sector + bio_sectors(bio))
-void bio_map(struct bio *bio, void *base);
+void bch_bio_map(struct bio *bio, void *base);
-int bio_alloc_pages(struct bio *bio, gfp_t gfp);
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp);
 static inline sector_t bdev_sectors(struct block_device *bdev)
 {
@@ -583,7 +583,7 @@ do { \
 	bch_generic_make_request(bio, &(dev)->bio_split_hook); \
 } while (0)
-uint64_t crc64_update(uint64_t, const void *, size_t);
-uint64_t crc64(const void *, size_t);
+uint64_t bch_crc64_update(uint64_t, const void *, size_t);
+uint64_t bch_crc64(const void *, size_t);
 #endif /* _BCACHE_UTIL_H */
@@ -95,7 +95,7 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 	    !dc->writeback_percent)
 		return 0;
-	return next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+	return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
 }
 /* Background writeback */
@@ -118,7 +118,7 @@ static void dirty_init(struct keybuf_key *w)
 	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
 	bio->bi_private = w;
 	bio->bi_io_vec = bio->bi_inline_vecs;
-	bio_map(bio, NULL);
+	bch_bio_map(bio, NULL);
 }
 static void refill_dirty(struct closure *cl)
@@ -349,7 +349,7 @@ static void read_dirty(struct closure *cl)
 		io->bio.bi_rw = READ;
 		io->bio.bi_end_io = read_dirty_endio;
-		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
+		if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
 			goto err_free;
 		pr_debug("%s", pkey(&w->key));
...
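
As a further hedged sketch (again not part of the patch), the renamed string-list helpers pair up the way the sysfs and cgroup handlers above use them: bch_snprint_string_list() renders a NULL-terminated list of options into a buffer, highlighting the selected entry, and bch_read_string_list() maps a written string back to its index or a negative errno. The my_* names below are hypothetical; only the bch_* signatures come from util.h:

/*
 * Sketch only: the bch_* prototypes are taken from util.h; my_modes,
 * my_mode_show() and my_mode_store() are hypothetical.
 */
#include <linux/types.h>

ssize_t bch_snprint_string_list(char *buf, size_t size,
				const char * const list[], size_t selected);
ssize_t bch_read_string_list(const char *buf, const char * const list[]);

static const char * const my_modes[] = { "off", "on", "auto", NULL };

/* "show" side: print the option list with the current selection marked. */
static ssize_t my_mode_show(char *buf, size_t size, size_t selected)
{
	return bch_snprint_string_list(buf, size, my_modes, selected);
}

/* "store" side: a negative return means the string matched no entry. */
static ssize_t my_mode_store(const char *buf)
{
	return bch_read_string_list(buf, my_modes);
}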