Commit 30dc9d9c authored by Li Ruilin, committed by Zheng Zengkai

bcache: provide a switch to bypass all IO requests

euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4LOJ6
CVE: NA

------------------------------

Provide a switch named read_bypass. If enabled, all IO requests will
bypass the cache. This option can be useful when userspace prefetch is
enabled and the cache device has low capacity.
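
As a rough illustration only (simplified, hypothetical names, not the bcache
code itself), the gating rule the patch adds to the read path boils down to:
with read_bypass set, read data is inserted into the cache only for prefetch
requests.

/*
 * Minimal userspace sketch of the decision added by this patch.
 * struct/function names are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

struct cached_dev_cfg {
        unsigned int read_bypass;       /* 0 = cache reads normally, 1 = bypass */
};

struct read_request {
        bool prefetch;                  /* issued by the userspace prefetch path */
};

/* Should data read from the backing device be inserted into the cache? */
static bool should_cache_read(const struct cached_dev_cfg *dc,
                              const struct read_request *req)
{
        return !dc->read_bypass || req->prefetch;
}

int main(void)
{
        struct cached_dev_cfg dc = { .read_bypass = 1 };
        struct read_request normal = { .prefetch = false };
        struct read_request prefetch = { .prefetch = true };

        printf("normal read cached:   %d\n", should_cache_read(&dc, &normal));   /* 0 */
        printf("prefetch read cached: %d\n", should_cache_read(&dc, &prefetch)); /* 1 */
        return 0;
}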
Signed-off-by: Li Ruilin <liruilin4@huawei.com>
Reviewed-by: Luan Jianhai <luanjianhai@huawei.com>
Reviewed-by: Peng Junyi <pengjunyi1@huawei.com>
Acked-by: Xie Xiuqi <xiexiuqi@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Guangxing Deng <dengguangxing@huawei.com>
Reviewed-by: chao song <chao.song@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent a460ae11
@@ -441,6 +441,10 @@ sequential_cutoff
   most recent 128 IOs are tracked so sequential IO can be detected even when
   it isn't all done at once.
 
+read_bypass
+  If enabled, all IO will bypass the cache. This option could be useful when we
+  enable userspace prefetch and the cache device is low capacity.
+
 sequential_merge
   If non zero, bcache keeps a list of the last 128 requests submitted to compare
   against all new requests to determine which new requests are sequential
@@ -376,6 +376,8 @@ struct cached_dev {
         unsigned char           writeback_percent;
         unsigned int            writeback_delay;
 
+        unsigned int            read_bypass;
+
         uint64_t                writeback_rate_target;
         int64_t                 writeback_rate_proportional;
         int64_t                 writeback_rate_integral;
@@ -852,7 +852,7 @@ static void cached_dev_read_done(struct closure *cl)
         if (!s->prefetch)
                 bio_complete(s);
 
-        if (s->iop.bio &&
+        if (s->iop.bio && (!dc->read_bypass || s->prefetch) &&
             !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
                 BUG_ON(!s->iop.replace);
                 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
@@ -897,12 +897,14 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
         s->cache_missed = 1;
 
-        if (s->cache_miss || s->iop.bypass) {
+        if (s->cache_miss || s->iop.bypass ||
+                (dc->read_bypass && !s->prefetch)) {
                 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
                 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                 goto out_submit;
         }
 
+        /* if called from do_readahead, no need to do this */
         if (!(bio->bi_opf & REQ_RAHEAD) &&
             !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
             s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA &&
@@ -1439,6 +1439,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
         bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
 
         dc->sequential_cutoff           = 4 << 20;
+        dc->read_bypass                 = 0;
 
         for (io = dc->io; io < dc->io + RECENT_IO; io++) {
                 list_add(&io->lru, &dc->io_lru);
@@ -108,6 +108,7 @@ rw_attribute(congested_read_threshold_us);
 rw_attribute(congested_write_threshold_us);
 
 rw_attribute(sequential_cutoff);
+rw_attribute(read_bypass);
 rw_attribute(data_csum);
 rw_attribute(cache_mode);
 rw_attribute(readahead_cache_policy);
@@ -252,6 +253,7 @@ SHOW(__bch_cached_dev)
         var_printf(partial_stripes_expensive,   "%u");
 
         var_hprint(sequential_cutoff);
+        var_print(read_bypass);
         var_hprint(readahead);
 
         sysfs_print(running,            atomic_read(&dc->running));
@@ -346,6 +348,9 @@ STORE(__cached_dev)
         sysfs_strtoul_clamp(sequential_cutoff,
                             dc->sequential_cutoff,
                             0, UINT_MAX);
+        sysfs_strtoul_clamp(read_bypass,
+                            dc->read_bypass,
+                            0, 1);
         d_strtoi_h(readahead);
 
         if (attr == &sysfs_clear_stats)
@@ -511,6 +516,7 @@ static struct attribute *bch_cached_dev_files[] = {
         &sysfs_stripe_size,
         &sysfs_partial_stripes_expensive,
         &sysfs_sequential_cutoff,
+        &sysfs_read_bypass,
         &sysfs_clear_stats,
         &sysfs_running,
         &sysfs_state,
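
Since the new attribute is registered in bch_cached_dev_files, it should show
up alongside sequential_cutoff and friends under the backing device's sysfs
directory. Below is a hedged usage sketch; it assumes a backing device
registered as bcache0 and the usual /sys/block/<dev>/bcache/ attribute path.

/*
 * Usage sketch: enable read_bypass from userspace by writing "1" to the
 * sysfs attribute. Path is an assumption; adjust for your device name.
 */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/bcache0/bcache/read_bypass";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen read_bypass");
                return 1;
        }
        fputs("1\n", f);        /* 1 = bypass the cache for non-prefetch reads */
        return fclose(f) ? 1 : 0;
}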