Commit 688afe52 authored by Zheng Zengkai

Revert "bcache: provide a switch to bypass all IO requests"

euleros/rtos inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4LOJ6
CVE: NA

--------------------------------

This patch set introduces many conflicts while backporting mainline
bcache patches, so revert it temporarily.

This reverts commit 30dc9d9c.
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
Parent 819d0d06
@@ -441,10 +441,6 @@ sequential_cutoff
   most recent 128 IOs are tracked so sequential IO can be detected even when
   it isn't all done at once.
 
-read_bypass
-  If enbale, all IO will bypass the cache. This option could be useful when we
-  enable userspace prefetch and the cache device is low capacity.
-
 sequential_merge
   If non zero, bcache keeps a list of the last 128 requests submitted to compare
   against all new requests to determine which new requests are sequential
......
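For reference, the switch that this hunk removes from the documentation was exposed as a per-backing-device sysfs attribute. Below is a minimal userspace sketch, not part of this commit, of how such a knob could have been toggled before the revert; the path /sys/block/bcache0/bcache/read_bypass is an assumed example and only exists on kernels still carrying the reverted patch.

/*
 * Hypothetical userspace helper: enable the (now reverted) read_bypass
 * switch by writing "1" to its sysfs attribute. The device path below is
 * an example; it is only present on kernels with the reverted patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/block/bcache0/bcache/read_bypass";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open read_bypass");
                return 1;
        }
        /* The store handler clamped the value to 0..1, so "1" enables bypass. */
        if (write(fd, "1", 1) != 1) {
                perror("write read_bypass");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}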
@@ -376,8 +376,6 @@ struct cached_dev {
        unsigned char           writeback_percent;
        unsigned int            writeback_delay;
 
-       unsigned int            read_bypass;
-
        uint64_t                writeback_rate_target;
        int64_t                 writeback_rate_proportional;
        int64_t                 writeback_rate_integral;
......
@@ -852,7 +852,7 @@ static void cached_dev_read_done(struct closure *cl)
        if (!s->prefetch)
                bio_complete(s);
 
-       if (s->iop.bio && (!dc->read_bypass || s->prefetch) &&
+       if (s->iop.bio &&
            !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
                BUG_ON(!s->iop.replace);
                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
@@ -897,14 +897,12 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        s->cache_missed = 1;
 
-       if (s->cache_miss || s->iop.bypass ||
-           (dc->read_bypass && !s->prefetch)) {
+       if (s->cache_miss || s->iop.bypass) {
                miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                goto out_submit;
        }
 
-       /* if called form do_readahead, no need to do this */
        if (!(bio->bi_opf & REQ_RAHEAD) &&
            !(bio->bi_opf & (REQ_META|REQ_PRIO) ) &&
            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA &&
......
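The two request.c hunks above drop the read-path checks: before the revert, a read on a device with read_bypass set that did not come from the prefetch path skipped cache insertion. A small standalone C model of that removed condition, with field names borrowed from the hunks purely for illustration, is sketched below.

/*
 * Simplified userspace model (not kernel code) of the condition removed by
 * this revert: with read_bypass set, ordinary reads skip cache insertion,
 * while prefetch-issued reads may still populate the cache. Field names
 * mirror the struct search / cached_dev fields shown in the hunks.
 */
#include <stdbool.h>
#include <stdio.h>

struct read_ctx {
        bool cache_miss;    /* a miss was already recorded for this search */
        bool bypass;        /* request already marked to bypass the cache  */
        bool prefetch;      /* request was issued by the prefetch path     */
        bool read_bypass;   /* per-device switch removed by this revert    */
};

/* Mirrors the pre-revert check at the top of cached_dev_cache_miss(). */
static bool skip_cache_insert(const struct read_ctx *s)
{
        return s->cache_miss || s->bypass ||
               (s->read_bypass && !s->prefetch);
}

int main(void)
{
        struct read_ctx normal_read   = { .read_bypass = true };
        struct read_ctx prefetch_read = { .read_bypass = true, .prefetch = true };

        printf("normal read skips cache insert:   %d\n",
               skip_cache_insert(&normal_read));
        printf("prefetch read skips cache insert: %d\n",
               skip_cache_insert(&prefetch_read));
        return 0;
}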
@@ -1439,7 +1439,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
 
        dc->sequential_cutoff           = 4 << 20;
-       dc->read_bypass = 0;
 
        for (io = dc->io; io < dc->io + RECENT_IO; io++) {
                list_add(&io->lru, &dc->io_lru);
......
@@ -108,7 +108,6 @@ rw_attribute(congested_read_threshold_us);
 rw_attribute(congested_write_threshold_us);
 
 rw_attribute(sequential_cutoff);
-rw_attribute(read_bypass);
 rw_attribute(data_csum);
 rw_attribute(cache_mode);
 rw_attribute(readahead_cache_policy);
@@ -253,7 +252,6 @@ SHOW(__bch_cached_dev)
        var_printf(partial_stripes_expensive,   "%u");
 
        var_hprint(sequential_cutoff);
-       var_print(read_bypass);
        var_hprint(readahead);
 
        sysfs_print(running,            atomic_read(&dc->running));
@@ -348,9 +346,6 @@ STORE(__cached_dev)
        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
-       sysfs_strtoul_clamp(read_bypass,
-                           dc->read_bypass,
-                           0, 1);
        d_strtoi_h(readahead);
 
        if (attr == &sysfs_clear_stats)
@@ -516,7 +511,6 @@ static struct attribute *bch_cached_dev_files[] = {
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
-       &sysfs_read_bypass,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
......