Commit 819d0d06 authored by Zheng Zengkai

Revert "bcache: inflight prefetch requests block overlapped normal requests"

euleros/rtos inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4LOJ6
CVE: NA

--------------------------------

This patch introduces many conflicts when backporting mainline
bcache patches; revert it temporarily.

This reverts commit 08a3ac0e.
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
Parent c81a4f07
@@ -31,12 +31,6 @@ int acache_prefetch_workers = 1000;
module_param_named(prefetch_workers, acache_prefetch_workers, int, 0444);
MODULE_PARM_DESC(prefetch_workers, "num of workers for processing prefetch requests");
struct inflight_list_head {
struct list_head entry;
spinlock_t io_lock;
bool initialized;
};
struct prefetch_worker {
struct acache_info s;
struct work_struct work;
@@ -56,8 +50,6 @@ struct acache_device {
struct acache_circ *acache_info_circ;
struct inflight_list_head inflight_list;
struct workqueue_struct *wq;
struct prefetch_worker *prefetch_workers;
struct list_head prefetch_workers_free;
@@ -303,7 +295,6 @@ int acache_dev_init(void)
int major;
struct device *dev;
inflight_list_ops.init();
major = alloc_chrdev_region(&adev.devno, 0, ACACHE_NR_DEVS, DEV_NAME);
if (major < 0) {
pr_err("failed to allocate chrdev region: %d\n", major);
@@ -386,7 +377,6 @@ int acache_dev_init(void)
fail_class:
unregister_chrdev_region(adev.devno, ACACHE_NR_DEVS);
fail_allocdev:
inflight_list_ops.exit();
return ret;
}
@@ -405,112 +395,9 @@ void acache_dev_exit(void)
kfree(adev.mem_regionp);
unregister_chrdev_region(adev.devno, ACACHE_NR_DEVS);
class_destroy(adev.class);
inflight_list_ops.exit();
kfree(adev.prefetch_workers);
}
static struct search *__inflight_list_lookup_locked(struct search *s)
{
struct search *iter;
struct bio *bio, *sbio;
if (!adev.inflight_list.initialized)
return NULL;
sbio = &s->bio.bio;
list_for_each_entry(iter, &adev.inflight_list.entry, list_node) {
bio = &iter->bio.bio;
if (sbio->bi_disk == bio->bi_disk &&
sbio->bi_iter.bi_sector < bio_end_sector(bio) &&
bio_end_sector(sbio) > bio->bi_iter.bi_sector) {
return iter;
}
}
return NULL;
}
static void inflight_list_init(void)
{
INIT_LIST_HEAD(&adev.inflight_list.entry);
spin_lock_init(&adev.inflight_list.io_lock);
adev.inflight_list.initialized = true;
}
static void inflight_list_exit(void)
{
if (!list_empty(&adev.inflight_list.entry))
pr_err("existing with inflight list not empty\n");
}
static int inflight_list_insert(struct search *s)
{
if (!adev.inflight_list.initialized)
return -1;
init_waitqueue_head(&s->wqh);
spin_lock(&adev.inflight_list.io_lock);
list_add_tail(&s->list_node, &adev.inflight_list.entry);
spin_unlock(&adev.inflight_list.io_lock);
trace_bcache_inflight_list_insert(s->d, s->orig_bio);
return 0;
}
static int inflight_list_remove(struct search *s)
{
if (!adev.inflight_list.initialized)
return -1;
spin_lock(&adev.inflight_list.io_lock);
list_del_init(&s->list_node);
spin_unlock(&adev.inflight_list.io_lock);
wake_up_interruptible_all(&s->wqh);
trace_bcache_inflight_list_remove(s->d, s->orig_bio);
return 0;
}
static bool inflight_list_wait(struct search *s)
{
struct search *pfs = NULL;
struct cached_dev *dc;
DEFINE_WAIT(wqe);
if (!adev.inflight_list.initialized)
return false;
spin_lock(&adev.inflight_list.io_lock);
pfs = __inflight_list_lookup_locked(s);
if (pfs == NULL) {
spin_unlock(&adev.inflight_list.io_lock);
return false;
}
dc = container_of(pfs->d, struct cached_dev, disk);
if (!dc->inflight_block_enable) {
spin_unlock(&adev.inflight_list.io_lock);
return true;
}
prepare_to_wait(&pfs->wqh, &wqe, TASK_INTERRUPTIBLE);
/* Unlock only after prepare_to_wait() so the wakeup from inflight_list_remove() cannot be missed. */
spin_unlock(&adev.inflight_list.io_lock);
schedule();
finish_wait(&pfs->wqh, &wqe);
return true;
}
const struct inflight_queue_ops inflight_list_ops = {
.init = inflight_list_init,
.exit = inflight_list_exit,
.insert = inflight_list_insert,
.remove = inflight_list_remove,
.wait = inflight_list_wait,
};
struct cached_dev *get_cached_device_by_dev(dev_t dev)
{
struct cache_set *c, *tc;
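For reference, the lookup removed above treats each bio as the half-open sector range [bi_sector, bio_end_sector(bio)); a normal request conflicts with an inflight prefetch exactly when the two ranges intersect. A minimal standalone sketch of that predicate (hypothetical helper name, not part of this patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Half-open ranges [a_start, a_end) and [b_start, b_end) intersect
 * iff each starts before the other ends -- the same test
 * __inflight_list_lookup_locked() applied to bio sector ranges. */
static bool sector_ranges_overlap(uint64_t a_start, uint64_t a_end,
				  uint64_t b_start, uint64_t b_end)
{
	return a_start < b_end && b_start < a_end;
}

int main(void)
{
	/* a prefetch of sectors [100, 108) blocks a read of [104, 112) */
	printf("%d\n", sector_ranges_overlap(100, 108, 104, 112)); /* 1 */
	/* adjacent ranges such as [100, 108) and [108, 116) do not conflict */
	printf("%d\n", sector_ranges_overlap(100, 108, 108, 116)); /* 0 */
	return 0;
}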
@@ -66,14 +66,4 @@ void acache_dev_exit(void);
struct acache_info *fetch_circ_item(struct acache_circ *circ);
void save_circ_item(struct acache_info *data);
struct inflight_queue_ops {
void (*init)(void);
void (*exit)(void);
int (*insert)(struct search *s);
int (*remove)(struct search *s);
bool (*wait)(struct search *s);
};
extern const struct inflight_queue_ops inflight_list_ops;
#endif
@@ -376,7 +376,6 @@ struct cached_dev {
unsigned char writeback_percent;
unsigned int writeback_delay;
unsigned int inflight_block_enable;
unsigned int read_bypass;
uint64_t writeback_rate_target;
@@ -706,9 +706,6 @@ void search_free(struct closure *cl)
if (s->iop.bio)
bio_put(s->iop.bio);
if (s->prefetch)
inflight_list_ops.remove(s);
bio_complete(s);
closure_debug_destroy(cl);
mempool_free(s, &s->iop.c->search);
@@ -977,11 +974,6 @@ void cached_dev_read(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
if (s->prefetch)
inflight_list_ops.insert(s);
else if (inflight_list_ops.wait(s))
bch_mark_cache_prefetch_fake_hit(s->iop.c, s->d);
closure_call(&s->iop.cl, cache_lookup, NULL, cl);
continue_at(cl, cached_dev_read_done_bh, NULL);
}
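The hunk above shows the call sites being removed: a prefetch registers itself via inflight_list_ops.insert(), and a normal read blocks in inflight_list_ops.wait() until the overlapping prefetch calls remove(). A userspace analogue of that handshake, sketched with a pthread condition variable purely to illustrate the wait/wake pairing (hypothetical code, not from this patch):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static bool prefetch_inflight = true;

static void *prefetch_thread(void *arg)
{
	(void)arg;
	usleep(1000);			/* simulate the prefetch I/O */
	pthread_mutex_lock(&lock);
	prefetch_inflight = false;	/* like inflight_list_ops.remove() */
	pthread_cond_broadcast(&done);	/* like wake_up_interruptible_all() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, prefetch_thread, NULL);
	pthread_mutex_lock(&lock);
	while (prefetch_inflight)	/* like inflight_list_ops.wait() */
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("overlapping read proceeds after the prefetch completes\n");
	return 0;
}

The kernel version achieves the same effect with prepare_to_wait()/schedule()/finish_wait() against the per-search waitqueue initialized in inflight_list_insert().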
@@ -48,7 +48,6 @@ read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
read_attribute(cache_readaheads);
read_attribute(cache_miss_collisions);
read_attribute(cache_prefetch_fake_hits);
read_attribute(bypassed);
SHOW(bch_stats)
@@ -67,7 +66,6 @@ SHOW(bch_stats)
var_print(cache_readaheads);
var_print(cache_miss_collisions);
var_print(cache_prefetch_fake_hits);
sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
return 0;
@@ -90,7 +88,6 @@ static struct attribute *bch_stats_files[] = {
&sysfs_cache_hit_ratio,
&sysfs_cache_readaheads,
&sysfs_cache_miss_collisions,
&sysfs_cache_prefetch_fake_hits,
&sysfs_bypassed,
NULL
};
@@ -150,7 +147,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
scale_stat(&stats->cache_bypass_misses);
scale_stat(&stats->cache_readaheads);
scale_stat(&stats->cache_miss_collisions);
scale_stat(&stats->cache_prefetch_fake_hits);
scale_stat(&stats->sectors_bypassed);
}
}
@@ -174,7 +170,6 @@ static void scale_accounting(struct timer_list *t)
move_stat(cache_bypass_misses);
move_stat(cache_readaheads);
move_stat(cache_miss_collisions);
move_stat(cache_prefetch_fake_hits);
move_stat(sectors_bypassed);
scale_stats(&acc->total, 0);
@@ -230,14 +225,6 @@ void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
atomic_inc(&c->accounting.collector.cache_miss_collisions);
}
void bch_mark_cache_prefetch_fake_hit(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_prefetch_fake_hits);
atomic_inc(&c->accounting.collector.cache_prefetch_fake_hits);
}
void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
int sectors)
{
@@ -9,7 +9,6 @@ struct cache_stat_collector {
atomic_t cache_bypass_misses;
atomic_t cache_readaheads;
atomic_t cache_miss_collisions;
atomic_t cache_prefetch_fake_hits;
atomic_t sectors_bypassed;
};
@@ -22,7 +21,6 @@ struct cache_stats {
unsigned long cache_bypass_misses;
unsigned long cache_readaheads;
unsigned long cache_miss_collisions;
unsigned long cache_prefetch_fake_hits;
unsigned long sectors_bypassed;
unsigned int rescale;
@@ -60,7 +58,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d);
void bch_mark_cache_miss_collision(struct cache_set *c,
struct bcache_device *d);
void bch_mark_cache_prefetch_fake_hit(struct cache_set *c, struct bcache_device *d);
void bch_mark_sectors_bypassed(struct cache_set *c,
struct cached_dev *dc,
int sectors);
@@ -1439,7 +1439,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
dc->sequential_cutoff = 4 << 20;
dc->inflight_block_enable = 1;
dc->read_bypass = 0;
for (io = dc->io; io < dc->io + RECENT_IO; io++) {
@@ -109,7 +109,6 @@ rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff);
rw_attribute(read_bypass);
rw_attribute(inflight_block_enable);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
@@ -254,7 +253,6 @@ SHOW(__bch_cached_dev)
var_printf(partial_stripes_expensive, "%u");
var_hprint(sequential_cutoff);
var_print(inflight_block_enable);
var_print(read_bypass);
var_hprint(readahead);
@@ -353,9 +351,6 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(read_bypass,
dc->read_bypass,
0, 1);
sysfs_strtoul_clamp(inflight_block_enable,
dc->inflight_block_enable,
0, 1);
d_strtoi_h(readahead);
if (attr == &sysfs_clear_stats)
@@ -522,7 +517,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_partial_stripes_expensive,
&sysfs_sequential_cutoff,
&sysfs_read_bypass,
&sysfs_inflight_block_enable,
&sysfs_clear_stats,
&sysfs_running,
&sysfs_state,
@@ -81,17 +81,6 @@ DEFINE_EVENT(bcache_request, bcache_prefetch_request,
TP_ARGS(d, bio)
);
/* interface.c */
DEFINE_EVENT(bcache_request, bcache_inflight_list_insert,
TP_PROTO(struct bcache_device *d, struct bio *bio),
TP_ARGS(d, bio)
);
DEFINE_EVENT(bcache_request, bcache_inflight_list_remove,
TP_PROTO(struct bcache_device *d, struct bio *bio),
TP_ARGS(d, bio)
);
/* request.c */
DEFINE_EVENT(bcache_request, bcache_request_start,