提交 3d14cd06 编写于 作者: Y Yu Kuai 提交者: Yongqiang Liu

block: add a new config to control dispatching bios asynchronously

hulk inclusion
category: performance
bugzilla: 187597, https://gitee.com/openeuler/kernel/issues/I5QK5M
CVE: NA

--------------------------------

If CONFIG_BLK_BIO_DISPATCH_ASYNC is enabled, and the driver supports
QUEUE_FLAG_DISPATCH_ASYNC, bios will be dispatched asynchronously to
specific CPUs to avoid cross-node memory access in the driver.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
上级 b6a187ae
......@@ -200,6 +200,19 @@ config BLK_SED_OPAL
Enabling this option enables users to setup/unlock/lock
Locking ranges for SED devices using the Opal protocol.
config BLK_BIO_DISPATCH_ASYNC
bool "Dispatch bios asynchronously on specific cpus"
default n
depends on BLOCK=y
help
If there are multiple nodes, memory access across nodes is much slower
than access to the local node. And if a driver uses internal spin
locks, I/O performance will be bad when bios are issued concurrently
from different nodes. This feature dispatches bios asynchronously to
specific CPUs to avoid cross-node memory access in the driver. Note
that this feature requires special care in the driver to work. If
unsure, say N here.
menu "Partition Types"
source "block/partitions/Kconfig"
......
......@@ -35,7 +35,6 @@
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/arch_topology.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
......@@ -86,6 +85,9 @@ struct kmem_cache *blk_requestq_cachep;
*/
static struct workqueue_struct *kblockd_workqueue;
#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC
#include <linux/arch_topology.h>
#define BIO_DISPATCH_MAX_LOOP 16
/* minimum number of online CPUs required before async dispatch is enabled */
#define MIN_DISPATCH_ASYNC_CPUS 16
......@@ -309,6 +311,23 @@ void queue_init_dispatch_async_cpus(struct request_queue *q, int node)
cpumask_setall(dispatch_async_cpus);
}
EXPORT_SYMBOL_GPL(queue_init_dispatch_async_cpus);
#else
/*
 * Stub for !CONFIG_BLK_BIO_DISPATCH_ASYNC: there is no per-queue async
 * dispatch state to allocate, so always report success.
 */
static int blk_alloc_queue_dispatch_async(struct request_queue *q)
{
	return 0;
}
/*
 * Fallback for !CONFIG_BLK_BIO_DISPATCH_ASYNC: hand the bio straight to
 * the queue's make_request_fn on the submitting CPU, with no async
 * redirection.
 */
static blk_qc_t blk_queue_do_make_request(struct bio *bio)
{
	struct request_queue *queue = bio->bi_disk->queue;

	return queue->make_request_fn(queue, bio);
}
/*
 * Stub for !CONFIG_BLK_BIO_DISPATCH_ASYNC: no global async dispatch
 * machinery to set up.
 */
static void init_blk_queue_async_dispatch(void)
{
}
#endif
/**
* blk_queue_flag_set - atomically set a queue flag
......
......@@ -696,6 +696,7 @@ static struct queue_sysfs_entry queue_wb_lat_entry = {
.store = queue_wb_lat_store,
};
#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC
static ssize_t queue_dispatch_async_cpus_show(struct request_queue *q,
char *page)
{
......@@ -731,6 +732,7 @@ static struct queue_sysfs_entry queue_dispatch_async_entry = {
.attr = {.name = "dispatch_async", .mode = 0444 },
.show = queue_show_dispatch_async,
};
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
......@@ -774,8 +776,10 @@ static struct attribute *default_attrs[] = {
&queue_dax_entry.attr,
&queue_wb_lat_entry.attr,
&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC
&queue_dispatch_async_cpus_entry.attr,
&queue_dispatch_async_entry.attr,
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
&throtl_sample_time_entry.attr,
#endif
......
......@@ -49,9 +49,11 @@ struct request_queue_wrapper {
struct mutex mq_freeze_lock;
int mq_freeze_depth;
#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC
/* used when QUEUE_FLAG_DISPATCH_ASYNC is set */
struct cpumask dispatch_async_cpus;
int __percpu *last_dispatch_cpu;
#endif
};
#define queue_to_wrapper(q) \
......@@ -464,6 +466,12 @@ extern int blk_iolatency_init(struct request_queue *q);
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif
#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC
/* Free the async-dispatch resources allocated for @q; implemented in blk-core. */
extern void blk_free_queue_dispatch_async(struct request_queue *q);
#else
/* Feature compiled out: nothing was allocated, so nothing to free. */
static inline void blk_free_queue_dispatch_async(struct request_queue *q)
{
}
#endif
#endif /* BLK_INTERNAL_H */
......@@ -785,7 +785,14 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC
/* Initialize the set of CPUs that @q may dispatch bios to for @node. */
extern void queue_init_dispatch_async_cpus(struct request_queue *q, int node);
#else
/* Feature compiled out: no dispatch CPU mask exists, so this is a no-op. */
static inline void queue_init_dispatch_async_cpus(struct request_queue *q,
						  int node)
{
}
#endif
static inline int queue_in_flight(struct request_queue *q)
{
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册