Commit d2a27964 authored by John Garry, committed by Jens Axboe

block: Rename BLKDEV_MAX_RQ -> BLKDEV_DEFAULT_RQ

It is a bit confusing to have both BLKDEV_MAX_RQ and MAX_SCHED_RQ, as
the name BLKDEV_MAX_RQ implies that it is always the maximum number of
requests, which it is not.

Rename BLKDEV_MAX_RQ to BLKDEV_DEFAULT_RQ to match its actual usage:
the default number of requests assigned when allocating a request queue.
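
For illustration only - a sketch, not part of the patch - this is how the
constants read after the rename; the values come from the hunks below, and
the comments are mine:

	#define BLKDEV_MIN_RQ		4
	#define BLKDEV_DEFAULT_RQ	128	/* default q->nr_requests at queue allocation */

	/* q->nr_requests may still be raised above this default, which is
	 * why the old "MAX" name was misleading; the scheduler-side cap is: */
	#define MAX_SCHED_RQ		(16 * BLKDEV_DEFAULT_RQ)	/* = 2048 */
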
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/1633429419-228500-3-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 65de57bb
@@ -587,7 +587,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
-	q->nr_requests = BLKDEV_MAX_RQ;
+	q->nr_requests = BLKDEV_DEFAULT_RQ;
 	return q;
...
@@ -606,7 +606,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	 * Additionally, this is a per-hw queue depth.
 	 */
 	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
-				   BLKDEV_MAX_RQ);
+				   BLKDEV_DEFAULT_RQ);
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_sched_alloc_tags(q, hctx, i);
...
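
As a worked example of the sizing expression in the hunk above (the hardware
queue depths here are illustrative, not taken from this patch):

	/* queue_depth =   32  ->  nr_requests = 2 * min(32, 128)   = 64  */
	/* queue_depth = 1024  ->  nr_requests = 2 * min(1024, 128) = 256 */
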
@@ -6,7 +6,7 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
-#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ)
+#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)
 void blk_mq_sched_assign_ioc(struct request *rq);
...
@@ -836,7 +836,7 @@ struct rbd_options {
 	u32	alloc_hint_flags;	/* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
 };
-#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
+#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_DEFAULT_RQ
 #define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
 #define RBD_LOCK_TIMEOUT_DEFAULT	0	/* no timeout */
 #define RBD_READ_ONLY_DEFAULT	false
...
@@ -12,7 +12,7 @@ struct blk_mq_tags;
 struct blk_flush_queue;
 #define BLKDEV_MIN_RQ	4
-#define BLKDEV_MAX_RQ	128	/* Default maximum */
+#define BLKDEV_DEFAULT_RQ	128
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
...