Commit b8617081 authored by Jens Axboe, committed by Caspar Zhang

blk-mq: pass in request/bio flags to queue mapping

to #28991349

commit f9afca4d367b8c915f28d29fcaba7460640403ff upstream

Prep patch for being able to place requests based not just on
CPU location, but also on the type of request.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 67c25121
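The diff below touches every blk_mq_map_queue() caller so that the request/bio flags (rq->cmd_flags or bio->bi_opf) are carried down to the mapping helper; in this prep patch the flags are only plumbed through, and the mapping itself is still purely CPU based. The short userspace C sketch that follows illustrates that calling convention only; its names (tag_set, map_queue, HCTX_TYPE_DEFAULT) are simplified stand-ins, not the kernel's actual structures.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the tag set and its cpu -> hw queue map. */
enum hctx_type { HCTX_TYPE_DEFAULT = 0, HCTX_MAX_TYPES = 1 };

struct tag_set {
	unsigned int nr_cpus;
	const unsigned int *mq_map[HCTX_MAX_TYPES];	/* per type: cpu -> hw queue index */
};

/*
 * New calling convention: the request/bio flags travel alongside the CPU.
 * In this prep step the flags are accepted but not yet used; everything
 * still resolves through the single (type 0) map, as in the patch.
 */
static unsigned int map_queue(const struct tag_set *set,
			      unsigned int cmd_flags, unsigned int cpu)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;	/* later: could be derived from cmd_flags */

	(void)cmd_flags;
	return set->mq_map[type][cpu];
}

int main(void)
{
	static const unsigned int map0[4] = { 0, 0, 1, 1 };	/* 4 CPUs, 2 hw queues */
	struct tag_set set = { .nr_cpus = 4, .mq_map = { map0 } };

	/* For now a write on CPU 3 maps the same as a read; only the plumbing changed. */
	printf("cpu 3 -> hw queue %u\n", map_queue(&set, 0 /* cmd_flags */, 3));
	return 0;
}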
@@ -242,7 +242,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	if (fq->rq_status != BLK_STS_OK)
 		error = fq->rq_status;
 
-	hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
+	hctx = blk_mq_map_queue(q, flush_rq->cmd_flags, flush_rq->mq_ctx->cpu);
 	if (!q->elevator) {
 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
@@ -352,7 +352,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	if (!q->elevator) {
 		fq->orig_rq = first_rq;
 		flush_rq->tag = first_rq->tag;
-		hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
+		hctx = blk_mq_map_queue(q, first_rq->cmd_flags, first_rq->mq_ctx->cpu);
 		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 	} else {
 		flush_rq->internal_tag = first_rq->internal_tag;
@@ -419,7 +419,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 	unsigned long flags;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-	hctx = blk_mq_map_queue(q, ctx->cpu);
+	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
 	if (q->elevator) {
 		WARN_ON(rq->tag < 0);
......
@@ -514,9 +514,10 @@ struct show_busy_params {
 static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
 {
 	const struct show_busy_params *params = data;
+	struct blk_mq_hw_ctx *hctx;
 
-	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
-	    blk_mq_rq_state(rq) != MQ_RQ_IDLE)
+	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
+	if (hctx == params->hctx && blk_mq_rq_state(rq) != MQ_RQ_IDLE)
 		__blk_mq_debugfs_rq_show(params->m,
 					 list_entry_rq(&rq->queuelist));
 }
......
@@ -312,7 +312,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
 	bool ret = false;
 
 	if (e && e->type->ops.mq.bio_merge) {
@@ -368,7 +368,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
 	/* flush rq in flush machinery need to be dispatched directly */
 	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -401,8 +403,13 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 				  struct blk_mq_ctx *ctx,
 				  struct list_head *list, bool run_queue_async)
 {
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-	struct elevator_queue *e = hctx->queue->elevator;
+	struct blk_mq_hw_ctx *hctx;
+	struct elevator_queue *e;
+	struct request *rq;
+
+	/* For list inserts, requests better be on the same hw queue */
+	rq = list_first_entry(list, struct request, queuelist);
+	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
 	/*
 	 * blk_mq_sched_insert_requests() is called from flush plug
@@ -411,6 +418,7 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 	 */
 	percpu_ref_get(&q->q_usage_counter);
 
+	e = hctx->queue->elevator;
 	if (e && e->type->ops.mq.insert_requests)
 		e->type->ops.mq.insert_requests(hctx, list, false);
 	else {
......
@@ -168,7 +168,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		io_schedule();
 
 		data->ctx = blk_mq_get_ctx(data->q);
-		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
+		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
+						data->ctx->cpu);
 		tags = blk_mq_tags_from_data(data);
 		if (data->flags & BLK_MQ_REQ_RESERVED)
 			bt = &tags->breserved_tags;
@@ -472,7 +473,7 @@ u32 blk_mq_unique_tag(struct request *rq)
 	int hwq = 0;
 
 	if (q->mq_ops) {
-		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+		hctx = blk_mq_map_queue(q, rq->cmd_flags, rq->mq_ctx->cpu);
 		hwq = hctx->queue_num;
 	}
......
@@ -369,8 +369,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 }
 
 static struct request *blk_mq_get_request(struct request_queue *q,
-					  struct bio *bio, unsigned int op,
+					  struct bio *bio,
 					  struct blk_mq_alloc_data *data)
 {
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
@@ -390,8 +390,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		put_ctx_on_error = true;
 	}
 	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-	if (op & REQ_NOWAIT)
+		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
+						data->ctx->cpu);
+	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
 	if (e) {
@@ -402,9 +403,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		 * dispatch list. Don't include reserved tags in the
 		 * limiting, as it isn't useful.
 		 */
-		if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+		if (!op_is_flush(data->cmd_flags) && e->type->ops.mq.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
-			e->type->ops.mq.limit_depth(op, data);
+			e->type->ops.mq.limit_depth(data->cmd_flags, data);
 	} else {
 		blk_mq_tag_busy(data->hctx);
 	}
@@ -419,8 +420,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		return NULL;
 	}
 
-	rq = blk_mq_rq_ctx_init(data, tag, op, alloc_time_ns);
-	if (!op_is_flush(op)) {
+	rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags, alloc_time_ns);
+	if (!op_is_flush(data->cmd_flags)) {
 		rq->elv.icq = NULL;
 		if (e && e->type->ops.mq.prepare_request) {
 			if (e->type->icq_cache && rq_ioc(bio))
@@ -437,7 +438,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		blk_mq_req_flags_t flags)
 {
-	struct blk_mq_alloc_data alloc_data = { .flags = flags };
+	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
 	struct request *rq;
 	int ret;
@@ -445,7 +446,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, &alloc_data);
 	blk_queue_exit(q);
 
 	if (!rq)
@@ -463,7 +464,7 @@ EXPORT_SYMBOL(blk_mq_alloc_request);
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
 {
-	struct blk_mq_alloc_data alloc_data = { .flags = flags };
+	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
 	struct request *rq;
 	unsigned int cpu;
 	int ret;
@@ -496,7 +497,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, &alloc_data);
 	blk_queue_exit(q);
 
 	if (!rq)
@@ -510,7 +511,7 @@ void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 	const int sched_tag = rq->internal_tag;
 
 	if (rq->tag != -1)
@@ -526,7 +527,7 @@ void blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
 	if (rq->rq_flags & RQF_ELVPRIV) {
 		if (e && e->type->ops.mq.finish_request)
@@ -1015,8 +1016,9 @@ bool blk_mq_get_driver_tag(struct request *rq)
 {
 	struct blk_mq_alloc_data data = {
 		.q = rq->q,
-		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
+		.hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu),
 		.flags = BLK_MQ_REQ_NOWAIT,
+		.cmd_flags = rq->cmd_flags,
 	};
 	bool shared;
@@ -1180,7 +1182,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		rq = list_first_entry(list, struct request, queuelist);
 
-		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+		hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
 		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
 			break;
@@ -1619,7 +1621,8 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
+							ctx->cpu);
 
 	spin_lock(&hctx->lock);
 	list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1832,7 +1835,8 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	int srcu_idx;
 	blk_qc_t unused_cookie;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
+							ctx->cpu);
 
 	hctx_lock(hctx, &srcu_idx);
 	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
@@ -1888,7 +1892,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
-	struct blk_mq_alloc_data data = { .flags = 0 };
+	struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
 	struct request *rq;
 	unsigned int request_count = 0;
 	struct blk_plug *plug;
@@ -1911,7 +1915,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq_qos_throttle(q, bio, NULL);
 
-	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
+	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
 		if (bio->bi_opf & REQ_NOWAIT)
@@ -1984,6 +1988,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		if (same_queue_rq) {
 			data.hctx = blk_mq_map_queue(q,
+					same_queue_rq->cmd_flags,
 					same_queue_rq->mq_ctx->cpu);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
@@ -2333,7 +2338,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		 * Set local node, IFF we have more than one hw queue. If
 		 * not, we remain on the home node of the device
 		 */
-		hctx = blk_mq_map_queue(q, i);
+		hctx = blk_mq_map_queue_type(q, 0, i);
 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
 			hctx->numa_node = local_memory_node(cpu_to_node(i));
 	}
@@ -2406,7 +2411,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
-		hctx = blk_mq_map_queue(q, i);
+		hctx = blk_mq_map_queue_type(q, 0, i);
 		cpumask_set_cpu(i, hctx->cpumask);
 		ctx->index_hw = hctx->nr_ctx;
......
@@ -79,6 +79,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+						     unsigned int flags,
 						     unsigned int cpu)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -90,7 +91,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
 							  unsigned int hctx_type,
 							  unsigned int cpu)
 {
-	return blk_mq_map_queue(q, cpu);
+	return blk_mq_map_queue(q, hctx_type, cpu);
 }
 
 /*
@@ -141,6 +142,7 @@ struct blk_mq_alloc_data {
 	struct request_queue *q;
 	blk_mq_req_flags_t flags;
 	unsigned int shallow_depth;
+	unsigned int cmd_flags;
 
 	/* input & output parameter */
 	struct blk_mq_ctx *ctx;
@@ -217,7 +219,7 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
 	if (rq->tag == -1 || rq->internal_tag == -1)
 		return;
 
-	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
 	__blk_mq_put_driver_tag(hctx, rq);
 }
......
@@ -111,11 +111,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	__clear_bit(flag, &q->queue_flags);
 }
 
-static inline struct blk_flush_queue *blk_get_flush_queue(
-		struct request_queue *q, struct blk_mq_ctx *ctx)
+static inline struct blk_flush_queue *
+blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
 	if (q->mq_ops)
-		return blk_mq_map_queue(q, ctx->cpu)->fq;
+		return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq;
 
 	return q->fq;
 }
......
@@ -570,7 +570,7 @@ static void dd_finish_request(struct request *rq)
 		if (!list_empty(&dd->fifo_list[WRITE])) {
 			struct blk_mq_hw_ctx *hctx;
 
-			hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+			hctx = blk_mq_map_queue(q, rq->cmd_flags, rq->mq_ctx->cpu);
 			blk_mq_sched_mark_restart_hctx(hctx);
 		}
 		spin_unlock_irqrestore(&dd->zone_lock, flags);
......