提交 bccb5f7c 编写于 作者: J Jens Axboe

blk-mq: fix potential stall during CPU unplug with IO pending

When a CPU is unplugged, we move the blk_mq_ctx request entries
to the current queue. The current code forgets to remap the
blk_mq_hw_ctx before marking the software context pending,
which breaks if old-cpu and new-cpu don't map to the same
hardware queue.

Additionally, if we mark entries as pending in the new
hardware queue, then make sure we schedule it for running.
Otherwise the request could sit there until someone else
queues IO for that hardware queue.
Signed-off-by: Jens Axboe <axboe@fb.com>
上级 60b0ea12
...@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, ...@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
unsigned int cpu) unsigned int cpu)
{ {
struct blk_mq_hw_ctx *hctx = data; struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
struct blk_mq_ctx *ctx; struct blk_mq_ctx *ctx;
LIST_HEAD(tmp); LIST_HEAD(tmp);
...@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, ...@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
/* /*
* Move ctx entries to new CPU, if this one is going away. * Move ctx entries to new CPU, if this one is going away.
*/ */
ctx = __blk_mq_get_ctx(hctx->queue, cpu); ctx = __blk_mq_get_ctx(q, cpu);
spin_lock(&ctx->lock); spin_lock(&ctx->lock);
if (!list_empty(&ctx->rq_list)) { if (!list_empty(&ctx->rq_list)) {
...@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, ...@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
if (list_empty(&tmp)) if (list_empty(&tmp))
return; return;
ctx = blk_mq_get_ctx(hctx->queue); ctx = blk_mq_get_ctx(q);
spin_lock(&ctx->lock); spin_lock(&ctx->lock);
while (!list_empty(&tmp)) { while (!list_empty(&tmp)) {
...@@ -988,10 +989,13 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, ...@@ -988,10 +989,13 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
list_move_tail(&rq->queuelist, &ctx->rq_list); list_move_tail(&rq->queuelist, &ctx->rq_list);
} }
hctx = q->mq_ops->map_queue(q, ctx->cpu);
blk_mq_hctx_mark_pending(hctx, ctx); blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock); spin_unlock(&ctx->lock);
blk_mq_put_ctx(ctx); blk_mq_put_ctx(ctx);
blk_mq_run_hw_queue(hctx, true);
} }
static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册