Commit 476f8c98 authored by Ming Lei, committed by Jens Axboe

blk-mq: avoid to write intermediate result to hctx->next_cpu

This patch computes the final selected CPU first and writes it to
hctx->next_cpu only once, so other dispatch paths can no longer
observe an intermediate next_cpu value.

Cc: Stefan Haberland <sth@linux.vnet.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: bffa9909
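To illustrate the idea outside the kernel, here is a minimal, self-contained C sketch of the pattern the patch applies: select the CPU using a local working copy and publish the result to the shared field with a single store. The struct, function, and round-robin selection below are illustrative stand-ins, not the actual blk-mq code.

	/*
	 * Illustrative sketch only (not kernel code): compute the candidate
	 * CPU in a local variable and store it to the shared field exactly
	 * once, so a concurrent reader never sees a half-finished
	 * intermediate value.
	 */
	#include <stdio.h>

	struct hctx_sketch {
		int next_cpu;	/* shared: read by other dispatch paths */
	};

	static int pick_next_cpu(struct hctx_sketch *hctx, int ncpus)
	{
		int next_cpu = hctx->next_cpu;	/* local working copy */

		/* stand-in for the cpumask walk in blk_mq_hctx_next_cpu() */
		next_cpu = (next_cpu + 1) % ncpus;

		hctx->next_cpu = next_cpu;	/* single publishing store */
		return next_cpu;
	}

	int main(void)
	{
		struct hctx_sketch hctx = { .next_cpu = 0 };

		for (int i = 0; i < 4; i++)
			printf("selected cpu %d\n", pick_next_cpu(&hctx, 4));
		return 0;
	}

Before the patch, each step of the selection wrote hctx->next_cpu directly, so a reader on another dispatch path could pick up a value that a later step would immediately overwrite; with a single final store, that window goes away.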
@@ -1344,26 +1344,24 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 {
 	bool tried = false;
+	int next_cpu = hctx->next_cpu;
 
 	if (hctx->queue->nr_hw_queues == 1)
 		return WORK_CPU_UNBOUND;
 
 	if (--hctx->next_cpu_batch <= 0) {
-		int next_cpu;
 select_cpu:
-		next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
+		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
 				cpu_online_mask);
 		if (next_cpu >= nr_cpu_ids)
-			next_cpu = cpumask_first_and(hctx->cpumask,cpu_online_mask);
+			next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
 
 		/*
 		 * No online CPU is found, so have to make sure hctx->next_cpu
 		 * is set correctly for not breaking workqueue.
 		 */
 		if (next_cpu >= nr_cpu_ids)
-			hctx->next_cpu = cpumask_first(hctx->cpumask);
-		else
-			hctx->next_cpu = next_cpu;
+			next_cpu = cpumask_first(hctx->cpumask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
 
@@ -1371,7 +1369,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	 * Do unbound schedule if we can't find a online CPU for this hctx,
 	 * and it should only happen in the path of handling CPU DEAD.
 	 */
-	if (!cpu_online(hctx->next_cpu)) {
+	if (!cpu_online(next_cpu)) {
 		if (!tried) {
 			tried = true;
 			goto select_cpu;
@@ -1381,10 +1379,13 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 		 * Make sure to re-select CPU next time once after CPUs
 		 * in hctx->cpumask become online again.
 		 */
+		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = 1;
 		return WORK_CPU_UNBOUND;
 	}
-	return hctx->next_cpu;
+
+	hctx->next_cpu = next_cpu;
+	return next_cpu;
 }
 
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,