Commit 313efb25 authored by Jianchao Wang, committed by Greg Kroah-Hartman

blk-mq: change gfp flags to GFP_NOIO in blk_mq_realloc_hw_ctxs

[ Upstream commit 5b202853ffbc54b29f23c4b1b5f3948efab489a2 ]

blk_mq_realloc_hw_ctxs could be invoked while updating hw queues.
At that moment, IO is blocked. Change the gfp flags from GFP_KERNEL
to GFP_NOIO to avoid hanging forever during memory allocation in
blk_mq_realloc_hw_ctxs.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Parent 75448f40
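Background on why the flag change matters: with GFP_KERNEL, direct reclaim is allowed to write back dirty pages, i.e. to issue IO. Since blk_mq_realloc_hw_ctxs runs while IO on the queue is blocked, that writeback can never complete and the allocation hangs. GFP_NOIO forbids reclaim from issuing IO. The commit threads the gfp flags through each allocation site; as an illustration only (not what this commit does), the same guarantee can also be expressed as a scoped constraint on the calling task, sketched below with a hypothetical helper do_update_hw_queues standing in for the real reallocation work:

#include <linux/blkdev.h>
#include <linux/sched/mm.h>

/* hypothetical helper standing in for the real reallocation work */
static int do_update_hw_queues(struct request_queue *q);

static int example_update_hw_queues(struct request_queue *q)
{
	unsigned int noio_flag = memalloc_noio_save();
	int ret;

	/*
	 * Every allocation between save and restore behaves as if
	 * GFP_NOIO had been passed, even in callees that still say
	 * GFP_KERNEL, so reclaim cannot recurse into the blocked
	 * IO path.
	 */
	ret = do_update_hw_queues(q);

	memalloc_noio_restore(noio_flag);
	return ret;
}

As for __GFP_NOWARN | __GFP_NORETRY in the hunks below: those allocations are best-effort; when one fails, blk_mq_realloc_hw_ctxs breaks out of its loop and keeps the hw contexts it did manage to allocate, so failing fast without an allocation-failure warning is the appropriate behavior.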
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1165,7 +1165,7 @@ int blk_init_allocated_queue(struct request_queue *q)
 {
 	WARN_ON_ONCE(q->mq_ops);
 
-	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
 	if (!q->fq)
 		return -ENOMEM;
 
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -566,12 +566,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 EXPORT_SYMBOL(blkdev_issue_flush);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-		int node, int cmd_size)
+		int node, int cmd_size, gfp_t flags)
 {
 	struct blk_flush_queue *fq;
 	int rq_sz = sizeof(struct request);
 
-	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+	fq = kzalloc_node(sizeof(*fq), flags, node);
 	if (!fq)
 		goto fail;
 
@@ -579,7 +579,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		spin_lock_init(&fq->mq_flush_lock);
 
 	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
 	if (!fq->flush_rq)
 		goto fail_rq;
 
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2198,12 +2198,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	 * runtime
 	 */
 	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
-					GFP_KERNEL, node);
+			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
 	if (!hctx->ctxs)
 		goto unregister_cpu_notifier;
 
-	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
-			      node))
+	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
 		goto free_ctxs;
 
 	hctx->nr_ctx = 0;
@@ -2216,7 +2216,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
-	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
 	if (!hctx->fq)
 		goto exit_hctx;
 
@@ -2530,11 +2531,13 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
 		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
-					GFP_KERNEL, node);
+				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+				node);
 		if (!hctxs[i])
 			break;
 
-		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
+		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask,
+					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
 					node)) {
 			kfree(hctxs[i]);
 			hctxs[i] = NULL;
--- a/block/blk.h
+++ b/block/blk.h
@@ -124,7 +124,7 @@ static inline void __blk_get_queue(struct request_queue *q)
 }
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-		int node, int cmd_size);
+		int node, int cmd_size, gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,