Commit 4e5cc99e authored by Ming Lei, committed by Jens Axboe

blk-mq: manage hctx map via xarray

First, the code becomes cleaner by switching from a plain array to an xarray.

Second, it fixes a use-after-free on q->queue_hw_ctx: queue_for_each_hw_ctx()
may run while an update of nr_hw_queues is in progress, and the old
queue_hw_ctx array can be freed and replaced underneath it. With this patch,
q->hctx_table is an xarray embedded in the request queue, so it shares the
queue's lifetime and queue_for_each_hw_ctx() can look up each hctx through
q->hctx_table reliably.
Reported-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220308073219.91173-7-ming.lei@redhat.com
[axboe: fix blk_mq_hw_ctx forward declaration]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 4f481208
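
The conversion leans entirely on the kernel's xarray API. As a minimal sketch of the lifecycle blk-mq adopts (not taken from the patch; the demo_* names are invented for illustration, only the xa_*() calls are the real API):

/*
 * Sketch of the xarray pattern this commit switches to: the table lives
 * inside a long-lived owner object, entries are inserted and erased per
 * index, and iteration only visits populated slots.
 */
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_hctx {			/* stand-in for struct blk_mq_hw_ctx */
	int numa_node;
};

struct demo_queue {			/* stand-in for struct request_queue */
	struct xarray hctx_table;
};

static void demo_init(struct demo_queue *q)
{
	xa_init(&q->hctx_table);
}

static int demo_add_hctx(struct demo_queue *q, unsigned long idx, int node)
{
	struct demo_hctx *hctx = kzalloc(sizeof(*hctx), GFP_KERNEL);

	if (!hctx)
		return -ENOMEM;
	hctx->numa_node = node;
	/* xa_insert() fails with -EBUSY if the index is already populated */
	return xa_insert(&q->hctx_table, idx, hctx, GFP_KERNEL);
}

static void demo_teardown(struct demo_queue *q)
{
	struct demo_hctx *hctx;
	unsigned long i;

	/* Visits only populated indexes; no NULL checks, no array bound */
	xa_for_each(&q->hctx_table, i, hctx) {
		xa_erase(&q->hctx_table, i);
		kfree(hctx);
	}
	xa_destroy(&q->hctx_table);
}

Because xa_for_each() only returns entries that are currently present, an iterator never sees a stale pointer left behind by a shrink, which is the property queue_for_each_hw_ctx() needs while __blk_mq_update_nr_hw_queues() is running.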
@@ -6,6 +6,8 @@
 
 #include <linux/seq_file.h>
 
+struct blk_mq_hw_ctx;
+
 struct blk_mq_debugfs_attr {
 	const char *name;
 	umode_t mode;
...
@@ -498,7 +498,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 		void *priv)
 {
 	/*
-	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
+	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
 	 * while the queue is frozen. So we can use q_usage_counter to avoid
 	 * racing with it.
 	 */
...
@@ -71,7 +71,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 		blk_qc_t qc)
 {
-	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
+	return xa_load(&q->hctx_table,
+			(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
 }
 
 static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
@@ -573,7 +574,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	 * If not tell the caller that it should skip this queue.
 	 */
 	ret = -EXDEV;
-	data.hctx = q->queue_hw_ctx[hctx_idx];
+	data.hctx = xa_load(&q->hctx_table, hctx_idx);
 	if (!blk_mq_hw_queue_mapped(data.hctx))
 		goto out_queue_exit;
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
@@ -3437,6 +3438,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 
 	blk_mq_remove_cpuhp(hctx);
 
+	xa_erase(&q->hctx_table, hctx_idx);
+
 	spin_lock(&q->unused_hctx_lock);
 	list_add(&hctx->hctx_list, &q->unused_hctx_list);
 	spin_unlock(&q->unused_hctx_lock);
@@ -3476,8 +3479,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
 				hctx->numa_node))
 		goto exit_hctx;
+
+	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
+		goto exit_flush_rq;
+
 	return 0;
 
+ exit_flush_rq:
+	if (set->ops->exit_request)
+		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
@@ -3856,7 +3866,7 @@ void blk_mq_release(struct request_queue *q)
 		kobject_put(&hctx->kobj);
 	}
 
-	kfree(q->queue_hw_ctx);
+	xa_destroy(&q->hctx_table);
 
 	/*
 	 * release .mq_kobj and sw queue's kobject now because
@@ -3945,46 +3955,28 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 						struct request_queue *q)
 {
-	int i, j, end;
-	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
-
-	if (q->nr_hw_queues < set->nr_hw_queues) {
-		struct blk_mq_hw_ctx **new_hctxs;
-
-		new_hctxs = kcalloc_node(set->nr_hw_queues,
-				       sizeof(*new_hctxs), GFP_KERNEL,
-				       set->numa_node);
-		if (!new_hctxs)
-			return;
-		if (hctxs)
-			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
-			       sizeof(*hctxs));
-		q->queue_hw_ctx = new_hctxs;
-		kfree(hctxs);
-		hctxs = new_hctxs;
-	}
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i, j;
 
 	/* protect against switching io scheduler */
 	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
-		struct blk_mq_hw_ctx *old_hctx = hctxs[i];
+		struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
 
 		if (old_hctx) {
 			old_node = old_hctx->numa_node;
 			blk_mq_exit_hctx(q, set, old_hctx, i);
 		}
 
-		hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node);
-		if (!hctxs[i]) {
+		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
 			if (!old_hctx)
 				break;
 			pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
 					node, old_node);
-			hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i,
-					old_node);
-			WARN_ON_ONCE(!hctxs[i]);
+			hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
+			WARN_ON_ONCE(!hctx);
 		}
 	}
 	/*
@@ -3993,21 +3985,13 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	 */
 	if (i != set->nr_hw_queues) {
 		j = q->nr_hw_queues;
-		end = i;
 	} else {
 		j = i;
-		end = q->nr_hw_queues;
 		q->nr_hw_queues = set->nr_hw_queues;
 	}
 
-	for (; j < end; j++) {
-		struct blk_mq_hw_ctx *hctx = hctxs[j];
-
-		if (hctx) {
-			blk_mq_exit_hctx(q, set, hctx, j);
-			hctxs[j] = NULL;
-		}
-	}
+	xa_for_each_start(&q->hctx_table, j, hctx, j)
+		blk_mq_exit_hctx(q, set, hctx, j);
 	mutex_unlock(&q->sysfs_lock);
 }
 
@@ -4046,6 +4030,8 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	INIT_LIST_HEAD(&q->unused_hctx_list);
 	spin_lock_init(&q->unused_hctx_lock);
 
+	xa_init(&q->hctx_table);
+
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
@@ -4075,7 +4061,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
-	kfree(q->queue_hw_ctx);
+	xa_destroy(&q->hctx_table);
 	q->nr_hw_queues = 0;
 	blk_mq_sysfs_deinit(q);
 err_poll:
...
@@ -83,7 +83,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
 							  enum hctx_type type,
 							  unsigned int cpu)
 {
-	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
+	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
 }
 
 static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
...
@@ -917,8 +917,7 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)				\
-	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
-	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+	xa_for_each(&(q)->hctx_table, (i), (hctx))
 
 #define hctx_for_each_ctx(hctx, ctx, i)					\
 	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
...
@@ -355,7 +355,7 @@ struct request_queue {
 	unsigned int		queue_depth;
 
 	/* hw dispatch queues */
-	struct blk_mq_hw_ctx	**queue_hw_ctx;
+	struct xarray		hctx_table;
 	unsigned int		nr_hw_queues;
 
 	/*
...
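
For callers, the visible change is the iteration contract of queue_for_each_hw_ctx(): the loop index now has to be an unsigned long to match xa_for_each(), and only hctxs currently present in the table are visited. A hypothetical user (demo_dump_hctxs is not part of the patch) would look like:

#include <linux/blk-mq.h>
#include <linux/printk.h>

/* Illustrative only: walk every live hctx of a queue via the new macro. */
static void demo_dump_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;	/* unsigned long, as xa_for_each() expects */

	queue_for_each_hw_ctx(q, hctx, i)
		pr_info("hctx %lu: numa node %u\n", i, hctx->numa_node);
}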