diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9ae1663348ac4b0d46eda97482fef5113738758b..cedc355218db4f1c3abd2e9a2a8ed0313ad9aac1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3280,7 +3280,15 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		if (hctxs)
 			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
 			       sizeof(*hctxs));
-		q->queue_hw_ctx = new_hctxs;
+
+		rcu_assign_pointer(q->queue_hw_ctx, new_hctxs);
+		/*
+		 * Make sure concurrent readers of the old queue_hw_ctx
+		 * can't trigger a use-after-free. At queue start-up time
+		 * there are no readers yet, so no need to sync RCU.
+		 */
+		if (hctxs)
+			synchronize_rcu();
 		kfree(hctxs);
 		hctxs = new_hctxs;
 	}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b2db9a5c10e8fd98a14e7ad72b895c9f2d493eff..c9210fb70e4db36efecc6b41ccead2267eb5861f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -612,9 +612,20 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 	return rq + 1;
 }
 
+static inline struct blk_mq_hw_ctx *queue_hctx(struct request_queue *q, int id)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	rcu_read_lock();
+	hctx = *(rcu_dereference(q->queue_hw_ctx) + id);
+	rcu_read_unlock();
+
+	return hctx;
+}
+
 #define queue_for_each_hw_ctx(q, hctx, i)				\
 	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
-	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+	     ({ hctx = queue_hctx((q), i); 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)					\
 	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 433485f8b1cc0169bc724fe04cb0ad56a629ce3c..23dfe7608e793c076b96bafbdcad7fbe4cfcd343 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -421,7 +421,7 @@ struct request_queue {
 	unsigned int		queue_depth;
 
 	/* hw dispatch queues */
-	struct blk_mq_hw_ctx	**queue_hw_ctx;
+	struct blk_mq_hw_ctx	__rcu **queue_hw_ctx;
 	unsigned int		nr_hw_queues;
 
 	struct backing_dev_info	*backing_dev_info;
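
For illustration, below is a minimal user-space sketch of the publish-then-wait
pattern the patch applies to q->queue_hw_ctx, written against liburcu
(https://liburcu.org) since kernel RCU cannot run standalone. The names
table, nr_entries, table_entry() and grow_table() are illustrative stand-ins,
not identifiers from the patch; this assumes liburcu's default flavor
(#include <urcu.h>, link with -lurcu).

/* Build (assuming liburcu is installed): gcc rcu_resize.c -o rcu_resize -lurcu */
#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int **table;		/* plays the role of q->queue_hw_ctx */
static int nr_entries;		/* plays the role of q->nr_hw_queues */

/* Reader side, mirroring queue_hctx(): dereference under rcu_read_lock(). */
static int *table_entry(int id)
{
	int *entry;

	rcu_read_lock();
	entry = *(rcu_dereference(table) + id);
	rcu_read_unlock();

	return entry;
}

/* Updater side, mirroring the blk_mq_realloc_hw_ctxs() hunk. */
static void grow_table(int new_nr)
{
	int **old_table = table;
	int **new_table = calloc(new_nr, sizeof(*new_table));

	if (!new_table)
		abort();
	if (old_table)
		memcpy(new_table, old_table, nr_entries * sizeof(*old_table));

	/* Publish the new array before the old one is freed. */
	rcu_assign_pointer(table, new_table);

	/*
	 * Wait until every reader that may still hold the old array has
	 * left its read-side critical section; on the first allocation
	 * there is no old array, so the grace period is skipped.
	 */
	if (old_table)
		synchronize_rcu();
	free(old_table);
	nr_entries = new_nr;
}

int main(void)
{
	rcu_register_thread();

	grow_table(4);
	grow_table(8);	/* 4-entry array is freed only after a grace period */
	printf("entry 0 = %p\n", (void *)table_entry(0));

	rcu_unregister_thread();
	return 0;
}

Note that, as in the patch, the grace period only protects the array of
pointers itself: once rcu_read_unlock() has run, the objects the entries
point to must be kept alive by other means (in blk-mq, the hctxs are freed
on a separate path).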