提交 80086651 编写于 作者: J Jan Kara 提交者: Yang Yingliang

blk: Fix lock inversion between ioc lock and bfqd lock

mainline inclusion
from mainline-v5.14-rc1
commit  fd2ef39c
category: bugfix
bugzilla: 185777, 185811
CVE: NA

Lockdep complains about lock inversion between ioc->lock and bfqd->lock:

bfqd -> ioc:
 put_io_context+0x33/0x90 -> ioc->lock grabbed
 blk_mq_free_request+0x51/0x140
 blk_put_request+0xe/0x10
 blk_attempt_req_merge+0x1d/0x30
 elv_attempt_insert_merge+0x56/0xa0
 blk_mq_sched_try_insert_merge+0x4b/0x60
 bfq_insert_requests+0x9e/0x18c0 -> bfqd->lock grabbed
 blk_mq_sched_insert_requests+0xd6/0x2b0
 blk_mq_flush_plug_list+0x154/0x280
 blk_finish_plug+0x40/0x60
 ext4_writepages+0x696/0x1320
 do_writepages+0x1c/0x80
 __filemap_fdatawrite_range+0xd7/0x120
 sync_file_range+0xac/0xf0

ioc->bfqd:
 bfq_exit_icq+0xa3/0xe0 -> bfqd->lock grabbed
 put_io_context_active+0x78/0xb0 -> ioc->lock grabbed
 exit_io_context+0x48/0x50
 do_exit+0x7e9/0xdd0
 do_group_exit+0x54/0xc0

To avoid this inversion we change blk_mq_sched_try_insert_merge() to not
free the merged request but rather leave that upto the caller similarly
to blk_mq_sched_try_merge(). And in bfq_insert_requests() we make sure
to free all the merged requests after dropping bfqd->lock.

Fixes: aee69d78 ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20210623093634.27879-3-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Conflict: 1. the mainline include/linux/elevator.h file changed;
          2. mainline changed block/mq-deadline-main.c, which in this
             tree is block/mq-deadline.c;
	  3. in block/blk-merge.c, the function blk_attempt_req_merge
	     in 4.19 differs from mainline;
	  4. in 4.19, elv_attempt_insert_merge has been called twice,
	     but only once in mainline;
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 996a09b6
...@@ -1854,9 +1854,9 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) ...@@ -1854,9 +1854,9 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
ret = blk_mq_sched_try_merge(q, bio, &free); ret = blk_mq_sched_try_merge(q, bio, &free);
spin_unlock_irq(&bfqd->lock);
if (free) if (free)
blk_mq_free_request(free); blk_mq_free_request(free);
spin_unlock_irq(&bfqd->lock);
return ret; return ret;
} }
...@@ -4669,10 +4669,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, ...@@ -4669,10 +4669,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct bfq_queue *bfqq; struct bfq_queue *bfqq;
bool idle_timer_disabled = false; bool idle_timer_disabled = false;
unsigned int cmd_flags; unsigned int cmd_flags;
LIST_HEAD(free);
spin_lock_irq(&bfqd->lock); spin_lock_irq(&bfqd->lock);
if (blk_mq_sched_try_insert_merge(q, rq)) { if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
spin_unlock_irq(&bfqd->lock); spin_unlock_irq(&bfqd->lock);
blk_mq_free_requests(&free);
return; return;
} }
......
...@@ -822,23 +822,21 @@ struct request *attempt_front_merge(struct request_queue *q, struct request *rq) ...@@ -822,23 +822,21 @@ struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
return NULL; return NULL;
} }
int blk_attempt_req_merge(struct request_queue *q, struct request *rq, /*
* Try to merge 'next' into 'rq'. Return true if the merge happened, false
* otherwise. The caller is responsible for freeing 'next' if the merge
* happened.
*/
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next) struct request *next)
{ {
struct elevator_queue *e = q->elevator; struct elevator_queue *e = q->elevator;
struct request *free;
if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn) if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next)) if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
return 0; return 0;
free = attempt_merge(q, rq, next); return attempt_merge(q, rq, next);
if (free) {
__blk_put_request(q, free);
return 1;
}
return 0;
} }
bool blk_rq_merge_ok(struct request *rq, struct bio *bio) bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
......
...@@ -359,9 +359,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) ...@@ -359,9 +359,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
return ret; return ret;
} }
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq) bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
struct list_head *free)
{ {
return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq); return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
} }
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge); EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
......
...@@ -14,7 +14,8 @@ void blk_mq_sched_request_inserted(struct request *rq); ...@@ -14,7 +14,8 @@ void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
struct request **merged_request); struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
......
...@@ -220,4 +220,14 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set) ...@@ -220,4 +220,14 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
set->mq_map[cpu] = 0; set->mq_map[cpu] = 0;
} }
/*
 * Free every request queued on @list.
 *
 * Each request is unlinked from the list (list_del_init) before being
 * handed to blk_mq_free_request(), so the list is empty on return.
 * Callers of blk_mq_sched_try_insert_merge() use this to free the
 * merged requests only after dropping the scheduler lock, avoiding the
 * ioc->lock / bfqd->lock inversion this patch fixes.
 */
static inline void blk_mq_free_requests(struct list_head *list)
{
while (!list_empty(list)) {
struct request *rq = list_entry_rq(list->next);
/* Unlink first: blk_mq_free_request() must not see a queued request. */
list_del_init(&rq->queuelist);
blk_mq_free_request(rq);
}
}
#endif #endif
...@@ -280,8 +280,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, ...@@ -280,8 +280,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio); struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq); struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq); struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq, bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next); struct request *next);
void blk_recalc_rq_segments(struct request *rq); void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq); void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio); bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
......
...@@ -469,9 +469,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req, ...@@ -469,9 +469,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
* we can append 'rq' to an existing request, so we can throw 'rq' away * we can append 'rq' to an existing request, so we can throw 'rq' away
* afterwards. * afterwards.
* *
* Returns true if we merged, false otherwise * Returns true if we merged, false otherwise. 'free' will contain all
* requests that need to be freed.
*/ */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
struct list_head *free)
{ {
struct request *__rq; struct request *__rq;
bool ret; bool ret;
...@@ -482,8 +484,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) ...@@ -482,8 +484,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
/* /*
* First try one-hit cache. * First try one-hit cache.
*/ */
if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
list_add(&rq->queuelist, free);
return true; return true;
}
if (blk_queue_noxmerges(q)) if (blk_queue_noxmerges(q))
return false; return false;
...@@ -497,6 +501,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) ...@@ -497,6 +501,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
if (!__rq || !blk_attempt_req_merge(q, __rq, rq)) if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
break; break;
list_add(&rq->queuelist, free);
/* The merged request could be merged with others, try again */ /* The merged request could be merged with others, try again */
ret = true; ret = true;
rq = __rq; rq = __rq;
...@@ -618,6 +623,7 @@ void elv_drain_elevator(struct request_queue *q) ...@@ -618,6 +623,7 @@ void elv_drain_elevator(struct request_queue *q)
void __elv_add_request(struct request_queue *q, struct request *rq, int where) void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{ {
LIST_HEAD(free);
trace_block_rq_insert(q, rq); trace_block_rq_insert(q, rq);
blk_pm_add_request(q, rq); blk_pm_add_request(q, rq);
...@@ -665,8 +671,10 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) ...@@ -665,8 +671,10 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
* queue already, we are done - rq has now been freed, * queue already, we are done - rq has now been freed,
* so no need to do anything further. * so no need to do anything further.
*/ */
if (elv_attempt_insert_merge(q, rq)) if (elv_attempt_insert_merge(q, rq, &free)) {
blk_mq_free_requests(&free);
break; break;
}
/* fall through */ /* fall through */
case ELEVATOR_INSERT_SORT: case ELEVATOR_INSERT_SORT:
BUG_ON(blk_rq_is_passthrough(rq)); BUG_ON(blk_rq_is_passthrough(rq));
......
...@@ -484,6 +484,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, ...@@ -484,6 +484,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct request_queue *q = hctx->queue; struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
const int data_dir = rq_data_dir(rq); const int data_dir = rq_data_dir(rq);
LIST_HEAD(free);
/* /*
* This may be a requeue of a write request that has locked its * This may be a requeue of a write request that has locked its
...@@ -491,8 +492,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, ...@@ -491,8 +492,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
*/ */
blk_req_zone_write_unlock(rq); blk_req_zone_write_unlock(rq);
if (blk_mq_sched_try_insert_merge(q, rq)) if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
blk_mq_free_requests(&free);
return; return;
}
blk_mq_sched_request_inserted(rq); blk_mq_sched_request_inserted(rq);
......
...@@ -209,7 +209,8 @@ extern void elv_merged_request(struct request_queue *, struct request *, ...@@ -209,7 +209,8 @@ extern void elv_merged_request(struct request_queue *, struct request *,
enum elv_merge); enum elv_merge);
extern void elv_bio_merged(struct request_queue *q, struct request *, extern void elv_bio_merged(struct request_queue *q, struct request *,
struct bio *); struct bio *);
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); extern bool elv_attempt_insert_merge(struct request_queue *, struct request *,
struct list_head *);
extern void elv_requeue_request(struct request_queue *, struct request *); extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册