Commit 7b36a718 authored by Jens Axboe

block: don't call ioc_exit_icq() with the queue lock held for blk-mq

For legacy scheduling, we always call ioc_exit_icq() with both the
ioc and queue lock held. This poses a problem for blk-mq with
scheduling, since the queue lock isn't what we use in the scheduler.
And since we don't need the queue lock held for ioc exit there,
don't grab it and leave any extra locking up to the blk-mq scheduler.
Reported-by: Paolo Valente <paolo.valente@linaro.org>
Tested-by: Paolo Valente <paolo.valente@linaro.org>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent a5a79d00
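The message above leaves "any extra locking up to the blk-mq scheduler". As a rough, hypothetical sketch (not part of this patch; the example_* names and the per-queue spinlock are invented for illustration), a blk-mq scheduler's exit_icq hook would now protect its own state with its own lock rather than relying on q->queue_lock being held by the caller:

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/iocontext.h>
#include <linux/spinlock.h>

/* Hypothetical per-queue scheduler data, named for illustration only. */
struct example_sched_data {
	spinlock_t lock;	/* protects this scheduler's own state */
	/* ... per-queue scheduler state ... */
};

/*
 * Sketch of a blk-mq exit_icq hook: after this change ioc_exit_icq()
 * no longer runs with q->queue_lock held for blk-mq, so any
 * serialization is the scheduler's own responsibility.
 */
static void example_exit_icq(struct io_cq *icq)
{
	struct example_sched_data *sd = icq->q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(&sd->lock, flags);
	/* tear down this scheduler's per-icq state here */
	spin_unlock_irqrestore(&sd->lock, flags);
}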
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -36,8 +36,8 @@ static void icq_free_icq_rcu(struct rcu_head *head)
 }
 
 /*
- * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
- * mq.
+ * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
+ * and queue locked for legacy.
  */
 static void ioc_exit_icq(struct io_cq *icq)
 {
@@ -54,7 +54,10 @@ static void ioc_exit_icq(struct io_cq *icq)
 	icq->flags |= ICQ_EXITED;
 }
 
-/* Release an icq. Called with both ioc and q locked. */
+/*
+ * Release an icq. Called with ioc locked for blk-mq, and with both ioc
+ * and queue locked for legacy.
+ */
 static void ioc_destroy_icq(struct io_cq *icq)
 {
 	struct io_context *ioc = icq->ioc;
@@ -62,7 +65,6 @@ static void ioc_destroy_icq(struct io_cq *icq)
 	struct elevator_type *et = q->elevator->type;
 
 	lockdep_assert_held(&ioc->lock);
-	lockdep_assert_held(q->queue_lock);
 
 	radix_tree_delete(&ioc->icq_tree, icq->q->id);
 	hlist_del_init(&icq->ioc_node);
@@ -222,24 +224,40 @@ void exit_io_context(struct task_struct *task)
 	put_io_context_active(ioc);
 }
 
+static void __ioc_clear_queue(struct list_head *icq_list)
+{
+	unsigned long flags;
+
+	while (!list_empty(icq_list)) {
+		struct io_cq *icq = list_entry(icq_list->next,
+					       struct io_cq, q_node);
+		struct io_context *ioc = icq->ioc;
+
+		spin_lock_irqsave(&ioc->lock, flags);
+		ioc_destroy_icq(icq);
+		spin_unlock_irqrestore(&ioc->lock, flags);
+	}
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
  *
- * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
+ * Walk @q->icq_list and exit all io_cq's.
  */
 void ioc_clear_queue(struct request_queue *q)
 {
-	lockdep_assert_held(q->queue_lock);
+	LIST_HEAD(icq_list);
 
-	while (!list_empty(&q->icq_list)) {
-		struct io_cq *icq = list_entry(q->icq_list.next,
-					       struct io_cq, q_node);
-		struct io_context *ioc = icq->ioc;
+	spin_lock_irq(q->queue_lock);
+	list_splice_init(&q->icq_list, &icq_list);
 
-		spin_lock(&ioc->lock);
-		ioc_destroy_icq(icq);
-		spin_unlock(&ioc->lock);
+	if (q->mq_ops) {
+		spin_unlock_irq(q->queue_lock);
+		__ioc_clear_queue(&icq_list);
+	} else {
+		__ioc_clear_queue(&icq_list);
+		spin_unlock_irq(q->queue_lock);
 	}
 }
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -815,9 +815,7 @@ static void blk_release_queue(struct kobject *kobj)
 	blkcg_exit_queue(q);
 
 	if (q->elevator) {
-		spin_lock_irq(q->queue_lock);
 		ioc_clear_queue(q);
-		spin_unlock_irq(q->queue_lock);
 		elevator_exit(q->elevator);
 	}
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -983,9 +983,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 		if (old_registered)
 			elv_unregister_queue(q);
 
-		spin_lock_irq(q->queue_lock);
 		ioc_clear_queue(q);
-		spin_unlock_irq(q->queue_lock);
 	}
 
 	/* allocate, init and register new elevator */