Commit 1654e741 authored by Tejun Heo, committed by Jens Axboe

block: add @force_kblockd to __blk_run_queue()

__blk_run_queue() automatically either calls q->request_fn() directly
or schedules kblockd, depending on whether the call would recurse into
the queue.  The blk-flush implementation needs to be able to choose
kblockd explicitly.  Add @force_kblockd.

All the current users are converted to specify %false for the
parameter and this patch doesn't introduce any behavior change.

stable: This is a prerequisite for fixing an ide oops caused by the new
        blk-flush implementation.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Parent 291d24f6
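The dispatch rule this patch adds can be summarized in a small, self-contained C sketch (illustration only: run_queue(), request_fn(), schedule_kblockd(), and the reenter flag below are stand-ins for __blk_run_queue(), ->request_fn(), kblockd_schedule_work(), and QUEUE_FLAG_REENTER, not kernel APIs):

```c
#include <stdbool.h>
#include <stdio.h>

static bool reenter;    /* stand-in for QUEUE_FLAG_REENTER */

static void request_fn(void)
{
        puts("drive the queue by calling ->request_fn() inline");
}

static void schedule_kblockd(void)
{
        puts("punt the work to the kblockd workqueue");
}

/*
 * Mirrors the decision added to __blk_run_queue() in the hunks below:
 * run inline only if the caller allows it and we are not already inside
 * request_fn(); otherwise defer to the worker thread.
 */
static void run_queue(bool force_kblockd)
{
        if (!force_kblockd && !reenter) {
                reenter = true;
                request_fn();
                reenter = false;
        } else {
                schedule_kblockd();
        }
}

int main(void)
{
        run_queue(false);       /* every caller converted by this patch passes false */
        run_queue(true);        /* what blk-flush will use to force kblockd */
        return 0;
}
```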
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
...
@@ -69,7 +69,7 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
 	 * queue.  Kick the queue in those cases.
 	 */
 	if (was_empty && next_rq)
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 }
 static void pre_flush_end_io(struct request *rq, int error)
...
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue);
+			__blk_run_queue(cfqd->queue, false);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 				&cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue);
+		__blk_run_queue(cfqd->queue, false);
 	}
 }
@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue);
+	__blk_run_queue(cfqd->queue, false);
 	spin_unlock_irq(q->queue_lock);
 }
...
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * with anything.  There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		break;
 	case ELEVATOR_INSERT_SORT:
...
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
 				       &sdev->request_queue->queue_flags);
 		if (flagset)
 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
+		__blk_run_queue(sdev->request_queue, false);
 		if (flagset)
 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
 		spin_unlock(sdev->request_queue->queue_lock);
...
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
 		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 	if (flagset)
 		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q);
+	__blk_run_queue(rport->rqst_q, false);
 	if (flagset)
 		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
 	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
...
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
...
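Every call site in the diff passes false, so behavior is unchanged by this patch; a later caller that must not recurse into ->request_fn() (the blk-flush case named in the message above) would instead call __blk_run_queue(q, true) to punt the queue run to kblockd.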