Commit dc72ef4a authored by Jens Axboe, committed by Jens Axboe

[PATCH] Add blk_start_queueing() helper

CFQ implements this on its own now, but it's really block layer
knowledge. Tells a device queue to start dispatching requests to
the driver, taking care to unplug if needed. Also fixes the issue
where as/cfq will invoke a stopped queue, which we really don't
want.
Signed-off-by: Jens Axboe <axboe@suse.de>
Parent 981a7973
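In code terms, the duplication being removed is the open-coded "dispatch or unplug" decision; a rough before/after sketch (q stands for whichever request queue the caller already holds locked):

	/* before: open-coded in the AS and CFQ schedulers */
	if (!blk_queue_plugged(q))
		q->request_fn(q);		/* not plugged: call the driver's request_fn now */
	else
		__generic_unplug_device(q);	/* plugged: unplug the queue, which also dispatches */

	/* after: one block layer helper makes the same decision */
	blk_start_queueing(q);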
@@ -1280,8 +1280,7 @@ static void as_work_handler(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (!as_queue_empty(q))
-		q->request_fn(q);
+	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
......
@@ -1552,19 +1552,6 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	__cfq_set_active_queue(cfqd, cfqq);
 }
 
-/*
- * should really be a ll_rw_blk.c helper
- */
-static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-	request_queue_t *q = cfqd->queue;
-
-	if (!blk_queue_plugged(q))
-		q->request_fn(q);
-	else
-		__generic_unplug_device(q);
-}
-
 /*
  * Called when a new fs request (rq) is added (to cfqq). Check if there's
  * something we should do about it
@@ -1593,7 +1580,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		if (cic == cfqd->active_cic &&
 		    del_timer(&cfqd->idle_slice_timer)) {
 			cfq_slice_expired(cfqd, 0);
-			cfq_start_queueing(cfqd, cfqq);
+			blk_start_queueing(cfqd->queue);
 		}
 		return;
 	}
@@ -1614,7 +1601,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		if (cfq_cfqq_wait_request(cfqq)) {
 			cfq_mark_cfqq_must_dispatch(cfqq);
 			del_timer(&cfqd->idle_slice_timer);
-			cfq_start_queueing(cfqd, cfqq);
+			blk_start_queueing(cfqd->queue);
 		}
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
@@ -1624,7 +1611,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
 		cfq_mark_cfqq_must_dispatch(cfqq);
-		cfq_start_queueing(cfqd, cfqq);
+		blk_start_queueing(cfqd->queue);
 	}
 }
@@ -1832,8 +1819,7 @@ static void cfq_kick_queue(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	blk_remove_plug(q);
-	q->request_fn(q);
+	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
......
@@ -2266,6 +2266,25 @@ struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_get_request);
 
+/**
+ * blk_start_queueing - initiate dispatch of requests to device
+ * @q:		request queue to kick into gear
+ *
+ * This is basically a helper to remove the need to know whether a queue
+ * is plugged or not if someone just wants to initiate dispatch of requests
+ * for this queue.
+ *
+ * The queue lock must be held with interrupts disabled.
+ */
+void blk_start_queueing(request_queue_t *q)
+{
+	if (!blk_queue_plugged(q))
+		q->request_fn(q);
+	else
+		__generic_unplug_device(q);
+}
+EXPORT_SYMBOL(blk_start_queueing);
+
 /**
  * blk_requeue_request - put a request back on queue
  * @q:		request queue where request should be inserted
@@ -2333,11 +2352,7 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 	drive_stat_acct(rq, rq->nr_sectors, 1);
 	__elv_add_request(q, rq, where, 0);
-
-	if (blk_queue_plugged(q))
-		__generic_unplug_device(q);
-	else
-		q->request_fn(q);
+	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
......
@@ -635,6 +635,7 @@ extern void blk_stop_queue(request_queue_t *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
+extern void blk_start_queueing(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
 extern int blk_rq_unmap_user(struct bio *, unsigned int);
......
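As the kernel-doc above states, blk_start_queueing() must be called with the queue lock held and interrupts disabled. A minimal caller sketch in the style of as_work_handler()/cfq_kick_queue() from this patch (example_kick_queue is an illustrative name, not part of the patch):

	static void example_kick_queue(request_queue_t *q)
	{
		unsigned long flags;

		/* take the queue lock with interrupts disabled, as the helper requires */
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queueing(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}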