Commit dd831006, authored by Tejun Heo, committed by Jens Axboe

block: misc cleanups in barrier code

Make the following cleanups in preparation for the barrier/flush update.

* blk_do_ordered() declaration is moved from include/linux/blkdev.h to
  block/blk.h.

* blk_do_ordered() now returns a pointer to struct request, with %NULL
  meaning "try the next request" and ERR_PTR(-EAGAIN) "try again
  later".  The third case will be dropped with further changes.  (A
  caller-side sketch of this convention appears below, just before the
  diff.)

* In the initialization of proxy barrier request, data direction is
  already set by init_request_from_bio().  Drop unnecessary explicit
  REQ_WRITE setting and move init_request_from_bio() above REQ_FUA
  flag setting.

* add_request() is collapsed into __make_request().

These changes don't make any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Parent 9cbbdca4
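
For illustration, a minimal caller-side sketch of the new blk_do_ordered() return convention (modeled on the __elv_next_request() hunk in block/blk.h below; a fragment inside a dispatch loop, not part of the patch itself):

        rq = blk_do_ordered(q, rq);
        if (!rq)
                continue;       /* consumed by the barrier machinery; try the next queued request */
        if (IS_ERR(rq))         /* ERR_PTR(-EAGAIN): ordering in progress, try again later */
                return NULL;
        return rq;              /* ordinary request; dispatch it as usual */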
block/blk-barrier.c

@@ -110,9 +110,9 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline bool start_ordered(struct request_queue *q, struct request **rqp)
+static inline struct request *start_ordered(struct request_queue *q,
+					    struct request *rq)
 {
-	struct request *rq = *rqp;
 	unsigned skip = 0;
 
 	q->orderr = 0;
@@ -149,11 +149,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 
 		/* initialize proxy request and queue it */
 		blk_rq_init(q, rq);
-		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-			rq->cmd_flags |= REQ_WRITE;
+		init_request_from_bio(rq, q->orig_bar_rq->bio);
 		if (q->ordered & QUEUE_ORDERED_DO_FUA)
 			rq->cmd_flags |= REQ_FUA;
-		init_request_from_bio(rq, q->orig_bar_rq->bio);
 		rq->end_io = bar_end_io;
 
 		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
@@ -171,27 +169,26 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	else
 		skip |= QUEUE_ORDSEQ_DRAIN;
 
-	*rqp = rq;
-
 	/*
 	 * Complete skipped sequences.  If whole sequence is complete,
-	 * return false to tell elevator that this request is gone.
+	 * return %NULL to tell elevator that this request is gone.
 	 */
-	return !blk_ordered_complete_seq(q, skip, 0);
+	if (blk_ordered_complete_seq(q, skip, 0))
+		rq = NULL;
+	return rq;
 }
 
-bool blk_do_ordered(struct request_queue *q, struct request **rqp)
+struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
 {
-	struct request *rq = *rqp;
 	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
 				(rq->cmd_flags & REQ_HARDBARRIER);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
-			return true;
+			return rq;
 
 		if (q->next_ordered != QUEUE_ORDERED_NONE)
-			return start_ordered(q, rqp);
+			return start_ordered(q, rq);
 		else {
 			/*
 			 * Queue ordering not supported.  Terminate
@@ -199,8 +196,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 			 */
 			blk_dequeue_request(rq);
 			__blk_end_request_all(rq, -EOPNOTSUPP);
-			*rqp = NULL;
-			return false;
+			return NULL;
 		}
 	}
 
@@ -211,14 +207,14 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 	/* Special requests are not subject to ordering rules. */
 	if (rq->cmd_type != REQ_TYPE_FS &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-		return true;
+		return rq;
 
 	/* Ordered by draining.  Wait for turn. */
 	WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
 	if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
-		*rqp = NULL;
+		rq = ERR_PTR(-EAGAIN);
 
-	return true;
+	return rq;
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
block/blk-core.c

@@ -1037,22 +1037,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_insert_request);
 
-/*
- * add-request adds a request to the linked list.
- * queue lock is held and interrupts disabled, as we muck with the
- * request queue list.
- */
-static inline void add_request(struct request_queue *q, struct request *req)
-{
-	drive_stat_acct(req, 1);
-
-	/*
-	 * elevator indicated where it wants this request to be
-	 * inserted at elevator_merge time
-	 */
-	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
-}
-
 static void part_round_stats_single(int cpu, struct hd_struct *part,
 				    unsigned long now)
 {
@@ -1316,7 +1300,10 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		req->cpu = blk_cpu_to_group(smp_processor_id());
 	if (queue_should_plug(q) && elv_queue_empty(q))
 		blk_plug_device(q);
-	add_request(q, req);
+
+	/* insert the request into the elevator */
+	drive_stat_acct(req, 1);
+	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 out:
 	if (unplug || !queue_should_plug(q))
 		__generic_unplug_device(q);
block/blk.h

@@ -51,6 +51,8 @@ static inline void blk_clear_rq_complete(struct request *rq)
  */
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+struct request *blk_do_ordered(struct request_queue *q, struct request *rq);
+
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
@@ -58,8 +60,9 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 	while (1) {
 		while (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
-				return rq;
+			rq = blk_do_ordered(q, rq);
+			if (rq)
+				return !IS_ERR(rq) ? rq : NULL;
 		}
 
 		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
include/linux/blkdev.h

@@ -869,7 +869,6 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
 extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);