提交 a7384677 编写于 作者: Tejun Heo 提交者: Jens Axboe

block: remove duplicate or unused barrier/discard error paths

* Because barrier mode can be changed dynamically, whether barrier is
  supported or not can be determined only when actually issuing the
  barrier and there is no point in checking it earlier.  Drop barrier
  support check in generic_make_request() and __make_request(), and
  update comment around the support check in blk_do_ordered().

* There is no reason to check discard support in both
  generic_make_request() and __make_request().  Drop the check in
  __make_request().  While at it, move error action block to the end
  of the function and add unlikely() to q existence test.

* Barrier request, be it empty or not, is never passed to low level
  driver and thus it's meaningless to try to copy back req->sector to
  bio->bi_sector on error.  In addition, the notion of failed sector
  doesn't make any sense for empty barrier to begin with.  Drop the
  code block from __end_that_request_first().
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
上级 313e4299
...@@ -216,8 +216,8 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) ...@@ -216,8 +216,8 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
return 1; return 1;
} else { } else {
/* /*
* This can happen when the queue switches to * Queue ordering not supported. Terminate
* ORDERED_NONE while this request is on it. * with prejudice.
*/ */
elv_dequeue_request(q, rq); elv_dequeue_request(q, rq);
if (__blk_end_request(rq, -EOPNOTSUPP, if (__blk_end_request(rq, -EOPNOTSUPP,
......
...@@ -1139,7 +1139,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) ...@@ -1139,7 +1139,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static int __make_request(struct request_queue *q, struct bio *bio) static int __make_request(struct request_queue *q, struct bio *bio)
{ {
struct request *req; struct request *req;
int el_ret, nr_sectors, barrier, discard, err; int el_ret, nr_sectors;
const unsigned short prio = bio_prio(bio); const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio); const int sync = bio_sync(bio);
int rw_flags; int rw_flags;
...@@ -1153,22 +1153,9 @@ static int __make_request(struct request_queue *q, struct bio *bio) ...@@ -1153,22 +1153,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
*/ */
blk_queue_bounce(q, &bio); blk_queue_bounce(q, &bio);
barrier = bio_barrier(bio);
if (unlikely(barrier) && bio_has_data(bio) &&
(q->next_ordered == QUEUE_ORDERED_NONE)) {
err = -EOPNOTSUPP;
goto end_io;
}
discard = bio_discard(bio);
if (unlikely(discard) && !q->prepare_discard_fn) {
err = -EOPNOTSUPP;
goto end_io;
}
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
if (unlikely(barrier) || elv_queue_empty(q)) if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
goto get_rq; goto get_rq;
el_ret = elv_merge(q, &req, bio); el_ret = elv_merge(q, &req, bio);
...@@ -1262,10 +1249,6 @@ static int __make_request(struct request_queue *q, struct bio *bio) ...@@ -1262,10 +1249,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
__generic_unplug_device(q); __generic_unplug_device(q);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
return 0; return 0;
end_io:
bio_endio(bio, err);
return 0;
} }
/* /*
...@@ -1418,15 +1401,13 @@ static inline void __generic_make_request(struct bio *bio) ...@@ -1418,15 +1401,13 @@ static inline void __generic_make_request(struct bio *bio)
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
q = bdev_get_queue(bio->bi_bdev); q = bdev_get_queue(bio->bi_bdev);
if (!q) { if (unlikely(!q)) {
printk(KERN_ERR printk(KERN_ERR
"generic_make_request: Trying to access " "generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n", "nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b), bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector); (long long) bio->bi_sector);
end_io: goto end_io;
bio_endio(bio, err);
break;
} }
if (unlikely(nr_sectors > q->max_hw_sectors)) { if (unlikely(nr_sectors > q->max_hw_sectors)) {
...@@ -1463,14 +1444,19 @@ static inline void __generic_make_request(struct bio *bio) ...@@ -1463,14 +1444,19 @@ static inline void __generic_make_request(struct bio *bio)
if (bio_check_eod(bio, nr_sectors)) if (bio_check_eod(bio, nr_sectors))
goto end_io; goto end_io;
if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
(bio_discard(bio) && !q->prepare_discard_fn)) { if (bio_discard(bio) && !q->prepare_discard_fn) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto end_io; goto end_io;
} }
ret = q->make_request_fn(q, bio); ret = q->make_request_fn(q, bio);
} while (ret); } while (ret);
return;
end_io:
bio_endio(bio, err);
} }
/* /*
...@@ -1720,14 +1706,6 @@ static int __end_that_request_first(struct request *req, int error, ...@@ -1720,14 +1706,6 @@ static int __end_that_request_first(struct request *req, int error,
while ((bio = req->bio) != NULL) { while ((bio = req->bio) != NULL) {
int nbytes; int nbytes;
/*
* For an empty barrier request, the low level driver must
* store a potential error location in ->sector. We pass
* that back up in ->bi_sector.
*/
if (blk_empty_barrier(req))
bio->bi_sector = req->sector;
if (nr_bytes >= bio->bi_size) { if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next; req->bio = bio->bi_next;
nbytes = bio->bi_size; nbytes = bio->bi_size;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册