Commit b679281a authored by Tejun Heo, committed by Jens Axboe

block: restructure get_request()

get_request() is structured a bit unusually in that the failure path is
inlined in the usual flow, with goto labels atop and inside it.
Relocate the error path to the end of the function.

This is to prepare for icq handling changes in get_request() and
doesn't introduce any behavior change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent c875f4d0
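
For orientation, here is a minimal sketch (not the kernel code; the type and helpers such as alloc_thing() and undo_bookkeeping() are hypothetical stand-ins) of the shape the commit message describes: failure handling that used to sit inline behind goto labels is collected into a single block at the end of the function, so the success path reads straight through.

#include <stdlib.h>

struct thing { int dummy; };

/* Stand-in allocator; in the real code this role is played by
 * blk_alloc_request(). */
static struct thing *alloc_thing(void)
{
        return malloc(sizeof(struct thing));
}

/* Stand-in for undoing earlier accounting (cf. freed_request()). */
static void undo_bookkeeping(void)
{
}

/* Restructured shape: the success path reads straight through, and the
 * error handling lives under one label at the end of the function, as in
 * the get_request() diff below. */
static struct thing *get_thing(void)
{
        struct thing *t;

        t = alloc_thing();
        if (!t)
                goto fail_alloc;

        /* ... post-allocation accounting on the success path ... */
        return t;

fail_alloc:
        /* Undo anything the earlier bookkeeping might have set up. */
        undo_bookkeeping();
        return NULL;
}
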
@@ -826,7 +826,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 static struct request *get_request(struct request_queue *q, int rw_flags,
                                    struct bio *bio, gfp_t gfp_mask)
 {
-	struct request *rq = NULL;
+	struct request *rq;
 	struct request_list *rl = &q->rq;
 	struct elevator_type *et;
 	struct io_context *ioc;
@@ -878,7 +878,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 					 * process is not a "batcher", and not
 					 * exempted by the IO scheduler
 					 */
-					goto out;
+					return NULL;
 				}
 			}
 		}
@@ -891,7 +891,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 * allocated with any setting of ->nr_requests
 	 */
 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-		goto out;
+		return NULL;
 
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
@@ -921,49 +921,47 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
 		icq = ioc_create_icq(q, gfp_mask);
 		if (!icq)
-			goto fail_icq;
+			goto fail_alloc;
 	}
 
 	rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+	if (unlikely(!rq))
+		goto fail_alloc;
 
-fail_icq:
-	if (unlikely(!rq)) {
-		/*
-		 * Allocation failed presumably due to memory. Undo anything
-		 * we might have messed up.
-		 *
-		 * Allocating task should really be put onto the front of the
-		 * wait queue, but this is pretty rare.
-		 */
-		spin_lock_irq(q->queue_lock);
-		freed_request(q, rw_flags);
-
-		/*
-		 * in the very unlikely event that allocation failed and no
-		 * requests for this direction was pending, mark us starved
-		 * so that freeing of a request in the other direction will
-		 * notice us. another possible fix would be to split the
-		 * rq mempool into READ and WRITE
-		 */
-rq_starved:
-		if (unlikely(rl->count[is_sync] == 0))
-			rl->starved[is_sync] = 1;
-
-		goto out;
-	}
-
 	/*
 	 * ioc may be NULL here, and ioc_batching will be false. That's
 	 * OK, if the queue is under the request limit then requests need
 	 * not count toward the nr_batch_requests limit. There will always
 	 * be some limit enforced by BLK_BATCH_TIME.
 	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
 	trace_block_getrq(q, bio, rw_flags & 1);
-out:
 	return rq;
+
+fail_alloc:
+	/*
+	 * Allocation failed presumably due to memory. Undo anything we
+	 * might have messed up.
+	 *
+	 * Allocating task should really be put onto the front of the wait
+	 * queue, but this is pretty rare.
+	 */
+	spin_lock_irq(q->queue_lock);
+	freed_request(q, rw_flags);
+
+	/*
+	 * in the very unlikely event that allocation failed and no
+	 * requests for this direction was pending, mark us starved so that
+	 * freeing of a request in the other direction will notice
+	 * us. another possible fix would be to split the rq mempool into
+	 * READ and WRITE
+	 */
+rq_starved:
+	if (unlikely(rl->count[is_sync] == 0))
+		rl->starved[is_sync] = 1;
+
+	return NULL;
 }
 
 /**