Commit e6a40b09 authored by Mike Christie, committed by Jens Axboe

block: prepare request creation/destruction code to use REQ_OPs

This patch prepares *_get_request/*_put_request and freed_request
to use separate variables for the operation and flags. In the
next patches the struct request users will be converted, as was
done for bios, where the op and flags are set separately.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent 4993b77d
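For orientation, here is a small user-space sketch of the convention this patch prepares for: the operation (read/write) and the modifier flags travel as separate parameters and are only combined when stored on the request, roughly what req_set_op_attrs()/req_op() do in the hunks below. Every toy_* name, the bit layout, and the shift value are illustrative assumptions, not the kernel's definitions.

/*
 * Minimal sketch (not kernel code): carry "op" and "op_flags" separately
 * and pack them into one cmd_flags word, in the spirit of
 * req_set_op_attrs()/req_op(). toy_* names and the bit layout are
 * illustrative assumptions. Build: cc -Wall -o toy_req toy_req.c
 */
#include <stdio.h>

enum toy_req_op { TOY_REQ_OP_READ = 0, TOY_REQ_OP_WRITE = 1 };

#define TOY_REQ_SYNC    (1u << 4)    /* stand-in for REQ_SYNC */
#define TOY_REQ_ALLOCED (1u << 5)    /* stand-in for REQ_ALLOCED */
#define TOY_OP_SHIFT    24           /* assumed: op kept in the high bits */

struct toy_request {
        unsigned int cmd_flags;
};

/* Rough analogue of req_set_op_attrs(): combine op and flags at store time. */
static void toy_set_op_attrs(struct toy_request *rq, int op, unsigned int op_flags)
{
        rq->cmd_flags = ((unsigned int)op << TOY_OP_SHIFT) | op_flags;
}

/* Rough analogue of req_op(): recover the operation from cmd_flags. */
static int toy_req_op(const struct toy_request *rq)
{
        return rq->cmd_flags >> TOY_OP_SHIFT;
}

int main(void)
{
        struct toy_request rq;

        /* New-style call site: op and flags are passed separately, as in
         * __get_request(rl, op, op_flags, bio, gfp_mask) below. */
        toy_set_op_attrs(&rq, TOY_REQ_OP_WRITE, TOY_REQ_SYNC | TOY_REQ_ALLOCED);

        printf("op=%d sync=%d\n", toy_req_op(&rq),
               !!(rq.cmd_flags & TOY_REQ_SYNC));
        return 0;
}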
@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, unsigned int flags)
+static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
         struct request_queue *q = rl->q;
-        int sync = rw_is_sync(flags);
+        int sync = rw_is_sync(op | flags);
         q->nr_rqs[sync]--;
         rl->count[sync]--;
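The rw_is_sync(op | flags) form above keeps the old accounting: reads, and writes carrying REQ_SYNC, are counted on the sync request list. Below is a tiny stand-alone check of that predicate; the helper name, the bit values, and the exact definition of rw_is_sync() used here are assumptions for illustration, not copied from the kernel headers.

/* Sketch only: a user-space model of rw_is_sync(op | flags).
 * TOY_WRITE/TOY_SYNC bit values are illustrative assumptions. */
#include <assert.h>
#include <stdbool.h>

#define TOY_WRITE (1u << 0)   /* stand-in for the write direction bit */
#define TOY_SYNC  (1u << 4)   /* stand-in for REQ_SYNC */

/* Assumed behaviour: reads and REQ_SYNC-marked writes count as synchronous. */
static bool toy_rw_is_sync(unsigned int rw)
{
        return !(rw & TOY_WRITE) || (rw & TOY_SYNC);
}

int main(void)
{
        unsigned int op_read = 0, op_write = TOY_WRITE;

        assert(toy_rw_is_sync(op_read | 0));          /* read -> sync */
        assert(!toy_rw_is_sync(op_write | 0));        /* plain write -> async */
        assert(toy_rw_is_sync(op_write | TOY_SYNC));  /* sync write -> sync */
        return 0;
}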
@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_list *rl, int rw_flags,
-                                     struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int op,
+                                     int op_flags, struct bio *bio,
+                                     gfp_t gfp_mask)
 {
         struct request_queue *q = rl->q;
         struct request *rq;
         struct elevator_type *et = q->elevator->type;
         struct io_context *ioc = rq_ioc(bio);
         struct io_cq *icq = NULL;
-        const bool is_sync = rw_is_sync(rw_flags) != 0;
+        const bool is_sync = rw_is_sync(op | op_flags) != 0;
         int may_queue;
         if (unlikely(blk_queue_dying(q)))
                 return ERR_PTR(-ENODEV);
-        may_queue = elv_may_queue(q, rw_flags);
+        may_queue = elv_may_queue(q, op | op_flags);
         if (may_queue == ELV_MQUEUE_NO)
                 goto rq_starved;
@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
         /*
          * Decide whether the new request will be managed by elevator. If
-         * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
+         * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
          * prevent the current elevator from being destroyed until the new
          * request is freed. This guarantees icq's won't be destroyed and
          * makes creating new ones safe.
@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
          * it will be created after releasing queue_lock.
          */
         if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-                rw_flags |= REQ_ELVPRIV;
+                op_flags |= REQ_ELVPRIV;
                 q->nr_rqs_elvpriv++;
                 if (et->icq_cache && ioc)
                         icq = ioc_lookup_icq(ioc, q);
         }
         if (blk_queue_io_stat(q))
-                rw_flags |= REQ_IO_STAT;
+                op_flags |= REQ_IO_STAT;
         spin_unlock_irq(q->queue_lock);
         /* allocate and init request */
@@ -1149,10 +1151,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
         blk_rq_init(q, rq);
         blk_rq_set_rl(rq, rl);
-        rq->cmd_flags = rw_flags | REQ_ALLOCED;
+        req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
         /* init elvpriv */
-        if (rw_flags & REQ_ELVPRIV) {
+        if (op_flags & REQ_ELVPRIV) {
                 if (unlikely(et->icq_cache && !icq)) {
                         if (ioc)
                                 icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1178,7 +1180,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
         if (ioc_batching(q, ioc))
                 ioc->nr_batch_requests--;
-        trace_block_getrq(q, bio, rw_flags & 1);
+        trace_block_getrq(q, bio, op);
         return rq;
 fail_elvpriv:
@@ -1208,7 +1210,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
          * queue, but this is pretty rare.
          */
         spin_lock_irq(q->queue_lock);
-        freed_request(rl, rw_flags);
+        freed_request(rl, op, op_flags);
         /*
          * in the very unlikely event that allocation failed and no
@@ -1226,7 +1228,8 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1237,17 +1240,18 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-                                   struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, int op,
+                                   int op_flags, struct bio *bio,
+                                   gfp_t gfp_mask)
 {
-        const bool is_sync = rw_is_sync(rw_flags) != 0;
+        const bool is_sync = rw_is_sync(op | op_flags) != 0;
         DEFINE_WAIT(wait);
         struct request_list *rl;
         struct request *rq;
         rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
-        rq = __get_request(rl, rw_flags, bio, gfp_mask);
+        rq = __get_request(rl, op, op_flags, bio, gfp_mask);
         if (!IS_ERR(rq))
                 return rq;
@@ -1260,7 +1264,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                   TASK_UNINTERRUPTIBLE);
-        trace_block_sleeprq(q, bio, rw_flags & 1);
+        trace_block_sleeprq(q, bio, op);
         spin_unlock_irq(q->queue_lock);
         io_schedule();
@@ -1289,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
         create_io_context(gfp_mask, q->node);
         spin_lock_irq(q->queue_lock);
-        rq = get_request(q, rw, NULL, gfp_mask);
+        rq = get_request(q, rw, 0, NULL, gfp_mask);
         if (IS_ERR(rq))
                 spin_unlock_irq(q->queue_lock);
         /* q->queue_lock is unlocked at this point */
@@ -1491,13 +1495,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
          */
         if (req->cmd_flags & REQ_ALLOCED) {
                 unsigned int flags = req->cmd_flags;
+                int op = req_op(req);
                 struct request_list *rl = blk_rq_rl(req);
                 BUG_ON(!list_empty(&req->queuelist));
                 BUG_ON(ELV_ON_HASH(req));
                 blk_free_request(rl, req);
-                freed_request(rl, flags);
+                freed_request(rl, op, flags);
                 blk_put_rl(rl);
         }
 }
@@ -1712,7 +1717,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
         const bool sync = !!(bio->bi_rw & REQ_SYNC);
         struct blk_plug *plug;
-        int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+        int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
         struct request *req;
         unsigned int request_count = 0;
@@ -1772,7 +1777,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
          * but we need to set it earlier to expose the sync flag to the
          * rq allocator and io schedulers.
          */
-        rw_flags = bio_data_dir(bio);
         if (sync)
                 rw_flags |= REQ_SYNC;
@@ -1780,7 +1784,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
          * Grab a free request. This is might sleep but can not fail.
          * Returns with the queue unlocked.
          */
-        req = get_request(q, rw_flags, bio, GFP_NOIO);
+        req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
         if (IS_ERR(req)) {
                 bio->bi_error = PTR_ERR(req);
                 bio_endio(bio);
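The blk_queue_bio() hunks above show the shape the later conversions take: rw_flags starts at 0 and only accumulates modifiers such as REQ_SYNC, while the data direction is handed to get_request() as the separate op argument taken from bio_data_dir(bio). Below is a stand-alone sketch of that split; the toy_* names and bit values are illustrative stand-ins rather than kernel definitions.

/* Sketch only: the direction comes from the bio and is passed as "op";
 * rw_flags carries modifiers such as SYNC. toy_* names are illustrative. */
#include <stdio.h>

enum toy_op { TOY_OP_READ = 0, TOY_OP_WRITE = 1 };

#define TOY_SYNC (1u << 4)   /* stand-in for REQ_SYNC */

struct toy_bio {
        int is_write;   /* 1 = write, 0 = read */
        int want_sync;  /* request REQ_SYNC-style treatment */
};

static int toy_bio_data_dir(const struct toy_bio *bio)
{
        return bio->is_write ? TOY_OP_WRITE : TOY_OP_READ;
}

/* Stand-in for get_request(q, op, op_flags, bio, gfp_mask). */
static void toy_get_request(int op, unsigned int op_flags)
{
        printf("allocate request: op=%d op_flags=%#x\n", op, op_flags);
}

static void toy_queue_bio(const struct toy_bio *bio)
{
        unsigned int rw_flags = 0;   /* no longer seeded with the direction */

        if (bio->want_sync)
                rw_flags |= TOY_SYNC;

        toy_get_request(toy_bio_data_dir(bio), rw_flags);
}

int main(void)
{
        struct toy_bio wb = { .is_write = 1, .want_sync = 1 };

        toy_queue_bio(&wb);
        return 0;
}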