Commit 77089926 authored by Konrad Rzeszutek Wilk, committed by Konrad Rzeszutek Wilk

xen/blkback: Separate the bio allocation and the bio submission.

We separate the bio allocation (bio_alloc) from the bio submission so
that the error paths are much easier, and so that the bio submission
can be done in one tight loop. It also makes the plug/unplug calls
much easier.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Parent 0faa8cca
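The structure the patch moves to is: allocate every bio first, then submit the
whole batch in one tight, plugged loop. What follows is a minimal userspace C
sketch of that shape, not the driver's code; fake_bio, alloc_then_submit, and
MAX_SEGS are invented stand-ins for the kernel's bio, plug, and segment-count
machinery.

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_SEGS 11		/* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

	struct fake_bio { int seg; };	/* stand-in for struct bio */

	/* Caller guarantees nseg <= MAX_SEGS. */
	static int alloc_then_submit(int nseg)
	{
		struct fake_bio *biolist[MAX_SEGS];
		int i, nbio = 0;

		/* Phase 1: allocate everything; nothing is in flight yet. */
		for (i = 0; i < nseg; i++) {
			struct fake_bio *bio = malloc(sizeof(*bio));
			if (bio == NULL)
				goto fail_put_bio;
			bio->seg = i;
			biolist[nbio++] = bio;
		}

		/* Phase 2: one tight submission loop; in the driver this is
		 * where blk_start_plug()/blk_finish_plug() bracket the I/O. */
		for (i = 0; i < nbio; i++)
			printf("submit bio for segment %d\n", biolist[i]->seg);

		for (i = 0; i < nbio; i++)
			free(biolist[i]);
		return 0;

	fail_put_bio:
		/* No bio was submitted, so releasing them all is safe. */
		for (i = 0; i < nbio; i++)
			free(biolist[i]);
		return -1;
	}

	int main(void)
	{
		return alloc_then_submit(4) ? 1 : 0;
	}

Because nothing has been submitted when an allocation fails, the error path is
a single free loop, which is what the patch's fail_put_bio label becomes.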
@@ -421,7 +421,8 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int nseg;
 	struct bio *bio = NULL;
-	int ret, i;
+	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	int ret, i, nbio = 0;
 	int operation;
 	struct blk_plug plug;
 	struct request_queue *q;
@@ -529,14 +530,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 		goto fail_flush;
 	}
 
-	/* Get a reference count for the disk queue and start sending I/O */
-	blk_get_queue(q);
-	blk_start_plug(&plug);
-
-	/* We set it one so that the last submit_bio does not have to call
-	 * atomic_inc.
-	 */
-	atomic_set(&pending_req->pendcnt, 1);
+	/* This corresponding blkif_put is done in __end_block_io_op */
 	blkif_get(blkif);
 
 	for (i = 0; i < nseg; i++) {
@@ -552,12 +546,8 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 				     blkbk->pending_page(pending_req, i),
 				     seg[i].nsec << 9,
 				     seg[i].buf & ~PAGE_MASK) == 0)) {
-			if (bio) {
-				atomic_inc(&pending_req->pendcnt);
-				submit_bio(operation, bio);
-			}
-			bio = bio_alloc(GFP_KERNEL, nseg-i);
+			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
 			if (unlikely(bio == NULL))
 				goto fail_put_bio;
@@ -573,7 +563,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	/* This will be hit if the operation was a barrier. */
 	if (!bio) {
 		BUG_ON(operation != WRITE_BARRIER);
-		bio = bio_alloc(GFP_KERNEL, 0);
+		bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
 		if (unlikely(bio == NULL))
 			goto fail_put_bio;
@@ -583,15 +573,28 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 		bio->bi_sector  = -1;
 	}
 
-	submit_bio(operation, bio);
+	/* We set it one so that the last submit_bio does not have to call
+	 * atomic_inc.
+	 */
+	atomic_set(&pending_req->pendcnt, nbio);
+
+	/* Get a reference count for the disk queue and start sending I/O */
+	blk_get_queue(q);
+	blk_start_plug(&plug);
+
+	for (i = 0; i < nbio; i++)
+		submit_bio(operation, biolist[i]);
+
+	blk_finish_plug(&plug);
+
+	/* Let the I/Os go.. */
+	blk_put_queue(q);
 
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
 	else if (operation == WRITE || operation == WRITE_BARRIER)
 		blkif->st_wr_sect += preq.nr_sects;
 
-	blk_finish_plug(&plug);
-	blk_put_queue(q);
 	return;
 
 fail_flush:
@@ -604,11 +607,9 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	return;
 
 fail_put_bio:
+	for (i = 0; i < (nbio-1); i++)
+		bio_put(biolist[i]);
 	__end_block_io_op(pending_req, -EINVAL);
-	if (bio)
-		bio_put(bio);
-	blk_finish_plug(&plug);
-	blk_put_queue(q);
 	msleep(1); /* back off a bit */
 	return;
 }
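Setting pendcnt to nbio once, before the submit loop, is what keeps the
completion accounting safe: each bio completion decrements the counter, and
only the completion that drops it to zero may finish the whole request. Here
is a minimal C11 sketch of that counting pattern; pending_req and end_io are
hypothetical analogues of the driver's pending_req and __end_block_io_op, not
the kernel code itself.

	#include <stdatomic.h>
	#include <stdio.h>

	struct pending_req {		/* hypothetical analogue of blkback's pending_req */
		atomic_int pendcnt;
	};

	/* Hypothetical analogue of __end_block_io_op(): runs once per bio. */
	static void end_io(struct pending_req *req)
	{
		/* atomic_fetch_sub returns the old value, so exactly one
		 * caller sees it drop from 1 to 0 and finishes the request. */
		if (atomic_fetch_sub(&req->pendcnt, 1) == 1)
			printf("last bio done: complete the whole request\n");
	}

	int main(void)
	{
		struct pending_req req;
		int nbio = 3, i;

		/* Set the full count before any submission; a completion
		 * that arrives early can then never see a premature zero. */
		atomic_init(&req.pendcnt, nbio);

		for (i = 0; i < nbio; i++)	/* completions may arrive in any order */
			end_io(&req);
		return 0;
	}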