提交 fc53bf75 编写于 作者: Konrad Rzeszutek Wilk

xen/blkback: Squash the checking for operation into dispatch_rw_block_io

We do a check for the operations right before calling dispatch_rw_block_io.
And then we do the same check in dispatch_rw_block_io. This patch
squashes those checks into the 'dispatch_rw_block_io' function.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
上级 24f567f9
...@@ -123,7 +123,7 @@ static inline unsigned long vaddr(struct pending_req *req, int seg) ...@@ -123,7 +123,7 @@ static inline unsigned long vaddr(struct pending_req *req, int seg)
static int do_block_io_op(struct blkif_st *blkif); static int do_block_io_op(struct blkif_st *blkif);
static void dispatch_rw_block_io(struct blkif_st *blkif, static int dispatch_rw_block_io(struct blkif_st *blkif,
struct blkif_request *req, struct blkif_request *req,
struct pending_req *pending_req); struct pending_req *pending_req);
static void make_response(struct blkif_st *blkif, u64 id, static void make_response(struct blkif_st *blkif, u64 id,
...@@ -499,30 +499,8 @@ static int do_block_io_op(struct blkif_st *blkif) ...@@ -499,30 +499,8 @@ static int do_block_io_op(struct blkif_st *blkif)
/* Apply all sanity checks to /private copy/ of request. */ /* Apply all sanity checks to /private copy/ of request. */
barrier(); barrier();
switch (req.operation) { if (dispatch_rw_block_io(blkif, &req, pending_req))
case BLKIF_OP_READ:
blkif->st_rd_req++;
dispatch_rw_block_io(blkif, &req, pending_req);
break;
case BLKIF_OP_FLUSH_DISKCACHE:
blkif->st_f_req++;
/* fall through */
case BLKIF_OP_WRITE:
blkif->st_wr_req++;
dispatch_rw_block_io(blkif, &req, pending_req);
break;
case BLKIF_OP_WRITE_BARRIER:
default:
/* A good sign something is wrong: sleep for a while to
* avoid excessive CPU consumption by a bad guest. */
msleep(1);
DPRINTK("error: unknown block io operation [%d]\n",
req.operation);
make_response(blkif, req.id, req.operation,
BLKIF_RSP_ERROR);
free_req(pending_req);
break; break;
}
/* Yield point for this unbounded loop. */ /* Yield point for this unbounded loop. */
cond_resched(); cond_resched();
...@@ -535,7 +513,7 @@ static int do_block_io_op(struct blkif_st *blkif) ...@@ -535,7 +513,7 @@ static int do_block_io_op(struct blkif_st *blkif)
* Transumation of the 'struct blkif_request' to a proper 'struct bio' * Transumation of the 'struct blkif_request' to a proper 'struct bio'
* and call the 'submit_bio' to pass it to the underlaying storage. * and call the 'submit_bio' to pass it to the underlaying storage.
*/ */
static void dispatch_rw_block_io(struct blkif_st *blkif, static int dispatch_rw_block_io(struct blkif_st *blkif,
struct blkif_request *req, struct blkif_request *req,
struct pending_req *pending_req) struct pending_req *pending_req)
{ {
...@@ -550,22 +528,25 @@ static void dispatch_rw_block_io(struct blkif_st *blkif, ...@@ -550,22 +528,25 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
switch (req->operation) { switch (req->operation) {
case BLKIF_OP_READ: case BLKIF_OP_READ:
blkif->st_rd_req++;
operation = READ; operation = READ;
break; break;
case BLKIF_OP_WRITE: case BLKIF_OP_WRITE:
blkif->st_wr_req++;
operation = WRITE_ODIRECT; operation = WRITE_ODIRECT;
break; break;
case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_FLUSH_DISKCACHE:
blkif->st_f_req++;
operation = WRITE_FLUSH; operation = WRITE_FLUSH;
/* The frontend likes to set this to -1, which vbd_translate /* The frontend likes to set this to -1, which vbd_translate
* is alergic too. */ * is alergic too. */
req->u.rw.sector_number = 0; req->u.rw.sector_number = 0;
break; break;
case BLKIF_OP_WRITE_BARRIER: case BLKIF_OP_WRITE_BARRIER:
/* Should never get here. */
default: default:
operation = 0; /* make gcc happy */ operation = 0; /* make gcc happy */
BUG(); goto fail_response;
break;
} }
/* Check that the number of segments is sane. */ /* Check that the number of segments is sane. */
...@@ -677,7 +658,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif, ...@@ -677,7 +658,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
else if (operation == WRITE || operation == WRITE_FLUSH) else if (operation == WRITE || operation == WRITE_FLUSH)
blkif->st_wr_sect += preq.nr_sects; blkif->st_wr_sect += preq.nr_sects;
return; return 0;
fail_flush: fail_flush:
xen_blkbk_unmap(pending_req); xen_blkbk_unmap(pending_req);
...@@ -686,14 +667,14 @@ static void dispatch_rw_block_io(struct blkif_st *blkif, ...@@ -686,14 +667,14 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
free_req(pending_req); free_req(pending_req);
msleep(1); /* back off a bit */ msleep(1); /* back off a bit */
return; return -EIO;
fail_put_bio: fail_put_bio:
for (i = 0; i < (nbio-1); i++) for (i = 0; i < (nbio-1); i++)
bio_put(biolist[i]); bio_put(biolist[i]);
__end_block_io_op(pending_req, -EINVAL); __end_block_io_op(pending_req, -EINVAL);
msleep(1); /* back off a bit */ msleep(1); /* back off a bit */
return; return -EIO;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册