Commit a022606e authored by Mike Christie, committed by Jens Axboe

xen: use bio op accessors

Separate the op from the rq_flag_bits and have xen
set/get the bio using bio_set_op_attrs/bio_op.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent: e742fc32
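For context, the sketch below shows the accessor pattern this patch moves the driver to. It is not taken from the commit itself, and the helper function name and its parameters are hypothetical; only bio_set_op_attrs(), bio_op(), REQ_OP_WRITE, and the bio fields touched here come from the block-layer API that the diff below already uses.

/*
 * Illustrative sketch only (not part of this commit): the operation
 * (REQ_OP_*) is carried separately from the request flags, and the two
 * are combined through the accessor rather than by assigning bi_rw.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_prepare_write(struct bio *bio, struct block_device *bdev,
                                  sector_t sector)
{
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;

        /* op and flags are passed as separate arguments ... */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        /* ... and bio_op() recovers just the op, with no flag masking */
        if (bio_op(bio) == REQ_OP_WRITE)
                pr_debug("example: write bio prepared\n");
}

The benefit of the split is visible in the hunks below: xen_vbd_translate() and the per-ring statistics can compare the operation with plain equality checks (operation == REQ_OP_READ / REQ_OP_WRITE) instead of masking WRITE out of a combined bi_rw value, while WRITE_ODIRECT and WRITE_FLUSH travel separately in operation_flags.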
@@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
         struct xen_vbd *vbd = &blkif->vbd;
         int rc = -EACCES;
 
-        if ((operation != READ) && vbd->readonly)
+        if ((operation != REQ_OP_READ) && vbd->readonly)
                 goto out;
 
         if (likely(req->nr_sects)) {
@@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
         preq.sector_number = req->u.discard.sector_number;
         preq.nr_sects = req->u.discard.nr_sectors;
 
-        err = xen_vbd_translate(&preq, blkif, WRITE);
+        err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
         if (err) {
                 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                         preq.sector_number,
@@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
         struct bio **biolist = pending_req->biolist;
         int i, nbio = 0;
         int operation;
+        int operation_flags = 0;
         struct blk_plug plug;
         bool drain = false;
         struct grant_page **pages = pending_req->segments;
@@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
         switch (req_operation) {
         case BLKIF_OP_READ:
                 ring->st_rd_req++;
-                operation = READ;
+                operation = REQ_OP_READ;
                 break;
         case BLKIF_OP_WRITE:
                 ring->st_wr_req++;
-                operation = WRITE_ODIRECT;
+                operation = REQ_OP_WRITE;
+                operation_flags = WRITE_ODIRECT;
                 break;
         case BLKIF_OP_WRITE_BARRIER:
                 drain = true;
         case BLKIF_OP_FLUSH_DISKCACHE:
                 ring->st_f_req++;
-                operation = WRITE_FLUSH;
+                operation = REQ_OP_WRITE;
+                operation_flags = WRITE_FLUSH;
                 break;
         default:
                 operation = 0; /* make gcc happy */
@@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
         nseg = req->operation == BLKIF_OP_INDIRECT ?
                req->u.indirect.nr_segments : req->u.rw.nr_segments;
 
-        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+        if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
             unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                      (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
             unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
         if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
                 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
-                         operation == READ ? "read" : "write",
+                         operation == REQ_OP_READ ? "read" : "write",
                          preq.sector_number,
                          preq.sector_number + preq.nr_sects,
                          ring->blkif->vbd.pdevice);
@@ -1369,7 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                         bio->bi_private = pending_req;
                         bio->bi_end_io = end_block_io_op;
                         bio->bi_iter.bi_sector = preq.sector_number;
-                        bio->bi_rw = operation;
+                        bio_set_op_attrs(bio, operation, operation_flags);
                 }
 
                 preq.sector_number += seg[i].nsec;
@@ -1377,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
         /* This will be hit if the operation was a flush or discard. */
         if (!bio) {
-                BUG_ON(operation != WRITE_FLUSH);
+                BUG_ON(operation_flags != WRITE_FLUSH);
 
                 bio = bio_alloc(GFP_KERNEL, 0);
                 if (unlikely(bio == NULL))
@@ -1387,7 +1390,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                 bio->bi_bdev = preq.bdev;
                 bio->bi_private = pending_req;
                 bio->bi_end_io = end_block_io_op;
-                bio->bi_rw = operation;
+                bio_set_op_attrs(bio, operation, operation_flags);
         }
 
         atomic_set(&pending_req->pendcnt, nbio);
@@ -1399,9 +1402,9 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
         /* Let the I/Os go.. */
         blk_finish_plug(&plug);
 
-        if (operation == READ)
+        if (operation == REQ_OP_READ)
                 ring->st_rd_sect += preq.nr_sects;
-        else if (operation & WRITE)
+        else if (operation == REQ_OP_WRITE)
                 ring->st_wr_sect += preq.nr_sects;
 
         return 0;