提交 40cbbb78 编写于 作者: T Tejun Heo 提交者: Jens Axboe

block: implement and use [__]blk_end_request_all()

There are many [__]blk_end_request() call sites which call it with
full request length and expect full completion.  Many of them ensure
that the request actually completes by doing BUG_ON() the return
value, which is awkward and error-prone.

This patch adds [__]blk_end_request_all() which takes @rq and @error
and fully completes the request.  BUG_ON() is added to ensure that
this actually happens.

Most conversions are simple but there are a few noteworthy ones.

* cdrom/viocd: viocd_end_request() replaced with direct calls to
  __blk_end_request_all().

* s390/block/dasd: dasd_end_request() replaced with direct calls to
  __blk_end_request_all().

* s390/char/tape_block: tapeblock_end_request() replaced with direct
  calls to blk_end_request_all().

[ Impact: cleanup ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Mike Miller <mike.miller@hp.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
上级 b243ddcb
......@@ -192,8 +192,7 @@ static void mbox_tx_work(struct work_struct *work)
}
spin_lock(q->queue_lock);
if (__blk_end_request(rq, 0, 0))
BUG();
__blk_end_request_all(rq, 0);
spin_unlock(q->queue_lock);
}
}
......@@ -224,10 +223,7 @@ static void mbox_rx_work(struct work_struct *work)
break;
msg = (mbox_msg_t) rq->data;
if (blk_end_request(rq, 0, 0))
BUG();
blk_end_request_all(rq, 0);
mbox->rxq->callback((void *)msg);
}
}
......@@ -337,8 +333,7 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
*p = (mbox_msg_t) rq->data;
if (blk_end_request(rq, 0, 0))
BUG();
blk_end_request_all(rq, 0);
if (unlikely(mbox_seq_test(mbox, *p))) {
pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
......
......@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
*/
q->ordseq = 0;
rq = q->orig_bar_rq;
if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
BUG();
__blk_end_request_all(rq, q->orderr);
return true;
}
......@@ -252,9 +249,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
* with prejudice.
*/
elv_dequeue_request(q, rq);
if (__blk_end_request(rq, -EOPNOTSUPP,
blk_rq_bytes(rq)))
BUG();
__blk_end_request_all(rq, -EOPNOTSUPP);
*rqp = NULL;
return false;
}
......
......@@ -1780,7 +1780,7 @@ struct request *elv_next_request(struct request_queue *q)
break;
} else if (ret == BLKPREP_KILL) {
rq->cmd_flags |= REQ_QUIET;
__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
__blk_end_request_all(rq, -EIO);
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
......
......@@ -810,7 +810,7 @@ void elv_abort_queue(struct request_queue *q)
rq = list_entry_rq(q->queue_head.next);
rq->cmd_flags |= REQ_QUIET;
trace_block_rq_abort(q, rq);
__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
__blk_end_request_all(rq, -EIO);
}
}
EXPORT_SYMBOL(elv_abort_queue);
......
......@@ -1024,8 +1024,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
cmd->req.sg[i].size, ddir);
DBGPX(printk("Done with %p\n", rq););
if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
BUG();
__blk_end_request_all(rq, error);
}
/*
......
......@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
struct request *req = crq->rq;
int rc;
rc = __blk_end_request(req, error, blk_rq_bytes(req));
assert(rc == 0);
__blk_end_request_all(req, error);
rc = carm_put_request(host, crq);
assert(rc == 0);
......
......@@ -62,7 +62,7 @@ static void blk_done(struct virtqueue *vq)
break;
}
__blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
__blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
mempool_free(vbr, vblk->pool);
}
......
......@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
int ret;
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
......@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
ret = __blk_end_request(req, error, blk_rq_bytes(req));
BUG_ON(ret);
__blk_end_request_all(req, error);
break;
default:
BUG();
......
......@@ -632,7 +632,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
* before handling ending the request */
spin_lock(&gdrom_lock);
list_del_init(&req->queuelist);
__blk_end_request(req, err, blk_rq_bytes(req));
__blk_end_request_all(req, err);
}
spin_unlock(&gdrom_lock);
kfree(read_command);
......
......@@ -291,23 +291,6 @@ static int send_request(struct request *req)
return 0;
}
/*
 * Fully complete @req with status @error (%0 for success, < %0 for error).
 * The byte count passed to __blk_end_request() is rounded so that at
 * least one sector is completed, since a zero-length completion would
 * leave the request pending.
 */
static void viocd_end_request(struct request *req, int error)
{
int nsectors = req->hard_nr_sectors;
/*
 * Make sure it's fully ended, and ensure that we process
 * at least one sector.
 */
if (blk_pc_request(req))
nsectors = (req->data_len + 511) >> 9;
if (!nsectors)
nsectors = 1;
/* BUG() if the request was not completely finished. */
if (__blk_end_request(req, error, nsectors << 9))
BUG();
}
static int rwreq;
static void do_viocd_request(struct request_queue *q)
......@@ -316,11 +299,11 @@ static void do_viocd_request(struct request_queue *q)
while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
if (!blk_fs_request(req))
viocd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
viocd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
} else
rwreq++;
}
......@@ -531,9 +514,9 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
viocd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
} else
viocd_end_request(req, 0);
__blk_end_request_all(req, 0);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);
......
......@@ -826,7 +826,7 @@ static void mspro_block_submit_req(struct request_queue *q)
if (msb->eject) {
while ((req = elv_next_request(q)) != NULL)
__blk_end_request(req, -ENODEV, blk_rq_bytes(req));
__blk_end_request_all(req, -ENODEV);
return;
}
......
......@@ -1613,15 +1613,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
del_timer(&block->timer);
}
/*
 * Fully complete @req with status @error (%0 for success, < %0 for
 * error), notifying the block layer that the request is finished.
 * BUG() if any part of the request remains unfinished.
 */
static inline void dasd_end_request(struct request *req, int error)
{
if (__blk_end_request(req, error, blk_rq_bytes(req)))
BUG();
}
/*
* Process finished error recovery ccw.
*/
......@@ -1676,7 +1667,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"Rejecting write request %p",
req);
blkdev_dequeue_request(req);
dasd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
continue;
}
cqr = basedev->discipline->build_cp(basedev, block, req);
......@@ -1705,7 +1696,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"on request %p",
PTR_ERR(cqr), req);
blkdev_dequeue_request(req);
dasd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
continue;
}
/*
......@@ -1731,7 +1722,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
dasd_end_request(req, error);
__blk_end_request_all(req, error);
}
/*
......@@ -2040,7 +2031,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
spin_lock_irq(&block->request_queue_lock);
while ((req = elv_next_request(block->request_queue))) {
blkdev_dequeue_request(req);
dasd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
}
spin_unlock_irq(&block->request_queue_lock);
}
......
......@@ -73,13 +73,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
/*
 * Post a finished request: fully complete @req with status @error
 * (%0 for success, < %0 for error).  BUG() if any part of the
 * request remains unfinished.
 */
static void
tapeblock_end_request(struct request *req, int error)
{
if (blk_end_request(req, error, blk_rq_bytes(req)))
BUG();
}
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
......@@ -90,7 +83,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
device = ccw_req->device;
req = (struct request *) data;
tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
if (ccw_req->rc == 0)
/* Update position. */
device->blk_data.block_position =
......@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
ccw_req = device->discipline->bread(device, req);
if (IS_ERR(ccw_req)) {
DBF_EVENT(1, "TBLOCK: bread failed\n");
tapeblock_end_request(req, -EIO);
blk_end_request_all(req, -EIO);
return PTR_ERR(ccw_req);
}
ccw_req->callback = __tapeblock_end_request;
......@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
* Start/enqueueing failed. No retries in
* this case.
*/
tapeblock_end_request(req, -EIO);
blk_end_request_all(req, -EIO);
device->discipline->free_bread(ccw_req);
}
......@@ -177,7 +170,7 @@ tapeblock_requeue(struct work_struct *work) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
blkdev_dequeue_request(req);
spin_unlock_irq(&device->blk_data.request_queue_lock);
tapeblock_end_request(req, -EIO);
blk_end_request_all(req, -EIO);
spin_lock_irq(&device->blk_data.request_queue_lock);
continue;
}
......
......@@ -922,7 +922,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (driver_byte(result) & DRIVER_SENSE)
scsi_print_sense("", cmd);
}
blk_end_request(req, -EIO, blk_rq_bytes(req));
blk_end_request_all(req, -EIO);
scsi_next_command(cmd);
break;
case ACTION_REPREP:
......
......@@ -882,6 +882,22 @@ static inline bool blk_end_request(struct request *rq, int error,
return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
/**
 * blk_end_request_all - Helper function for drivers to finish the request.
 * @rq:    the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.  BUG() if any part of the request
 *     remains unfinished after completing blk_rq_bytes(@rq) bytes.
 */
static inline void blk_end_request_all(struct request *rq, int error)
{
bool pending;
pending = blk_end_request(rq, error, blk_rq_bytes(rq));
BUG_ON(pending);
}
/**
* __blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
......@@ -901,6 +917,22 @@ static inline bool __blk_end_request(struct request *rq, int error,
return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
 * @rq:    the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 *     BUG() if any part of the request remains unfinished after
 *     completing blk_rq_bytes(@rq) bytes.
 */
static inline void __blk_end_request_all(struct request *rq, int error)
{
bool pending;
pending = __blk_end_request(rq, error, blk_rq_bytes(rq));
BUG_ON(pending);
}
/**
* end_request - end I/O on the current segment of the request
* @rq: the request being processed
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册