Commit 5b93629b authored by Tejun Heo, committed by Jens Axboe

block: implement blk_rq_pos/[cur_]sectors() and convert obvious ones

Implement accessors - blk_rq_pos(), blk_rq_sectors() and
blk_rq_cur_sectors() - which return rq->hard_sector, rq->hard_nr_sectors
and rq->hard_cur_sectors respectively, and convert direct references to
these fields to the accessors.

This is in preparation for the request data length handling cleanup.
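
As a rough illustration of the call-site conversion this patch performs, a
driver that previously read rq->hard_sector and rq->hard_nr_sectors directly
can compute the same values through the accessors. The helper name
rq_end_sector below is invented for illustration and is not part of this
patch:

    #include <linux/blkdev.h>

    /*
     * Illustrative sketch only: the first sector past the end of a request,
     * written against the accessors introduced by this patch instead of the
     * rq->hard_* fields they wrap.
     */
    static inline sector_t rq_end_sector(const struct request *rq)
    {
            return blk_rq_pos(rq) + blk_rq_sectors(rq);
    }

This is the same pattern as the cfq_activate_request() conversion below,
where cfqd->last_position becomes blk_rq_pos(rq) + blk_rq_sectors(rq).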

Geert	: suggested adding const to struct request * parameter to accessors
Sergei	: spotted error in patch description

[ Impact: cleanup ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Tested-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent c3a4d78c
@@ -163,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
          * For an empty barrier, there's no actual BAR request, which
          * in turn makes POSTFLUSH unnecessary. Mask them off.
          */
-        if (!rq->hard_nr_sectors) {
+        if (!blk_rq_sectors(rq)) {
                 q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
                                 QUEUE_ORDERED_DO_POSTFLUSH);
                 /*
@@ -1683,7 +1683,7 @@ static void blk_account_io_done(struct request *req)
 unsigned int blk_rq_bytes(struct request *rq)
 {
         if (blk_fs_request(rq))
-                return rq->hard_nr_sectors << 9;
+                return blk_rq_sectors(rq) << 9;
 
         return rq->data_len;
 }
@@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
                                                 cfqd->rq_in_driver);
 
-        cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+        cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
 
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
@@ -136,7 +136,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
         dev_dbg(&dev->sbd.core,
                 "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
                 __func__, __LINE__, op, n, req->nr_sectors,
-                req->hard_nr_sectors);
+                blk_rq_sectors(req));
 #endif
 
         start_sector = req->sector * priv->blocking_factor;
@@ -368,12 +368,12 @@ static void do_viodasd_request(struct request_queue *q)
                 blkdev_dequeue_request(req);
                 /* check that request contains a valid command */
                 if (!blk_fs_request(req)) {
-                        viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+                        viodasd_end_request(req, -EIO, blk_rq_sectors(req));
                         continue;
                 }
                 /* Try sending the request */
                 if (send_request(req) != 0)
-                        viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+                        viodasd_end_request(req, -EIO, blk_rq_sectors(req));
         }
 }
@@ -590,7 +590,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
                 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
                 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
                                 event->xRc, bevent->sub_result, err->msg);
-                num_sect = req->hard_nr_sectors;
+                num_sect = blk_rq_sectors(req);
         }
         qlock = req->q->queue_lock;
         spin_lock_irqsave(qlock, irq_flags);
@@ -645,8 +645,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
                 /* Okay, it's a data request, set it up for transfer */
                 dev_dbg(ace->dev,
-                        "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
-                        (unsigned long long) req->sector, req->hard_nr_sectors,
+                        "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
+                        (unsigned long long) req->sector, blk_rq_sectors(req),
                         req->current_nr_sectors, rq_data_dir(req));
 
                 ace->req = req;
@@ -654,7 +654,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
                 ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
                 ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
 
-                count = req->hard_nr_sectors;
+                count = blk_rq_sectors(req);
                 if (rq_data_dir(req)) {
                         /* Kick off write request */
                         dev_dbg(ace->dev, "write data\n");
@@ -719,8 +719,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
                 /* bio finished; is there another one? */
                 if (__blk_end_request(ace->req, 0,
                                         blk_rq_cur_bytes(ace->req))) {
-                        /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
-                         *      ace->req->hard_nr_sectors,
+                        /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
+                         *      blk_rq_sectors(ace->req),
                          *      ace->req->current_nr_sectors);
                          */
                         ace->data_ptr = ace->req->buffer;
@@ -730,7 +730,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
         if (blk_pc_request(rq))
                 nsectors = (rq->data_len + 511) >> 9;
         else
-                nsectors = rq->hard_nr_sectors;
+                nsectors = blk_rq_sectors(rq);
 
         if (nsectors == 0)
                 nsectors = 1;
@@ -875,7 +875,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
         return ide_issue_pc(drive, &cmd);
 out_end:
-        nsectors = rq->hard_nr_sectors;
+        nsectors = blk_rq_sectors(rq);
 
         if (nsectors == 0)
                 nsectors = 1;
@@ -1359,8 +1359,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
         int hard_sect = queue_hardsect_size(q);
-        long block = (long)rq->hard_sector / (hard_sect >> 9);
-        unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+        long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+        unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 
         memset(rq->cmd, 0, BLK_MAX_CDB);
@@ -118,7 +118,7 @@ unsigned int ide_rq_bytes(struct request *rq)
         if (blk_pc_request(rq))
                 return rq->data_len;
         else
-                return rq->hard_cur_sectors << 9;
+                return blk_rq_cur_sectors(rq) << 9;
 }
 EXPORT_SYMBOL_GPL(ide_rq_bytes);
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
          * and complete the whole request right now
          */
         if (blk_noretry_request(rq) && error <= 0)
-                nr_bytes = rq->hard_nr_sectors << 9;
+                nr_bytes = blk_rq_sectors(rq) << 9;
 
         rc = ide_end_rq(drive, rq, error, nr_bytes);
         if (rc == 0)
@@ -427,7 +427,7 @@ static void i2o_block_end_request(struct request *req, int error,
         unsigned long flags;
 
         if (blk_end_request(req, error, nr_bytes)) {
-                int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
+                int leftover = (blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
 
                 if (blk_pc_request(req))
                         leftover = req->data_len;
@@ -546,7 +546,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
          * to queue the remainder of them.
          */
         if (blk_end_request(req, error, bytes)) {
-                int leftover = (req->hard_nr_sectors << 9);
+                int leftover = blk_rq_sectors(req) << 9;
 
                 if (blk_pc_request(req))
                         leftover = req->resid_len;
@@ -832,13 +832,30 @@ static inline void blk_run_address_space(struct address_space *mapping)
 extern void blkdev_dequeue_request(struct request *req);
 
 /*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
+ * blk_rq_pos()         : the current sector
+ * blk_rq_bytes()       : bytes left in the entire request
+ * blk_rq_cur_bytes()   : bytes left in the current segment
+ * blk_rq_sectors()     : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
  */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+        return rq->hard_sector;
+}
+
 extern unsigned int blk_rq_bytes(struct request *rq);
 extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+        return rq->hard_nr_sectors;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+        return rq->hard_cur_sectors;
+}
+
 /*
  * Request completion related functions.
  *
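
As a side note on the header change above, the new sector-based accessors line
up with the existing byte-based helpers through the usual 512-byte sector
shift: for a filesystem request, blk_rq_bytes() is simply blk_rq_sectors()
shifted left by 9, as the blk_rq_bytes() hunk earlier in this patch shows. A
hypothetical check (the name rq_units_consistent is invented for illustration
and is not part of the patch) would be:

    #include <linux/blkdev.h>

    /*
     * Hypothetical consistency check, not part of this patch: for a
     * filesystem request, the bytes left in the request equal the
     * remaining sector count shifted by 9.
     */
    static inline bool rq_units_consistent(struct request *rq)
    {
            if (!blk_fs_request(rq))
                    return true;

            return blk_rq_bytes(rq) == (blk_rq_sectors(rq) << 9);
    }
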
@@ -646,7 +646,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                                 rq->cmd_len, rq->cmd);
         } else {
                 what |= BLK_TC_ACT(BLK_TC_FS);
-                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9,
                                 rw, what, rq->errors, 0, NULL);
         }
 }
@@ -857,7 +857,7 @@ void blk_add_driver_data(struct request_queue *q,
                 __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
                                 rq->errors, len, data);
         else
-                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9,
                                 0, BLK_TA_DRV_DATA, rq->errors, len, data);
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);