Commit c39c6c77 authored by Bart Van Assche, committed by Jens Axboe

skd: Inline skd_process_request()

This patch does not change any functionality but makes the skd
driver code more similar to that of other blk-mq kernel drivers.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 49f16e2f
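Background: blk-mq drivers normally implement their I/O submission path directly in the .queue_rq callback of struct blk_mq_ops rather than delegating to a separate helper, which is the convention this patch moves skd toward. Below is a minimal sketch of that common shape; the mydev_* names are hypothetical and only illustrate the pattern, they are not part of the skd driver. Only the blk-mq calls (blk_mq_start_request(), the blk_status_t return codes, mqd->rq and mqd->last) are real kernel API.

#include <linux/blk-mq.h>

struct mydev {
	bool online;		/* hypothetical per-device state */
};

/* Hypothetical submission stub; a real driver builds and issues a HW command here. */
static bool mydev_submit(struct mydev *dev, struct request *req, bool last)
{
	return true;
}

static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *mqd)
{
	struct request *req = mqd->rq;
	struct mydev *dev = req->q->queuedata;

	/* Reject I/O outright if the device is not usable. */
	if (!dev->online)
		return BLK_STS_IOERR;

	/* Hand ownership of the request over to the driver. */
	blk_mq_start_request(req);

	/*
	 * Build and issue the hardware command. mqd->last marks the end of a
	 * batch, so drivers that coalesce commands flush their queue here.
	 */
	if (!mydev_submit(dev, req, mqd->last))
		return BLK_STS_RESOURCE;

	return BLK_STS_OK;
}

static const struct blk_mq_ops mydev_mq_ops = {
	.queue_rq	= mydev_queue_rq,
};

In the patched skd driver this same structure appears inline in skd_mq_queue_rq(), as the diff below shows.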
@@ -478,8 +478,10 @@ static bool skd_fail_all(struct request_queue *q)
 	}
 }
 
-static void skd_process_request(struct request *req, bool last)
+static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+				    const struct blk_mq_queue_data *mqd)
 {
+	struct request *const req = mqd->rq;
 	struct request_queue *const q = req->q;
 	struct skd_device *skdev = q->queuedata;
 	struct skd_fitmsg_context *skmsg;
@@ -492,6 +494,11 @@ static void skd_process_request(struct request *req, bool last)
 	const u32 count = blk_rq_sectors(req);
 	const int data_dir = rq_data_dir(req);
 
+	if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
+		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
+
+	blk_mq_start_request(req);
+
 	WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
 		  tag, skd_max_queue_depth, q->nr_requests);
 
@@ -514,7 +521,7 @@ static void skd_process_request(struct request *req, bool last)
 		dev_dbg(&skdev->pdev->dev, "error Out\n");
 		skd_end_request(skdev, blk_mq_rq_from_pdu(skreq),
 				BLK_STS_RESOURCE);
-		return;
+		return BLK_STS_OK;
 	}
 
 	dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
@@ -578,30 +585,13 @@ static void skd_process_request(struct request *req, bool last)
 	if (skd_max_req_per_msg == 1) {
 		skd_send_fitmsg(skdev, skmsg);
 	} else {
-		if (last ||
+		if (mqd->last ||
 		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
 			skd_send_fitmsg(skdev, skmsg);
 			skdev->skmsg = NULL;
 		}
 		spin_unlock_irqrestore(&skdev->lock, flags);
 	}
-}
-
-static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
-				    const struct blk_mq_queue_data *mqd)
-{
-	struct request *req = mqd->rq;
-	struct request_queue *q = req->q;
-	struct skd_device *skdev = q->queuedata;
-
-	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
-		blk_mq_start_request(req);
-		skd_process_request(req, mqd->last);
-
-		return BLK_STS_OK;
-	} else {
-		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
-	}
 
 	return BLK_STS_OK;
 }