Commit d285203c authored by Christoph Hellwig

scsi: add support for a blk-mq based I/O path.

This patch adds support for an alternate I/O path in the scsi midlayer
which uses the blk-mq infrastructure instead of the legacy request code.

Use of blk-mq is fully transparent to drivers, although for now a host
template field is provided to opt out of blk-mq usage in case any unforeseen
incompatibilities arise.
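
For illustration, opting out is a one-line change in a low-level driver's
host template; the driver name and callback below are hypothetical
placeholders, only the disable_blk_mq field comes from this patch:

	/* assumes <scsi/scsi_host.h>; "example" names are placeholders */
	static int example_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);

	static struct scsi_host_template example_template = {
		.name		= "example",
		.queuecommand	= example_queuecommand,
		.can_queue	= 64,
		.this_id	= -1,
		.disable_blk_mq	= true,	/* temporary opt-out added by this patch */
	};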

In general, replacing the legacy request code with blk-mq is a simple and
mostly mechanical transformation.  The biggest exception is the new code
that deals with the fact that I/O submissions in blk-mq must happen from
process context, which slightly complicates the I/O completion handler.
The second-biggest difference is that blk-mq is built around the concept
of preallocated requests that also include driver-specific data, which
in the SCSI context means the scsi_cmnd structure.  This completely avoids
dynamic memory allocations on the fast path of I/O submission.
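
Concretely, on the MQ path the scsi_cmnd and its scatterlist storage live
inside the payload that blk-mq preallocates for every request, so the hot
path only does pointer arithmetic.  A minimal sketch, condensed from the
scsi_mq_prep_fn() added below (the helper name here is illustrative, not
part of the patch):

	/* assumes <linux/blk-mq.h>, <scsi/scsi_cmnd.h>, <scsi/scsi_host.h> */
	static struct scsi_cmnd *example_cmd_from_rq(struct Scsi_Host *shost,
						      struct request *req)
	{
		struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);	/* no allocation */

		/* scatterlists follow the scsi_cmnd and the LLDD's cmd_size area */
		cmd->sdb.table.sgl = (void *)cmd + sizeof(struct scsi_cmnd) +
				     shost->hostt->cmd_size;
		return cmd;
	}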

Due to the preallocated requests, the MQ code path exclusively uses the
host-wide shared tag allocator instead of a per-LUN one.  This only
affects drivers that actually use the block-layer-provided tag allocator
instead of their own.  Unlike the old path, blk-mq always provides a tag,
although drivers don't have to use it.
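
Since all commands of a host come from one shared tag set, a tag can be
mapped straight back to its command; a sketch along the lines of the
scsi_mq_find_tag() helper added to scsi_tcq.h (the function name here is
illustrative):

	/* assumes <linux/blk-mq.h>, <scsi/scsi_host.h> */
	static struct scsi_cmnd *example_find_cmd(struct Scsi_Host *shost,
						  unsigned int hwq, int tag)
	{
		struct request *req;

		req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq], tag);
		return req ? (struct scsi_cmnd *)req->special : NULL;
	}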

For now the blk-mq path is disabled by default and must be enabled using
the "use_blk_mq" module parameter.  Once the remaining work in the block
layer to make blk-mq more suitable for slow devices is complete, I hope
to make it the default and eventually even remove the old code path.
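
The gating logic is deliberately small; condensed from the scsi.c and
hosts.c hunks below, each host also gains a read-only use_blk_mq sysfs
attribute reflecting the result:

	/* global opt-in via the scsi_mod "use_blk_mq" parameter ... */
	bool scsi_use_blk_mq = false;
	module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);

	/* ... combined with the per-template opt-out in scsi_host_alloc() */
	shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq;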

Based on the earlier scsi-mq prototype by Nicholas Bellinger.

Thanks to Bart Van Assche and Robert Elliott for testing, benchmarking and
various suggestions and code contributions.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
Parent: c53c6d6a
@@ -213,9 +213,24 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail; goto fail;
} }
if (shost_use_blk_mq(shost)) {
error = scsi_mq_setup_tags(shost);
if (error)
goto fail;
}
/*
* Note that we allocate the freelist even for the MQ case for now,
* as we need a command set aside for scsi_reset_provider. Having
* the full host freelist and one command available for that is a
* little heavy-handed, but avoids introducing a special allocator
* just for this. Eventually the structure of scsi_reset_provider
* will need a major overhaul.
*/
error = scsi_setup_command_freelist(shost); error = scsi_setup_command_freelist(shost);
if (error) if (error)
goto fail; goto out_destroy_tags;
if (!shost->shost_gendev.parent) if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus; shost->shost_gendev.parent = dev ? dev : &platform_bus;
@@ -226,7 +241,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
error = device_add(&shost->shost_gendev); error = device_add(&shost->shost_gendev);
if (error) if (error)
goto out; goto out_destroy_freelist;
pm_runtime_set_active(&shost->shost_gendev); pm_runtime_set_active(&shost->shost_gendev);
pm_runtime_enable(&shost->shost_gendev); pm_runtime_enable(&shost->shost_gendev);
@@ -279,8 +294,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
device_del(&shost->shost_dev); device_del(&shost->shost_dev);
out_del_gendev: out_del_gendev:
device_del(&shost->shost_gendev); device_del(&shost->shost_gendev);
out: out_destroy_freelist:
scsi_destroy_command_freelist(shost); scsi_destroy_command_freelist(shost);
out_destroy_tags:
if (shost_use_blk_mq(shost))
scsi_mq_destroy_tags(shost);
fail: fail:
return error; return error;
} }
@@ -309,8 +327,13 @@ static void scsi_host_dev_release(struct device *dev)
} }
scsi_destroy_command_freelist(shost); scsi_destroy_command_freelist(shost);
if (shost->bqt) if (shost_use_blk_mq(shost)) {
blk_free_tags(shost->bqt); if (shost->tag_set.tags)
scsi_mq_destroy_tags(shost);
} else {
if (shost->bqt)
blk_free_tags(shost->bqt);
}
kfree(shost->shost_data); kfree(shost->shost_data);
@@ -436,6 +459,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else else
shost->dma_boundary = 0xffffffff; shost->dma_boundary = 0xffffffff;
shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq;
device_initialize(&shost->shost_gendev); device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type; shost->shost_gendev.bus = &scsi_bus_type;
......
@@ -805,7 +805,7 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
* is more IO than the LLD's can_queue (so there are not enuogh * is more IO than the LLD's can_queue (so there are not enuogh
* tags) request_fn's host queue ready check will handle it. * tags) request_fn's host queue ready check will handle it.
*/ */
if (!sdev->host->bqt) { if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
if (blk_queue_tagged(sdev->request_queue) && if (blk_queue_tagged(sdev->request_queue) &&
blk_queue_resize_tags(sdev->request_queue, tags) != 0) blk_queue_resize_tags(sdev->request_queue, tags) != 0)
goto out; goto out;
@@ -1361,6 +1361,9 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
bool scsi_use_blk_mq = false;
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
static int __init init_scsi(void) static int __init init_scsi(void)
{ {
int error; int error;
......
/* /*
* scsi_lib.c Copyright (C) 1999 Eric Youngdale * Copyright (C) 1999 Eric Youngdale
* Copyright (C) 2014 Christoph Hellwig
* *
* SCSI queueing library. * SCSI queueing library.
* Initial versions: Eric Youngdale (eric@andante.org). * Initial versions: Eric Youngdale (eric@andante.org).
@@ -20,6 +21,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
@@ -113,6 +115,16 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
} }
} }
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct request_queue *q = cmd->request->q;
blk_mq_requeue_request(cmd->request);
blk_mq_kick_requeue_list(q);
put_device(&sdev->sdev_gendev);
}
/** /**
* __scsi_queue_insert - private queue insertion * __scsi_queue_insert - private queue insertion
* @cmd: The SCSI command being requeued * @cmd: The SCSI command being requeued
@@ -150,6 +162,10 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
* before blk_cleanup_queue() finishes. * before blk_cleanup_queue() finishes.
*/ */
cmd->result = 0; cmd->result = 0;
if (q->mq_ops) {
scsi_mq_requeue_cmd(cmd);
return;
}
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request); blk_requeue_request(q, cmd->request);
kblockd_schedule_work(&device->requeue_work); kblockd_schedule_work(&device->requeue_work);
@@ -308,6 +324,14 @@ void scsi_device_unbusy(struct scsi_device *sdev)
atomic_dec(&sdev->device_busy); atomic_dec(&sdev->device_busy);
} }
static void scsi_kick_queue(struct request_queue *q)
{
if (q->mq_ops)
blk_mq_start_hw_queues(q);
else
blk_run_queue(q);
}
/* /*
* Called for single_lun devices on IO completion. Clear starget_sdev_user, * Called for single_lun devices on IO completion. Clear starget_sdev_user,
* and call blk_run_queue for all the scsi_devices on the target - * and call blk_run_queue for all the scsi_devices on the target -
@@ -332,7 +356,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
* but in most cases, we will be first. Ideally, each LU on the * but in most cases, we will be first. Ideally, each LU on the
* target would get some limited time or requests on the target. * target would get some limited time or requests on the target.
*/ */
blk_run_queue(current_sdev->request_queue); scsi_kick_queue(current_sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags); spin_lock_irqsave(shost->host_lock, flags);
if (starget->starget_sdev_user) if (starget->starget_sdev_user)
@@ -345,7 +369,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
continue; continue;
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
blk_run_queue(sdev->request_queue); scsi_kick_queue(sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags); spin_lock_irqsave(shost->host_lock, flags);
scsi_device_put(sdev); scsi_device_put(sdev);
@@ -435,7 +459,7 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
continue; continue;
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
blk_run_queue(slq); scsi_kick_queue(slq);
blk_put_queue(slq); blk_put_queue(slq);
spin_lock_irqsave(shost->host_lock, flags); spin_lock_irqsave(shost->host_lock, flags);
@@ -466,7 +490,10 @@ static void scsi_run_queue(struct request_queue *q)
if (!list_empty(&sdev->host->starved_list)) if (!list_empty(&sdev->host->starved_list))
scsi_starved_list_run(sdev->host); scsi_starved_list_run(sdev->host);
blk_run_queue(q); if (q->mq_ops)
blk_mq_start_stopped_hw_queues(q, false);
else
blk_run_queue(q);
} }
void scsi_requeue_run_queue(struct work_struct *work) void scsi_requeue_run_queue(struct work_struct *work)
@@ -564,25 +591,72 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
return mempool_alloc(sgp->pool, gfp_mask); return mempool_alloc(sgp->pool, gfp_mask);
} }
static void scsi_free_sgtable(struct scsi_data_buffer *sdb) static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{ {
__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, false, scsi_sg_free); if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
return;
__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
} }
static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
gfp_t gfp_mask) gfp_t gfp_mask, bool mq)
{ {
struct scatterlist *first_chunk = NULL;
int ret; int ret;
BUG_ON(!nents); BUG_ON(!nents);
if (mq) {
if (nents <= SCSI_MAX_SG_SEGMENTS) {
sdb->table.nents = nents;
sg_init_table(sdb->table.sgl, sdb->table.nents);
return 0;
}
first_chunk = sdb->table.sgl;
}
ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
NULL, gfp_mask, scsi_sg_alloc); first_chunk, gfp_mask, scsi_sg_alloc);
if (unlikely(ret)) if (unlikely(ret))
scsi_free_sgtable(sdb); scsi_free_sgtable(sdb, mq);
return ret; return ret;
} }
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
if (cmd->request->cmd_type == REQ_TYPE_FS) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
drv->uninit_command(cmd);
}
}
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
scsi_free_sgtable(&cmd->sdb, true);
if (cmd->request->next_rq && cmd->request->next_rq->special)
scsi_free_sgtable(cmd->request->next_rq->special, true);
if (scsi_prot_sg_count(cmd))
scsi_free_sgtable(cmd->prot_sdb, true);
}
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
unsigned long flags;
BUG_ON(list_empty(&cmd->list));
scsi_mq_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
spin_lock_irqsave(&sdev->list_lock, flags);
list_del_init(&cmd->list);
spin_unlock_irqrestore(&sdev->list_lock, flags);
}
/* /*
* Function: scsi_release_buffers() * Function: scsi_release_buffers()
* *
@@ -602,19 +676,19 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
static void scsi_release_buffers(struct scsi_cmnd *cmd) static void scsi_release_buffers(struct scsi_cmnd *cmd)
{ {
if (cmd->sdb.table.nents) if (cmd->sdb.table.nents)
scsi_free_sgtable(&cmd->sdb); scsi_free_sgtable(&cmd->sdb, false);
memset(&cmd->sdb, 0, sizeof(cmd->sdb)); memset(&cmd->sdb, 0, sizeof(cmd->sdb));
if (scsi_prot_sg_count(cmd)) if (scsi_prot_sg_count(cmd))
scsi_free_sgtable(cmd->prot_sdb); scsi_free_sgtable(cmd->prot_sdb, false);
} }
static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{ {
struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
scsi_free_sgtable(bidi_sdb); scsi_free_sgtable(bidi_sdb, false);
kmem_cache_free(scsi_sdb_cache, bidi_sdb); kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL; cmd->request->next_rq->special = NULL;
} }
@@ -625,8 +699,6 @@ static bool scsi_end_request(struct request *req, int error,
struct scsi_cmnd *cmd = req->special; struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device; struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue; struct request_queue *q = sdev->request_queue;
unsigned long flags;
if (blk_update_request(req, error, bytes)) if (blk_update_request(req, error, bytes))
return true; return true;
@@ -639,14 +711,38 @@ static bool scsi_end_request(struct request *req, int error,
if (blk_queue_add_random(q)) if (blk_queue_add_random(q))
add_disk_randomness(req->rq_disk); add_disk_randomness(req->rq_disk);
spin_lock_irqsave(q->queue_lock, flags); if (req->mq_ctx) {
blk_finish_request(req, error); /*
spin_unlock_irqrestore(q->queue_lock, flags); * In the MQ case the command gets freed by __blk_mq_end_io,
* so we have to do all cleanup that depends on it earlier.
*
* We also can't kick the queues from irq context, so we
* will have to defer it to a workqueue.
*/
scsi_mq_uninit_cmd(cmd);
__blk_mq_end_io(req, error);
if (scsi_target(sdev)->single_lun ||
!list_empty(&sdev->host->starved_list))
kblockd_schedule_work(&sdev->requeue_work);
else
blk_mq_start_stopped_hw_queues(q, true);
put_device(&sdev->sdev_gendev);
} else {
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
blk_finish_request(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
if (bidi_bytes)
scsi_release_bidi_buffers(cmd);
scsi_release_buffers(cmd);
scsi_next_command(cmd);
}
if (bidi_bytes)
scsi_release_bidi_buffers(cmd);
scsi_release_buffers(cmd);
scsi_next_command(cmd);
return false; return false;
} }
@@ -953,8 +1049,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
/* Unprep the request and put it back at the head of the queue. /* Unprep the request and put it back at the head of the queue.
* A new command will be prepared and issued. * A new command will be prepared and issued.
*/ */
scsi_release_buffers(cmd); if (q->mq_ops) {
scsi_requeue_command(q, cmd); cmd->request->cmd_flags &= ~REQ_DONTPREP;
scsi_mq_uninit_cmd(cmd);
scsi_mq_requeue_cmd(cmd);
} else {
scsi_release_buffers(cmd);
scsi_requeue_command(q, cmd);
}
break; break;
case ACTION_RETRY: case ACTION_RETRY:
/* Retry the same command immediately */ /* Retry the same command immediately */
@@ -976,9 +1078,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
* If sg table allocation fails, requeue request later. * If sg table allocation fails, requeue request later.
*/ */
if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
gfp_mask))) { gfp_mask, req->mq_ctx != NULL)))
return BLKPREP_DEFER; return BLKPREP_DEFER;
}
/* /*
* Next, walk the list, and fill in the addresses and sizes of * Next, walk the list, and fill in the addresses and sizes of
@@ -1006,6 +1107,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{ {
struct scsi_device *sdev = cmd->device; struct scsi_device *sdev = cmd->device;
struct request *rq = cmd->request; struct request *rq = cmd->request;
bool is_mq = (rq->mq_ctx != NULL);
int error; int error;
BUG_ON(!rq->nr_phys_segments); BUG_ON(!rq->nr_phys_segments);
@@ -1015,15 +1117,19 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
goto err_exit; goto err_exit;
if (blk_bidi_rq(rq)) { if (blk_bidi_rq(rq)) {
struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( if (!rq->q->mq_ops) {
scsi_sdb_cache, GFP_ATOMIC); struct scsi_data_buffer *bidi_sdb =
if (!bidi_sdb) { kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
error = BLKPREP_DEFER; if (!bidi_sdb) {
goto err_exit; error = BLKPREP_DEFER;
goto err_exit;
}
rq->next_rq->special = bidi_sdb;
} }
rq->next_rq->special = bidi_sdb; error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special,
error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC); GFP_ATOMIC);
if (error) if (error)
goto err_exit; goto err_exit;
} }
@@ -1035,7 +1141,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
BUG_ON(prot_sdb == NULL); BUG_ON(prot_sdb == NULL);
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask, is_mq)) {
error = BLKPREP_DEFER; error = BLKPREP_DEFER;
goto err_exit; goto err_exit;
} }
@@ -1049,13 +1155,16 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
cmd->prot_sdb->table.nents = count; cmd->prot_sdb->table.nents = count;
} }
return BLKPREP_OK ; return BLKPREP_OK;
err_exit: err_exit:
scsi_release_buffers(cmd); if (is_mq) {
cmd->request->special = NULL; scsi_mq_free_sgtables(cmd);
scsi_put_command(cmd); } else {
put_device(&sdev->sdev_gendev); scsi_release_buffers(cmd);
cmd->request->special = NULL;
scsi_put_command(cmd);
put_device(&sdev->sdev_gendev);
}
return error; return error;
} }
EXPORT_SYMBOL(scsi_init_io); EXPORT_SYMBOL(scsi_init_io);
@@ -1266,13 +1375,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
static void scsi_unprep_fn(struct request_queue *q, struct request *req) static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{ {
if (req->cmd_type == REQ_TYPE_FS) { scsi_uninit_cmd(req->special);
struct scsi_cmnd *cmd = req->special;
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
drv->uninit_command(cmd);
}
} }
/* /*
@@ -1295,7 +1398,11 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
* unblock after device_blocked iterates to zero * unblock after device_blocked iterates to zero
*/ */
if (atomic_dec_return(&sdev->device_blocked) > 0) { if (atomic_dec_return(&sdev->device_blocked) > 0) {
blk_delay_queue(q, SCSI_QUEUE_DELAY); /*
* For the MQ case we take care of this in the caller.
*/
if (!q->mq_ops)
blk_delay_queue(q, SCSI_QUEUE_DELAY);
goto out_dec; goto out_dec;
} }
SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
@@ -1671,6 +1778,180 @@ static void scsi_request_fn(struct request_queue *q)
blk_delay_queue(q, SCSI_QUEUE_DELAY); blk_delay_queue(q, SCSI_QUEUE_DELAY);
} }
static inline int prep_to_mq(int ret)
{
switch (ret) {
case BLKPREP_OK:
return 0;
case BLKPREP_DEFER:
return BLK_MQ_RQ_QUEUE_BUSY;
default:
return BLK_MQ_RQ_QUEUE_ERROR;
}
}
static int scsi_mq_prep_fn(struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
unsigned char *sense_buf = cmd->sense_buffer;
struct scatterlist *sg;
memset(cmd, 0, sizeof(struct scsi_cmnd));
req->special = cmd;
cmd->request = req;
cmd->device = sdev;
cmd->sense_buffer = sense_buf;
cmd->tag = req->tag;
req->cmd = req->__cmd;
cmd->cmnd = req->cmd;
cmd->prot_op = SCSI_PROT_NORMAL;
INIT_LIST_HEAD(&cmd->list);
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies;
/*
* XXX: cmd_list lookups are only used by two drivers, try to get
* rid of this list in common code.
*/
spin_lock_irq(&sdev->list_lock);
list_add_tail(&cmd->list, &sdev->cmd_list);
spin_unlock_irq(&sdev->list_lock);
sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
cmd->sdb.table.sgl = sg;
if (scsi_host_get_prot(shost)) {
cmd->prot_sdb = (void *)sg +
shost->sg_tablesize * sizeof(struct scatterlist);
memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
cmd->prot_sdb->table.sgl =
(struct scatterlist *)(cmd->prot_sdb + 1);
}
if (blk_bidi_rq(req)) {
struct request *next_rq = req->next_rq;
struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
bidi_sdb->table.sgl =
(struct scatterlist *)(bidi_sdb + 1);
next_rq->special = bidi_sdb;
}
return scsi_setup_cmnd(sdev, req);
}
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
trace_scsi_dispatch_cmd_done(cmd);
blk_mq_complete_request(cmd->request);
}
static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
struct request_queue *q = req->q;
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
int ret;
int reason;
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
if (ret)
goto out;
ret = BLK_MQ_RQ_QUEUE_BUSY;
if (!get_device(&sdev->sdev_gendev))
goto out;
if (!scsi_dev_queue_ready(q, sdev))
goto out_put_device;
if (!scsi_target_queue_ready(shost, sdev))
goto out_dec_device_busy;
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
if (!(req->cmd_flags & REQ_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
if (ret)
goto out_dec_host_busy;
req->cmd_flags |= REQ_DONTPREP;
}
scsi_init_cmd_errh(cmd);
cmd->scsi_done = scsi_mq_done;
reason = scsi_dispatch_cmd(cmd);
if (reason) {
scsi_set_blocked(cmd, reason);
ret = BLK_MQ_RQ_QUEUE_BUSY;
goto out_dec_host_busy;
}
return BLK_MQ_RQ_QUEUE_OK;
out_dec_host_busy:
atomic_dec(&shost->host_busy);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
out_dec_device_busy:
atomic_dec(&sdev->device_busy);
out_put_device:
put_device(&sdev->sdev_gendev);
out:
switch (ret) {
case BLK_MQ_RQ_QUEUE_BUSY:
blk_mq_stop_hw_queue(hctx);
if (atomic_read(&sdev->device_busy) == 0 &&
!scsi_device_blocked(sdev))
blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
break;
case BLK_MQ_RQ_QUEUE_ERROR:
/*
* Make sure to release all allocated ressources when
* we hit an error, as we will never see this command
* again.
*/
if (req->cmd_flags & REQ_DONTPREP)
scsi_mq_uninit_cmd(cmd);
break;
default:
break;
}
return ret;
}
static int scsi_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
return 0;
}
static void scsi_exit_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
kfree(cmd->sense_buffer);
}
static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{ {
struct device *host_dev; struct device *host_dev;
@@ -1692,16 +1973,10 @@ static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
return bounce_limit; return bounce_limit;
} }
struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
request_fn_proc *request_fn)
{ {
struct request_queue *q;
struct device *dev = shost->dma_dev; struct device *dev = shost->dma_dev;
q = blk_init_queue(request_fn, NULL);
if (!q)
return NULL;
/* /*
* this limit is imposed by hardware restrictions * this limit is imposed by hardware restrictions
*/ */
@@ -1732,7 +2007,17 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
* blk_queue_update_dma_alignment() later. * blk_queue_update_dma_alignment() later.
*/ */
blk_queue_dma_alignment(q, 0x03); blk_queue_dma_alignment(q, 0x03);
}
struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
request_fn_proc *request_fn)
{
struct request_queue *q;
q = blk_init_queue(request_fn, NULL);
if (!q)
return NULL;
__scsi_init_queue(shost, q);
return q; return q;
} }
EXPORT_SYMBOL(__scsi_alloc_queue); EXPORT_SYMBOL(__scsi_alloc_queue);
@@ -1753,6 +2038,55 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
return q; return q;
} }
static struct blk_mq_ops scsi_mq_ops = {
.map_queue = blk_mq_map_queue,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_times_out,
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
};
struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
{
sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
if (IS_ERR(sdev->request_queue))
return NULL;
sdev->request_queue->queuedata = sdev;
__scsi_init_queue(sdev->host, sdev->request_queue);
return sdev->request_queue;
}
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size, tbl_size;
tbl_size = shost->sg_tablesize;
if (tbl_size > SCSI_MAX_SG_SEGMENTS)
tbl_size = SCSI_MAX_SG_SEGMENTS;
sgl_size = tbl_size * sizeof(struct scatterlist);
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
memset(&shost->tag_set, 0, sizeof(shost->tag_set));
shost->tag_set.ops = &scsi_mq_ops;
shost->tag_set.nr_hw_queues = 1;
shost->tag_set.queue_depth = shost->can_queue;
shost->tag_set.cmd_size = cmd_size;
shost->tag_set.numa_node = NUMA_NO_NODE;
shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
shost->tag_set.driver_data = shost;
return blk_mq_alloc_tag_set(&shost->tag_set);
}
void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
blk_mq_free_tag_set(&shost->tag_set);
}
/* /*
* Function: scsi_block_requests() * Function: scsi_block_requests()
* *
@@ -2498,9 +2832,13 @@ scsi_internal_device_block(struct scsi_device *sdev)
* block layer from calling the midlayer with this device's * block layer from calling the midlayer with this device's
* request queue. * request queue.
*/ */
spin_lock_irqsave(q->queue_lock, flags); if (q->mq_ops) {
blk_stop_queue(q); blk_mq_stop_hw_queues(q);
spin_unlock_irqrestore(q->queue_lock, flags); } else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
return 0; return 0;
} }
@@ -2546,9 +2884,13 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
sdev->sdev_state != SDEV_OFFLINE) sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL; return -EINVAL;
spin_lock_irqsave(q->queue_lock, flags); if (q->mq_ops) {
blk_start_queue(q); blk_mq_start_stopped_hw_queues(q, false);
spin_unlock_irqrestore(q->queue_lock, flags); } else {
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
return 0; return 0;
} }
......
@@ -88,6 +88,9 @@ extern void scsi_next_command(struct scsi_cmnd *cmd);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost); extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
extern int scsi_init_queue(void); extern int scsi_init_queue(void);
extern void scsi_exit_queue(void); extern void scsi_exit_queue(void);
struct request_queue; struct request_queue;
......
@@ -273,7 +273,10 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/ */
sdev->borken = 1; sdev->borken = 1;
sdev->request_queue = scsi_alloc_queue(sdev); if (shost_use_blk_mq(shost))
sdev->request_queue = scsi_mq_alloc_queue(sdev);
else
sdev->request_queue = scsi_alloc_queue(sdev);
if (!sdev->request_queue) { if (!sdev->request_queue) {
/* release fn is set up in scsi_sysfs_device_initialise, so /* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */ * have to free and put manually here */
......
@@ -333,6 +333,7 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
shost_rd_attr(use_blk_mq, "%d\n");
shost_rd_attr(unique_id, "%u\n"); shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(cmd_per_lun, "%hd\n"); shost_rd_attr(cmd_per_lun, "%hd\n");
shost_rd_attr(can_queue, "%hd\n"); shost_rd_attr(can_queue, "%hd\n");
@@ -352,6 +353,7 @@ show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
static struct attribute *scsi_sysfs_shost_attrs[] = { static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_use_blk_mq.attr,
&dev_attr_unique_id.attr, &dev_attr_unique_id.attr,
&dev_attr_host_busy.attr, &dev_attr_host_busy.attr,
&dev_attr_cmd_per_lun.attr, &dev_attr_cmd_per_lun.attr,
......
@@ -7,6 +7,7 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
struct request_queue; struct request_queue;
@@ -510,6 +511,9 @@ struct scsi_host_template {
*/ */
unsigned int cmd_size; unsigned int cmd_size;
struct scsi_host_cmd_pool *cmd_pool; struct scsi_host_cmd_pool *cmd_pool;
/* temporary flag to disable blk-mq I/O path */
bool disable_blk_mq;
}; };
/* /*
@@ -580,7 +584,10 @@ struct Scsi_Host {
* Area to keep a shared tag map (if needed, will be * Area to keep a shared tag map (if needed, will be
* NULL if not). * NULL if not).
*/ */
struct blk_queue_tag *bqt; union {
struct blk_queue_tag *bqt;
struct blk_mq_tag_set tag_set;
};
atomic_t host_busy; /* commands actually active on low-level */ atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked; atomic_t host_blocked;
@@ -672,6 +679,8 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */ /* The controller does not support WRITE SAME */
unsigned no_write_same:1; unsigned no_write_same:1;
unsigned use_blk_mq:1;
/* /*
* Optional work queue to be utilized by the transport * Optional work queue to be utilized by the transport
*/ */
@@ -772,6 +781,13 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
shost->tmf_in_progress; shost->tmf_in_progress;
} }
extern bool scsi_use_blk_mq;
static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
{
return shost->use_blk_mq;
}
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *); extern void scsi_flush_work(struct Scsi_Host *);
......
@@ -67,7 +67,8 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
if (!sdev->tagged_supported) if (!sdev->tagged_supported)
return; return;
if (!blk_queue_tagged(sdev->request_queue)) if (!shost_use_blk_mq(sdev->host) &&
blk_queue_tagged(sdev->request_queue))
blk_queue_init_tags(sdev->request_queue, depth, blk_queue_init_tags(sdev->request_queue, depth,
sdev->host->bqt); sdev->host->bqt);
@@ -80,7 +81,8 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
**/ **/
static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
{ {
if (blk_queue_tagged(sdev->request_queue)) if (!shost_use_blk_mq(sdev->host) &&
blk_queue_tagged(sdev->request_queue))
blk_queue_free_tags(sdev->request_queue); blk_queue_free_tags(sdev->request_queue);
scsi_adjust_queue_depth(sdev, 0, depth); scsi_adjust_queue_depth(sdev, 0, depth);
} }
@@ -108,6 +110,15 @@ static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
return 0; return 0;
} }
static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost,
unsigned int hw_ctx, int tag)
{
struct request *req;
req = blk_mq_tag_to_rq(shost->tag_set.tags[hw_ctx], tag);
return req ? (struct scsi_cmnd *)req->special : NULL;
}
/** /**
* scsi_find_tag - find a tagged command by device * scsi_find_tag - find a tagged command by device
* @SDpnt: pointer to the ScSI device * @SDpnt: pointer to the ScSI device
@@ -118,10 +129,12 @@ static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
**/ **/
static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag) static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag)
{ {
struct request *req; struct request *req;
if (tag != SCSI_NO_TAG) { if (tag != SCSI_NO_TAG) {
if (shost_use_blk_mq(sdev->host))
return scsi_mq_find_tag(sdev->host, 0, tag);
req = blk_queue_find_tag(sdev->request_queue, tag); req = blk_queue_find_tag(sdev->request_queue, tag);
return req ? (struct scsi_cmnd *)req->special : NULL; return req ? (struct scsi_cmnd *)req->special : NULL;
} }
@@ -130,6 +143,7 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag)
return sdev->current_cmnd; return sdev->current_cmnd;
} }
/** /**
* scsi_init_shared_tag_map - create a shared tag map * scsi_init_shared_tag_map - create a shared tag map
* @shost: the host to share the tag map among all devices * @shost: the host to share the tag map among all devices
@@ -137,6 +151,12 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag)
*/ */
static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth) static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
{ {
/*
* We always have a shared tag map around when using blk-mq.
*/
if (shost_use_blk_mq(shost))
return 0;
/* /*
* If the shared tag map isn't already initialized, do it now. * If the shared tag map isn't already initialized, do it now.
* This saves callers from having to check ->bqt when setting up * This saves callers from having to check ->bqt when setting up
@@ -165,6 +185,8 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
struct request *req; struct request *req;
if (tag != SCSI_NO_TAG) { if (tag != SCSI_NO_TAG) {
if (shost_use_blk_mq(shost))
return scsi_mq_find_tag(shost, 0, tag);
req = blk_map_queue_find_tag(shost->bqt, tag); req = blk_map_queue_find_tag(shost->bqt, tag);
return req ? (struct scsi_cmnd *)req->special : NULL; return req ? (struct scsi_cmnd *)req->special : NULL;
} }
......