Commit 00bfef2c authored by Brian King, committed by James Bottomley

[SCSI] ipr: Reduce queuecommand lock time

Reduce the amount of time the host lock is held in queuecommand
for improved performance.

[jejb: fix up checkpatch noise]
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Parent 3013d918
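The change follows a common lock-splitting pattern: hold the host lock only long enough to check adapter state and pull a command block off the free list, drop it while the comparatively expensive command setup is done, then retake it to re-validate the adapter state and queue the command to hardware. Below is a minimal standalone sketch of that pattern in plain C, with a pthread mutex standing in for the SCSI host lock; the names (get_free_cmd, adapter_alive, queuecommand) are illustrative only and are not symbols from the ipr driver.

#include <pthread.h>
#include <stdio.h>

struct cmd_block {
        struct cmd_block *next;
        int tag;
        char cdb[16];                    /* "command" payload built outside the lock */
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd_block *free_list;      /* analogue of ioa_cfg->free_q */
static struct cmd_block *pending_list;   /* analogue of ioa_cfg->pending_q */
static int adapter_alive = 1;

/* Caller must hold host_lock; only dequeues, no initialization. */
static struct cmd_block *get_free_cmd(void)
{
        struct cmd_block *c = free_list;
        if (c)
                free_list = c->next;
        return c;
}

static int queuecommand(int tag)
{
        struct cmd_block *c;

        pthread_mutex_lock(&host_lock);
        if (!adapter_alive) {            /* cheap state check under the lock */
                pthread_mutex_unlock(&host_lock);
                return -1;
        }
        c = get_free_cmd();
        pthread_mutex_unlock(&host_lock);  /* drop the lock for the slow part */

        if (!c)
                return -1;
        c->tag = tag;
        snprintf(c->cdb, sizeof(c->cdb), "cmd-%d", tag);  /* "build" the command */

        pthread_mutex_lock(&host_lock);
        if (!adapter_alive) {            /* state may have changed while unlocked */
                c->next = free_list;     /* give the block back instead of leaking it */
                free_list = c;
                pthread_mutex_unlock(&host_lock);
                return -1;
        }
        c->next = pending_list;          /* queue for "hardware" and submit */
        pending_list = c;
        pthread_mutex_unlock(&host_lock);
        return 0;
}

int main(void)
{
        static struct cmd_block blocks[4];
        int i;

        for (i = 0; i < 4; i++) {        /* seed the free list */
                blocks[i].next = free_list;
                free_list = &blocks[i];
        }
        for (i = 0; i < 6; i++)
                printf("queue %d -> %s\n", i, queuecommand(i) ? "busy" : "ok");
        return 0;
}

As in the driver change below, the second locked section re-checks the adapter state because it may have changed while the lock was dropped, and on failure the command block acquired earlier is returned to the free list rather than left dangling.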
@@ -620,24 +620,38 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
  * @ioa_cfg:    ioa config struct
  *
  * Return value:
  *      pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
        struct ipr_cmnd *ipr_cmd;
 
        ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
        list_del(&ipr_cmd->queue);
-       ipr_init_ipr_cmnd(ipr_cmd);
 
        return ipr_cmd;
 }
 
+/**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg:    ioa config struct
+ *
+ * Return value:
+ *      pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+       struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+       ipr_init_ipr_cmnd(ipr_cmd);
+       return ipr_cmd;
+}
+
 /**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:    ioa config struct
@@ -5783,8 +5797,8 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 
 /**
  * ipr_queuecommand - Queue a mid-layer request
+ * @shost:      scsi host struct
  * @scsi_cmd:   scsi command struct
- * @done:       done function
  *
  * This function queues a request generated by the mid-layer.
  *
@@ -5793,61 +5807,58 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
  **/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
-                           void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+                           struct scsi_cmnd *scsi_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg;
        struct ipr_resource_entry *res;
        struct ipr_ioarcb *ioarcb;
        struct ipr_cmnd *ipr_cmd;
+       unsigned long lock_flags;
        int rc = 0;
 
-       scsi_cmd->scsi_done = done;
-       ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-       res = scsi_cmd->device->hostdata;
+       ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+       spin_lock_irqsave(shost->host_lock, lock_flags);
        scsi_cmd->result = (DID_OK << 16);
+       res = scsi_cmd->device->hostdata;
 
        /*
         * We are currently blocking all devices due to a host reset
         * We have told the host to stop giving us new requests, but
         * ERP ops don't count. FIXME
         */
-       if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+       if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
                return SCSI_MLQUEUE_HOST_BUSY;
+       }
 
        /*
        * FIXME - Create scsi_set_host_offline interface
        *  and the ioa_is_dead check can be removed
        */
        if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-               memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-               scsi_cmd->result = (DID_NO_CONNECT << 16);
-               scsi_cmd->scsi_done(scsi_cmd);
-               return 0;
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               goto err_nodev;
        }
 
        if (ipr_is_gata(res) && res->sata_port)
                return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
 
-       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
 
+       ipr_init_ipr_cmnd(ipr_cmd);
        ioarcb = &ipr_cmd->ioarcb;
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 
        memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
        ipr_cmd->scsi_cmd = scsi_cmd;
-       ioarcb->res_handle = res->res_handle;
        ipr_cmd->done = ipr_scsi_done;
-       ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 
        if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
                if (scsi_cmd->underflow == 0)
                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
-               if (res->needs_sync_complete) {
-                       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
-                       res->needs_sync_complete = 0;
-               }
-
                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
                if (ipr_is_gscsi(res))
                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5866,16 +5877,41 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
                rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
        }
 
-       if (unlikely(rc != 0)) {
-               list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               if (!rc)
+                       scsi_dma_unmap(scsi_cmd);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
+       if (unlikely(ioa_cfg->ioa_is_dead)) {
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               scsi_dma_unmap(scsi_cmd);
+               goto err_nodev;
+       }
+
+       ioarcb->res_handle = res->res_handle;
+       if (res->needs_sync_complete) {
+               ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+               res->needs_sync_complete = 0;
+       }
+
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+       ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
        ipr_send_command(ipr_cmd);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
        return 0;
-}
 
-static DEF_SCSI_QCMD(ipr_queuecommand)
+err_nodev:
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+       scsi_cmd->result = (DID_NO_CONNECT << 16);
+       scsi_cmd->scsi_done(scsi_cmd);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       return 0;
+}
 
 /**
  * ipr_ioctl - IOCTL handler