Commit dec943f5 authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "This is a set of six fixes and a MAINTAINER update.

  The fixes are two multipath (one in Test Unit Ready handling for the
  path checkers and one in the section of code that sends a start unit
  after failover; both of these were perturbed by the scsi-mq update), a
  CD-ROM door locking fix that was likewise introduced by scsi-mq and
  three driver fixes for a previous code update in cxgb4i, megaraid_sas
  and bnx2fc"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  bnx2fc: fix tgt spinlock locking
  megaraid_sas: fix bug in handling return value of pci_enable_msix_range()
  cxgb4i: send abort_rpl correctly
  cxgbi: add maintainer for cxgb3i/cxgb4i
  scsi: TUR path is down after adapter gets reset with multipath
  scsi: call device handler for failed TUR command
  scsi: only re-lock door after EH on devices that were reset
@@ -2744,6 +2744,13 @@ W: http://www.chelsio.com
S: Supported S: Supported
F: drivers/net/ethernet/chelsio/cxgb3/ F: drivers/net/ethernet/chelsio/cxgb3/
CXGB3 ISCSI DRIVER (CXGB3I)
M: Karen Xie <kxie@chelsio.com>
L: linux-scsi@vger.kernel.org
W: http://www.chelsio.com
S: Supported
F: drivers/scsi/cxgbi/cxgb3i
CXGB3 IWARP RNIC DRIVER (IW_CXGB3) CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
M: Steve Wise <swise@chelsio.com> M: Steve Wise <swise@chelsio.com>
L: linux-rdma@vger.kernel.org L: linux-rdma@vger.kernel.org
@@ -2758,6 +2765,13 @@ W: http://www.chelsio.com
S: Supported S: Supported
F: drivers/net/ethernet/chelsio/cxgb4/ F: drivers/net/ethernet/chelsio/cxgb4/
CXGB4 ISCSI DRIVER (CXGB4I)
M: Karen Xie <kxie@chelsio.com>
L: linux-scsi@vger.kernel.org
W: http://www.chelsio.com
S: Supported
F: drivers/scsi/cxgbi/cxgb4i
CXGB4 IWARP RNIC DRIVER (IW_CXGB4) CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
M: Steve Wise <swise@chelsio.com> M: Steve Wise <swise@chelsio.com>
L: linux-rdma@vger.kernel.org L: linux-rdma@vger.kernel.org
......
@@ -480,9 +480,7 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
bnx2fc_initiate_cleanup(orig_io_req); bnx2fc_initiate_cleanup(orig_io_req);
/* Post a new IO req with the same sc_cmd */ /* Post a new IO req with the same sc_cmd */
BNX2FC_IO_DBG(rec_req, "Post IO request again\n"); BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
spin_unlock_bh(&tgt->tgt_lock);
rc = bnx2fc_post_io_req(tgt, new_io_req); rc = bnx2fc_post_io_req(tgt, new_io_req);
spin_lock_bh(&tgt->tgt_lock);
if (!rc) if (!rc)
goto free_frame; goto free_frame;
BNX2FC_IO_DBG(rec_req, "REC: io post err\n"); BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
......
@@ -1894,18 +1894,24 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
goto exit_qcmd; goto exit_qcmd;
} }
} }
spin_lock_bh(&tgt->tgt_lock);
io_req = bnx2fc_cmd_alloc(tgt); io_req = bnx2fc_cmd_alloc(tgt);
if (!io_req) { if (!io_req) {
rc = SCSI_MLQUEUE_HOST_BUSY; rc = SCSI_MLQUEUE_HOST_BUSY;
goto exit_qcmd; goto exit_qcmd_tgtlock;
} }
io_req->sc_cmd = sc_cmd; io_req->sc_cmd = sc_cmd;
if (bnx2fc_post_io_req(tgt, io_req)) { if (bnx2fc_post_io_req(tgt, io_req)) {
printk(KERN_ERR PFX "Unable to post io_req\n"); printk(KERN_ERR PFX "Unable to post io_req\n");
rc = SCSI_MLQUEUE_HOST_BUSY; rc = SCSI_MLQUEUE_HOST_BUSY;
goto exit_qcmd; goto exit_qcmd_tgtlock;
} }
exit_qcmd_tgtlock:
spin_unlock_bh(&tgt->tgt_lock);
exit_qcmd: exit_qcmd:
return rc; return rc;
} }
@@ -2020,6 +2026,8 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
int task_idx, index; int task_idx, index;
u16 xid; u16 xid;
/* bnx2fc_post_io_req() is called with the tgt_lock held */
/* Initialize rest of io_req fields */ /* Initialize rest of io_req fields */
io_req->cmd_type = BNX2FC_SCSI_CMD; io_req->cmd_type = BNX2FC_SCSI_CMD;
io_req->port = port; io_req->port = port;
@@ -2047,9 +2055,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
/* Build buffer descriptor list for firmware from sg list */ /* Build buffer descriptor list for firmware from sg list */
if (bnx2fc_build_bd_list_from_sg(io_req)) { if (bnx2fc_build_bd_list_from_sg(io_req)) {
printk(KERN_ERR PFX "BD list creation failed\n"); printk(KERN_ERR PFX "BD list creation failed\n");
spin_lock_bh(&tgt->tgt_lock);
kref_put(&io_req->refcount, bnx2fc_cmd_release); kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
return -EAGAIN; return -EAGAIN;
} }
@@ -2061,19 +2067,15 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
task = &(task_page[index]); task = &(task_page[index]);
bnx2fc_init_task(io_req, task); bnx2fc_init_task(io_req, task);
spin_lock_bh(&tgt->tgt_lock);
if (tgt->flush_in_prog) { if (tgt->flush_in_prog) {
printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
kref_put(&io_req->refcount, bnx2fc_cmd_release); kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
return -EAGAIN; return -EAGAIN;
} }
if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
printk(KERN_ERR PFX "Session not ready...post_io\n"); printk(KERN_ERR PFX "Session not ready...post_io\n");
kref_put(&io_req->refcount, bnx2fc_cmd_release); kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
return -EAGAIN; return -EAGAIN;
} }
@@ -2091,6 +2093,5 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
/* Ring doorbell */ /* Ring doorbell */
bnx2fc_ring_doorbell(tgt); bnx2fc_ring_doorbell(tgt);
spin_unlock_bh(&tgt->tgt_lock);
return 0; return 0;
} }
@@ -936,20 +936,23 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
cxgbi_sock_get(csk); cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock); spin_lock_bh(&csk->lock);
if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
cxgbi_sock_set_state(csk, CTP_ABORTING); if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
goto done; send_tx_flowc_wr(csk);
cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
} }
cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
cxgbi_sock_set_state(csk, CTP_ABORTING);
send_abort_rpl(csk, rst_status); send_abort_rpl(csk, rst_status);
if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
csk->err = abort_status_to_errno(csk, req->status, &rst_status); csk->err = abort_status_to_errno(csk, req->status, &rst_status);
cxgbi_sock_closed(csk); cxgbi_sock_closed(csk);
} }
done:
spin_unlock_bh(&csk->lock); spin_unlock_bh(&csk->lock);
cxgbi_sock_put(csk); cxgbi_sock_put(csk);
rel_skb: rel_skb:
......
@@ -905,18 +905,16 @@ void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{ {
cxgbi_sock_get(csk); cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock); spin_lock_bh(&csk->lock);
cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD)) cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
else { pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD); csk, csk->state, csk->flags, csk->tid);
cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); cxgbi_sock_closed(csk);
if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
csk, csk->state, csk->flags, csk->tid);
cxgbi_sock_closed(csk);
}
} }
spin_unlock_bh(&csk->lock); spin_unlock_bh(&csk->lock);
cxgbi_sock_put(csk); cxgbi_sock_put(csk);
} }
......
@@ -474,6 +474,13 @@ static int alua_check_sense(struct scsi_device *sdev,
* LUN Not Ready -- Offline * LUN Not Ready -- Offline
*/ */
return SUCCESS; return SUCCESS;
if (sdev->allow_restart &&
sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x02)
/*
* if the device is not started, we need to wake
* the error handler to start the motor
*/
return FAILED;
break; break;
case UNIT_ATTENTION: case UNIT_ATTENTION:
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
......
@@ -4453,7 +4453,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msixentry[i].entry = i; instance->msixentry[i].entry = i;
i = pci_enable_msix_range(instance->pdev, instance->msixentry, i = pci_enable_msix_range(instance->pdev, instance->msixentry,
1, instance->msix_vectors); 1, instance->msix_vectors);
if (i) if (i > 0)
instance->msix_vectors = i; instance->msix_vectors = i;
else else
instance->msix_vectors = 0; instance->msix_vectors = 0;
......
@@ -459,14 +459,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (! scsi_command_normalize_sense(scmd, &sshdr)) if (! scsi_command_normalize_sense(scmd, &sshdr))
return FAILED; /* no valid sense data */ return FAILED; /* no valid sense data */
if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
/*
* nasty: for mid-layer issued TURs, we need to return the
* actual sense data without any recovery attempt. For eh
* issued ones, we need to try to recover and interpret
*/
return SUCCESS;
scsi_report_sense(sdev, &sshdr); scsi_report_sense(sdev, &sshdr);
if (scsi_sense_is_deferred(&sshdr)) if (scsi_sense_is_deferred(&sshdr))
@@ -482,6 +474,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
/* handler does not care. Drop down to default handling */ /* handler does not care. Drop down to default handling */
} }
if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
/*
* nasty: for mid-layer issued TURs, we need to return the
* actual sense data without any recovery attempt. For eh
* issued ones, we need to try to recover and interpret
*/
return SUCCESS;
/* /*
* Previous logic looked for FILEMARK, EOM or ILI which are * Previous logic looked for FILEMARK, EOM or ILI which are
* mainly associated with tapes and returned SUCCESS. * mainly associated with tapes and returned SUCCESS.
@@ -2001,8 +2001,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* is no point trying to lock the door of an off-line device. * is no point trying to lock the door of an off-line device.
*/ */
shost_for_each_device(sdev, shost) { shost_for_each_device(sdev, shost) {
if (scsi_device_online(sdev) && sdev->locked) if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
scsi_eh_lock_door(sdev); scsi_eh_lock_door(sdev);
sdev->was_reset = 0;
}
} }
/* /*
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册