提交 9a689bc4 编写于 作者: Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
  [SCSI] stex: switch to block timeout
  [SCSI] make scsi_eh_try_stu use block timeout
  [SCSI] megaraid_sas: switch to block timeout
  [SCSI] ibmvscsi: switch to block timeout
  [SCSI] aacraid: switch to block timeout
  [SCSI] zfcp: prevent double decrement on host_busy while being busy
  [SCSI] zfcp: fix deadlock between wq triggered port scan and ERP
  [SCSI] zfcp: eliminate race between validation and locking
  [SCSI] zfcp: verify for correct rport state before scanning for SCSI devs
  [SCSI] zfcp: returning an ERR_PTR where a NULL value is expected
  [SCSI] zfcp: Fix opening of wka ports
  [SCSI] zfcp: fix remote port status check
  [SCSI] fc_transport: fix old bug on bitflag definitions
  [SCSI] Fix hang in starved list processing
...@@ -720,7 +720,6 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act, ...@@ -720,7 +720,6 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
goto failed_openfcp; goto failed_openfcp;
atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status); atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
schedule_work(&act->adapter->scan_work);
return ZFCP_ERP_SUCCEEDED; return ZFCP_ERP_SUCCEEDED;
...@@ -1186,7 +1185,9 @@ static void zfcp_erp_scsi_scan(struct work_struct *work) ...@@ -1186,7 +1185,9 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
container_of(work, struct zfcp_erp_add_work, work); container_of(work, struct zfcp_erp_add_work, work);
struct zfcp_unit *unit = p->unit; struct zfcp_unit *unit = p->unit;
struct fc_rport *rport = unit->port->rport; struct fc_rport *rport = unit->port->rport;
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0); scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
zfcp_unit_put(unit); zfcp_unit_put(unit);
...@@ -1282,6 +1283,8 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) ...@@ -1282,6 +1283,8 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
case ZFCP_ERP_ACTION_REOPEN_ADAPTER: case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (result != ZFCP_ERP_SUCCEEDED) if (result != ZFCP_ERP_SUCCEEDED)
zfcp_erp_rports_del(adapter); zfcp_erp_rports_del(adapter);
else
schedule_work(&adapter->scan_work);
zfcp_adapter_put(adapter); zfcp_adapter_put(adapter);
break; break;
} }
......
...@@ -50,7 +50,8 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) ...@@ -50,7 +50,8 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
if (mutex_lock_interruptible(&wka_port->mutex)) if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS; return -ERESTARTSYS;
if (wka_port->status != ZFCP_WKA_PORT_ONLINE) { if (wka_port->status == ZFCP_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_WKA_PORT_OPENING; wka_port->status = ZFCP_WKA_PORT_OPENING;
if (zfcp_fsf_open_wka_port(wka_port)) if (zfcp_fsf_open_wka_port(wka_port))
wka_port->status = ZFCP_WKA_PORT_OFFLINE; wka_port->status = ZFCP_WKA_PORT_OFFLINE;
...@@ -125,8 +126,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, ...@@ -125,8 +126,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
read_lock_irqsave(&zfcp_data.config_lock, flags); read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
/* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */ if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_PHYS_OPEN))
if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
/* Try to connect to unused ports anyway. */ /* Try to connect to unused ports anyway. */
zfcp_erp_port_reopen(port, zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_STATUS_COMMON_ERP_FAILED,
...@@ -610,7 +610,6 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) ...@@ -610,7 +610,6 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
int ret, i; int ret, i;
struct zfcp_gpn_ft *gpn_ft; struct zfcp_gpn_ft *gpn_ft;
zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT) if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
return 0; return 0;
......
...@@ -930,8 +930,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, ...@@ -930,8 +930,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
goto out; goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
req_flags, adapter->pool.fsf_req_abort); req_flags, adapter->pool.fsf_req_abort);
if (IS_ERR(req)) if (IS_ERR(req)) {
req = NULL;
goto out; goto out;
}
if (unlikely(!(atomic_read(&unit->status) & if (unlikely(!(atomic_read(&unit->status) &
ZFCP_STATUS_COMMON_UNBLOCKED))) ZFCP_STATUS_COMMON_UNBLOCKED)))
...@@ -1584,6 +1586,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) ...@@ -1584,6 +1586,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
wka_port->status = ZFCP_WKA_PORT_OFFLINE; wka_port->status = ZFCP_WKA_PORT_OFFLINE;
break; break;
case FSF_PORT_ALREADY_OPEN: case FSF_PORT_ALREADY_OPEN:
break;
case FSF_GOOD: case FSF_GOOD:
wka_port->handle = header->port_handle; wka_port->handle = header->port_handle;
wka_port->status = ZFCP_WKA_PORT_ONLINE; wka_port->status = ZFCP_WKA_PORT_ONLINE;
...@@ -2113,18 +2116,21 @@ static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req) ...@@ -2113,18 +2116,21 @@ static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
{ {
struct scsi_cmnd *scpnt = req->data; struct scsi_cmnd *scpnt;
struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
&(req->qtcb->bottom.io.fcp_rsp); &(req->qtcb->bottom.io.fcp_rsp);
u32 sns_len; u32 sns_len;
char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
unsigned long flags; unsigned long flags;
if (unlikely(!scpnt))
return;
read_lock_irqsave(&req->adapter->abort_lock, flags); read_lock_irqsave(&req->adapter->abort_lock, flags);
scpnt = req->data;
if (unlikely(!scpnt)) {
read_unlock_irqrestore(&req->adapter->abort_lock, flags);
return;
}
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) { if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
set_host_byte(scpnt, DID_SOFT_ERROR); set_host_byte(scpnt, DID_SOFT_ERROR);
set_driver_byte(scpnt, SUGGEST_RETRY); set_driver_byte(scpnt, SUGGEST_RETRY);
...@@ -2442,8 +2448,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter, ...@@ -2442,8 +2448,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
goto out; goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
adapter->pool.fsf_req_scsi); adapter->pool.fsf_req_scsi);
if (IS_ERR(req)) if (IS_ERR(req)) {
req = NULL;
goto out; goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
req->data = unit; req->data = unit;
......
...@@ -88,7 +88,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, ...@@ -88,7 +88,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0, ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0,
ZFCP_REQ_AUTO_CLEANUP); ZFCP_REQ_AUTO_CLEANUP);
if (unlikely(ret == -EBUSY)) if (unlikely(ret == -EBUSY))
zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); return SCSI_MLQUEUE_DEVICE_BUSY;
else if (unlikely(ret < 0)) else if (unlikely(ret < 0))
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
......
...@@ -427,8 +427,8 @@ static int aac_slave_configure(struct scsi_device *sdev) ...@@ -427,8 +427,8 @@ static int aac_slave_configure(struct scsi_device *sdev)
* Firmware has an individual device recovery time typically * Firmware has an individual device recovery time typically
* of 35 seconds, give us a margin. * of 35 seconds, give us a margin.
*/ */
if (sdev->timeout < (45 * HZ)) if (sdev->request_queue->rq_timeout < (45 * HZ))
sdev->timeout = 45 * HZ; blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
for (cid = 0; cid < aac->maximum_num_containers; ++cid) for (cid = 0; cid < aac->maximum_num_containers; ++cid)
if (aac->fsa_dev[cid].valid) if (aac->fsa_dev[cid].valid)
++num_lsu; ++num_lsu;
......
...@@ -1442,7 +1442,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) ...@@ -1442,7 +1442,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, lock_flags); spin_lock_irqsave(shost->host_lock, lock_flags);
if (sdev->type == TYPE_DISK) { if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1; sdev->allow_restart = 1;
sdev->timeout = 60 * HZ; blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
} }
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
spin_unlock_irqrestore(shost->host_lock, lock_flags); spin_unlock_irqrestore(shost->host_lock, lock_flags);
......
...@@ -1016,7 +1016,8 @@ static int megasas_slave_configure(struct scsi_device *sdev) ...@@ -1016,7 +1016,8 @@ static int megasas_slave_configure(struct scsi_device *sdev)
* The RAID firmware may require extended timeouts. * The RAID firmware may require extended timeouts.
*/ */
if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
sdev->timeout = MEGASAS_DEFAULT_CMD_TIMEOUT * HZ; blk_queue_rq_timeout(sdev->request_queue,
MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
return 0; return 0;
} }
......
...@@ -932,8 +932,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd) ...@@ -932,8 +932,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
int i, rtn = NEEDS_RETRY; int i, rtn = NEEDS_RETRY;
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++) for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
scmd->device->timeout, 0);
if (rtn == SUCCESS) if (rtn == SUCCESS)
return 0; return 0;
......
...@@ -567,15 +567,18 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost) ...@@ -567,15 +567,18 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
*/ */
static void scsi_run_queue(struct request_queue *q) static void scsi_run_queue(struct request_queue *q)
{ {
struct scsi_device *starved_head = NULL, *sdev = q->queuedata; struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host; struct Scsi_Host *shost = sdev->host;
LIST_HEAD(starved_list);
unsigned long flags; unsigned long flags;
if (scsi_target(sdev)->single_lun) if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev); scsi_single_lun_run(sdev);
spin_lock_irqsave(shost->host_lock, flags); spin_lock_irqsave(shost->host_lock, flags);
while (!list_empty(&shost->starved_list) && !scsi_host_is_busy(shost)) { list_splice_init(&shost->starved_list, &starved_list);
while (!list_empty(&starved_list)) {
int flagset; int flagset;
/* /*
...@@ -588,24 +591,18 @@ static void scsi_run_queue(struct request_queue *q) ...@@ -588,24 +591,18 @@ static void scsi_run_queue(struct request_queue *q)
* scsi_request_fn must get the host_lock before checking * scsi_request_fn must get the host_lock before checking
* or modifying starved_list or starved_entry. * or modifying starved_list or starved_entry.
*/ */
sdev = list_entry(shost->starved_list.next, if (scsi_host_is_busy(shost))
struct scsi_device, starved_entry);
/*
* The *queue_ready functions can add a device back onto the
* starved list's tail, so we must check for a infinite loop.
*/
if (sdev == starved_head)
break; break;
if (!starved_head)
starved_head = sdev;
sdev = list_entry(starved_list.next,
struct scsi_device, starved_entry);
list_del_init(&sdev->starved_entry);
if (scsi_target_is_busy(scsi_target(sdev))) { if (scsi_target_is_busy(scsi_target(sdev))) {
list_move_tail(&sdev->starved_entry, list_move_tail(&sdev->starved_entry,
&shost->starved_list); &shost->starved_list);
continue; continue;
} }
list_del_init(&sdev->starved_entry);
spin_unlock(shost->host_lock); spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock); spin_lock(sdev->request_queue->queue_lock);
...@@ -621,6 +618,8 @@ static void scsi_run_queue(struct request_queue *q) ...@@ -621,6 +618,8 @@ static void scsi_run_queue(struct request_queue *q)
spin_lock(shost->host_lock); spin_lock(shost->host_lock);
} }
/* put any unprocessed entries back */
list_splice(&starved_list, &shost->starved_list);
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
blk_run_queue(q); blk_run_queue(q);
......
...@@ -477,7 +477,7 @@ stex_slave_config(struct scsi_device *sdev) ...@@ -477,7 +477,7 @@ stex_slave_config(struct scsi_device *sdev)
{ {
sdev->use_10_for_rw = 1; sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1; sdev->use_10_for_ms = 1;
sdev->timeout = 60 * HZ; blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
sdev->tagged_supported = 1; sdev->tagged_supported = 1;
return 0; return 0;
......
...@@ -357,7 +357,7 @@ struct fc_rport { /* aka fc_starget_attrs */ ...@@ -357,7 +357,7 @@ struct fc_rport { /* aka fc_starget_attrs */
/* bit field values for struct fc_rport "flags" field: */ /* bit field values for struct fc_rport "flags" field: */
#define FC_RPORT_DEVLOSS_PENDING 0x01 #define FC_RPORT_DEVLOSS_PENDING 0x01
#define FC_RPORT_SCAN_PENDING 0x02 #define FC_RPORT_SCAN_PENDING 0x02
#define FC_RPORT_FAST_FAIL_TIMEDOUT 0x03 #define FC_RPORT_FAST_FAIL_TIMEDOUT 0x04
#define dev_to_rport(d) \ #define dev_to_rport(d) \
container_of(d, struct fc_rport, dev) container_of(d, struct fc_rport, dev)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册