commit f7a014af authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6: (25 commits)
  [SCSI] mptfc: race between mptfc_register_dev and mptfc_target_alloc
  [SCSI] lpfc 8.1.6 : Fix Data Corruption in Bus Reset Path
  [SCSI] mptspi: revalidate negotiation parameters after host reset and resume
  [SCSI] srp.h: avoid padding of structs
  [SCSI] ibmvscsi: fix leak when failing to send srp event
  [SCSI] qla2xxx: Correct eh_abort recovery logic.
  [SCSI] megaraid_{mm,mbox}: fix a bug in reset handler
  [SCSI] fusion - bug fix stack overflow in mptbase
  [SCSI] scsi: Add IBM 2104-DU3 to blist
  [SCSI] Fix DVD burning issues.
  [SCSI] SCSI: aic7xxx_osm_pci resource leak fix.
  [SCSI] - fusion - mptfc bug fix's to prevent deadlock situations
  [SCSI] mptfusion: bug fix's for raid components adding/deleting
  [SCSI] aic7xxx: ahc_pci_write_config() fix
  [SCSI] megaraid: unused variable
  [SCSI] qla2xxx: only free_irq() after request_irq() succeeds
  [SCSI] Overrun in drivers/scsi/sim710.c
  [SCSI] lpfc 8.1.5 : Change version number to 8.1.5
  [SCSI] lpfc 8.1.5 : Misc small fixes
  [SCSI] lpfc 8.1.5 : Additional fixes to LOGO, PLOGI, and RSCN processing
  ...
Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)
1. Fixed a bug in megaraid_reset_handler().
A customer reported "Unable to handle kernel NULL pointer dereference
at virtual address 00000000" when the system entered a reset condition
for some reason; the failure occurred randomly.
Root Cause: megaraid_reset_handler() may fail to return pending packets
from the pend_list when more than one packet is pending.
Fix: Changed the driver so that it returns all packets in the
pend_list (a minimal sketch of the intended behaviour follows this
changelog entry).
2. Added change request.
As described at the following URL, rmb() alone did not fix the
problem; I had to increase the loop counter to 0xFFFFFF (six F's).
(The corresponding change is in the mbox_post_sync_cmd_fast() hunk at
the end of this patch; a short sketch follows it.)
http://marc.theaimsgroup.com/?l=linux-scsi&m=110971060502497&w=2
I attached a patch for your reference, too.
Could you check and get this fix in your driver?
Best Regards,
Jun'ichi Nomura
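
For illustration of item 1 above, here is a minimal, self-contained C sketch of
the intended reset behaviour: the whole pending list is detached and walked with
a "safe" iterator so every pending packet is completed, not just the first one
found. The types and helpers below (struct pkt, struct adapter,
complete_with_reset) are invented for this sketch and are not the megaraid
driver's own structures; the real fix operates on adapter->pend_list with the
kernel list API, as shown in the megaraid_reset_handler() hunk later in this
patch.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the driver's scb / pend_list structures. */
struct pkt {
	int sno;                /* command serial number */
	struct pkt *next;
};

struct adapter {
	struct pkt *pend_list;  /* pending packets, singly linked for the sketch */
};

/* Stub for completing one packet back to the midlayer with DID_RESET. */
static void complete_with_reset(struct pkt *p)
{
	printf("returning packet %d with DID_RESET\n", p->sno);
	free(p);
}

/*
 * Reset-handler sketch: detach the whole pend_list, then walk it with the
 * next pointer saved before each node is freed, so that every pending
 * packet is returned rather than only one.
 */
static void reset_handler(struct adapter *a)
{
	struct pkt *p = a->pend_list, *next;

	a->pend_list = NULL;
	for (; p; p = next) {
		next = p->next;
		complete_with_reset(p);
	}
}

int main(void)
{
	struct adapter a = { .pend_list = NULL };
	int i;

	for (i = 0; i < 3; i++) {       /* queue three fake packets */
		struct pkt *p = malloc(sizeof(*p));

		if (!p)
			return 1;
		p->sno = i;
		p->next = a.pend_list;
		a.pend_list = p;
	}
	reset_handler(&a);              /* all three packets are returned */
	return 0;
}
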
Release Date : Fri Nov 11 12:27:22 EST 2005 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.6 (scsi module), 2.20.2.6 (cmm module)
......
......@@ -1189,7 +1189,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->diagPending = 0;
spin_lock_init(&ioc->diagLock);
spin_lock_init(&ioc->fc_rescan_work_lock);
spin_lock_init(&ioc->fc_rport_lock);
spin_lock_init(&ioc->initializing_hba_lock);
/* Initialize the event logging.
......@@ -5736,11 +5735,13 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
return rc;
}
# define EVENT_DESCR_STR_SZ 100
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
EventDescriptionStr(u8 event, u32 evData0, char *evStr)
{
char *ds;
char *ds = NULL;
switch(event) {
case MPI_EVENT_NONE:
......@@ -5777,9 +5778,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
ds = "Loop State(LIP) Change";
else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
ds = "Loop State(LPE) Change"; /* ??? */
ds = "Loop State(LPE) Change"; /* ??? */
else
ds = "Loop State(LPB) Change"; /* ??? */
ds = "Loop State(LPB) Change"; /* ??? */
break;
case MPI_EVENT_LOGOUT:
ds = "Logout";
......@@ -5841,27 +5842,32 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
break;
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
{
char buf[50];
u8 id = (u8)(evData0);
u8 ReasonCode = (u8)(evData0 >> 16);
switch (ReasonCode) {
case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
sprintf(buf,"SAS Device Status Change: Added: id=%d", id);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Added: id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
sprintf(buf,"SAS Device Status Change: Deleted: id=%d", id);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Deleted: id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
sprintf(buf,"SAS Device Status Change: SMART Data: id=%d", id);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: SMART Data: id=%d",
id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
sprintf(buf,"SAS Device Status Change: No Persistancy Added: id=%d", id);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: No Persistancy "
"Added: id=%d", id);
break;
default:
sprintf(buf,"SAS Device Status Change: Unknown: id=%d", id);
break;
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Unknown: id=%d", id);
break;
}
ds = buf;
break;
}
case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
......@@ -5878,41 +5884,46 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
break;
case MPI_EVENT_SAS_PHY_LINK_STATUS:
{
char buf[50];
u8 LinkRates = (u8)(evData0 >> 8);
u8 PhyNumber = (u8)(evData0);
LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >>
MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT;
switch (LinkRates) {
case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN:
sprintf(buf,"SAS PHY Link Status: Phy=%d:"
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Rate Unknown",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED:
sprintf(buf,"SAS PHY Link Status: Phy=%d:"
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Phy Disabled",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION:
sprintf(buf,"SAS PHY Link Status: Phy=%d:"
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Failed Speed Nego",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE:
sprintf(buf,"SAS PHY Link Status: Phy=%d:"
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Sata OOB Completed",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_1_5:
sprintf(buf,"SAS PHY Link Status: Phy=%d:"
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Rate 1.5 Gbps",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
sprintf(buf,"SAS PHY Link Status: Phy=%d:"
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Rate 3.0 Gpbs",PhyNumber);
break;
default:
sprintf(buf,"SAS PHY Link Status: Phy=%d", PhyNumber);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d", PhyNumber);
break;
}
ds = buf;
break;
}
case MPI_EVENT_SAS_DISCOVERY_ERROR:
......@@ -5921,9 +5932,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
case MPI_EVENT_IR_RESYNC_UPDATE:
{
u8 resync_complete = (u8)(evData0 >> 16);
char buf[40];
sprintf(buf,"IR Resync Update: Complete = %d:",resync_complete);
ds = buf;
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR Resync Update: Complete = %d:",resync_complete);
break;
}
case MPI_EVENT_IR2:
......@@ -5976,7 +5986,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
ds = "Unknown";
break;
}
strcpy(evStr,ds);
if (ds)
strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
......@@ -5998,7 +6009,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
int ii;
int r = 0;
int handlers = 0;
char evStr[100];
char evStr[EVENT_DESCR_STR_SZ];
u8 event;
/*
......
......@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
#endif
#define MPT_LINUX_VERSION_COMMON "3.03.08"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.08"
#define MPT_LINUX_VERSION_COMMON "3.03.09"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.09"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
......@@ -489,7 +489,6 @@ typedef struct _RaidCfgData {
#define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */
#define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */
#define MPT_RPORT_INFO_FLAGS_MAPPED_VDEV 0x04 /* target mapped in vdev */
/*
* data allocated for each fc rport device
......@@ -501,7 +500,6 @@ struct mptfc_rport_info
struct scsi_target *starget;
FCDevicePage0_t pg0;
u8 flags;
u8 remap_needed;
};
/*
......@@ -628,11 +626,11 @@ typedef struct _MPT_ADAPTER
struct work_struct mptscsih_persistTask;
struct list_head fc_rports;
spinlock_t fc_rport_lock; /* list and ri flags */
spinlock_t fc_rescan_work_lock;
int fc_rescan_work_count;
struct work_struct fc_rescan_work;
char fc_rescan_work_q_name[KOBJ_NAME_LEN];
struct workqueue_struct *fc_rescan_work_q;
} MPT_ADAPTER;
/*
......
......@@ -341,9 +341,6 @@ mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low;
rid->port_id = pg0->PortIdentifier;
rid->roles = FC_RPORT_ROLE_UNKNOWN;
rid->roles |= FC_RPORT_ROLE_FCP_TARGET;
if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
rid->roles |= FC_RPORT_ROLE_FCP_INITIATOR;
return 0;
}
......@@ -355,15 +352,18 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
struct fc_rport *rport;
struct mptfc_rport_info *ri;
int new_ri = 1;
u64 pn;
unsigned long flags;
u64 pn, nn;
VirtTarget *vtarget;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
return;
roles |= FC_RPORT_ROLE_FCP_TARGET;
if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
/* scan list looking for a match */
spin_lock_irqsave(&ioc->fc_rport_lock, flags);
list_for_each_entry(ri, &ioc->fc_rports, list) {
pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
if (pn == rport_ids.port_name) { /* match */
......@@ -373,11 +373,9 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
}
}
if (new_ri) { /* allocate one */
spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
if (!ri)
return;
spin_lock_irqsave(&ioc->fc_rport_lock, flags);
list_add_tail(&ri->list, &ioc->fc_rports);
}
......@@ -387,14 +385,11 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
/* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */
if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
rport = fc_remote_port_add(ioc->sh, channel, &rport_ids);
spin_lock_irqsave(&ioc->fc_rport_lock, flags);
if (rport) {
ri->rport = rport;
if (new_ri) /* may have been reset by user */
rport->dev_loss_tmo = mptfc_dev_loss_tmo;
*((struct mptfc_rport_info **)rport->dd_data) = ri;
/*
* if already mapped, remap here. If not mapped,
* target_alloc will allocate vtarget and map,
......@@ -406,16 +401,21 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
vtarget->target_id = pg0->CurrentTargetID;
vtarget->bus_id = pg0->CurrentBus;
}
ri->remap_needed = 0;
}
*((struct mptfc_rport_info **)rport->dd_data) = ri;
/* scan will be scheduled once rport becomes a target */
fc_remote_port_rolechg(rport,roles);
pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
"rport tid %d, tmo %d\n",
ioc->name,
ioc->sh->host_no,
pg0->PortIdentifier,
pg0->WWNN,
pg0->WWPN,
(unsigned long long)nn,
(unsigned long long)pn,
pg0->CurrentTargetID,
ri->rport->scsi_target_id,
ri->rport->dev_loss_tmo));
......@@ -425,8 +425,6 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
ri = NULL;
}
}
spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
}
/*
......@@ -476,7 +474,6 @@ mptfc_target_alloc(struct scsi_target *starget)
vtarget->target_id = ri->pg0.CurrentTargetID;
vtarget->bus_id = ri->pg0.CurrentBus;
ri->starget = starget;
ri->remap_needed = 0;
rc = 0;
}
}
......@@ -502,10 +499,10 @@ mptfc_slave_alloc(struct scsi_device *sdev)
VirtDevice *vdev;
struct scsi_target *starget;
struct fc_rport *rport;
unsigned long flags;
rport = starget_to_rport(scsi_target(sdev));
starget = scsi_target(sdev);
rport = starget_to_rport(starget);
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
......@@ -519,10 +516,8 @@ mptfc_slave_alloc(struct scsi_device *sdev)
return -ENOMEM;
}
spin_lock_irqsave(&hd->ioc->fc_rport_lock,flags);
sdev->hostdata = vdev;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
if (vtarget->num_luns == 0) {
......@@ -535,14 +530,16 @@ mptfc_slave_alloc(struct scsi_device *sdev)
vdev->vtarget = vtarget;
vdev->lun = sdev->lun;
spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags);
vtarget->num_luns++;
#ifdef DMPT_DEBUG_FC
{
{
u64 nn, pn;
struct mptfc_rport_info *ri;
ri = *((struct mptfc_rport_info **)rport->dd_data);
pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
"CurrentTargetID %d, %x %llx %llx\n",
......@@ -550,7 +547,9 @@ mptfc_slave_alloc(struct scsi_device *sdev)
sdev->host->host_no,
vtarget->num_luns,
sdev->id, ri->pg0.CurrentTargetID,
ri->pg0.PortIdentifier, ri->pg0.WWPN, ri->pg0.WWNN));
ri->pg0.PortIdentifier,
(unsigned long long)pn,
(unsigned long long)nn));
}
#endif
......@@ -570,11 +569,31 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
done(SCpnt);
return 0;
}
/* dd_data is null until finished adding target */
ri = *((struct mptfc_rport_info **)rport->dd_data);
if (unlikely(ri->remap_needed))
return SCSI_MLQUEUE_HOST_BUSY;
if (unlikely(!ri)) {
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_qcmd.%d: %d:%d, dd_data is null.\n",
((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name,
((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no,
SCpnt->device->id,SCpnt->device->lun));
SCpnt->result = DID_IMM_RETRY << 16;
done(SCpnt);
return 0;
}
return mptscsih_qcmd(SCpnt,done);
err = mptscsih_qcmd(SCpnt,done);
#ifdef DMPT_DEBUG_FC
if (unlikely(err)) {
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_qcmd.%d: %d:%d, mptscsih_qcmd returns non-zero.\n",
((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name,
((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no,
SCpnt->device->id,SCpnt->device->lun));
}
#endif
return err;
}
static void
......@@ -615,18 +634,17 @@ mptfc_rescan_devices(void *arg)
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
int ii;
int work_to_do;
u64 pn;
unsigned long flags;
struct mptfc_rport_info *ri;
do {
/* start by tagging all ports as missing */
spin_lock_irqsave(&ioc->fc_rport_lock,flags);
list_for_each_entry(ri, &ioc->fc_rports, list) {
if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
}
}
spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
/*
* now rescan devices known to adapter,
......@@ -639,33 +657,24 @@ mptfc_rescan_devices(void *arg)
}
/* delete devices still missing */
spin_lock_irqsave(&ioc->fc_rport_lock, flags);
list_for_each_entry(ri, &ioc->fc_rports, list) {
/* if newly missing, delete it */
if ((ri->flags & (MPT_RPORT_INFO_FLAGS_REGISTERED |
MPT_RPORT_INFO_FLAGS_MISSING))
== (MPT_RPORT_INFO_FLAGS_REGISTERED |
MPT_RPORT_INFO_FLAGS_MISSING)) {
if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
MPT_RPORT_INFO_FLAGS_MISSING);
ri->remap_needed = 1;
fc_remote_port_delete(ri->rport);
/*
* remote port not really deleted 'cause
* binding is by WWPN and driver only
* registers FCP_TARGETs but cannot trust
* data structures.
*/
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_rescan.%d: %llx deleted\n",
ioc->name,
ioc->sh->host_no,
ri->pg0.WWPN));
(unsigned long long)pn));
}
}
spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
/*
* allow multiple passes as target state
......@@ -870,10 +879,23 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_mptfc_probe;
}
for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
mptfc_init_host_attr(ioc,ii);
mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev);
}
/* initialize workqueue */
snprintf(ioc->fc_rescan_work_q_name, KOBJ_NAME_LEN, "mptfc_wq_%d",
sh->host_no);
ioc->fc_rescan_work_q =
create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
if (!ioc->fc_rescan_work_q)
goto out_mptfc_probe;
/*
* scan for rports -
* by doing it via the workqueue, some locking is eliminated
*/
ioc->fc_rescan_work_count = 1;
queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
flush_workqueue(ioc->fc_rescan_work_q);
return 0;
......@@ -949,8 +971,18 @@ mptfc_init(void)
static void __devexit
mptfc_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptfc_rport_info *p, *n;
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptfc_rport_info *p, *n;
struct workqueue_struct *work_q;
unsigned long flags;
/* destroy workqueue */
if ((work_q=ioc->fc_rescan_work_q)) {
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
ioc->fc_rescan_work_q = NULL;
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
destroy_workqueue(work_q);
}
fc_remove_host(ioc->sh);
......
......@@ -91,6 +91,7 @@ enum mptsas_hotplug_action {
MPTSAS_DEL_DEVICE,
MPTSAS_ADD_RAID,
MPTSAS_DEL_RAID,
MPTSAS_IGNORE_EVENT,
};
struct mptsas_hotplug_event {
......@@ -298,6 +299,26 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
return rc;
}
/*
* Returns true if there is a scsi end device
*/
static inline int
mptsas_is_end_device(struct mptsas_devinfo * attached)
{
if ((attached->handle) &&
(attached->device_info &
MPI_SAS_DEVICE_INFO_END_DEVICE) &&
((attached->device_info &
MPI_SAS_DEVICE_INFO_SSP_TARGET) |
(attached->device_info &
MPI_SAS_DEVICE_INFO_STP_TARGET) |
(attached->device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
return 1;
else
return 0;
}
static int
mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
u32 form, u32 form_specific)
......@@ -872,7 +893,11 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
SasDevicePage0_t *buffer;
dma_addr_t dma_handle;
__le64 sas_address;
int error;
int error=0;
if (ioc->sas_discovery_runtime &&
mptsas_is_end_device(device_info))
goto out;
hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
hdr.ExtPageLength = 0;
......@@ -1009,7 +1034,11 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
CONFIGPARMS cfg;
SasExpanderPage1_t *buffer;
dma_addr_t dma_handle;
int error;
int error=0;
if (ioc->sas_discovery_runtime &&
mptsas_is_end_device(&phy_info->attached))
goto out;
hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
hdr.ExtPageLength = 0;
......@@ -1068,26 +1097,6 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
return error;
}
/*
* Returns true if there is a scsi end device
*/
static inline int
mptsas_is_end_device(struct mptsas_devinfo * attached)
{
if ((attached->handle) &&
(attached->device_info &
MPI_SAS_DEVICE_INFO_END_DEVICE) &&
((attached->device_info &
MPI_SAS_DEVICE_INFO_SSP_TARGET) |
(attached->device_info &
MPI_SAS_DEVICE_INFO_STP_TARGET) |
(attached->device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
return 1;
else
return 0;
}
static void
mptsas_parse_device_info(struct sas_identify *identify,
struct mptsas_devinfo *device_info)
......@@ -1737,6 +1746,9 @@ mptsas_hotplug_work(void *arg)
break;
case MPTSAS_ADD_DEVICE:
if (ev->phys_disk_num_valid)
mpt_findImVolumes(ioc);
/*
* Refresh sas device pg0 data
*/
......@@ -1868,6 +1880,9 @@ mptsas_hotplug_work(void *arg)
scsi_device_put(sdev);
mpt_findImVolumes(ioc);
break;
case MPTSAS_IGNORE_EVENT:
default:
break;
}
kfree(ev);
......@@ -1940,7 +1955,8 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
EVENT_DATA_RAID *raid_event_data)
{
struct mptsas_hotplug_event *ev;
RAID_VOL0_STATUS * volumeStatus;
int status = le32_to_cpu(raid_event_data->SettingsStatus);
int state = (status >> 8) & 0xff;
if (ioc->bus_type != SAS)
return;
......@@ -1955,6 +1971,7 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
ev->ioc = ioc;
ev->id = raid_event_data->VolumeID;
ev->event_type = MPTSAS_IGNORE_EVENT;
switch (raid_event_data->ReasonCode) {
case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
......@@ -1966,6 +1983,25 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
ev->phys_disk_num = raid_event_data->PhysDiskNum;
ev->event_type = MPTSAS_DEL_DEVICE;
break;
case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
switch (state) {
case MPI_PD_STATE_ONLINE:
ioc->raid_data.isRaid = 1;
ev->phys_disk_num_valid = 1;
ev->phys_disk_num = raid_event_data->PhysDiskNum;
ev->event_type = MPTSAS_ADD_DEVICE;
break;
case MPI_PD_STATE_MISSING:
case MPI_PD_STATE_NOT_COMPATIBLE:
case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
ev->event_type = MPTSAS_DEL_DEVICE;
break;
default:
break;
}
break;
case MPI_EVENT_RAID_RC_VOLUME_DELETED:
ev->event_type = MPTSAS_DEL_RAID;
break;
......@@ -1973,11 +2009,18 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
ev->event_type = MPTSAS_ADD_RAID;
break;
case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
volumeStatus = (RAID_VOL0_STATUS *) &
raid_event_data->SettingsStatus;
ev->event_type = (volumeStatus->State ==
MPI_RAIDVOL0_STATUS_STATE_FAILED) ?
MPTSAS_DEL_RAID : MPTSAS_ADD_RAID;
switch (state) {
case MPI_RAIDVOL0_STATUS_STATE_FAILED:
case MPI_RAIDVOL0_STATUS_STATE_MISSING:
ev->event_type = MPTSAS_DEL_RAID;
break;
case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
ev->event_type = MPTSAS_ADD_RAID;
break;
default:
break;
}
break;
default:
break;
......
......@@ -632,7 +632,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
/* Spoof to SCSI Selection Timeout! */
sc->result = DID_NO_CONNECT << 16;
if (ioc->bus_type != FC)
sc->result = DID_NO_CONNECT << 16;
/* else fibre, just stall until rescan event */
else
sc->result = DID_REQUEUE << 16;
if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
hd->sel_timeout[pScsiReq->TargetID]++;
......@@ -877,7 +881,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
struct scsi_cmnd *sc;
dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
vdevice->target_id, vdevice->lun, max));
vdevice->vtarget->target_id, vdevice->lun, max));
for (ii=0; ii < max; ii++) {
if ((sc = hd->ScsiLookup[ii]) != NULL) {
......@@ -1645,7 +1649,6 @@ int
mptscsih_abort(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
MPT_FRAME_HDR *mf;
u32 ctx2abort;
int scpnt_idx;
......@@ -1663,14 +1666,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
return FAILED;
}
ioc = hd->ioc;
if (hd->resetPending) {
return FAILED;
}
if (hd->timeouts < -1)
hd->timeouts++;
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
......@@ -1684,6 +1679,13 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
return SUCCESS;
}
if (hd->resetPending) {
return FAILED;
}
if (hd->timeouts < -1)
hd->timeouts++;
printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
hd->ioc->name, SCpnt);
scsi_print_command(SCpnt);
......@@ -1703,7 +1705,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun,
ctx2abort, mptscsih_get_tm_timeout(ioc));
ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
hd->ioc->name,
......@@ -2521,15 +2523,15 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/* 7. FC: Rescan for blocked rports which might have returned.
*/
else if (ioc->bus_type == FC) {
int work_count;
unsigned long flags;
if (ioc->bus_type == FC) {
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
work_count = ++ioc->fc_rescan_work_count;
if (ioc->fc_rescan_work_q) {
if (ioc->fc_rescan_work_count++ == 0) {
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_rescan_work);
}
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
if (work_count == 1)
schedule_work(&ioc->fc_rescan_work);
}
dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name));
......@@ -2544,7 +2546,6 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
MPT_SCSI_HOST *hd;
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
int work_count;
unsigned long flags;
devtverboseprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
......@@ -2569,10 +2570,13 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
case MPI_EVENT_RESCAN: /* 06 */
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
work_count = ++ioc->fc_rescan_work_count;
if (ioc->fc_rescan_work_q) {
if (ioc->fc_rescan_work_count++ == 0) {
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_rescan_work);
}
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
if (work_count == 1)
schedule_work(&ioc->fc_rescan_work);
break;
/*
......
......@@ -783,6 +783,70 @@ static struct pci_device_id mptspi_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
/*
* renegotiate for a given target
*/
static void
mptspi_dv_renegotiate_work(void *data)
{
struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
struct _MPT_SCSI_HOST *hd = wqw->hd;
struct scsi_device *sdev;
kfree(wqw);
shost_for_each_device(sdev, hd->ioc->sh)
mptspi_dv_device(hd, sdev);
}
static void
mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
{
struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
if (!wqw)
return;
INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
wqw->hd = hd;
schedule_work(&wqw->work);
}
/*
* spi module reset handler
*/
static int
mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
if (reset_phase == MPT_IOC_POST_RESET)
mptspi_dv_renegotiate(hd);
return rc;
}
/*
* spi module resume handler
*/
static int
mptspi_resume(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
int rc;
rc = mptscsih_resume(pdev);
mptspi_dv_renegotiate(hd);
return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
......@@ -1032,7 +1096,7 @@ static struct pci_driver mptspi_driver = {
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
.resume = mptscsih_resume,
.resume = mptspi_resume,
#endif
};
......@@ -1061,7 +1125,7 @@ mptspi_init(void)
": Registered for IOC event notifications\n"));
}
if (mpt_reset_register(mptspiDoneCtx, mptscsih_ioc_reset) == 0) {
if (mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset) == 0) {
dprintk((KERN_INFO MYNAM
": Registered for IOC reset notifications\n"));
}
......
......@@ -219,6 +219,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahc->flags |= AHC_39BIT_ADDRESSING;
} else {
if (dma_set_mask(dev, DMA_32BIT_MASK)) {
ahc_free(ahc);
printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
return (-ENODEV);
}
......
......@@ -2036,12 +2036,12 @@ ahc_pci_resume(struct ahc_softc *ahc)
* that the OS doesn't know about and rely on our chip
* reset handler to handle the rest.
*/
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4,
ahc->bus_softc.pci_softc.devconfig);
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1,
ahc->bus_softc.pci_softc.command);
ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1,
ahc->bus_softc.pci_softc.csize_lattime);
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
ahc->bus_softc.pci_softc.command, /*bytes*/1);
ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
struct seeprom_descriptor sd;
u_int sxfrctl1;
......
......@@ -739,7 +739,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
struct viosrp_adapter_info *req;
struct srp_event_struct *evt_struct;
dma_addr_t addr;
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
......@@ -757,10 +758,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
req->common.length = sizeof(hostdata->madapter_info);
req->buffer = dma_map_single(hostdata->dev,
&hostdata->madapter_info,
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
req->buffer = addr = dma_map_single(hostdata->dev,
&hostdata->madapter_info,
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(req->buffer)) {
printk(KERN_ERR
......@@ -770,8 +771,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
return;
}
if (ibmvscsi_send_srp_event(evt_struct, hostdata))
if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
dma_unmap_single(hostdata->dev,
addr,
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
}
};
/**
......@@ -1259,6 +1265,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
{
struct viosrp_host_config *host_config;
struct srp_event_struct *evt_struct;
dma_addr_t addr;
int rc;
evt_struct = get_event_struct(&hostdata->pool);
......@@ -1279,8 +1286,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
memset(host_config, 0x00, sizeof(*host_config));
host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
host_config->common.length = length;
host_config->buffer = dma_map_single(hostdata->dev, buffer, length,
DMA_BIDIRECTIONAL);
host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
length,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(host_config->buffer)) {
printk(KERN_ERR
......@@ -1291,11 +1299,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
init_completion(&evt_struct->comp);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
if (rc == 0) {
if (rc == 0)
wait_for_completion(&evt_struct->comp);
dma_unmap_single(hostdata->dev, host_config->buffer,
length, DMA_BIDIRECTIONAL);
}
dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
return rc;
}
......
......@@ -27,7 +27,6 @@ void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
......
......@@ -113,6 +113,7 @@ struct lpfc_nodelist {
#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
NPR list */
#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
/* Defines for list searchs */
#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
......
......@@ -302,10 +302,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
goto fail_free_mbox;
/*
* set_slim mailbox command needs to execute first,
* queue this command to be processed later.
*/
mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
mbox->context2 = ndlp;
......@@ -781,25 +777,26 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (disc && phba->num_disc_nodes) {
/* Check to see if there are more PLOGIs to be sent */
lpfc_more_plogi(phba);
}
if (phba->num_disc_nodes == 0) {
spin_lock_irq(phba->host->host_lock);
phba->fc_flag &= ~FC_NDISC_ACTIVE;
spin_unlock_irq(phba->host->host_lock);
if (phba->num_disc_nodes == 0) {
spin_lock_irq(phba->host->host_lock);
phba->fc_flag &= ~FC_NDISC_ACTIVE;
spin_unlock_irq(phba->host->host_lock);
lpfc_can_disctmo(phba);
if (phba->fc_flag & FC_RSCN_MODE) {
/* Check to see if more RSCNs came in while we were
* processing this one.
*/
if ((phba->fc_rscn_id_cnt == 0) &&
(!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
spin_lock_irq(phba->host->host_lock);
phba->fc_flag &= ~FC_RSCN_MODE;
spin_unlock_irq(phba->host->host_lock);
} else {
lpfc_els_handle_rscn(phba);
lpfc_can_disctmo(phba);
if (phba->fc_flag & FC_RSCN_MODE) {
/*
* Check to see if more RSCNs came in while
* we were processing this one.
*/
if ((phba->fc_rscn_id_cnt == 0) &&
(!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
spin_lock_irq(phba->host->host_lock);
phba->fc_flag &= ~FC_RSCN_MODE;
spin_unlock_irq(phba->host->host_lock);
} else {
lpfc_els_handle_rscn(phba);
}
}
}
}
......@@ -1263,7 +1260,7 @@ lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_LOGO);
if (!elsiocb)
......@@ -1451,22 +1448,23 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
* PLOGIs to be sent
*/
lpfc_more_plogi(phba);
}
if (phba->num_disc_nodes == 0) {
phba->fc_flag &= ~FC_NDISC_ACTIVE;
lpfc_can_disctmo(phba);
if (phba->fc_flag & FC_RSCN_MODE) {
/* Check to see if more RSCNs
* came in while we were
* processing this one.
*/
if((phba->fc_rscn_id_cnt==0) &&
(!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
phba->fc_flag &= ~FC_RSCN_MODE;
}
else {
lpfc_els_handle_rscn(phba);
if (phba->num_disc_nodes == 0) {
phba->fc_flag &= ~FC_NDISC_ACTIVE;
lpfc_can_disctmo(phba);
if (phba->fc_flag & FC_RSCN_MODE) {
/*
* Check to see if more RSCNs
* came in while we were
* processing this one.
*/
if((phba->fc_rscn_id_cnt==0) &&
!(phba->fc_flag & FC_RSCN_DISCOVERY)) {
phba->fc_flag &= ~FC_RSCN_MODE;
}
else {
lpfc_els_handle_rscn(phba);
}
}
}
}
......@@ -1872,9 +1870,6 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (mbox) {
if ((rspiocb->iocb.ulpStatus == 0)
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
/* set_slim mailbox command needs to execute first,
* queue this command to be processed later.
*/
lpfc_unreg_rpi(phba, ndlp);
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
mbox->context2 = ndlp;
......@@ -1920,6 +1915,7 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
ELS_PKT *els_pkt_ptr;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
......@@ -1958,6 +1954,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
pcmd += sizeof (uint32_t);
memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
break;
case ELS_CMD_PRLO:
cmdsize = sizeof (uint32_t) + sizeof (PRLO);
elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
if (!elsiocb)
return 1;
icmd = &elsiocb->iocb;
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
sizeof (uint32_t) + sizeof (PRLO));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
els_pkt_ptr = (ELS_PKT *) pcmd;
els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
break;
default:
return 1;
}
......@@ -2498,7 +2511,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
/* If we are about to begin discovery, just ACC the RSCN.
* Discovery processing will satisfy it.
*/
if (phba->hba_state < LPFC_NS_QRY) {
if (phba->hba_state <= LPFC_NS_QRY) {
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
newnode);
return 0;
......
......@@ -311,8 +311,8 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
evtp->evt_arg2 = arg2;
evtp->evt = evt;
list_add_tail(&evtp->evt_listp, &phba->work_list);
spin_lock_irq(phba->host->host_lock);
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
wake_up(phba->work_wait);
spin_unlock_irq(phba->host->host_lock);
......@@ -1071,10 +1071,6 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
/* initialize static port data */
rport->maxframe_size = ndlp->nlp_maxframe;
rport->supported_classes = ndlp->nlp_class_sup;
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < MAX_FCP_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
rdata = rport->dd_data;
rdata->pnode = ndlp;
......@@ -1087,6 +1083,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < MAX_FCP_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
return;
}
......@@ -1238,6 +1238,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
evt_listp);
}
nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
nlp->nlp_type |= NLP_FC_NODE;
break;
case NLP_MAPPED_LIST:
......@@ -1258,6 +1259,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
evt_listp);
}
nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
break;
case NLP_NPR_LIST:
nlp->nlp_flag |= list;
......@@ -1402,6 +1404,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
return 1;
case CMD_ELS_REQUEST64_CR:
if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
return 1;
case CMD_XMIT_ELS_RSP64_CX:
if (iocb->context1 == (uint8_t *) ndlp)
return 1;
......@@ -1901,10 +1905,8 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
*/
if (ndlp->nlp_flag & NLP_DELAY_TMO)
lpfc_cancel_retry_delay_tmo(phba, ndlp);
} else {
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
} else
ndlp = NULL;
}
} else {
flg = ndlp->nlp_flag & NLP_LIST_MASK;
if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
......
......@@ -449,6 +449,7 @@ struct serv_parm { /* Structure is in Big Endian format */
#define ELS_CMD_RRQ 0x12000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_PRLO 0x21100014
#define ELS_CMD_PRLO_ACC 0x02100014
#define ELS_CMD_PDISC 0x50000000
#define ELS_CMD_FDISC 0x51000000
#define ELS_CMD_ADISC 0x52000000
......@@ -484,6 +485,7 @@ struct serv_parm { /* Structure is in Big Endian format */
#define ELS_CMD_RRQ 0x12
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_PRLO 0x14001021
#define ELS_CMD_PRLO_ACC 0x14001002
#define ELS_CMD_PDISC 0x50
#define ELS_CMD_FDISC 0x51
#define ELS_CMD_ADISC 0x52
......@@ -1539,6 +1541,7 @@ typedef struct {
#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */
#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */
uint32_t link_speed;
#define LINK_SPEED_AUTO 0 /* Auto selection */
......
......@@ -294,15 +294,6 @@ lpfc_config_port_post(struct lpfc_hba * phba)
}
}
/* This should turn on DELAYED ABTS for ELS timeouts */
lpfc_set_slim(phba, pmb, 0x052198, 0x1);
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
phba->hba_state = LPFC_HBA_ERROR;
mempool_free( pmb, phba->mbox_mem_pool);
return -EIO;
}
lpfc_read_config(phba, pmb);
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
lpfc_printf_log(phba,
......@@ -804,7 +795,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
int max_speed;
char * ports;
char * bus;
} m;
} m = {"<Unknown>", 0, "", ""};
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
ports = (hdrtype == 0x80) ? "2-port " : "";
......@@ -1627,7 +1618,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
error = lpfc_alloc_sysfs_attr(phba);
if (error)
goto out_kthread_stop;
goto out_remove_host;
error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
LPFC_DRIVER_NAME, phba);
......@@ -1644,8 +1635,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
error = lpfc_sli_hba_setup(phba);
if (error)
if (error) {
error = -ENODEV;
goto out_free_irq;
}
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
spin_lock_irq(phba->host->host_lock);
......@@ -1700,6 +1693,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
free_irq(phba->pcidev->irq, phba);
out_free_sysfs_attr:
lpfc_free_sysfs_attr(phba);
out_remove_host:
fc_remove_host(phba->host);
scsi_remove_host(phba->host);
out_kthread_stop:
kthread_stop(phba->worker_thread);
out_free_iocbq:
......@@ -1721,12 +1717,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
out_idr_remove:
idr_remove(&lpfc_hba_index, phba->brd_no);
out_put_host:
phba->host = NULL;
scsi_host_put(host);
out_release_regions:
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
out:
pci_set_drvdata(pdev, NULL);
return error;
}
......
......@@ -200,6 +200,9 @@ lpfc_init_link(struct lpfc_hba * phba,
break;
}
/* Enable asynchronous ABTS responses from firmware */
mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
/* NEW_FEATURE
* Setting up the link speed
*/
......@@ -292,36 +295,6 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
return;
}
/***********************************************/
/* command to write slim */
/***********************************************/
void
lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
uint32_t value)
{
MAILBOX_t *mb;
mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
/* addr = 0x090597 is AUTO ABTS disable for ELS commands */
/* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
/*
* Always turn on DELAYED ABTS for ELS timeouts
*/
if ((addr == 0x052198) && (value == 0))
value = 1;
mb->un.varWords[0] = addr;
mb->un.varWords[1] = value;
mb->mbxCommand = MBX_SET_SLIM;
mb->mbxOwner = OWN_HOST;
return;
}
/**********************************************/
/* lpfc_read_nv Issue a READ CONFIG */
/* mailbox command */
......
......@@ -465,14 +465,18 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
static int
lpfc_rcv_logo(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp,
struct lpfc_iocbq *cmdiocb)
struct lpfc_iocbq *cmdiocb,
uint32_t els_cmd)
{
/* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
* PLOGIs during LOGO storms from a device.
*/
ndlp->nlp_flag |= NLP_LOGO_ACC;
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
if (els_cmd == ELS_CMD_PRLO)
lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
else
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
if (!(ndlp->nlp_type & NLP_FABRIC) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
......@@ -681,7 +685,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
......@@ -788,10 +792,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
if (lpfc_reg_login
(phba, irsp->un.elsreq64.remoteID,
(uint8_t *) sp, mbox, 0) == 0) {
/* set_slim mailbox command needs to
* execute first, queue this command to
* be processed later.
*/
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl =
......@@ -832,11 +832,17 @@ static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
return ndlp->nlp_state;
}
else {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
......@@ -851,7 +857,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
return ndlp->nlp_state;
......@@ -905,7 +911,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp, 0);
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
......@@ -932,7 +938,7 @@ lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
/* Treat like rcv logo */
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
return ndlp->nlp_state;
}
......@@ -987,11 +993,17 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg,
uint32_t evt)
{
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp, 1);
if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
return ndlp->nlp_state;
}
else {
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp, 1);
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
......@@ -1006,7 +1018,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(phba->host->host_lock);
......@@ -1048,7 +1060,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
......@@ -1073,7 +1085,7 @@ lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
......@@ -1133,8 +1145,14 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg,
uint32_t evt)
{
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
return ndlp->nlp_state;
}
else {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
......@@ -1146,7 +1164,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
return ndlp->nlp_state;
}
......@@ -1186,7 +1204,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
/* Software abort outstanding PRLI before sending acc */
lpfc_els_abort(phba, ndlp, 1);
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
......@@ -1214,7 +1232,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
......@@ -1278,11 +1296,17 @@ static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
/* software abort outstanding PRLI */
lpfc_els_abort(phba, ndlp, 1);
if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
return ndlp->nlp_state;
}
else {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
}
......@@ -1313,7 +1337,7 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
return ndlp->nlp_state;
}
......@@ -1351,7 +1375,7 @@ lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
......@@ -1375,7 +1399,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
......@@ -1386,7 +1410,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
lpfc_disc_set_adisc(phba, ndlp);
return ndlp->nlp_state;
......@@ -1424,7 +1448,7 @@ lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
......@@ -1456,7 +1480,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
spin_unlock_irq(phba->host->host_lock);
/* Treat like rcv logo */
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
return ndlp->nlp_state;
}
......@@ -1469,7 +1493,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
lpfc_disc_set_adisc(phba, ndlp);
return ndlp->nlp_state;
......@@ -1551,7 +1575,7 @@ lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_rcv_logo(phba, ndlp, cmdiocb);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
......@@ -1617,9 +1641,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
......@@ -1628,9 +1659,16 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
......@@ -1649,9 +1687,16 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
......@@ -1668,7 +1713,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
if (!mb->mbxStatus)
ndlp->nlp_rpi = mb->un.varWords[0];
else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
}
return ndlp->nlp_state;
}
......@@ -1677,6 +1727,10 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg,
uint32_t evt)
{
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
return ndlp->nlp_state;
}
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
......@@ -1687,7 +1741,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
uint32_t evt)
{
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
if (ndlp->nlp_flag & NLP_DELAY_TMO) {
lpfc_cancel_retry_delay_tmo(phba, ndlp);
......
......@@ -629,8 +629,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq;
IOCB_t *piocb;
struct fcp_cmnd *fcp_cmnd;
struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
struct lpfc_rport_data *rdata = scsi_dev->hostdata;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
......@@ -665,56 +664,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
piocb->ulpTimeout = lpfc_cmd->timeout;
}
lpfc_cmd->rdata = rdata;
switch (task_mgmt_cmd) {
case FCP_LUN_RESET:
/* Issue LUN Reset to TGT <num> LUN <num> */
lpfc_printf_log(phba,
KERN_INFO,
LOG_FCP,
"%d:0703 Issue LUN Reset to TGT %d LUN %d "
"Data: x%x x%x\n",
phba->brd_no,
scsi_dev->id, scsi_dev->lun,
ndlp->nlp_rpi, ndlp->nlp_flag);
break;
case FCP_ABORT_TASK_SET:
/* Issue Abort Task Set to TGT <num> LUN <num> */
lpfc_printf_log(phba,
KERN_INFO,
LOG_FCP,
"%d:0701 Issue Abort Task Set to TGT %d LUN %d "
"Data: x%x x%x\n",
phba->brd_no,
scsi_dev->id, scsi_dev->lun,
ndlp->nlp_rpi, ndlp->nlp_flag);
break;
case FCP_TARGET_RESET:
/* Issue Target Reset to TGT <num> */
lpfc_printf_log(phba,
KERN_INFO,
LOG_FCP,
"%d:0702 Issue Target Reset to TGT %d "
"Data: x%x x%x\n",
phba->brd_no,
scsi_dev->id, ndlp->nlp_rpi,
ndlp->nlp_flag);
break;
}
return (1);
}
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
unsigned tgt_id, struct lpfc_rport_data *rdata)
{
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
int ret;
lpfc_cmd->rdata = rdata;
ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
if (!ret)
return FAILED;
......@@ -726,6 +687,13 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
if (!iocbqrsp)
return FAILED;
/* Issue Target Reset to TGT <num> */
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0702 Issue Target Reset to TGT %d "
"Data: x%x x%x\n",
phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
rdata->pnode->nlp_flag);
ret = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
......@@ -1021,6 +989,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->timeout = 60;
lpfc_cmd->scsi_hba = phba;
lpfc_cmd->rdata = rdata;
ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
if (!ret)
......@@ -1033,6 +1002,11 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
if (iocbqrsp == NULL)
goto out_free_scsi_buf;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0703 Issue LUN Reset to TGT %d LUN %d "
"Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
ret = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
......@@ -1104,7 +1078,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
int match;
int ret = FAILED, i, err_count = 0;
int cnt, loopcnt;
unsigned int midlayer_id = 0;
struct lpfc_scsi_buf * lpfc_cmd;
lpfc_block_requests(phba);
......@@ -1124,7 +1097,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
* targets known to the driver. Should any target reset
* fail, this routine returns failure to the midlayer.
*/
midlayer_id = cmnd->device->id;
for (i = 0; i < MAX_FCP_TARGET; i++) {
/* Search the mapped list for this target ID */
match = 0;
......@@ -1137,9 +1109,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
if (!match)
continue;
lpfc_cmd->pCmd->device->id = i;
lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
i, ndlp->rport->dd_data);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0713 Bus Reset on target %d failed\n",
......@@ -1158,7 +1129,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
* the targets. Unfortunately, some targets do not abide by
* this forcing the driver to double check.
*/
cmnd->device->id = midlayer_id;
cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
if (cnt)
......
......@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.1.4"
#define LPFC_DRIVER_VERSION "8.1.6"
#define LPFC_DRIVER_NAME "lpfc"
......
......@@ -4471,7 +4471,6 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
{
Scsi_Cmnd *scmd;
struct scsi_device *sdev;
unsigned long flags = 0;
scb_t *scb;
int rval;
......
......@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mbox.c
* Version : v2.20.4.7 (Nov 14 2005)
* Version : v2.20.4.8 (Apr 11 2006)
*
* Authors:
* Atul Mukker <Atul.Mukker@lsil.com>
......@@ -2278,6 +2278,7 @@ megaraid_mbox_dpc(unsigned long devp)
unsigned long flags;
uint8_t c;
int status;
uioc_t *kioc;
if (!adapter) return;
......@@ -2320,6 +2321,9 @@ megaraid_mbox_dpc(unsigned long devp)
// remove from local clist
list_del_init(&scb->list);
kioc = (uioc_t *)scb->gp;
kioc->status = 0;
megaraid_mbox_mm_done(adapter, scb);
continue;
......@@ -2636,6 +2640,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
int recovery_window;
int recovering;
int i;
uioc_t *kioc;
adapter = SCP2ADAPTER(scp);
raid_dev = ADAP2RAIDDEV(adapter);
......@@ -2655,32 +2660,51 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
// Also, reset all the commands currently owned by the driver
spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
list_del_init(&scb->list); // from pending list
con_log(CL_ANN, (KERN_WARNING
"megaraid: %ld:%d[%d:%d], reset from pending list\n",
scp->serial_number, scb->sno,
scb->dev_channel, scb->dev_target));
if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: IOCTL packet with %d[%d:%d] being reset\n",
scb->sno, scb->dev_channel, scb->dev_target));
scp->result = (DID_RESET << 16);
scp->scsi_done(scp);
scb->status = -1;
megaraid_dealloc_scb(adapter, scb);
kioc = (uioc_t *)scb->gp;
kioc->status = -EFAULT;
megaraid_mbox_mm_done(adapter, scb);
} else {
if (scb->scp == scp) { // Found command
con_log(CL_ANN, (KERN_WARNING
"megaraid: %ld:%d[%d:%d], reset from pending list\n",
scp->serial_number, scb->sno,
scb->dev_channel, scb->dev_target));
} else {
con_log(CL_ANN, (KERN_WARNING
"megaraid: IO packet with %d[%d:%d] being reset\n",
scb->sno, scb->dev_channel, scb->dev_target));
}
scb->scp->result = (DID_RESET << 16);
scb->scp->scsi_done(scb->scp);
megaraid_dealloc_scb(adapter, scb);
}
}
spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
if (adapter->outstanding_cmds) {
con_log(CL_ANN, (KERN_NOTICE
"megaraid: %d outstanding commands. Max wait %d sec\n",
adapter->outstanding_cmds, MBOX_RESET_WAIT));
adapter->outstanding_cmds,
(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
}
recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
recovering = adapter->outstanding_cmds;
for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) {
for (i = 0; i < recovery_window; i++) {
megaraid_ack_sequence(adapter);
......@@ -2689,12 +2713,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
con_log(CL_ANN, (
"megaraid mbox: Wait for %d commands to complete:%d\n",
adapter->outstanding_cmds,
MBOX_RESET_WAIT - i));
(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
}
// bailout if no recovery happended in reset time
if ((i == MBOX_RESET_WAIT) &&
(recovering == adapter->outstanding_cmds)) {
if (adapter->outstanding_cmds == 0) {
break;
}
......@@ -2918,12 +2941,13 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
wmb();
WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
for (i = 0; i < 0xFFFFF; i++) {
for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
if (mbox->numstatus != 0xFF) break;
rmb();
udelay(MBOX_SYNC_DELAY_200);
}
if (i == 0xFFFFF) {
if (i == MBOX_SYNC_WAIT_CNT) {
// We may need to re-calibrate the counter
con_log(CL_ANN, (KERN_CRIT
"megaraid: fast sync command timed out\n"));
......@@ -3475,7 +3499,7 @@ megaraid_cmm_register(adapter_t *adapter)
adp.drvr_data = (unsigned long)adapter;
adp.pdev = adapter->pdev;
adp.issue_uioc = megaraid_mbox_mm_handler;
adp.timeout = 300;
adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
adp.max_kioc = MBOX_MAX_USER_CMDS;
if ((rval = mraid_mm_register_adp(&adp)) != 0) {
......@@ -3702,7 +3726,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
unsigned long flags;
kioc = (uioc_t *)scb->gp;
kioc->status = 0;
mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
mbox64->mbox32.status = scb->status;
raw_mbox = (uint8_t *)&mbox64->mbox32;
......
......@@ -21,8 +21,8 @@
#include "megaraid_ioctl.h"
#define MEGARAID_VERSION "2.20.4.7"
#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)"
#define MEGARAID_VERSION "2.20.4.8"
#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)"
/*
......@@ -100,6 +100,9 @@
#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox
#define MBOX_RESET_WAIT 180 // wait these many seconds in reset
#define MBOX_RESET_EXT_WAIT 120 // extended wait reset
#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode
#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds
/*
* maximum transfer that can happen through the firmware commands issued
......
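The two MBOX_SYNC_* constants added just above pin down the fast-sync poll rewritten earlier in this commit: at most 0xFFFF passes with a 200 microsecond delay between polls, so the loop gives up after roughly 13.1 seconds. A throwaway user-space check of that arithmetic (just the worked numbers, not driver code):

#include <stdio.h>

#define MBOX_SYNC_WAIT_CNT	0xFFFF	/* wait loop index for synchronous mode */
#define MBOX_SYNC_DELAY_200	200	/* microseconds between polls */

int main(void)
{
	/* upper bound on the fast-sync busy wait, ignoring poll overhead */
	double max_wait_s = (double)MBOX_SYNC_WAIT_CNT * MBOX_SYNC_DELAY_200 / 1e6;

	printf("worst-case fast sync wait: ~%.1f s\n", max_wait_s);	/* ~13.1 s */
	return 0;
}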
......@@ -898,10 +898,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
if (!adapter) {
rval = -ENOMEM;
goto memalloc_error;
}
if (!adapter)
return -ENOMEM;
memset(adapter, 0, sizeof(mraid_mmadp_t));
......
......@@ -599,6 +599,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
* Either SUCCESS or FAILED.
*
* Note:
* Only return FAILED if command not returned by firmware.
**************************************************************************/
int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
......@@ -609,11 +610,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned int id, lun;
unsigned long serial;
unsigned long flags;
int wait = 0;
if (!CMD_SP(cmd))
return FAILED;
return SUCCESS;
ret = FAILED;
ret = SUCCESS;
id = cmd->device->id;
lun = cmd->device->lun;
......@@ -642,7 +644,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
} else {
DEBUG3(printk("%s(%ld): abort_command "
"mbx success.\n", __func__, ha->host_no));
ret = SUCCESS;
wait = 1;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
......@@ -651,17 +653,18 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for the command to be returned. */
if (ret == SUCCESS) {
if (wait) {
if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
"scsi(%ld:%d:%d): Abort handler timed out -- %lx "
"%x.\n", ha->host_no, id, lun, serial, ret);
ret = FAILED;
}
}
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no,
id, lun, serial, ret);
"scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
ha->host_no, id, lun, wait, serial, ret);
return ret;
}
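One reading of the eh_abort hunks above, stated as a sketch rather than as driver source: a command the driver no longer owns is reported as SUCCESS up front, the handler otherwise starts from SUCCESS, arms the wait only when the abort mailbox command is accepted, and downgrades to FAILED solely when the subsequent wait for the firmware to hand the command back times out. A compilable illustration (the SUCCESS/FAILED values are stand-ins for the midlayer's definitions):

#include <stdio.h>

#define SUCCESS 0x2002	/* stand-in values; the driver uses the midlayer's */
#define FAILED  0x2003

/* mbx_ok: abort mailbox command was accepted by the firmware
 * cmd_returned: firmware handed the command back before the wait expired */
static int abort_outcome(int mbx_ok, int cmd_returned)
{
	int ret = SUCCESS;	/* default: do not escalate */
	int wait = mbx_ok;	/* only wait when the abort was accepted */

	if (wait && !cmd_returned)
		ret = FAILED;	/* command never came back from firmware */
	return ret;
}

int main(void)
{
	printf("%x %x %x\n",
	       abort_outcome(0, 0),	/* mbx failed        -> SUCCESS */
	       abort_outcome(1, 1),	/* returned in time  -> SUCCESS */
	       abort_outcome(1, 0));	/* stuck in firmware -> FAILED  */
	return 0;
}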
......@@ -1700,8 +1703,8 @@ qla2x00_free_device(scsi_qla_host_t *ha)
ha->flags.online = 0;
/* Detach interrupts */
if (ha->pdev->irq)
free_irq(ha->pdev->irq, ha);
if (ha->host->irq)
free_irq(ha->host->irq, ha);
/* release io space registers */
if (ha->iobase)
......
......@@ -56,6 +56,8 @@ static struct {
{"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */
{"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */
{"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */
{"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */
{"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */
{"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */
{"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */
{"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */
......
......@@ -1067,16 +1067,29 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
break;
case NOT_READY:
/*
* If the device is in the process of becoming ready,
* retry.
* If the device is in the process of becoming
* ready, or has a temporary blockage, retry.
*/
if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
scsi_requeue_command(q, cmd);
return;
if (sshdr.asc == 0x04) {
switch (sshdr.ascq) {
case 0x01: /* becoming ready */
case 0x04: /* format in progress */
case 0x05: /* rebuild in progress */
case 0x06: /* recalculation in progress */
case 0x07: /* operation in progress */
case 0x08: /* Long write in progress */
case 0x09: /* self test in progress */
scsi_requeue_command(q, cmd);
return;
default:
break;
}
}
if (!(req->flags & REQ_QUIET))
if (!(req->flags & REQ_QUIET)) {
scmd_printk(KERN_INFO, cmd,
"Device not ready.\n");
"Device not ready: ");
scsi_print_sense_hdr("", &sshdr);
}
scsi_end_request(cmd, 0, this_count, 1);
return;
case VOLUME_OVERFLOW:
......
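The NOT_READY hunk above widens the retry test from the single ASC 0x04 / ASCQ 0x01 case to the whole family of "in progress" additional sense qualifiers. The same predicate pulled out into a stand-alone sketch (the helper name is mine, not the midlayer's):

#include <stdbool.h>
#include <stdio.h>

/* True when sense data ASC/ASCQ describes a NOT_READY condition that is
 * expected to clear on its own, so the command should simply be retried. */
static bool not_ready_is_transient(unsigned char asc, unsigned char ascq)
{
	if (asc != 0x04)
		return false;
	switch (ascq) {
	case 0x01:	/* becoming ready */
	case 0x04:	/* format in progress */
	case 0x05:	/* rebuild in progress */
	case 0x06:	/* recalculation in progress */
	case 0x07:	/* operation in progress */
	case 0x08:	/* long write in progress */
	case 0x09:	/* self test in progress */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d %d\n",
	       not_ready_is_transient(0x04, 0x01),	/* 1: retry */
	       not_ready_is_transient(0x04, 0x02));	/* 0: fail  */
	return 0;
}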
......@@ -75,7 +75,7 @@ param_setup(char *str)
else if(!strncmp(pos, "id:", 3)) {
if(slot == -1) {
printk(KERN_WARNING "sim710: Must specify slot for id parameter\n");
} else if(slot > MAX_SLOTS) {
} else if(slot >= MAX_SLOTS) {
printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val);
} else {
id_array[slot] = val;
......
......@@ -95,14 +95,15 @@ struct srp_direct_buf {
/*
* We need the packed attribute because the SRP spec puts the list of
* descriptors at an offset of 20, which is not aligned to the size
* of struct srp_direct_buf.
* descriptors at an offset of 20, which is not aligned to the size of
* struct srp_direct_buf. The whole structure must be packed to avoid
* having the 20-byte structure padded to 24 bytes on 64-bit architectures.
*/
struct srp_indirect_buf {
struct srp_direct_buf table_desc;
__be32 len;
struct srp_direct_buf desc_list[0] __attribute__((packed));
};
struct srp_direct_buf desc_list[0];
} __attribute__((packed));
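The comments in these srp.h hunks are the whole point of the change: struct srp_direct_buf carries a 64-bit field, so without packing the 20-byte srp_indirect_buf header gets rounded up on 64-bit ABIs and the descriptor list no longer starts at byte 20 as the spec requires. A small user-space illustration under those assumptions (plain fixed-width integers stand in for __be64/__be32, and it contrasts no packing at all with packing the whole struct, which is what the fix does):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct direct_buf {			/* mirrors srp_direct_buf: 16 bytes */
	uint64_t va;
	uint32_t key;
	uint32_t len;
};

struct indirect_plain {			/* no packing */
	struct direct_buf table_desc;
	uint32_t len;
	struct direct_buf desc_list[0];	/* GNU zero-length array, as in srp.h */
};

struct indirect_packed {		/* whole struct packed, as in the fix */
	struct direct_buf table_desc;
	uint32_t len;
	struct direct_buf desc_list[0];
} __attribute__((packed));

int main(void)
{
	/* typical 64-bit ABI: 24/24 for the plain layout, 20/20 once packed */
	printf("plain:  size %zu, desc_list at %zu\n",
	       sizeof(struct indirect_plain),
	       offsetof(struct indirect_plain, desc_list));
	printf("packed: size %zu, desc_list at %zu\n",
	       sizeof(struct indirect_packed),
	       offsetof(struct indirect_packed, desc_list));
	return 0;
}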
enum {
SRP_MULTICHAN_SINGLE = 0,
......@@ -122,6 +123,11 @@ struct srp_login_req {
u8 target_port_id[16];
};
/*
* The SRP spec defines the size of the LOGIN_RSP structure to be 52
* bytes, so it needs to be packed to avoid having it padded to 56
* bytes on 64-bit architectures.
*/
struct srp_login_rsp {
u8 opcode;
u8 reserved1[3];
......@@ -132,7 +138,7 @@ struct srp_login_rsp {
__be16 buf_fmt;
u8 rsp_flags;
u8 reserved2[25];
};
} __attribute__((packed));
struct srp_login_rej {
u8 opcode;
......@@ -207,6 +213,11 @@ enum {
SRP_RSP_FLAG_DIUNDER = 1 << 5
};
/*
* The SRP spec defines the size of the RSP structure to be 36 bytes,
* so it needs to be packed to avoid having it padded to 40 bytes on
* 64-bit architectures.
*/
struct srp_rsp {
u8 opcode;
u8 sol_not;
......@@ -221,6 +232,6 @@ struct srp_rsp {
__be32 sense_data_len;
__be32 resp_data_len;
u8 data[0];
};
} __attribute__((packed));
#endif /* SCSI_SRP_H */